For bug #162275, make doebuild validate *DEPEND and bail out if necessary.
[portage.git] / pym / portage.py
1 # portage.py -- core Portage functionality
2 # Copyright 1998-2004 Gentoo Foundation
3 # Distributed under the terms of the GNU General Public License v2
4 # $Id$
5
6
7 VERSION="$Rev$"[6:-2] + "-svn"
8
9 # ===========================================================================
10 # START OF IMPORTS -- START OF IMPORTS -- START OF IMPORTS -- START OF IMPORT
11 # ===========================================================================
12
13 try:
14         import sys
15 except ImportError:
16         print "Failed to import sys! Something is _VERY_ wrong with python."
17         raise
18
19 try:
20         import copy, errno, os, re, shutil, time, types
21         try:
22                 import cPickle
23         except ImportError:
24                 import pickle as cPickle
25
26         import stat
27         import commands
28         from time import sleep
29         from random import shuffle
30         import UserDict
31         if getattr(__builtins__, "set", None) is None:
32                 from sets import Set as set
33         from itertools import chain, izip
34 except ImportError, e:
35         sys.stderr.write("\n\n")
36         sys.stderr.write("!!! Failed to complete python imports. These are internal modules for\n")
37         sys.stderr.write("!!! python and failure here indicates that you have a problem with python\n")
38         sys.stderr.write("!!! itself and thus portage is not able to continue processing.\n\n")
39
40         sys.stderr.write("!!! You might consider starting python with verbose flags to see what has\n")
41         sys.stderr.write("!!! gone wrong. Here is the information we got for this exception:\n")
42         sys.stderr.write("    "+str(e)+"\n\n");
43         raise
44
45 try:
46         # XXX: This should get renamed to bsd_chflags, I think.
47         import chflags
48         bsd_chflags = chflags
49 except ImportError:
50         bsd_chflags = None
51
52 try:
53         from cache.cache_errors import CacheError
54         import cvstree
55         import xpak
56         import getbinpkg
57         import portage_dep
58         from portage_dep import dep_getcpv, dep_getkey, get_operator, \
59                 isjustname, isspecific, isvalidatom, \
60                 match_from_list, match_to_list, best_match_to_list
61
62         # XXX: This needs to get cleaned up.
63         import output
64         from output import bold, colorize, green, red, yellow
65
66         import portage_const
67         from portage_const import VDB_PATH, PRIVATE_PATH, CACHE_PATH, DEPCACHE_PATH, \
68           USER_CONFIG_PATH, MODULES_FILE_PATH, CUSTOM_PROFILE_PATH, PORTAGE_BASE_PATH, \
69           PORTAGE_BIN_PATH, PORTAGE_PYM_PATH, PROFILE_PATH, LOCALE_DATA_PATH, \
70           EBUILD_SH_BINARY, SANDBOX_BINARY, BASH_BINARY, \
71           MOVE_BINARY, PRELINK_BINARY, WORLD_FILE, MAKE_CONF_FILE, MAKE_DEFAULTS_FILE, \
72           DEPRECATED_PROFILE_FILE, USER_VIRTUALS_FILE, EBUILD_SH_ENV_FILE, \
73           INVALID_ENV_FILE, CUSTOM_MIRRORS_FILE, CONFIG_MEMORY_FILE,\
74           INCREMENTALS, EAPI, MISC_SH_BINARY
75
76         from portage_data import ostype, lchown, userland, secpass, uid, wheelgid, \
77                                  portage_uid, portage_gid, userpriv_groups
78         from portage_manifest import Manifest
79
80         import portage_util
81         from portage_util import atomic_ofstream, apply_secpass_permissions, apply_recursive_permissions, \
82                 dump_traceback, getconfig, grabdict, grabdict_package, grabfile, grabfile_package, \
83                 map_dictlist_vals, new_protect_filename, normalize_path, \
84                 pickle_read, pickle_write, stack_dictlist, stack_dicts, stack_lists, \
85                 unique_array, varexpand, writedict, writemsg, writemsg_stdout, write_atomic
86         import portage_exception
87         import portage_gpg
88         import portage_locks
89         import portage_exec
90         from portage_exec import atexit_register, run_exitfuncs
91         from portage_locks import unlockfile,unlockdir,lockfile,lockdir
92         import portage_checksum
93         from portage_checksum import perform_md5,perform_checksum,prelink_capable
94         import eclass_cache
95         from portage_localization import _
96         from portage_update import dep_transform, fixdbentries, grab_updates, \
97                 parse_updates, update_config_files, update_dbentries
98
99         # Need these functions directly in portage namespace to not break every external tool in existence
100         from portage_versions import best, catpkgsplit, catsplit, pkgcmp, \
101                 pkgsplit, vercmp, ververify
102
103         # endversion and endversion_keys are for backward compatibility only.
104         from portage_versions import endversion_keys
105         from portage_versions import suffix_value as endversion
106
107 except ImportError, e:
108         sys.stderr.write("\n\n")
109         sys.stderr.write("!!! Failed to complete portage imports. These are internal modules for\n")
110         sys.stderr.write("!!! portage and failure here indicates that you have a problem with your\n")
111         sys.stderr.write("!!! installation of portage. Please try a rescue portage located in the\n")
112         sys.stderr.write("!!! portage tree under '/usr/portage/sys-apps/portage/files/' (default).\n")
113         sys.stderr.write("!!! There is a README.RESCUE file that details the steps required to perform\n")
114         sys.stderr.write("!!! a recovery of portage.\n")
115         sys.stderr.write("    "+str(e)+"\n\n")
116         raise
117
118
119 try:
120         import portage_selinux as selinux
121 except OSError, e:
122         writemsg("!!! SELinux not loaded: %s\n" % str(e), noiselevel=-1)
123         del e
124 except ImportError:
125         pass
126
127 # ===========================================================================
128 # END OF IMPORTS -- END OF IMPORTS -- END OF IMPORTS -- END OF IMPORTS -- END
129 # ===========================================================================
130
131
132 def load_mod(name):
133         modname = ".".join(name.split(".")[:-1])
134         mod = __import__(modname)
135         components = name.split('.')
136         for comp in components[1:]:
137                 mod = getattr(mod, comp)
138         return mod
139
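def _load_mod_example():
        # Illustrative sketch only (not part of the original module): load_mod()
        # imports the package portion of a dotted name and then walks the
        # remaining attributes, so the default auxdbmodule setting
        # "cache.flat_hash.database" resolves to the backend class.  Assumes the
        # cache package is importable in this environment.
        return load_mod("cache.flat_hash.database")
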
140 def best_from_dict(key, top_dict, key_order, EmptyOnError=1, FullCopy=1, AllowEmpty=1):
141         for x in key_order:
142                 if top_dict.has_key(x) and top_dict[x].has_key(key):
143                         if FullCopy:
144                                 return copy.deepcopy(top_dict[x][key])
145                         else:
146                                 return top_dict[x][key]
147         if EmptyOnError:
148                 return ""
149         else:
150                 raise KeyError, "Key not found in list; '%s'" % key
151
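def _best_from_dict_example():
        # Illustrative sketch only (not part of the original module):
        # best_from_dict() walks key_order and returns the first matching dict's
        # value for the key, so the "env" entry shadows "defaults" here and the
        # call returns "doc".  With no match and EmptyOnError left at 1 it
        # would return "".
        top_dict = {
                "env":      {"USE": "doc"},
                "defaults": {"USE": "x11"},
        }
        return best_from_dict("USE", top_dict, ["env", "defaults"])
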
152 def getcwd():
153         "this fixes situations where the current directory doesn't exist"
154         try:
155                 return os.getcwd()
156         except OSError: #dir doesn't exist
157                 os.chdir("/")
158                 return "/"
159 getcwd()
160
161 def abssymlink(symlink):
162         "This reads a symlink, resolves a relative target against the symlink's directory, and returns the absolute path."
163         mylink=os.readlink(symlink)
164         if mylink[0] != '/':
165                 mydir=os.path.dirname(symlink)
166                 mylink=mydir+"/"+mylink
167         return os.path.normpath(mylink)
168
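def _abssymlink_example():
        # Illustrative sketch only (not part of the original module): a relative
        # link target is resolved against the directory holding the symlink, so
        # a hypothetical link /usr/lib/libfoo.so -> libfoo.so.1 yields
        # "/usr/lib/libfoo.so.1".  Assumes the symlink exists on the host.
        return abssymlink("/usr/lib/libfoo.so")
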
169 dircache = {}
170 cacheHit=0
171 cacheMiss=0
172 cacheStale=0
173 def cacheddir(my_original_path, ignorecvs, ignorelist, EmptyOnError, followSymlinks=True):
174         global cacheHit,cacheMiss,cacheStale
175         mypath = normalize_path(my_original_path)
176         if dircache.has_key(mypath):
177                 cacheHit += 1
178                 cached_mtime, list, ftype = dircache[mypath]
179         else:
180                 cacheMiss += 1
181                 cached_mtime, list, ftype = -1, [], []
182         try:
183                 pathstat = os.stat(mypath)
184                 if stat.S_ISDIR(pathstat[stat.ST_MODE]):
185                         mtime = pathstat[stat.ST_MTIME]
186                 else:
187                         raise portage_exception.DirectoryNotFound(mypath)
188         except (IOError,OSError,portage_exception.PortageException):
189                 if EmptyOnError:
190                         return [], []
191                 return None, None
192 # Python returns mtime in whole seconds, so if the directory changed within the last few seconds the cached listing could be stale
193         if mtime != cached_mtime or time.time() - mtime < 4:
194                 if dircache.has_key(mypath):
195                         cacheStale += 1
196                 list = os.listdir(mypath)
197                 ftype = []
198                 for x in list:
199                         try:
200                                 if followSymlinks:
201                                         pathstat = os.stat(mypath+"/"+x)
202                                 else:
203                                         pathstat = os.lstat(mypath+"/"+x)
204
205                                 if stat.S_ISREG(pathstat[stat.ST_MODE]):
206                                         ftype.append(0)
207                                 elif stat.S_ISDIR(pathstat[stat.ST_MODE]):
208                                         ftype.append(1)
209                                 elif stat.S_ISLNK(pathstat[stat.ST_MODE]):
210                                         ftype.append(2)
211                                 else:
212                                         ftype.append(3)
213                         except (IOError, OSError):
214                                 ftype.append(3)
215                 dircache[mypath] = mtime, list, ftype
216
217         ret_list = []
218         ret_ftype = []
219         for x in range(0, len(list)):
220                 if(ignorecvs and (len(list[x]) > 2) and (list[x][:2]!=".#")):
221                         ret_list.append(list[x])
222                         ret_ftype.append(ftype[x])
223                 elif (list[x] not in ignorelist):
224                         ret_list.append(list[x])
225                         ret_ftype.append(ftype[x])
226
227         writemsg("cacheddirStats: H:%d/M:%d/S:%d\n" % (cacheHit, cacheMiss, cacheStale),10)
228         return ret_list, ret_ftype
229
230 def listdir(mypath, recursive=False, filesonly=False, ignorecvs=False, ignorelist=[], followSymlinks=True,
231         EmptyOnError=False, dirsonly=False):
232         """
233         Portage-specific implementation of os.listdir
234
235         @param mypath: Path whose contents you wish to list
236         @type mypath: String
237         @param recursive: Recursively scan directories contained within mypath
238         @type recursive: Boolean
239         @param filesonly: Only return files, not directories
240         @type filesonly: Boolean
241         @param ignorecvs: Ignore CVS directories ('CVS','.svn','SCCS')
242         @type ignorecvs: Boolean
243         @param ignorelist: List of filenames/directories to exclude
244         @type ignorelist: List
245         @param followSymlinks: Follow Symlink'd files and directories
246         @type followSymlinks: Boolean
247         @param EmptyOnError: Return [] if an error occurs.
248         @type EmptyOnError: Boolean
249         @param dirsonly: Only return directories.
250         @type dirsonly: Boolean
251         @rtype: List
252         @returns: A list of files and directories (or just files or just directories) or an empty list.
253         """
254
255         list, ftype = cacheddir(mypath, ignorecvs, ignorelist, EmptyOnError, followSymlinks)
256
257         if list is None:
258                 list=[]
259         if ftype is None:
260                 ftype=[]
261
262         if not (filesonly or dirsonly or recursive):
263                 return list
264
265         if recursive:
266                 x=0
267                 while x<len(ftype):
268                         if ftype[x]==1 and not (ignorecvs and os.path.basename(list[x]) in ('CVS','.svn','SCCS')):
269                                 l,f = cacheddir(mypath+"/"+list[x], ignorecvs, ignorelist, EmptyOnError,
270                                         followSymlinks)
271
272                                 l=l[:]
273                                 for y in range(0,len(l)):
274                                         l[y]=list[x]+"/"+l[y]
275                                 list=list+l
276                                 ftype=ftype+f
277                         x+=1
278         if filesonly:
279                 rlist=[]
280                 for x in range(0,len(ftype)):
281                         if ftype[x]==0:
282                                 rlist=rlist+[list[x]]
283         elif dirsonly:
284                 rlist = []
285                 for x in range(0, len(ftype)):
286                         if ftype[x] == 1:
287                                 rlist = rlist + [list[x]]       
288         else:
289                 rlist=list
290
291         return rlist
292
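def _listdir_example():
        # Illustrative sketch only (not part of the original module): list just
        # the regular files under /etc/env.d, skip CVS/.svn/SCCS control
        # directories, and return [] instead of failing if the path is missing.
        return listdir("/etc/env.d", filesonly=True, ignorecvs=True,
                EmptyOnError=True)
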
293 def flatten(mytokens):
294         """this function turns a nested list such as [1,[2,3]]
295         into a flat [1,2,3] list and returns it."""
296         newlist=[]
297         for x in mytokens:
298                 if type(x)==types.ListType:
299                         newlist.extend(flatten(x))
300                 else:
301                         newlist.append(x)
302         return newlist
303
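def _flatten_example():
        # Illustrative sketch only (not part of the original module): flatten()
        # recurses into nested lists, so nested dependency tokens such as
        # [1, [2, 3], [4, [5]]] collapse to [1, 2, 3, 4, 5].
        return flatten([1, [2, 3], [4, [5]]])
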
304 #beautiful directed graph object
305
306 class digraph:
307         def __init__(self):
308                 """Create an empty digraph"""
309                 
310                 # { node : ( { child : priority } , { parent : priority } ) }
311                 self.nodes = {}
312                 self.order = []
313
314         def add(self, node, parent, priority=0):
315                 """Adds the specified node with the specified parent.
316                 
317                 If the dep is a soft-dep and the node already has a hard
318                 relationship to the parent, the relationship is left as hard."""
319                 
320                 if node not in self.nodes:
321                         self.nodes[node] = ({}, {})
322                         self.order.append(node)
323                 
324                 if not parent:
325                         return
326                 
327                 if parent not in self.nodes:
328                         self.nodes[parent] = ({}, {})
329                         self.order.append(parent)
330                 
331                 if parent in self.nodes[node][1]:
332                         if priority > self.nodes[node][1][parent]:
333                                 self.nodes[node][1][parent] = priority
334                 else:
335                         self.nodes[node][1][parent] = priority
336                 
337                 if node in self.nodes[parent][0]:
338                         if priority > self.nodes[parent][0][node]:
339                                 self.nodes[parent][0][node] = priority
340                 else:
341                         self.nodes[parent][0][node] = priority
342
343         def remove(self, node):
344                 """Removes the specified node from the digraph, also removing
345                 any ties to other nodes in the digraph. Raises KeyError if the
346                 node doesn't exist."""
347                 
348                 if node not in self.nodes:
349                         raise KeyError(node)
350                 
351                 for parent in self.nodes[node][1]:
352                         del self.nodes[parent][0][node]
353                 for child in self.nodes[node][0]:
354                         del self.nodes[child][1][node]
355                 
356                 del self.nodes[node]
357                 self.order.remove(node)
358
359         def contains(self, node):
360                 """Checks if the digraph contains the given node"""
361                 return node in self.nodes
362
363         def all_nodes(self):
364                 """Return a list of all nodes in the graph"""
365                 return self.order[:]
366
367         def child_nodes(self, node, ignore_priority=None):
368                 """Return all children of the specified node"""
369                 if ignore_priority is None:
370                         return self.nodes[node][0].keys()
371                 children = []
372                 for child, priority in self.nodes[node][0].iteritems():
373                         if priority > ignore_priority:
374                                 children.append(child)
375                 return children
376
377         def parent_nodes(self, node):
378                 """Return all parents of the specified node"""
379                 return self.nodes[node][1].keys()
380
381         def leaf_nodes(self, ignore_priority=None):
382                 """Return all nodes that have no children
383                 
384                 Children with a priority less than or equal to ignore_priority
385                 are not counted when deciding whether a node is a leaf."""
386                 
387                 leaf_nodes = []
388                 for node in self.order:
389                         is_leaf_node = True
390                         for child in self.nodes[node][0]:
391                                 if self.nodes[node][0][child] > ignore_priority:
392                                         is_leaf_node = False
393                                         break
394                         if is_leaf_node:
395                                 leaf_nodes.append(node)
396                 return leaf_nodes
397
398         def root_nodes(self, ignore_priority=None):
399                 """Return all nodes that have no parents.
400                 
401                 Parents with a priority less than or equal to ignore_priority
402                 are not counted when deciding whether a node is a root."""
403                 
404                 root_nodes = []
405                 for node in self.order:
406                         is_root_node = True
407                         for parent in self.nodes[node][1]:
408                                 if self.nodes[node][1][parent] > ignore_priority:
409                                         is_root_node = False
410                                         break
411                         if is_root_node:
412                                 root_nodes.append(node)
413                 return root_nodes
414
415         def is_empty(self):
416                 """Checks if the digraph is empty"""
417                 return len(self.nodes) == 0
418
419         def clone(self):
420                 clone = digraph()
421                 clone.nodes = copy.deepcopy(self.nodes)
422                 clone.order = self.order[:]
423                 return clone
424
425         # Backward compatibility
426         addnode = add
427         allnodes = all_nodes
428         allzeros = leaf_nodes
429         hasnode = contains
430         empty = is_empty
431         copy = clone
432
433         def delnode(self, node):
434                 try:
435                         self.remove(node)
436                 except KeyError:
437                         pass
438
439         def firstzero(self):
440                 leaf_nodes = self.leaf_nodes()
441                 if leaf_nodes:
442                         return leaf_nodes[0]
443                 return None
444
445         def hasallzeros(self, ignore_priority=None):
446                 return len(self.leaf_nodes(ignore_priority=ignore_priority)) == \
447                         len(self.order)
448
449         def debug_print(self):
450                 for node in self.nodes:
451                         print node,
452                         if self.nodes[node][0]:
453                                 print "depends on"
454                         else:
455                                 print "(no children)"
456                         for child in self.nodes[node][0]:
457                                 print "  ",child,
458                                 print "(%s)" % self.nodes[node][0][child]
459
460
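def _digraph_example():
        # Illustrative sketch only (not part of the original module): add(node,
        # parent) records node as a child (dependency) of parent, so with gzip
        # depending on zlib the only leaf is zlib; once zlib is removed, gzip
        # becomes the leaf.
        graph = digraph()
        graph.add("sys-libs/zlib", "app-arch/gzip")
        leaves = graph.leaf_nodes()     # ["sys-libs/zlib"]
        graph.remove("sys-libs/zlib")
        return leaves, graph.leaf_nodes()       # second list: ["app-arch/gzip"]
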
461 _elog_atexit_handlers = []
462 def elog_process(cpv, mysettings):
463         mylogfiles = listdir(mysettings["T"]+"/logging/")
464         # shortcut for packages without any messages
465         if len(mylogfiles) == 0:
466                 return
467         # exploit listdir() file order so we process log entries in chronological order
468         mylogfiles.reverse()
469         mylogentries = {}
470         my_elog_classes = set(mysettings.get("PORTAGE_ELOG_CLASSES", "").split())
471         for f in mylogfiles:
472                 msgfunction, msgtype = f.split(".")
473                 if msgtype.upper() not in my_elog_classes \
474                                 and msgtype.lower() not in my_elog_classes:
475                         continue
476                 if msgfunction not in portage_const.EBUILD_PHASES:
477                         writemsg("!!! can't process invalid log file: %s\n" % f,
478                                 noiselevel=-1)
479                         continue
480                 if not msgfunction in mylogentries:
481                         mylogentries[msgfunction] = []
482                 msgcontent = open(mysettings["T"]+"/logging/"+f, "r").readlines()
483                 mylogentries[msgfunction].append((msgtype, msgcontent))
484
485         # in case the filters matched all messages
486         if len(mylogentries) == 0:
487                 return
488
489         # generate a single string with all log messages
490         fulllog = ""
491         for phase in portage_const.EBUILD_PHASES:
492                 if not phase in mylogentries:
493                         continue
494                 for msgtype,msgcontent in mylogentries[phase]:
495                         fulllog += "%s: %s\n" % (msgtype, phase)
496                         for line in msgcontent:
497                                 fulllog += line
498                         fulllog += "\n"
499
500         # pass the processing to the individual modules
501         logsystems = mysettings["PORTAGE_ELOG_SYSTEM"].split()
502         for s in logsystems:
503                 # - is nicer than _ for module names, so allow people to use it.
504                 s = s.replace("-", "_")
505                 try:
506                         # FIXME: ugly ad-hoc import code
507                         # TODO:  implement a common portage module loader
508                         logmodule = __import__("elog_modules.mod_"+s)
509                         m = getattr(logmodule, "mod_"+s)
510                         def timeout_handler(signum, frame):
511                                 raise portage_exception.PortageException(
512                                         "Timeout in elog_process for system '%s'" % s)
513                         import signal
514                         signal.signal(signal.SIGALRM, timeout_handler)
515                         # Timeout after one minute (in case something like the mail
516                         # module gets hung).
517                         signal.alarm(60)
518                         try:
519                                 m.process(mysettings, cpv, mylogentries, fulllog)
520                         finally:
521                                 signal.alarm(0)
522                         if hasattr(m, "finalize") and not m.finalize in _elog_atexit_handlers:
523                                 _elog_atexit_handlers.append(m.finalize)
524                                 atexit_register(m.finalize, mysettings)
525                 except (ImportError, AttributeError), e:
526                         writemsg("!!! Error while importing logging modules " + \
527                                 "while loading \"mod_%s\":\n" % str(s))
528                         writemsg("%s\n" % str(e), noiselevel=-1)
529                 except portage_exception.PortageException, e:
530                         writemsg("%s\n" % str(e), noiselevel=-1)
531
532         # clean logfiles to avoid repetitions
533         for f in mylogfiles:
534                 try:
535                         os.unlink(os.path.join(mysettings["T"], "logging", f))
536                 except OSError:
537                         pass
538
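def _elog_module_example():
        # Illustrative sketch only (not part of the original module): the minimal
        # interface an elog module needs so elog_process() above can drive it.
        # Shipped as elog_modules/mod_example.py (hypothetical name) and selected
        # with PORTAGE_ELOG_SYSTEM="example", process() receives the filtered
        # messages for each package; finalize() is optional and is registered
        # once via atexit_register().
        def process(mysettings, cpv, logentries, fulllog):
                pass
        def finalize(mysettings):
                pass
        return process, finalize
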
539 #parse /etc/env.d and generate /etc/profile.env
540
541 def env_update(makelinks=1, target_root=None, prev_mtimes=None, contents=None):
542         if target_root is None:
543                 global root
544                 target_root = root
545         if prev_mtimes is None:
546                 global mtimedb
547                 prev_mtimes = mtimedb["ldpath"]
548         envd_dir = os.path.join(target_root, "etc", "env.d")
549         portage_util.ensure_dirs(envd_dir, mode=0755)
550         fns = listdir(envd_dir, EmptyOnError=1)
551         fns.sort()
552         templist = []
553         for x in fns:
554                 if len(x) < 3:
555                         continue
556                 if not x[0].isdigit() or not x[1].isdigit():
557                         continue
558                 if x.startswith(".") or x.endswith("~") or x.endswith(".bak"):
559                         continue
560                 templist.append(x)
561         fns = templist
562         del templist
563
564         space_separated = set(["CONFIG_PROTECT", "CONFIG_PROTECT_MASK"])
565         colon_separated = set(["ADA_INCLUDE_PATH", "ADA_OBJECTS_PATH",
566                 "CLASSPATH", "INFODIR", "INFOPATH", "KDEDIRS", "LDPATH", "MANPATH",
567                   "PATH", "PKG_CONFIG_PATH", "PRELINK_PATH", "PRELINK_PATH_MASK",
568                   "PYTHONPATH", "ROOTPATH"])
569
570         config_list = []
571
572         for x in fns:
573                 file_path = os.path.join(envd_dir, x)
574                 try:
575                         myconfig = getconfig(file_path, expand=False)
576                 except portage_exception.ParseError, e:
577                         writemsg("!!! '%s'\n" % str(e), noiselevel=-1)
578                         del e
579                         continue
580                 if myconfig is None:
581                         # broken symlink or file removed by a concurrent process
582                         writemsg("!!! File Not Found: '%s'\n" % file_path, noiselevel=-1)
583                         continue
584                 config_list.append(myconfig)
585                 if "SPACE_SEPARATED" in myconfig:
586                         space_separated.update(myconfig["SPACE_SEPARATED"].split())
587                         del myconfig["SPACE_SEPARATED"]
588                 if "COLON_SEPARATED" in myconfig:
589                         colon_separated.update(myconfig["COLON_SEPARATED"].split())
590                         del myconfig["COLON_SEPARATED"]
591
592         env = {}
593         specials = {}
594         for var in space_separated:
595                 mylist = []
596                 for myconfig in config_list:
597                         if var in myconfig:
598                                 mylist.extend(filter(None, myconfig[var].split()))
599                                 del myconfig[var] # prepare for env.update(myconfig)
600                 if mylist:
601                         env[var] = " ".join(mylist)
602                 specials[var] = mylist
603
604         for var in colon_separated:
605                 mylist = []
606                 for myconfig in config_list:
607                         if var in myconfig:
608                                 mylist.extend(filter(None, myconfig[var].split(":")))
609                                 del myconfig[var] # prepare for env.update(myconfig)
610                 if mylist:
611                         env[var] = ":".join(mylist)
612                 specials[var] = mylist
613
614         for myconfig in config_list:
615                 """Cumulative variables have already been deleted from myconfig so that
616                 they won't be overwritten by this dict.update call."""
617                 env.update(myconfig)
618
619         ldsoconf_path = os.path.join(target_root, "etc", "ld.so.conf")
620         try:
621                 myld = open(ldsoconf_path)
622                 myldlines=myld.readlines()
623                 myld.close()
624                 oldld=[]
625                 for x in myldlines:
626                         #each line has at least one char (a newline)
627                         if x[0]=="#":
628                                 continue
629                         oldld.append(x[:-1])
630         except (IOError, OSError), e:
631                 if e.errno != errno.ENOENT:
632                         raise
633                 oldld = None
634
635         ld_cache_update=False
636
637         newld = specials["LDPATH"]
638         if (oldld!=newld):
639                 #ld.so.conf needs updating and ldconfig needs to be run
640                 myfd = atomic_ofstream(ldsoconf_path)
641                 myfd.write("# ld.so.conf autogenerated by env-update; make all changes to\n")
642                 myfd.write("# contents of /etc/env.d directory\n")
643                 for x in specials["LDPATH"]:
644                         myfd.write(x+"\n")
645                 myfd.close()
646                 ld_cache_update=True
647
648         # Update prelink.conf if we are prelink-enabled
649         if prelink_capable:
650                 newprelink = atomic_ofstream(
651                         os.path.join(target_root, "etc", "prelink.conf"))
652                 newprelink.write("# prelink.conf autogenerated by env-update; make all changes to\n")
653                 newprelink.write("# contents of /etc/env.d directory\n")
654
655                 for x in ["/bin","/sbin","/usr/bin","/usr/sbin","/lib","/usr/lib"]:
656                         newprelink.write("-l "+x+"\n");
657                 for x in specials["LDPATH"]+specials["PATH"]+specials["PRELINK_PATH"]:
658                         if not x:
659                                 continue
660                         if x[-1]!='/':
661                                 x=x+"/"
662                         plmasked=0
663                         for y in specials["PRELINK_PATH_MASK"]:
664                                 if not y:
665                                         continue
666                                 if y[-1]!='/':
667                                         y=y+"/"
668                                 if y==x[0:len(y)]:
669                                         plmasked=1
670                                         break
671                         if not plmasked:
672                                 newprelink.write("-h "+x+"\n")
673                 for x in specials["PRELINK_PATH_MASK"]:
674                         newprelink.write("-b "+x+"\n")
675                 newprelink.close()
676
677         mtime_changed = False
678         lib_dirs = set()
679         for lib_dir in portage_util.unique_array(specials["LDPATH"]+['usr/lib','usr/lib64','usr/lib32','lib','lib64','lib32']):
680                 x = os.path.join(target_root, lib_dir.lstrip(os.sep))
681                 try:
682                         newldpathtime = os.stat(x)[stat.ST_MTIME]
683                         lib_dirs.add(normalize_path(x))
684                 except OSError, oe:
685                         if oe.errno == errno.ENOENT:
686                                 try:
687                                         del prev_mtimes[x]
688                                 except KeyError:
689                                         pass
690                                 # ignore this path because it doesn't exist
691                                 continue
692                         raise
693                 if x in prev_mtimes:
694                         if prev_mtimes[x] == newldpathtime:
695                                 pass
696                         else:
697                                 prev_mtimes[x] = newldpathtime
698                                 mtime_changed = True
699                 else:
700                         prev_mtimes[x] = newldpathtime
701                         mtime_changed = True
702
703         if mtime_changed:
704                 ld_cache_update = True
705
706         if makelinks and \
707                 not ld_cache_update and \
708                 contents is not None:
709                 libdir_contents_changed = False
710                 for mypath, mydata in contents.iteritems():
711                         if mydata[0] not in ("obj","sym"):
712                                 continue
713                         head, tail = os.path.split(mypath)
714                         if head in lib_dirs:
715                                 libdir_contents_changed = True
716                                 break
717                 if not libdir_contents_changed:
718                         makelinks = False
719
720         # Only run ldconfig as needed
721         if (ld_cache_update or makelinks):
722                 # ldconfig has very different behaviour between FreeBSD and Linux
723                 if ostype=="Linux" or ostype.lower().endswith("gnu"):
724                         # We can't update links if we haven't cleaned other versions first, as
725                         # an older package installed ON TOP of a newer version will cause ldconfig
726                         # to overwrite the symlinks we just made. -X means no links. After 'clean'
727                         # we can safely create links.
728                         writemsg(">>> Regenerating %setc/ld.so.cache...\n" % target_root)
729                         if makelinks:
730                                 commands.getstatusoutput("cd / ; /sbin/ldconfig -r '%s'" % target_root)
731                         else:
732                                 commands.getstatusoutput("cd / ; /sbin/ldconfig -X -r '%s'" % target_root)
733                 elif ostype in ("FreeBSD","DragonFly"):
734                         writemsg(">>> Regenerating %svar/run/ld-elf.so.hints...\n" % target_root)
735                         commands.getstatusoutput(
736                                 "cd / ; /sbin/ldconfig -elf -i -f '%svar/run/ld-elf.so.hints' '%setc/ld.so.conf'" % \
737                                 (target_root, target_root))
738
739         del specials["LDPATH"]
740
741         penvnotice  = "# THIS FILE IS AUTOMATICALLY GENERATED BY env-update.\n"
742         penvnotice += "# DO NOT EDIT THIS FILE. CHANGES TO STARTUP PROFILES\n"
743         cenvnotice  = penvnotice[:]
744         penvnotice += "# GO INTO /etc/profile NOT /etc/profile.env\n\n"
745         cenvnotice += "# GO INTO /etc/csh.cshrc NOT /etc/csh.env\n\n"
746
747         #create /etc/profile.env for bash support
748         outfile = atomic_ofstream(os.path.join(target_root, "etc", "profile.env"))
749         outfile.write(penvnotice)
750
751         env_keys = [ x for x in env if x != "LDPATH" ]
752         env_keys.sort()
753         for x in env_keys:
754                 outfile.write("export %s='%s'\n" % (x, env[x]))
755         outfile.close()
756
757         #create /etc/csh.env for (t)csh support
758         outfile = atomic_ofstream(os.path.join(target_root, "etc", "csh.env"))
759         outfile.write(cenvnotice)
760         for x in env_keys:
761                 outfile.write("setenv %s '%s'\n" % (x, env[x]))
762         outfile.close()
763
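def _env_update_example():
        # Illustrative sketch only (not part of the original module): with a
        # hypothetical /etc/env.d/99local containing
        #       PATH="/opt/local/bin"
        #       LDPATH="/opt/local/lib"
        # env_update() merges PATH into the colon-separated variables, rewrites
        # etc/ld.so.conf under the target root, reruns ldconfig if any LDPATH
        # directory mtime changed, and regenerates etc/profile.env ("export ...")
        # plus etc/csh.env ("setenv ...").
        env_update(makelinks=1, target_root="/", prev_mtimes={})
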
764 def ExtractKernelVersion(base_dir):
765         """
766         Try to figure out the version of the kernel sources in base_dir
767         @param base_dir: Path to sources (usually /usr/src/linux)
768         @type base_dir: string
769         @rtype: tuple( version[string], error[string])
770         @returns:
771         tuple( version[string], error[string])
772         Either version or error is populated (but never both)
773
774         """
775         lines = []
776         pathname = os.path.join(base_dir, 'Makefile')
777         try:
778                 f = open(pathname, 'r')
779         except OSError, details:
780                 return (None, str(details))
781         except IOError, details:
782                 return (None, str(details))
783
784         try:
785                 for i in range(4):
786                         lines.append(f.readline())
787         except OSError, details:
788                 return (None, str(details))
789         except IOError, details:
790                 return (None, str(details))
791
792         lines = [l.strip() for l in lines]
793
794         version = ''
795
796         #XXX: The following code relies on the ordering of vars within the Makefile
797         for line in lines:
798                 # split on the '=' then remove annoying whitespace
799                 items = line.split("=")
800                 items = [i.strip() for i in items]
801                 if items[0] == 'VERSION' or \
802                         items[0] == 'PATCHLEVEL':
803                         version += items[1]
804                         version += "."
805                 elif items[0] == 'SUBLEVEL':
806                         version += items[1]
807                 elif items[0] == 'EXTRAVERSION' and \
808                         items[-1] != items[0]:
809                         version += items[1]
810
811         # Grab a list of files named localversion* and sort them
812         localversions = os.listdir(base_dir)
813         for x in range(len(localversions)-1,-1,-1):
814                 if localversions[x][:12] != "localversion":
815                         del localversions[x]
816         localversions.sort()
817
818         # Append the contents of each to the version string, stripping ALL whitespace
819         for lv in localversions:
820                 version += "".join( " ".join( grabfile( base_dir+ "/" + lv ) ).split() )
821
822         # Check the .config for a CONFIG_LOCALVERSION and append that too, also stripping whitespace
823         kernelconfig = getconfig(base_dir+"/.config")
824         if kernelconfig and kernelconfig.has_key("CONFIG_LOCALVERSION"):
825                 version += "".join(kernelconfig["CONFIG_LOCALVERSION"].split())
826
827         return (version,None)
828
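def _extract_kernel_version_example():
        # Illustrative sketch only (not part of the original module): for a
        # source tree whose Makefile starts with VERSION = 2, PATCHLEVEL = 6,
        # SUBLEVEL = 19 and EXTRAVERSION = -gentoo-r5, this returns
        # ("2.6.19-gentoo-r5" plus any localversion*/CONFIG_LOCALVERSION text,
        # None), or (None, error_string) if the Makefile cannot be read.
        return ExtractKernelVersion("/usr/src/linux")
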
829 def autouse(myvartree, use_cache=1, mysettings=None):
830         """
831         autouse returns a string of USE variables that are auto-enabled for the packages being installed
832
833         @param myvartree: Instance of the vartree class (from /var/db/pkg...)
834         @type myvartree: vartree
835         @param use_cache: read values from cache
836         @type use_cache: Boolean
837         @param mysettings: Instance of config
838         @type mysettings: config
839         @rtype: string
840         @returns: A string containing a list of USE variables that are enabled via use.defaults
841         """
842         if mysettings is None:
843                 global settings
844                 mysettings = settings
845         if mysettings.profile_path is None:
846                 return ""
847         myusevars=""
848         usedefaults = mysettings.use_defs
849         for myuse in usedefaults:
850                 dep_met = True
851                 for mydep in usedefaults[myuse]:
852                         if not myvartree.dep_match(mydep,use_cache=True):
853                                 dep_met = False
854                                 break
855                 if dep_met:
856                         myusevars += " "+myuse
857         return myusevars
858
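def _autouse_example(myvartree):
        # Illustrative sketch only (not part of the original module): use.defaults
        # maps a USE flag to the dependency atoms that trigger it, e.g. a profile
        # line "gnome gnome-base/gnome-session".  With a vartree showing
        # gnome-session installed, the returned string would contain " gnome".
        return autouse(myvartree, use_cache=1)
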
859 def check_config_instance(test):
860         if not test or (str(test.__class__) != 'portage.config'):
861                 raise TypeError, "Invalid type for config object: %s" % test.__class__
862
863 class config:
864         """
865         This class encompasses the main portage configuration.  Data is pulled from
866         ROOT/PORTDIR/profiles/, from ROOT/etc/make.profile incrementally through all 
867         parent profiles as well as from ROOT/PORTAGE_CONFIGROOT/* for user-specified
868         overrides.
869         
870         Generally, if you need data like USE flags, FEATURES, environment variables,
871         virtuals, etc., this is where to look.
872         """
873         
874         def __init__(self, clone=None, mycpv=None, config_profile_path=None,
875                 config_incrementals=None, config_root=None, target_root=None,
876                 local_config=True):
877                 """
878                 @param clone: If provided, init will use deepcopy to copy by value the instance.
879                 @type clone: Instance of config class.
880                 @param mycpv: CPV to load up (see setcpv), this is the same as calling init with mycpv=None
881                 and then calling instance.setcpv(mycpv).
882                 @type mycpv: String
883                 @param config_profile_path: Configurable path to the profile (usually PROFILE_PATH from portage_const)
884                 @type config_profile_path: String
885                 @param config_incrementals: List of incremental variables (usually portage_const.INCREMENTALS)
886                 @type config_incrementals: List
887                 @param config_root: path to read local config from (defaults to "/", see PORTAGE_CONFIGROOT)
888                 @type config_root: String
889                 @param target_root: __init__ override of $ROOT env variable.
890                 @type target_root: String
891                 @param local_config: Enables loading of local config (/etc/portage); mostly used by repoman to
892                 ignore local config (keywording and unmasking)
893                 @type local_config: Boolean
894                 """
895
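                # Illustrative sketch only (not part of the original module): a
                # typical top-level construction might look like
                #
                #       settings = config(config_root="/", target_root="/",
                #               config_incrementals=portage_const.INCREMENTALS)
                #
                # while repoman would pass local_config=False to ignore
                # /etc/portage keywording and unmasking.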
896                 debug = os.environ.get("PORTAGE_DEBUG") == "1"
897
898                 self.already_in_regenerate = 0
899
900                 self.locked   = 0
901                 self.mycpv    = None
902                 self.puse     = []
903                 self.modifiedkeys = []
904                 self.uvlist = []
905
906                 self.virtuals = {}
907                 self.virts_p = {}
908                 self.dirVirtuals = None
909                 self.v_count  = 0
910
911                 # Virtuals obtained from the vartree
912                 self.treeVirtuals = {}
913                 # Virtuals by user specification. Includes negatives.
914                 self.userVirtuals = {}
915                 # Virtual negatives from user specifications.
916                 self.negVirtuals  = {}
917
918                 self.user_profile_dir = None
919                 self.local_config = local_config
920
921                 if clone:
922                         self.incrementals = copy.deepcopy(clone.incrementals)
923                         self.profile_path = copy.deepcopy(clone.profile_path)
924                         self.user_profile_dir = copy.deepcopy(clone.user_profile_dir)
925                         self.local_config = copy.deepcopy(clone.local_config)
926
927                         self.module_priority = copy.deepcopy(clone.module_priority)
928                         self.modules         = copy.deepcopy(clone.modules)
929
930                         self.depcachedir = copy.deepcopy(clone.depcachedir)
931
932                         self.packages = copy.deepcopy(clone.packages)
933                         self.virtuals = copy.deepcopy(clone.virtuals)
934
935                         self.treeVirtuals = copy.deepcopy(clone.treeVirtuals)
936                         self.userVirtuals = copy.deepcopy(clone.userVirtuals)
937                         self.negVirtuals  = copy.deepcopy(clone.negVirtuals)
938
939                         self.use_defs = copy.deepcopy(clone.use_defs)
940                         self.usemask  = copy.deepcopy(clone.usemask)
941                         self.usemask_list = copy.deepcopy(clone.usemask_list)
942                         self.pusemask_list = copy.deepcopy(clone.pusemask_list)
943                         self.useforce      = copy.deepcopy(clone.useforce)
944                         self.useforce_list = copy.deepcopy(clone.useforce_list)
945                         self.puseforce_list = copy.deepcopy(clone.puseforce_list)
946                         self.puse     = copy.deepcopy(clone.puse)
947                         self.make_defaults_use = copy.deepcopy(clone.make_defaults_use)
948                         self.pkgprofileuse = copy.deepcopy(clone.pkgprofileuse)
949                         self.mycpv    = copy.deepcopy(clone.mycpv)
950
951                         self.configlist = copy.deepcopy(clone.configlist)
952                         self.lookuplist = self.configlist[:]
953                         self.lookuplist.reverse()
954                         self.configdict = {
955                                 "env.d":     self.configlist[0],
956                                 "pkginternal": self.configlist[1],
957                                 "globals":     self.configlist[2],
958                                 "defaults":    self.configlist[3],
959                                 "conf":        self.configlist[4],
960                                 "pkg":         self.configlist[5],
961                                 "auto":        self.configlist[6],
962                                 "backupenv":   self.configlist[7],
963                                 "env":         self.configlist[8] }
964                         self.profiles = copy.deepcopy(clone.profiles)
965                         self.backupenv  = self.configdict["backupenv"]
966                         self.pusedict   = copy.deepcopy(clone.pusedict)
967                         self.categories = copy.deepcopy(clone.categories)
968                         self.pkeywordsdict = copy.deepcopy(clone.pkeywordsdict)
969                         self.pmaskdict = copy.deepcopy(clone.pmaskdict)
970                         self.punmaskdict = copy.deepcopy(clone.punmaskdict)
971                         self.prevmaskdict = copy.deepcopy(clone.prevmaskdict)
972                         self.pprovideddict = copy.deepcopy(clone.pprovideddict)
973                         self.dirVirtuals = copy.deepcopy(clone.dirVirtuals)
974                         self.treeVirtuals = copy.deepcopy(clone.treeVirtuals)
975                         self.features = copy.deepcopy(clone.features)
976                 else:
977
978                         # backupenv is for calculated incremental variables.
979                         self.backupenv = os.environ.copy()
980
981                         def check_var_directory(varname, var):
982                                 if not os.path.isdir(var):
983                                         writemsg(("!!! Error: %s='%s' is not a directory. " + \
984                                                 "Please correct this.\n") % (varname, var),
985                                                 noiselevel=-1)
986                                         raise portage_exception.DirectoryNotFound(var)
987
988                         if config_root is None:
989                                 config_root = "/"
990
991                         config_root = \
992                                 normalize_path(config_root).rstrip(os.path.sep) + os.path.sep
993
994                         check_var_directory("PORTAGE_CONFIGROOT", config_root)
995
996                         self.depcachedir = DEPCACHE_PATH
997
998                         if not config_profile_path:
999                                 config_profile_path = \
1000                                         os.path.join(config_root, PROFILE_PATH.lstrip(os.path.sep))
1001                                 if os.path.isdir(config_profile_path):
1002                                         self.profile_path = config_profile_path
1003                                 else:
1004                                         self.profile_path = None
1005                         else:
1006                                 self.profile_path = config_profile_path[:]
1007
1008                         if not config_incrementals:
1009                                 writemsg("incrementals not specified to class config\n")
1010                                 self.incrementals = copy.deepcopy(portage_const.INCREMENTALS)
1011                         else:
1012                                 self.incrementals = copy.deepcopy(config_incrementals)
1013
1014                         self.module_priority    = ["user","default"]
1015                         self.modules            = {}
1016                         self.modules["user"] = getconfig(
1017                                 os.path.join(config_root, MODULES_FILE_PATH.lstrip(os.path.sep)))
1018                         if self.modules["user"] is None:
1019                                 self.modules["user"] = {}
1020                         self.modules["default"] = {
1021                                 "portdbapi.metadbmodule": "cache.metadata.database",
1022                                 "portdbapi.auxdbmodule":  "cache.flat_hash.database",
1023                         }
1024
1025                         self.usemask=[]
1026                         self.configlist=[]
1027
1028                         # back up our incremental variables:
1029                         self.configdict={}
1030                         # configlist will contain: [ env.d, pkginternal, globals, defaults, conf, pkg, auto, backupenv, env ]
1031                         self.configlist.append({})
1032                         self.configdict["env.d"] = self.configlist[-1]
1033
1034                         self.configlist.append({})
1035                         self.configdict["pkginternal"] = self.configlist[-1]
1036
1037                         # The symlink might not exist or might not be a symlink.
1038                         if self.profile_path is None:
1039                                 self.profiles = []
1040                         else:
1041                                 self.profiles = []
1042                                 def addProfile(currentPath):
1043                                         parentsFile = os.path.join(currentPath, "parent")
1044                                         if os.path.exists(parentsFile):
1045                                                 parents = grabfile(parentsFile)
1046                                                 if not parents:
1047                                                         raise portage_exception.ParseError(
1048                                                                 "Empty parent file: '%s'" % parentsFile)
1049                                                 for parentPath in parents:
1050                                                         parentPath = normalize_path(os.path.join(
1051                                                                 currentPath, parentPath))
1052                                                         if os.path.exists(parentPath):
1053                                                                 addProfile(parentPath)
1054                                                         else:
1055                                                                 raise portage_exception.ParseError(
1056                                                                         "Parent '%s' not found: '%s'" %  \
1057                                                                         (parentPath, parentsFile))
1058                                         self.profiles.append(currentPath)
1059                                 addProfile(os.path.realpath(self.profile_path))
1060                         if local_config:
1061                                 custom_prof = os.path.join(
1062                                         config_root, CUSTOM_PROFILE_PATH.lstrip(os.path.sep))
1063                                 if os.path.exists(custom_prof):
1064                                         self.user_profile_dir = custom_prof
1065                                         self.profiles.append(custom_prof)
1066                                 del custom_prof
1067
1068                         self.packages_list = [grabfile_package(os.path.join(x, "packages")) for x in self.profiles]
1069                         self.packages      = stack_lists(self.packages_list, incremental=1)
1070                         del self.packages_list
1071                         #self.packages = grab_stacked("packages", self.profiles, grabfile, incremental_lines=1)
1072
1073                         # revmaskdict
1074                         self.prevmaskdict={}
1075                         for x in self.packages:
1076                                 mycatpkg=dep_getkey(x)
1077                                 if not self.prevmaskdict.has_key(mycatpkg):
1078                                         self.prevmaskdict[mycatpkg]=[x]
1079                                 else:
1080                                         self.prevmaskdict[mycatpkg].append(x)
1081
1082                         # get profile-masked use flags -- INCREMENTAL Child over parent
1083                         self.usemask_list = [grabfile(os.path.join(x, "use.mask")) \
1084                                 for x in self.profiles]
1085                         self.usemask  = set(stack_lists(
1086                                 self.usemask_list, incremental=True))
1087                         use_defs_lists = [grabdict(os.path.join(x, "use.defaults")) for x in self.profiles]
1088                         self.use_defs  = stack_dictlist(use_defs_lists, incremental=True)
1089                         del use_defs_lists
1090
1091                         self.pusemask_list = []
1092                         rawpusemask = [grabdict_package(
1093                                 os.path.join(x, "package.use.mask")) \
1094                                 for x in self.profiles]
1095                         for i in xrange(len(self.profiles)):
1096                                 cpdict = {}
1097                                 for k, v in rawpusemask[i].iteritems():
1098                                         cpdict.setdefault(dep_getkey(k), {})[k] = v
1099                                 self.pusemask_list.append(cpdict)
1100                         del rawpusemask
1101
1102                         self.pkgprofileuse = []
1103                         rawprofileuse = [grabdict_package(
1104                                 os.path.join(x, "package.use"), juststrings=True) \
1105                                 for x in self.profiles]
1106                         for i in xrange(len(self.profiles)):
1107                                 cpdict = {}
1108                                 for k, v in rawprofileuse[i].iteritems():
1109                                         cpdict.setdefault(dep_getkey(k), {})[k] = v
1110                                 self.pkgprofileuse.append(cpdict)
1111                         del rawprofileuse
1112
1113                         self.useforce_list = [grabfile(os.path.join(x, "use.force")) \
1114                                 for x in self.profiles]
1115                         self.useforce  = set(stack_lists(
1116                                 self.useforce_list, incremental=True))
1117
1118                         self.puseforce_list = []
1119                         rawpuseforce = [grabdict_package(
1120                                 os.path.join(x, "package.use.force")) \
1121                                 for x in self.profiles]
1122                         for i in xrange(len(self.profiles)):
1123                                 cpdict = {}
1124                                 for k, v in rawpuseforce[i].iteritems():
1125                                         cpdict.setdefault(dep_getkey(k), {})[k] = v
1126                                 self.puseforce_list.append(cpdict)
1127                         del rawpuseforce
1128
1129                         try:
1130                                 self.mygcfg   = getconfig(os.path.join(config_root, "etc", "make.globals"))
1131
1132                                 if self.mygcfg is None:
1133                                         self.mygcfg = {}
1134                         except SystemExit, e:
1135                                 raise
1136                         except Exception, e:
1137                                 if debug:
1138                                         raise
1139                                 writemsg("!!! %s\n" % (e), noiselevel=-1)
1140                                 if not isinstance(e, EnvironmentError):
1141                                         writemsg("!!! Incorrect multiline literals can cause " + \
1142                                                 "this. Do not use them.\n", noiselevel=-1)
1143                                 sys.exit(1)
1144                         self.configlist.append(self.mygcfg)
1145                         self.configdict["globals"]=self.configlist[-1]
1146
1147                         self.make_defaults_use = []
1148                         self.mygcfg = {}
1149                         if self.profiles:
1150                                 try:
1151                                         mygcfg_dlists = [getconfig(os.path.join(x, "make.defaults")) for x in self.profiles]
1152                                         for cfg in mygcfg_dlists:
1153                                                 if cfg:
1154                                                         self.make_defaults_use.append(cfg.get("USE", ""))
1155                                                 else:
1156                                                         self.make_defaults_use.append("")
1157                                         self.mygcfg   = stack_dicts(mygcfg_dlists, incrementals=portage_const.INCREMENTALS, ignore_none=1)
1158                                         #self.mygcfg = grab_stacked("make.defaults", self.profiles, getconfig)
1159                                         if self.mygcfg is None:
1160                                                 self.mygcfg = {}
1161                                 except SystemExit, e:
1162                                         raise
1163                                 except Exception, e:
1164                                         if debug:
1165                                                 raise
1166                                         writemsg("!!! %s\n" % (e), noiselevel=-1)
1167                                         if not isinstance(e, EnvironmentError):
1168                                                 writemsg("!!! 'rm -Rf /usr/portage/profiles; " + \
1169                                                         "emerge sync' may fix this. If it does\n",
1170                                                         noiselevel=-1)
1171                                                 writemsg("!!! not then please report this to " + \
1172                                                         "bugs.gentoo.org and, if possible, a dev\n",
1173                                                                 noiselevel=-1)
1174                                                 writemsg("!!! on #gentoo (irc.freenode.org)\n",
1175                                                         noiselevel=-1)
1176                                         sys.exit(1)
1177                         self.configlist.append(self.mygcfg)
1178                         self.configdict["defaults"]=self.configlist[-1]
1179
1180                         try:
1181                                 self.mygcfg = getconfig(
1182                                         os.path.join(config_root, MAKE_CONF_FILE.lstrip(os.path.sep)),
1183                                         allow_sourcing=True)
1184                                 if self.mygcfg is None:
1185                                         self.mygcfg = {}
1186                         except SystemExit, e:
1187                                 raise
1188                         except Exception, e:
1189                                 if debug:
1190                                         raise
1191                                 writemsg("!!! %s\n" % (e), noiselevel=-1)
1192                                 if not isinstance(e, EnvironmentError):
1193                                         writemsg("!!! Incorrect multiline literals can cause " + \
1194                                                 "this. Do not use them.\n", noiselevel=-1)
1195                                 sys.exit(1)
1196
1197                         # Allow ROOT setting to come from make.conf if it's not overridden
1198                         # by the constructor argument (from the calling environment).  As a
1199                         # special exception for a very common use case, config_root == "/"
1200                         # implies that ROOT in make.conf should be ignored.  That way, the
1201                         # user can chroot into $ROOT and the ROOT setting in make.conf will
1202                         # be automatically ignored (unless config_root is other than "/").
1203                         if config_root != "/" and \
1204                                 target_root is None and "ROOT" in self.mygcfg:
1205                                 target_root = self.mygcfg["ROOT"]
1206                         
1207                         self.configlist.append(self.mygcfg)
1208                         self.configdict["conf"]=self.configlist[-1]
1209
1210                         self.configlist.append({})
1211                         self.configdict["pkg"]=self.configlist[-1]
1212
1213                         #auto-use:
1214                         self.configlist.append({})
1215                         self.configdict["auto"]=self.configlist[-1]
1216
1217                         self.configlist.append(self.backupenv) # XXX Why though?
1218                         self.configdict["backupenv"]=self.configlist[-1]
1219
1220                         self.configlist.append(os.environ.copy())
1221                         self.configdict["env"]=self.configlist[-1]
1222
1223
1224                         # make lookuplist for loading package.*
1225                         self.lookuplist=self.configlist[:]
1226                         self.lookuplist.reverse()
1227
1228                         # Blacklist vars that could interfere with portage internals.
1229                         for blacklisted in ["PKGUSE", "PORTAGE_CONFIGROOT", "ROOT"]:
1230                                 for cfg in self.lookuplist:
1231                                         try:
1232                                                 del cfg[blacklisted]
1233                                         except KeyError:
1234                                                 pass
1235                         del blacklisted, cfg
1236
1237                         if target_root is None:
1238                                 target_root = "/"
1239
1240                         target_root = \
1241                                 normalize_path(target_root).rstrip(os.path.sep) + os.path.sep
1242
1243                         check_var_directory("ROOT", target_root)
1244
1245                         env_d = getconfig(
1246                                 os.path.join(target_root, "etc", "profile.env"), expand=False)
1247                         # env_d will be None if profile.env doesn't exist.
1248                         if env_d:
1249                                 self.configdict["env.d"].update(env_d)
1250                                 # Remove duplicate values so they don't override updated
1251                                 # profile.env values later (profile.env is reloaded in each
1252                                 # call to self.regenerate).
1253                                 for cfg in (self.configdict["backupenv"],
1254                                         self.configdict["env"]):
1255                                         for k, v in env_d.iteritems():
1256                                                 try:
1257                                                         if cfg[k] == v:
1258                                                                 del cfg[k]
1259                                                 except KeyError:
1260                                                         pass
1261                                 del cfg, k, v
1262
1263                         self["PORTAGE_CONFIGROOT"] = config_root
1264                         self.backup_changes("PORTAGE_CONFIGROOT")
1265                         self["ROOT"] = target_root
1266                         self.backup_changes("ROOT")
1267
1268                         self.pusedict = {}
1269                         self.pkeywordsdict = {}
1270                         self.punmaskdict = {}
1271                         abs_user_config = os.path.join(config_root,
1272                                 USER_CONFIG_PATH.lstrip(os.path.sep))
1273
1274                         # locations for "categories" and "arch.list" files
1275                         locations = [os.path.join(self["PORTDIR"], "profiles")]
1276                         pmask_locations = [os.path.join(self["PORTDIR"], "profiles")]
1277                         pmask_locations.extend(self.profiles)
1278
1279                         """ repoman controls PORTDIR_OVERLAY via the environment, so no
1280                         special cases are needed here."""
1281                         overlay_profiles = []
1282                         for ov in self["PORTDIR_OVERLAY"].split():
1283                                 ov = normalize_path(ov)
1284                                 profiles_dir = os.path.join(ov, "profiles")
1285                                 if os.path.isdir(profiles_dir):
1286                                         overlay_profiles.append(profiles_dir)
1287                         locations += overlay_profiles
1288                         
1289                         pmask_locations.extend(overlay_profiles)
1290
1291                         if local_config:
1292                                 locations.append(abs_user_config)
1293                                 pmask_locations.append(abs_user_config)
1294                                 pusedict = grabdict_package(
1295                                         os.path.join(abs_user_config, "package.use"), recursive=1)
1296                                 for key in pusedict.keys():
1297                                         cp = dep_getkey(key)
1298                                         if not self.pusedict.has_key(cp):
1299                                                 self.pusedict[cp] = {}
1300                                         self.pusedict[cp][key] = pusedict[key]
1301
1302                                 #package.keywords
1303                                 pkgdict = grabdict_package(
1304                                         os.path.join(abs_user_config, "package.keywords"),
1305                                         recursive=1)
1306                                 for key in pkgdict.keys():
1307                                         # default to ~arch if no specific keyword is given
1308                                         if not pkgdict[key]:
1309                                                 mykeywordlist = []
1310                                                 if self.configdict["defaults"] and self.configdict["defaults"].has_key("ACCEPT_KEYWORDS"):
1311                                                         groups = self.configdict["defaults"]["ACCEPT_KEYWORDS"].split()
1312                                                 else:
1313                                                         groups = []
1314                                                 for keyword in groups:
1315                                                         if not keyword[0] in "~-":
1316                                                                 mykeywordlist.append("~"+keyword)
1317                                                 pkgdict[key] = mykeywordlist
1318                                         cp = dep_getkey(key)
1319                                         if not self.pkeywordsdict.has_key(cp):
1320                                                 self.pkeywordsdict[cp] = {}
1321                                         self.pkeywordsdict[cp][key] = pkgdict[key]
1322
1323                                 #package.unmask
1324                                 pkgunmasklines = grabfile_package(
1325                                         os.path.join(abs_user_config, "package.unmask"),
1326                                         recursive=1)
1327                                 for x in pkgunmasklines:
1328                                         mycatpkg=dep_getkey(x)
1329                                         if self.punmaskdict.has_key(mycatpkg):
1330                                                 self.punmaskdict[mycatpkg].append(x)
1331                                         else:
1332                                                 self.punmaskdict[mycatpkg]=[x]
1333
1334                         #getting categories from an external file now
1335                         categories = [grabfile(os.path.join(x, "categories")) for x in locations]
1336                         self.categories = stack_lists(categories, incremental=1)
1337                         del categories
1338
1339                         archlist = [grabfile(os.path.join(x, "arch.list")) for x in locations]
1340                         archlist = stack_lists(archlist, incremental=1)
1341                         self.configdict["conf"]["PORTAGE_ARCHLIST"] = " ".join(archlist)
1342
1343                         #package.mask
1344                         pkgmasklines = []
1345                         for x in pmask_locations:
1346                                 pkgmasklines.append(grabfile_package(
1347                                         os.path.join(x, "package.mask"), recursive=1))
1348                         pkgmasklines = stack_lists(pkgmasklines, incremental=1)
1349
1350                         self.pmaskdict = {}
1351                         for x in pkgmasklines:
1352                                 mycatpkg=dep_getkey(x)
1353                                 if self.pmaskdict.has_key(mycatpkg):
1354                                         self.pmaskdict[mycatpkg].append(x)
1355                                 else:
1356                                         self.pmaskdict[mycatpkg]=[x]
1357
1358                         pkgprovidedlines = [grabfile(os.path.join(x, "package.provided")) for x in self.profiles]
1359                         pkgprovidedlines = stack_lists(pkgprovidedlines, incremental=1)
1360                         has_invalid_data = False
1361                         for x in range(len(pkgprovidedlines)-1, -1, -1):
1362                                 myline = pkgprovidedlines[x]
1363                                 if not isvalidatom("=" + myline):
1364                                         writemsg("Invalid package name in package.provided:" + \
1365                                                 " %s\n" % myline, noiselevel=-1)
1366                                         has_invalid_data = True
1367                                         del pkgprovidedlines[x]
1368                                         continue
1369                                 cpvr = catpkgsplit(pkgprovidedlines[x])
1370                                 if not cpvr or cpvr[0] == "null":
1371                                         writemsg("Invalid package name in package.provided: "+pkgprovidedlines[x]+"\n",
1372                                                 noiselevel=-1)
1373                                         has_invalid_data = True
1374                                         del pkgprovidedlines[x]
1375                                         continue
1376                                 if cpvr[0] == "virtual":
1377                                         writemsg("Virtual package in package.provided: %s\n" % \
1378                                                 myline, noiselevel=-1)
1379                                         has_invalid_data = True
1380                                         del pkgprovidedlines[x]
1381                                         continue
1382                         if has_invalid_data:
1383                                 writemsg("See portage(5) for correct package.provided usage.\n",
1384                                         noiselevel=-1)
1385                         self.pprovideddict = {}
1386                         for x in pkgprovidedlines:
1387                                 cpv=catpkgsplit(x)
1388                                 if not cpv:
1389                                         continue
1390                                 mycatpkg=dep_getkey(x)
1391                                 if self.pprovideddict.has_key(mycatpkg):
1392                                         self.pprovideddict[mycatpkg].append(x)
1393                                 else:
1394                                         self.pprovideddict[mycatpkg]=[x]
1395
1396                         # reasonable defaults; this is important as without USE_ORDER,
1397                         # USE will always be "" (nothing set)!
1398                         if "USE_ORDER" not in self:
1399                                 self.backupenv["USE_ORDER"] = "env:pkg:conf:defaults:pkginternal"
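                             # In regenerate() the layers named in USE_ORDER are applied from
                             # right to left, so the leftmost entry ("env") has the final say:
                             # e.g. a flag disabled in the environment overrides package.use
                             # ("pkg"), which in turn overrides make.conf ("conf").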
1400
1401                         self["PORTAGE_GID"] = str(portage_gid)
1402                         self.backup_changes("PORTAGE_GID")
1403
1404                         if self.get("PORTAGE_DEPCACHEDIR", None):
1405                                 self.depcachedir = self["PORTAGE_DEPCACHEDIR"]
1406                         self["PORTAGE_DEPCACHEDIR"] = self.depcachedir
1407                         self.backup_changes("PORTAGE_DEPCACHEDIR")
1408
1409                         overlays = self.get("PORTDIR_OVERLAY","").split()
1410                         if overlays:
1411                                 new_ov = []
1412                                 for ov in overlays:
1413                                         ov = normalize_path(ov)
1414                                         if os.path.isdir(ov):
1415                                                 new_ov.append(ov)
1416                                         else:
1417                                                 writemsg("!!! Invalid PORTDIR_OVERLAY" + \
1418                                                         " (not a dir): '%s'\n" % ov, noiselevel=-1)
1419                                 self["PORTDIR_OVERLAY"] = " ".join(new_ov)
1420                                 self.backup_changes("PORTDIR_OVERLAY")
1421
1422                         if "CBUILD" not in self and "CHOST" in self:
1423                                 self["CBUILD"] = self["CHOST"]
1424                                 self.backup_changes("CBUILD")
1425
1426                         self["PORTAGE_BIN_PATH"] = PORTAGE_BIN_PATH
1427                         self.backup_changes("PORTAGE_BIN_PATH")
1428                         self["PORTAGE_PYM_PATH"] = PORTAGE_PYM_PATH
1429                         self.backup_changes("PORTAGE_PYM_PATH")
1430
1431                         for var in ("PORTAGE_INST_UID", "PORTAGE_INST_GID"):
1432                                 try:
1433                                         self[var] = str(int(self.get(var, "0")))
1434                                 except ValueError:
1435                                         writemsg(("!!! %s='%s' is not a valid integer.  " + \
1436                                                 "Falling back to '0'.\n") % (var, self[var]),
1437                                                 noiselevel=-1)
1438                                         self[var] = "0"
1439                                 self.backup_changes(var)
1440
1441                         self.regenerate()
1442                         self.features = portage_util.unique_array(self["FEATURES"].split())
1443
1444                         if "gpg" in self.features:
1445                                 if not os.path.exists(self["PORTAGE_GPG_DIR"]) or \
1446                                         not os.path.isdir(self["PORTAGE_GPG_DIR"]):
1447                                         writemsg(colorize("BAD", "PORTAGE_GPG_DIR is invalid." + \
1448                                                 " Removing gpg from FEATURES.\n"), noiselevel=-1)
1449                                         self.features.remove("gpg")
1450
1451                         if not portage_exec.sandbox_capable and \
1452                                 ("sandbox" in self.features or "usersandbox" in self.features):
1453                                 if self.profile_path is not None and \
1454                                         os.path.realpath(self.profile_path) == \
1455                                         os.path.realpath(PROFILE_PATH):
1456                                         """ Don't show this warning when running repoman and the
1457                                         sandbox feature came from a profile that doesn't belong to
1458                                         the user."""
1459                                         writemsg(colorize("BAD", "!!! Problem with sandbox" + \
1460                                                 " binary. Disabling...\n\n"), noiselevel=-1)
1461                                 if "sandbox" in self.features:
1462                                         self.features.remove("sandbox")
1463                                 if "usersandbox" in self.features:
1464                                         self.features.remove("usersandbox")
1465
1466                         self.features.sort()
1467                         self["FEATURES"] = " ".join(self.features)
1468                         self.backup_changes("FEATURES")
1469
1470                         self._init_dirs()
1471
1472                 if mycpv:
1473                         self.setcpv(mycpv)
1474
1475         def _init_dirs(self):
1476                 """
1477                 Create a few directories that are critical to portage operation
1478                 """
1479                 if not os.access(self["ROOT"], os.W_OK):
1480                         return
1481
1482                 dir_mode_map = {
1483                         "tmp"             :(-1,          01777, 0),
1484                         "var/tmp"         :(-1,          01777, 0),
1485                         "var/lib/portage" :(portage_gid, 02750, 02),
1486                         "var/cache/edb"   :(portage_gid,  0755, 02)
1487                 }
1488
1489                 for mypath, (gid, mode, modemask) in dir_mode_map.iteritems():
1490                         try:
1491                                 mydir = os.path.join(self["ROOT"], mypath)
1492                                 portage_util.ensure_dirs(mydir, gid=gid, mode=mode, mask=modemask)
1493                         except portage_exception.PortageException, e:
1494                                 writemsg("!!! Directory initialization failed: '%s'\n" % mydir,
1495                                         noiselevel=-1)
1496                                 writemsg("!!! %s\n" % str(e),
1497                                         noiselevel=-1)
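                     # For reference, the modes in dir_mode_map use standard octal permission
                     # notation: 01777 is a sticky, world-writable directory (like /tmp), and
                     # 02750 is rwxr-s--- with the setgid bit so files created under
                     # var/lib/portage inherit the portage group.  The exact handling of the
                     # gid/mode/mask tuple is up to portage_util.ensure_dirs.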
1498
1499         def validate(self):
1500                 """Validate miscellaneous settings and display warnings if necessary.
1501                 (This code was previously in the global scope of portage.py)"""
1502
1503                 groups = self["ACCEPT_KEYWORDS"].split()
1504                 archlist = self.archlist()
1505                 if not archlist:
1506                         writemsg("--- 'profiles/arch.list' is empty or not available. Empty portage tree?\n")
1507                 else:
1508                         for group in groups:
1509                                 if group not in archlist and group[0] != '-':
1510                                         writemsg("!!! INVALID ACCEPT_KEYWORDS: %s\n" % str(group),
1511                                                 noiselevel=-1)
1512
1513                 abs_profile_path = os.path.join(self["PORTAGE_CONFIGROOT"],
1514                         PROFILE_PATH.lstrip(os.path.sep))
1515                 if not os.path.islink(abs_profile_path) and \
1516                         not os.path.exists(os.path.join(abs_profile_path, "parent")) and \
1517                         os.path.exists(os.path.join(self["PORTDIR"], "profiles")):
1518                         writemsg("\a\n\n!!! %s is not a symlink and will probably prevent most merges.\n" % abs_profile_path,
1519                                 noiselevel=-1)
1520                         writemsg("!!! It should point into a profile within %s/profiles/\n" % self["PORTDIR"])
1521                         writemsg("!!! (You can safely ignore this message when syncing. It's harmless.)\n\n\n")
1522
1523                 abs_user_virtuals = os.path.join(self["PORTAGE_CONFIGROOT"],
1524                         USER_VIRTUALS_FILE.lstrip(os.path.sep))
1525                 if os.path.exists(abs_user_virtuals):
1526                         writemsg("\n!!! /etc/portage/virtuals is deprecated in favor of\n")
1527                         writemsg("!!! /etc/portage/profile/virtuals. Please move it to\n")
1528                         writemsg("!!! this new location.\n\n")
1529
1530         def loadVirtuals(self,root):
1531                 """Not currently used by portage."""
1532                 writemsg("DEPRECATED: portage.config.loadVirtuals\n")
1533                 self.getvirtuals(root)
1534
1535         def load_best_module(self,property_string):
1536                 best_mod = best_from_dict(property_string,self.modules,self.module_priority)
1537                 try:
1538                         mod = load_mod(best_mod)
1539                 except ImportError:
1540                         dump_traceback(red("Error: Failed to import module '%s'") % best_mod, noiselevel=0)
1541                         sys.exit(1)
1542                 return mod
1543
1544         def lock(self):
1545                 self.locked = 1
1546
1547         def unlock(self):
1548                 self.locked = 0
1549
1550         def modifying(self):
1551                 if self.locked:
1552                         raise Exception, "Configuration is locked."
1553
1554         def backup_changes(self,key=None):
1555                 self.modifying()
1556                 if key and self.configdict["env"].has_key(key):
1557                         self.backupenv[key] = copy.deepcopy(self.configdict["env"][key])
1558                 else:
1559                         raise KeyError, "No such key defined in environment: %s" % key
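             # Typical (illustrative) pattern: values assigned via self[key] = value land
             # only in configdict["env"] and are discarded by reset(), while keys passed to
             # backup_changes() are copied into backupenv and therefore survive it, e.g.:
             #   settings["PORTAGE_BIN_PATH"] = PORTAGE_BIN_PATH
             #   settings.backup_changes("PORTAGE_BIN_PATH")
             # ("settings" stands for any config instance.)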
1560
1561         def reset(self,keeping_pkg=0,use_cache=1):
1562                 """
1563                 Restore environment from self.backupenv and call self.regenerate().
1564                 @param keeping_pkg: Should we keep the setcpv() data or delete it?
1565                 @type keeping_pkg: Boolean
1566                 @param use_cache: Should self.regenerate use the cache or not
1567                 @type use_cache: Boolean
1568                 @rtype: None
1569                 """
1570                 self.modifying()
1571                 self.configdict["env"].clear()
1572                 self.configdict["env"].update(self.backupenv)
1573
1574                 self.modifiedkeys = []
1575                 if not keeping_pkg:
1576                         self.mycpv = None
1577                         self.puse = ""
1578                         self.configdict["pkg"].clear()
1579                         self.configdict["pkginternal"].clear()
1580                         self.configdict["defaults"]["USE"] = \
1581                                 " ".join(self.make_defaults_use)
1582                         self.usemask  = set(stack_lists(
1583                                 self.usemask_list, incremental=True))
1584                         self.useforce  = set(stack_lists(
1585                                 self.useforce_list, incremental=True))
1586                 self.regenerate(use_cache=use_cache)
1587
1588         def load_infodir(self,infodir):
1589                 self.modifying()
1590                 if self.configdict.has_key("pkg"):
1591                         for x in self.configdict["pkg"].keys():
1592                                 del self.configdict["pkg"][x]
1593                 else:
1594                         writemsg("No pkg setup for settings instance?\n",
1595                                 noiselevel=-1)
1596                         sys.exit(17)
1597
1598                 if os.path.exists(infodir):
1599                         if os.path.exists(infodir+"/environment"):
1600                                 self.configdict["pkg"]["PORT_ENV_FILE"] = infodir+"/environment"
1601
1602                         myre = re.compile('^[A-Z]+$')
1603                         null_byte = "\0"
1604                         for filename in listdir(infodir,filesonly=1,EmptyOnError=1):
1605                                 if myre.match(filename):
1606                                         try:
1607                                                 file_path = os.path.join(infodir, filename)
1608                                                 mydata = open(file_path).read().strip()
1609                                                 if len(mydata) < 2048 or filename == "USE":
1610                                                         if null_byte in mydata:
1611                                                                 writemsg("!!! Null byte found in metadata " + \
1612                                                                         "file: '%s'\n" % file_path, noiselevel=-1)
1613                                                                 continue
1614                                                         if filename == "USE":
1615                                                                 binpkg_flags = "-* " + mydata
1616                                                                 self.configdict["pkg"][filename] = binpkg_flags
1617                                                                 self.configdict["env"][filename] = mydata
1618                                                         else:
1619                                                                 self.configdict["pkg"][filename] = mydata
1620                                                                 self.configdict["env"][filename] = mydata
1621                                                 # CATEGORY is important because it's used in doebuild
1622                                                 # to infer the cpv.  If it's corrupted, it leads to
1623                                                 # strange errors later on, so we'll validate it and
1624                                                 # print a warning if necessary.
1625                                                 if filename == "CATEGORY":
1626                                                         matchobj = re.match("[-a-zA-Z0-9_.+]+", mydata)
1627                                                         if not matchobj or matchobj.start() != 0 or \
1628                                                                 matchobj.end() != len(mydata):
1629                                                                 writemsg("!!! CATEGORY file is corrupt: %s\n" % \
1630                                                                         os.path.join(infodir, filename), noiselevel=-1)
1631                                         except (OSError, IOError):
1632                                                 writemsg("!!! Unable to read file: %s\n" % (infodir+"/"+filename),
1633                                                         noiselevel=-1)
1634                                                 pass
1635                         return 1
1636                 return 0
1637
1638         def setcpv(self, mycpv, use_cache=1, mydb=None):
1639                 """
1640                 Load a particular CPV into the config; this lets us see the
1641                 default USE flags for a particular ebuild as well as the USE
1642                 flags from package.use.
1643
1644                 @param mycpv: A cpv to load
1645                 @type mycpv: string
1646                 @param use_cache: Enables caching
1647                 @type use_cache: Boolean
1648                 @param mydb: a dbapi instance that supports aux_get with the IUSE key.
1649                 @type mydb: dbapi or derivative.
1650                 @rtype: None
1651                 """
1652
1653                 self.modifying()
1654                 if self.mycpv == mycpv:
1655                         return
1656                 has_changed = False
1657                 self.mycpv = mycpv
1658                 cp = dep_getkey(mycpv)
1659                 pkginternaluse = ""
1660                 if mydb:
1661                         pkginternaluse = " ".join([x[1:] \
1662                                 for x in mydb.aux_get(mycpv, ["IUSE"])[0].split() \
1663                                 if x.startswith("+")])
1664                 if pkginternaluse != self.configdict["pkginternal"].get("USE", ""):
1665                         self.configdict["pkginternal"]["USE"] = pkginternaluse
1666                         has_changed = True
1667                 defaults = []
1668                 for i in xrange(len(self.profiles)):
1669                         defaults.append(self.make_defaults_use[i])
1670                         cpdict = self.pkgprofileuse[i].get(cp, None)
1671                         if cpdict:
1672                                 best_match = best_match_to_list(self.mycpv, cpdict.keys())
1673                                 if best_match:
1674                                         defaults.append(cpdict[best_match])
1675                 defaults = " ".join(defaults)
1676                 if defaults != self.configdict["defaults"].get("USE",""):
1677                         self.configdict["defaults"]["USE"] = defaults
1678                         has_changed = True
1679                 useforce = []
1680                 for i in xrange(len(self.profiles)):
1681                         useforce.append(self.useforce_list[i])
1682                         cpdict = self.puseforce_list[i].get(cp, None)
1683                         if cpdict:
1684                                 best_match = best_match_to_list(self.mycpv, cpdict.keys())
1685                                 if best_match:
1686                                         useforce.append(cpdict[best_match])
1687                 useforce = set(stack_lists(useforce, incremental=True))
1688                 if useforce != self.useforce:
1689                         self.useforce = useforce
1690                         has_changed = True
1691                 usemask = []
1692                 for i in xrange(len(self.profiles)):
1693                         usemask.append(self.usemask_list[i])
1694                         cpdict = self.pusemask_list[i].get(cp, None)
1695                         if cpdict:
1696                                 best_match = best_match_to_list(self.mycpv, cpdict.keys())
1697                                 if best_match:
1698                                         usemask.append(cpdict[best_match])
1699                 usemask = set(stack_lists(usemask, incremental=True))
1700                 if usemask != self.usemask:
1701                         self.usemask = usemask
1702                         has_changed = True
1703                 oldpuse = self.puse
1704                 self.puse = ""
1705                 if self.pusedict.has_key(cp):
1706                         self.pusekey = best_match_to_list(self.mycpv, self.pusedict[cp].keys())
1707                         if self.pusekey:
1708                                 self.puse = " ".join(self.pusedict[cp][self.pusekey])
1709                 if oldpuse != self.puse:
1710                         has_changed = True
1711                 self.configdict["pkg"]["PKGUSE"] = self.puse[:] # For saving to PUSE file
1712                 self.configdict["pkg"]["USE"]    = self.puse[:] # this gets appended to USE
1713                 # CATEGORY is essential for doebuild calls
1714                 self.configdict["pkg"]["CATEGORY"] = mycpv.split("/")[0]
1715                 if has_changed:
1716                         self.reset(keeping_pkg=1,use_cache=use_cache)
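             # A rough usage sketch (names are illustrative; "portdb" would be a portdbapi
             # instance whose aux_get supports the IUSE key):
             #   settings.setcpv("app-editors/vim-7.0", mydb=portdb)
             #   settings["USE"]  # now reflects package.use, the profile package.use.*
             #                    # entries and any "+flag" IUSE defaults for that cpv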
1717
1718         def setinst(self,mycpv,mydbapi):
1719                 self.modifying()
1720                 if len(self.virtuals) == 0:
1721                         self.getvirtuals()
1722                 # Grab the virtuals this package provides and add them into the tree virtuals.
1723                 provides = mydbapi.aux_get(mycpv, ["PROVIDE"])[0]
1724                 if isinstance(mydbapi, portdbapi):
1725                         myuse = self["USE"]
1726                 else:
1727                         myuse = mydbapi.aux_get(mycpv, ["USE"])[0]
1728                 virts = flatten(portage_dep.use_reduce(portage_dep.paren_reduce(provides), uselist=myuse.split()))
1729
1730                 cp = dep_getkey(mycpv)
1731                 for virt in virts:
1732                         virt = dep_getkey(virt)
1733                         if not self.treeVirtuals.has_key(virt):
1734                                 self.treeVirtuals[virt] = []
1735                         # XXX: Is this bad? -- It's a permanent modification
1736                         if cp not in self.treeVirtuals[virt]:
1737                                 self.treeVirtuals[virt].append(cp)
1738
1739                 self.virtuals = self.__getvirtuals_compile()
1740
1741
1742         def regenerate(self,useonly=0,use_cache=1):
1743                 """
1744                 Regenerate settings
1745                 This involves regenerating valid USE flags, re-expanding USE_EXPAND flags,
1746                 re-stacking USE flags (-flag and -*), as well as any other incremental
1747                 variables.  This also updates the env.d configdict; useful in case an ebuild
1748                 changes the environment.
1749
1750                 If FEATURES has already been stacked, it is not stacked twice.
1751
1752                 @param useonly: Only regenerate USE flags (not any other incrementals)
1753                 @type useonly: Boolean
1754                 @param use_cache: Enable Caching (only for autouse)
1755                 @type use_cache: Boolean
1756                 @rtype: None
1757                 """
1758
1759                 self.modifying()
1760                 if self.already_in_regenerate:
1761                         # XXX: THIS REALLY NEEDS TO GET FIXED. autouse() loops.
1762                         writemsg("!!! Looping in regenerate.\n",1)
1763                         return
1764                 else:
1765                         self.already_in_regenerate = 1
1766
1767                 # We grab the latest profile.env here since it changes frequently.
1768                 self.configdict["env.d"].clear()
1769                 env_d = getconfig(
1770                         os.path.join(self["ROOT"], "etc", "profile.env"), expand=False)
1771                 if env_d:
1772                         # env_d will be None if profile.env doesn't exist.
1773                         self.configdict["env.d"].update(env_d)
1774
1775                 if useonly:
1776                         myincrementals=["USE"]
1777                 else:
1778                         myincrementals = self.incrementals
1779                 myincrementals = set(myincrementals)
1780                 # If self.features exists, it has already been stacked and may have
1781                 # been mutated, so don't stack it again or else any mutations will be
1782                 # reverted.
1783                 if "FEATURES" in myincrementals and hasattr(self, "features"):
1784                         myincrementals.remove("FEATURES")
1785
1786                 if "USE" in myincrementals:
1787                         # Process USE last because it depends on USE_EXPAND which is also
1788                         # an incremental!
1789                         myincrementals.remove("USE")
1790
1791                 for mykey in myincrementals:
1792
1793                         mydbs=self.configlist[:-1]
1794
1795                         myflags=[]
1796                         for curdb in mydbs:
1797                                 if mykey not in curdb:
1798                                         continue
1799                                 #variables are already expanded
1800                                 mysplit = curdb[mykey].split()
1801
1802                                 for x in mysplit:
1803                                         if x=="-*":
1804                                                 # "-*" is a special "minus" var that means "unset all settings".
1805                                                 # so USE="-* gnome" will have *just* gnome enabled.
1806                                                 myflags = []
1807                                                 continue
1808
1809                                         if x[0]=="+":
1810                                                 # Not legal. People assume too much. Complain.
1811                                                 writemsg(red("USE flags should not start with a '+': %s\n" % x),
1812                                                         noiselevel=-1)
1813                                                 x=x[1:]
1814                                                 if not x:
1815                                                         continue
1816
1817                                         if (x[0]=="-"):
1818                                                 if (x[1:] in myflags):
1819                                                         # Unset/Remove it.
1820                                                         del myflags[myflags.index(x[1:])]
1821                                                 continue
1822
1823                                         # We got here, so add it now.
1824                                         if x not in myflags:
1825                                                 myflags.append(x)
1826
1827                         myflags.sort()
1828                         #store setting in last element of configlist, the original environment:
1829                         if myflags or mykey in self:
1830                                 self.configlist[-1][mykey] = " ".join(myflags)
1831                         del myflags
1832
1833                 # Do the USE calculation last because it depends on USE_EXPAND.
1834                 if "auto" in self["USE_ORDER"].split(":"):
1835                         self.configdict["auto"]["USE"] = autouse(
1836                                 vartree(root=self["ROOT"], categories=self.categories,
1837                                         settings=self),
1838                                 use_cache=use_cache, mysettings=self)
1839                 else:
1840                         self.configdict["auto"]["USE"] = ""
1841
1842                 use_expand_protected = []
1843                 use_expand = self.get("USE_EXPAND", "").split()
1844                 for var in use_expand:
1845                         var_lower = var.lower()
1846                         for x in self.get(var, "").split():
1847                                 # Any incremental USE_EXPAND variables have already been
1848                                 # processed, so leading +/- operators are invalid here.
1849                                 if x[0] == "+":
1850                                         writemsg(colorize("BAD", "Invalid '+' operator in " + \
1851                                                 "non-incremental variable '%s': '%s'\n" % (var, x)),
1852                                                 noiselevel=-1)
1853                                         x = x[1:]
1854                                 if x[0] == "-":
1855                                         writemsg(colorize("BAD", "Invalid '-' operator in " + \
1856                                                 "non-incremental variable '%s': '%s'\n" % (var, x)),
1857                                                 noiselevel=-1)
1858                                         continue
1859                                 mystr = var_lower + "_" + x
1860                                 if mystr not in use_expand_protected:
1861                                         use_expand_protected.append(mystr)
1862
1863                 if not self.uvlist:
1864                         for x in self["USE_ORDER"].split(":"):
1865                                 if x in self.configdict:
1866                                         self.uvlist.append(self.configdict[x])
1867                         self.uvlist.reverse()
1868
1869                 myflags = use_expand_protected[:]
1870                 for curdb in self.uvlist:
1871                         if "USE" not in curdb:
1872                                 continue
1873                         mysplit = curdb["USE"].split()
1874                         for x in mysplit:
1875                                 if x == "-*":
1876                                         myflags = use_expand_protected[:]
1877                                         continue
1878
1879                                 if x[0] == "+":
1880                                         writemsg(colorize("BAD", "USE flags should not start " + \
1881                                                 "with a '+': %s\n" % x), noiselevel=-1)
1882                                         x = x[1:]
1883                                         if not x:
1884                                                 continue
1885
1886                                 if x[0] == "-":
1887                                         try:
1888                                                 myflags.remove(x[1:])
1889                                         except ValueError:
1890                                                 pass
1891                                         continue
1892
1893                                 if x not in myflags:
1894                                         myflags.append(x)
1895
1896                 myflags = set(myflags)
1897                 myflags.update(self.useforce)
1898
1899                 # FEATURES=test should imply USE=test
1900                 if "test" in self.configlist[-1].get("FEATURES","").split():
1901                         myflags.add("test")
1902
1903                 usesplit = [ x for x in myflags if \
1904                         x not in self.usemask]
1905
1906                 usesplit.sort()
1907
1908                 # Use the calculated USE flags to regenerate the USE_EXPAND flags so
1909                 # that they are consistent.
1910                 for var in use_expand:
1911                         prefix = var.lower() + "_"
1912                         prefix_len = len(prefix)
1913                         expand_flags = set([ x[prefix_len:] for x in usesplit \
1914                                 if x.startswith(prefix) ])
1915                         var_split = self.get(var, "").split()
1916                         # Preserve the order of var_split because it can matter for things
1917                         # like LINGUAS.
1918                         var_split = [ x for x in var_split if x in expand_flags ]
1919                         var_split.extend(expand_flags.difference(var_split))
1920                         if var_split or var in self:
1921                                 # Don't export empty USE_EXPAND vars unless the user config
1922                                 # exports them as empty.  This is required for vars such as
1923                                 # LINGUAS, where unset and empty have different meanings.
1924                                 self[var] = " ".join(var_split)
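                     # Example of the round trip performed just above (hypothetical values):
                     # with USE_EXPAND="LINGUAS" and LINGUAS="de en" in make.conf, the
                     # expansion step earlier injected "linguas_de linguas_en" into USE, and
                     # this loop rebuilds LINGUAS from whatever linguas_* flags survived
                     # use.mask, keeping the original "de en" ordering.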
1925
1926                 # Prepend ARCH variable to USE settings so '-*' in env doesn't kill arch.
1927                 if self.configdict["defaults"].has_key("ARCH"):
1928                         if self.configdict["defaults"]["ARCH"]:
1929                                 if self.configdict["defaults"]["ARCH"] not in usesplit:
1930                                         usesplit.insert(0,self.configdict["defaults"]["ARCH"])
1931
1932                 self.configlist[-1]["USE"]= " ".join(usesplit)
1933
1934                 self.already_in_regenerate = 0
1935
1936         def get_virts_p(self, myroot):
1937                 if self.virts_p:
1938                         return self.virts_p
1939                 virts = self.getvirtuals(myroot)
1940                 if virts:
1941                         myvkeys = virts.keys()
1942                         for x in myvkeys:
1943                                 vkeysplit = x.split("/")
1944                                 if not self.virts_p.has_key(vkeysplit[1]):
1945                                         self.virts_p[vkeysplit[1]] = virts[x]
1946                 return self.virts_p
1947
1948         def getvirtuals(self, myroot=None):
1949                 """myroot is now ignored because, due to caching, it has always been
1950                 broken for all but the first call."""
1951                 myroot = self["ROOT"]
1952                 if self.virtuals:
1953                         return self.virtuals
1954
1955                 virtuals_list = []
1956                 for x in self.profiles:
1957                         virtuals_file = os.path.join(x, "virtuals")
1958                         virtuals_dict = grabdict(virtuals_file)
1959                         for k in virtuals_dict.keys():
1960                                 if not isvalidatom(k) or dep_getkey(k) != k:
1961                                         writemsg("--- Invalid virtuals atom in %s: %s\n" % \
1962                                                 (virtuals_file, k), noiselevel=-1)
1963                                         del virtuals_dict[k]
1964                                         continue
1965                                 myvalues = virtuals_dict[k]
1966                                 for x in myvalues[:]:
1967                                         myatom = x
1968                                         if x.startswith("-"):
1969                                                 # allow incrementals
1970                                                 myatom = x[1:]
1971                                         if not isvalidatom(myatom):
1972                                                 writemsg("--- Invalid atom in %s: %s\n" % \
1973                                                         (virtuals_file, x), noiselevel=-1)
1974                                                 myvalues.remove(x)
1975                                 if not myvalues:
1976                                         del virtuals_dict[k]
1977                         if virtuals_dict:
1978                                 virtuals_list.append(virtuals_dict)
1979
1980                 self.dirVirtuals = stack_dictlist(virtuals_list, incremental=True)
1981                 del virtuals_list
1982
1983                 for virt in self.dirVirtuals:
1984                         # Preference for virtuals decreases from left to right.
1985                         self.dirVirtuals[virt].reverse()
1986
1987                 # Repoman does not use user or tree virtuals.
1988                 if self.local_config and not self.treeVirtuals:
1989                         temp_vartree = vartree(myroot, None,
1990                                 categories=self.categories, settings=self)
1991                         # Reduce the provides into a list by CP.
1992                         self.treeVirtuals = map_dictlist_vals(getCPFromCPV,temp_vartree.get_all_provides())
1993
1994                 self.virtuals = self.__getvirtuals_compile()
1995                 return self.virtuals
1996
1997         def __getvirtuals_compile(self):
1998                 """Stack installed and profile virtuals.  Preference for virtuals
1999                 decreases from left to right.
2000                 Order of preference:
2001                 1. installed and in profile
2002                 2. installed only
2003                 3. profile only
2004                 """
2005
2006                 # Virtuals by profile+tree preferences.
2007                 ptVirtuals   = {}
2008
2009                 for virt, installed_list in self.treeVirtuals.iteritems():
2010                         profile_list = self.dirVirtuals.get(virt, None)
2011                         if not profile_list:
2012                                 continue
2013                         for cp in installed_list:
2014                                 if cp in profile_list:
2015                                         ptVirtuals.setdefault(virt, [])
2016                                         ptVirtuals[virt].append(cp)
2017
2018                 virtuals = stack_dictlist([ptVirtuals, self.treeVirtuals,
2019                         self.dirVirtuals])
2020                 return virtuals
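             # Illustrative example of the ordering above (hypothetical data): if
             # treeVirtuals is {"virtual/editor": ["app-editors/nano"]} and dirVirtuals
             # holds {"virtual/editor": ["app-editors/vim", "app-editors/nano"]}, then
             # nano is both installed and in the profile, so it sorts ahead of vim in
             # virtuals["virtual/editor"].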
2021
2022         def __delitem__(self,mykey):
2023                 self.modifying()
2024                 for x in self.lookuplist:
2025                         if x != None:
2026                                 if mykey in x:
2027                                         del x[mykey]
2028
2029         def __getitem__(self,mykey):
2030                 match = ''
2031                 for x in self.lookuplist:
2032                         if x is None:
2033                                 writemsg("!!! lookuplist is null.\n")
2034                         elif x.has_key(mykey):
2035                                 match = x[mykey]
2036                                 break
2037                 return match
2038
2039         def has_key(self,mykey):
2040                 for x in self.lookuplist:
2041                         if x.has_key(mykey):
2042                                 return 1
2043                 return 0
2044
2045         def __contains__(self, mykey):
2046                 """Called to implement membership test operators (in and not in)."""
2047                 return bool(self.has_key(mykey))
2048
2049         def setdefault(self, k, x=None):
2050                 if k in self:
2051                         return self[k]
2052                 else:
2053                         self[k] = x
2054                         return x
2055
2056         def get(self, k, x=None):
2057                 if k in self:
2058                         return self[k]
2059                 else:
2060                         return x
2061
2062         def keys(self):
2063                 return unique_array(flatten([x.keys() for x in self.lookuplist]))
2064
2065         def __setitem__(self,mykey,myvalue):
2066                 "set a value; will be thrown away at reset() time"
2067                 if type(myvalue) != types.StringType:
2068                         raise ValueError("Invalid type being used as a value: '%s': '%s'" % (str(mykey),str(myvalue)))
2069                 self.modifying()
2070                 self.modifiedkeys += [mykey]
2071                 self.configdict["env"][mykey]=myvalue
2072
2073         def environ(self):
2074                 "return our locally-maintained environment"
2075                 mydict={}
2076                 for x in self.keys():
2077                         myvalue = self[x]
2078                         if not isinstance(myvalue, basestring):
2079                                 writemsg("!!! Non-string value in config: %s=%s\n" % \
2080                                         (x, myvalue), noiselevel=-1)
2081                                 continue
2082                         mydict[x] = myvalue
2083                 if not mydict.has_key("HOME") and mydict.has_key("BUILD_PREFIX"):
2084                         writemsg("*** HOME not set. Setting to "+mydict["BUILD_PREFIX"]+"\n")
2085                         mydict["HOME"]=mydict["BUILD_PREFIX"][:]
2086
2087                 return mydict
2088
2089         def thirdpartymirrors(self):
2090                 if getattr(self, "_thirdpartymirrors", None) is None:
2091                         profileroots = [os.path.join(self["PORTDIR"], "profiles")]
2092                         for x in self["PORTDIR_OVERLAY"].split():
2093                                 profileroots.insert(0, os.path.join(x, "profiles"))
2094                         thirdparty_lists = [grabdict(os.path.join(x, "thirdpartymirrors")) for x in profileroots]
2095                         self._thirdpartymirrors = stack_dictlist(thirdparty_lists, incremental=True)
2096                 return self._thirdpartymirrors
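             # Each profiles/thirdpartymirrors line is "<alias> <uri> <uri> ...", so a
             # hypothetical entry like
             #   sourceforge http://downloads.sourceforge.net/ http://mirror.example/sf/
             # shows up here roughly as
             #   {"sourceforge": ["http://downloads.sourceforge.net/",
             #           "http://mirror.example/sf/"]}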
2097
2098         def archlist(self):
2099                 return flatten([[myarch, "~" + myarch] \
2100                         for myarch in self["PORTAGE_ARCHLIST"].split()])
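                # Example (illustrative): with PORTAGE_ARCHLIST="x86 amd64" the
                # list above comes out as ["x86", "~x86", "amd64", "~amd64"].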
2101
2102         def selinux_enabled(self):
2103                 if getattr(self, "_selinux_enabled", None) is None:
2104                         self._selinux_enabled = 0
2105                         if "selinux" in self["USE"].split():
2106                                 if "selinux" in globals():
2107                                         if selinux.is_selinux_enabled() == 1:
2108                                                 self._selinux_enabled = 1
2109                                         else:
2110                                                 self._selinux_enabled = 0
2111                                 else:
2112                                         writemsg("!!! SELinux module not found. Please verify that it was installed.\n",
2113                                                 noiselevel=-1)
2114                                         self._selinux_enabled = 0
2115                         if self._selinux_enabled == 0:
2116                                 try:    
2117                                         del sys.modules["selinux"]
2118                                 except KeyError:
2119                                         pass
2120                 return self._selinux_enabled
2121
2122 # XXX This would be to replace getstatusoutput completely.
2123 # XXX Issue: cannot block execution. Deadlock condition.
2124 def spawn(mystring, mysettings, debug=0, free=0, droppriv=0, sesandbox=0, **keywords):
2125         """
2126         Spawn a subprocess with extra portage-specific options.
2127         Options include:
2128
2129         Sandbox: Sandbox means the spawned process will be limited in its ability to
2130         read and write files (normally this means it is restricted to ${IMAGE}/)
2131         SELinux Sandbox: Enables sandboxing on SELinux
2132         Reduced Privileges: Drops privileges such that the process runs as portage:portage
2133         instead of as root.
2134
2135         Notes: os.system cannot be used because it messes with signal handling.  Instead we
2136         use the portage_exec spawn* family of functions.
2137
2138         This function waits for the process to terminate.
2139
2140         @param mystring: Command to run
2141         @type mystring: String
2142         @param mysettings: Either a Dict of Key,Value pairs or an instance of portage.config
2143         @type mysettings: Dictionary or config instance
2144         @param debug: Ignored
2145         @type debug: Boolean
2146         @param free: Skip sandboxing for this process (run it outside the sandbox)
2147         @type free: Boolean
2148         @param droppriv: Drop to portage:portage when running this command
2149         @type droppriv: Boolean
2150         @param sesandbox: Enable SELinux Sandboxing (toggles a context switch)
2151         @type sesandbox: Boolean
2152         @param keywords: Extra options encoded as a dict, to be passed to spawn
2153         @type keywords: Dictionary
2154         @rtype: Integer
2155         @returns:
2156         The return code of the spawned process.
2157         """
2158
2159         if type(mysettings) == types.DictType:
2160                 env=mysettings
2161                 keywords["opt_name"]="[ %s ]" % "portage"
2162         else:
2163                 check_config_instance(mysettings)
2164                 env=mysettings.environ()
2165                 keywords["opt_name"]="[%s]" % mysettings["PF"]
2166
2167         features = mysettings.features
2168         # XXX: Negative RESTRICT word
2169         droppriv=(droppriv and ("userpriv" in features) and not \
2170                 (("nouserpriv" in mysettings["RESTRICT"].split()) or \
2171                  ("userpriv" in mysettings["RESTRICT"].split())))
2172
2173         if droppriv and not uid and portage_gid and portage_uid:
2174                 keywords.update({"uid":portage_uid,"gid":portage_gid,"groups":userpriv_groups,"umask":002})
2175
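        # "free" (no sandbox) is forced when the sandbox would be redundant:
        # either privileges were dropped and "usersandbox" is not in FEATURES,
        # or privileges were kept and neither "sandbox" nor "usersandbox" is
        # in FEATURES.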
2176         if not free:
2177                 free=((droppriv and "usersandbox" not in features) or \
2178                         (not droppriv and "sandbox" not in features and "usersandbox" not in features))
2179
2180         if free:
2181                 keywords["opt_name"] += " bash"
2182                 spawn_func = portage_exec.spawn_bash
2183         else:
2184                 keywords["opt_name"] += " sandbox"
2185                 spawn_func = portage_exec.spawn_sandbox
2186
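        # With sesandbox, the SELinux exec context is temporarily switched from
        # PORTAGE_T to PORTAGE_SANDBOX_T for the spawned process and restored
        # (setexec(None)) once it has exited.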
2187         if sesandbox:
2188                 con = selinux.getcontext()
2189                 con = con.replace(mysettings["PORTAGE_T"], mysettings["PORTAGE_SANDBOX_T"])
2190                 selinux.setexec(con)
2191
2192         retval = spawn_func(mystring, env=env, **keywords)
2193
2194         if sesandbox:
2195                 selinux.setexec(None)
2196
2197         return retval
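
# Illustrative usage of spawn() (a sketch; assumes the module-level `settings`
# config object has been fully initialized):
#
#     retval = spawn("echo hello", settings, free=1)
#     if retval != os.EX_OK:
#         writemsg("!!! command failed with status %d\n" % retval, noiselevel=-1)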
2198
2199 def fetch(myuris, mysettings, listonly=0, fetchonly=0, locks_in_subdir=".locks",use_locks=1, try_mirrors=1):
2200         "fetch files.  Will use digest file if available."
2201
2202         features = mysettings.features
2203         # 'nomirror' is bad/negative logic. You restrict mirroring, not no-mirroring.
2204         if ("mirror" in mysettings["RESTRICT"].split()) or \
2205            ("nomirror" in mysettings["RESTRICT"].split()):
2206                 if ("mirror" in features) and ("lmirror" not in features):
2207                         # lmirror should allow you to bypass mirror restrictions.
2208                         # XXX: This is not a good thing, and is temporary at best.
2209                         print ">>> \"mirror\" mode desired and \"mirror\" restriction found; skipping fetch."
2210                         return 1
2211
2212         thirdpartymirrors = mysettings.thirdpartymirrors()
2213
2214         check_config_instance(mysettings)
2215
2216         custommirrors = grabdict(os.path.join(mysettings["PORTAGE_CONFIGROOT"],
2217                 CUSTOM_MIRRORS_FILE.lstrip(os.path.sep)), recursive=1)
2218
2219         mymirrors=[]
2220
2221         if listonly or ("distlocks" not in features):
2222                 use_locks = 0
2223
2224         fetch_to_ro = 0
2225         if "skiprocheck" in features:
2226                 fetch_to_ro = 1
2227
2228         if not os.access(mysettings["DISTDIR"],os.W_OK) and fetch_to_ro:
2229                 if use_locks:
2230                         writemsg(red("!!! For fetching to a read-only filesystem, " + \
2231                                 "locking should be turned off.\n"), noiselevel=-1)
2232                         writemsg("!!! This can be done by adding -distlocks to " + \
2233                                 "FEATURES in /etc/make.conf\n", noiselevel=-1)
2234 #                       use_locks = 0
2235
2236         # local mirrors are always added
2237         if custommirrors.has_key("local"):
2238                 mymirrors += custommirrors["local"]
2239
2240         if ("nomirror" in mysettings["RESTRICT"].split()) or \
2241            ("mirror"   in mysettings["RESTRICT"].split()):
2242                 # We don't add any mirrors.
2243                 pass
2244         else:
2245                 if try_mirrors:
2246                         mymirrors += [x.rstrip("/") for x in mysettings["GENTOO_MIRRORS"].split() if x]
2247
2248         mydigests = Manifest(
2249                 mysettings["O"], mysettings["DISTDIR"]).getTypeDigests("DIST")
2250
2251         fsmirrors = []
2252         for x in range(len(mymirrors)-1,-1,-1):
2253                 if mymirrors[x] and mymirrors[x][0]=='/':
2254                         fsmirrors += [mymirrors[x]]
2255                         del mymirrors[x]
2256
2257         restrict_fetch = "fetch" in mysettings["RESTRICT"].split()
2258         custom_local_mirrors = custommirrors.get("local", [])
2259         if restrict_fetch:
2260                 # With fetch restriction, a normal uri may only be fetched from
2261                 # custom local mirrors (if available).  A mirror:// uri may also
2262                 # be fetched from specific mirrors (effectively overriding fetch
2263                 # restriction, but only for specific mirrors).
2264                 locations = custom_local_mirrors
2265         else:
2266                 locations = mymirrors
2267
2268         filedict={}
2269         primaryuri_indexes={}
2270         for myuri in myuris:
2271                 myfile=os.path.basename(myuri)
2272                 if not filedict.has_key(myfile):
2273                         filedict[myfile]=[]
2274                         for y in range(0,len(locations)):
2275                                 filedict[myfile].append(locations[y]+"/distfiles/"+myfile)
2276                 if myuri[:9]=="mirror://":
2277                         eidx = myuri.find("/", 9)
2278                         if eidx != -1:
2279                                 mirrorname = myuri[9:eidx]
2280
2281                                 # Try user-defined mirrors first
2282                                 if custommirrors.has_key(mirrorname):
2283                                         for cmirr in custommirrors[mirrorname]:
2284                                                 filedict[myfile].append(cmirr+"/"+myuri[eidx+1:])
2285                                                 # remove the mirrors we tried from the list of official mirrors
2286                                                 if cmirr.strip() in thirdpartymirrors[mirrorname]:
2287                                                         thirdpartymirrors[mirrorname].remove(cmirr)
2288                                 # now try the official mirrors
2289                                 if thirdpartymirrors.has_key(mirrorname):
2290                                         shuffle(thirdpartymirrors[mirrorname])
2291
2292                                         for locmirr in thirdpartymirrors[mirrorname]:
2293                                                 filedict[myfile].append(locmirr+"/"+myuri[eidx+1:])
2294
2295                                 if not filedict[myfile]:
2296                                         writemsg("No known mirror by the name: %s\n" % (mirrorname))
2297                         else:
2298                                 writemsg("Invalid mirror definition in SRC_URI:\n", noiselevel=-1)
2299                                 writemsg("  %s\n" % (myuri), noiselevel=-1)
2300                 else:
2301                         if restrict_fetch:
2302                                 # Only fetching from specific mirrors is allowed.
2303                                 continue
2304                         if "primaryuri" in mysettings["RESTRICT"].split():
2305                                 # Use the source site first.
2306                                 if primaryuri_indexes.has_key(myfile):
2307                                         primaryuri_indexes[myfile] += 1
2308                                 else:
2309                                         primaryuri_indexes[myfile] = 0
2310                                 filedict[myfile].insert(primaryuri_indexes[myfile], myuri)
2311                         else:
2312                                 filedict[myfile].append(myuri)
2313
2314         can_fetch=True
2315
2316         if listonly:
2317                 can_fetch = False
2318
2319         for var_name in ("FETCHCOMMAND", "RESUMECOMMAND"):
2320                 if not mysettings.get(var_name, None):
2321                         can_fetch = False
2322
2323         if can_fetch:
2324                 dirmode  = 02070
2325                 filemode =   060
2326                 modemask =    02
2327                 distdir_dirs = [""]
2328                 if "distlocks" in features:
2329                         distdir_dirs.append(".locks")
2330                 try:
2331                         
2332                         for x in distdir_dirs:
2333                                 mydir = os.path.join(mysettings["DISTDIR"], x)
2334                                 if portage_util.ensure_dirs(mydir, gid=portage_gid, mode=dirmode, mask=modemask):
2335                                         writemsg("Adjusting permissions recursively: '%s'\n" % mydir,
2336                                                 noiselevel=-1)
2337                                         def onerror(e):
2338                                                 raise # bail out on the first error that occurs during recursion
2339                                         if not apply_recursive_permissions(mydir,
2340                                                 gid=portage_gid, dirmode=dirmode, dirmask=modemask,
2341                                                 filemode=filemode, filemask=modemask, onerror=onerror):
2342                                                 raise portage_exception.OperationNotPermitted(
2343                                                         "Failed to apply recursive permissions for the portage group.")
2344                 except portage_exception.PortageException, e:
2345                         if not os.path.isdir(mysettings["DISTDIR"]):
2346                                 writemsg("!!! %s\n" % str(e), noiselevel=-1)
2347                                 writemsg("!!! Directory Not Found: DISTDIR='%s'\n" % mysettings["DISTDIR"], noiselevel=-1)
2348                                 writemsg("!!! Fetching will fail!\n", noiselevel=-1)
2349
2350         if can_fetch and \
2351                 not fetch_to_ro and \
2352                 not os.access(mysettings["DISTDIR"], os.W_OK):
2353                 writemsg("!!! No write access to '%s'\n" % mysettings["DISTDIR"],
2354                         noiselevel=-1)
2355                 can_fetch = False
2356
2357         if can_fetch and use_locks and locks_in_subdir:
2358                         distlocks_subdir = os.path.join(mysettings["DISTDIR"], locks_in_subdir)
2359                         if not os.access(distlocks_subdir, os.W_OK):
2360                                 writemsg("!!! No write access to %s.  Aborting.\n" % distlocks_subdir,
2361                                         noiselevel=-1)
2362                                 return 0
2363                         del distlocks_subdir
2364         for myfile in filedict.keys():
2365                 """
2366                 fetched  status
2367                 0        nonexistent
2368                 1        partially downloaded
2369                 2        completely downloaded
2370                 """
2371                 myfile_path = os.path.join(mysettings["DISTDIR"], myfile)
2372                 fetched=0
2373                 file_lock = None
2374                 if listonly:
2375                         writemsg_stdout("\n", noiselevel=-1)
2376                 else:
2377                         if use_locks and can_fetch:
2378                                 if locks_in_subdir:
2379                                         file_lock = portage_locks.lockfile(mysettings["DISTDIR"]+"/"+locks_in_subdir+"/"+myfile,wantnewlockfile=1)
2380                                 else:
2381                                         file_lock = portage_locks.lockfile(mysettings["DISTDIR"]+"/"+myfile,wantnewlockfile=1)
2382                 try:
2383                         if not listonly:
2384                                 if fsmirrors and not os.path.exists(myfile_path):
2385                                         for mydir in fsmirrors:
2386                                                 mirror_file = os.path.join(mydir, myfile)
2387                                                 try:
2388                                                         shutil.copyfile(mirror_file, myfile_path)
2389                                                         writemsg(_("Local mirror has file:" + \
2390                                                                 " %(file)s\n" % {"file":myfile}))
2391                                                         break
2392                                                 except (IOError, OSError), e:
2393                                                         if e.errno != errno.ENOENT:
2394                                                                 raise
2395                                                         del e
2396
2397                                 try:
2398                                         mystat = os.stat(myfile_path)
2399                                 except OSError, e:
2400                                         if e.errno != errno.ENOENT:
2401                                                 raise
2402                                         del e
2403                                 else:
2404                                         try:
2405                                                 apply_secpass_permissions(
2406                                                         myfile_path, gid=portage_gid, mode=0664, mask=02,
2407                                                         stat_cached=mystat)
2408                                         except portage_exception.PortageException, e:
2409                                                 if not os.access(myfile_path, os.R_OK):
2410                                                         writemsg("!!! Failed to adjust permissions:" + \
2411                                                                 " %s\n" % str(e), noiselevel=-1)
2412                                         if myfile not in mydigests:
2413                                                 # We don't have a digest, but the file exists.  We must
2414                                                 # assume that it is fully downloaded.
2415                                                 continue
2416                                         else:
2417                                                 if mystat.st_size < mydigests[myfile]["size"] and \
2418                                                         not restrict_fetch:
2419                                                         fetched = 1 # Try to resume this download.
2420                                                 else:
2421                                                         verified_ok, reason = portage_checksum.verify_all(
2422                                                                 myfile_path, mydigests[myfile])
2423                                                         if not verified_ok:
2424                                                                 writemsg("!!! Previously fetched" + \
2425                                                                         " file: '%s'\n" % myfile, noiselevel=-1)
2426                                                                 writemsg("!!! Reason: %s\n" % reason[0],
2427                                                                         noiselevel=-1)
2428                                                                 writemsg(("!!! Got:      %s\n" + \
2429                                                                         "!!! Expected: %s\n") % \
2430                                                                         (reason[1], reason[2]), noiselevel=-1)
2431                                                                 if can_fetch and not restrict_fetch:
2432                                                                         writemsg("Refetching...\n\n",
2433                                                                                 noiselevel=-1)
2434                                                                         os.unlink(myfile_path)
2435                                                         else:
2436                                                                 eout = output.EOutput()
2437                                                                 eout.quiet = \
2438                                                                         mysettings.get("PORTAGE_QUIET", None) == "1"
2439                                                                 for digest_name in mydigests[myfile]:
2440                                                                         eout.ebegin(
2441                                                                                 "%s %s ;-)" % (myfile, digest_name))
2442                                                                         eout.eend(0)
2443                                                                 continue # fetch any remaining files
2444
2445                         for loc in filedict[myfile]:
2446                                 if listonly:
2447                                         writemsg_stdout(loc+" ", noiselevel=-1)
2448                                         continue
2449                                 # allow different fetchcommands per protocol
2450                                 protocol = loc[0:loc.find("://")]
2451                                 if mysettings.has_key("FETCHCOMMAND_"+protocol.upper()):
2452                                         fetchcommand=mysettings["FETCHCOMMAND_"+protocol.upper()]
2453                                 else:
2454                                         fetchcommand=mysettings["FETCHCOMMAND"]
2455                                 if mysettings.has_key("RESUMECOMMAND_"+protocol.upper()):
2456                                         resumecommand=mysettings["RESUMECOMMAND_"+protocol.upper()]
2457                                 else:
2458                                         resumecommand=mysettings["RESUMECOMMAND"]
2459
2460                                 fetchcommand=fetchcommand.replace("${DISTDIR}",mysettings["DISTDIR"])
2461                                 resumecommand=resumecommand.replace("${DISTDIR}",mysettings["DISTDIR"])
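                                # Illustrative example (hypothetical wget command; only the
                                # FETCHCOMMAND_<PROTOCOL> naming scheme comes from the code
                                # above): for a http:// uri, a setting such as
                                #   FETCHCOMMAND_HTTP="/usr/bin/wget -t 3 -O \"${DISTDIR}/${FILE}\" \"${URI}\""
                                # would be preferred over the generic FETCHCOMMAND, with
                                # ${DISTDIR} substituted here and ${URI}/${FILE} further below.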
2462
2463                                 if not can_fetch:
2464                                         if fetched != 2:
2465                                                 if fetched == 0:
2466                                                         writemsg("!!! File %s isn't fetched but unable to get it.\n" % myfile,
2467                                                                 noiselevel=-1)
2468                                                 else:
2469                                                         writemsg("!!! File %s isn't fully fetched, but unable to complete it\n" % myfile,
2470                                                                 noiselevel=-1)
2471                                                 for var_name in ("FETCHCOMMAND", "RESUMECOMMAND"):
2472                                                         if not mysettings.get(var_name, None):
2473                                                                 writemsg(("!!! %s is unset.  It should " + \
2474                                                                 "have been defined in /etc/make.globals.\n") \
2475                                                                  % var_name, noiselevel=-1)
2476                                                 return 0
2477                                         else:
2478                                                 continue
2479
2480                                 if fetched != 2:
2481                                         #we either need to resume or start the download
2482                                         #you can't use "continue" when you're inside a "try" block
2483                                         if fetched==1:
2484                                                 #resume mode:
2485                                                 writemsg(">>> Resuming download...\n")
2486                                                 locfetch=resumecommand
2487                                         else:
2488                                                 #normal mode:
2489                                                 locfetch=fetchcommand
2490                                         writemsg_stdout(">>> Downloading '%s'\n" % \
2491                                                 re.sub(r'//(.+):.+@(.+)/',r'//\1:*password*@\2/', loc))
2492                                         myfetch=locfetch.replace("${URI}",loc)
2493                                         myfetch=myfetch.replace("${FILE}",myfile)
2494
2495                                         spawn_keywords = {}
2496                                         if "userfetch" in mysettings.features and \
2497                                                 os.getuid() == 0 and portage_gid and portage_uid:
2498                                                 spawn_keywords.update({
2499                                                         "uid"    : portage_uid,
2500                                                         "gid"    : portage_gid,
2501                                                         "groups" : userpriv_groups,
2502                                                         "umask"  : 002})
2503
2504                                         try:
2505
2506                                                 if mysettings.selinux_enabled():
2507                                                         con = selinux.getcontext()
2508                                                         con = con.replace(mysettings["PORTAGE_T"], mysettings["PORTAGE_FETCH_T"])
2509                                                         selinux.setexec(con)
2510
2511                                                 myret = portage_exec.spawn_bash(myfetch,
2512                                                         env=mysettings.environ(), **spawn_keywords)
2513
2514                                                 if mysettings.selinux_enabled():
2515                                                         selinux.setexec(None)
2516
2517                                         finally:
2518                                                 try:
2519                                                         apply_secpass_permissions(myfile_path,
2520                                                                 gid=portage_gid, mode=0664, mask=02)
2521                                                 except portage_exception.FileNotFound, e:
2522                                                         pass
2523                                                 except portage_exception.PortageException, e:
2524                                                         if not os.access(myfile_path, os.R_OK):
2525                                                                 writemsg("!!! Failed to adjust permissions:" + \
2526                                                                         " %s\n" % str(e), noiselevel=-1)
2527
2528                                         if mydigests!=None and mydigests.has_key(myfile):
2529                                                 try:
2530                                                         mystat = os.stat(myfile_path)
2531                                                 except OSError, e:
2532                                                         if e.errno != errno.ENOENT:
2533                                                                 raise
2534                                                         del e
2535                                                         fetched = 0
2536                                                 else:
2537                                                         # no exception?  file exists. let digestcheck() report
2538                                                         # an appropriate error for size or checksum errors
2539                                                         if (mystat[stat.ST_SIZE]<mydigests[myfile]["size"]):
2540                                                                 # Fetch failed... Try the next one... Kill 404 files though.
2541                                                                 if (mystat[stat.ST_SIZE]<100000) and (len(myfile)>4) and not ((myfile[-5:]==".html") or (myfile[-4:]==".htm")):
2542                                                                         html404=re.compile("<title>.*(not found|404).*</title>",re.I|re.M)
2543                                                                         if html404.search(open(mysettings["DISTDIR"]+"/"+myfile).read()):
2544                                                                                 try:
2545                                                                                         os.unlink(mysettings["DISTDIR"]+"/"+myfile)
2546                                                                                         writemsg(">>> Deleting invalid distfile. (Improper 404 redirect from server.)\n")
2547                                                                                         fetched = 0
2548                                                                                         continue
2549                                                                                 except (IOError, OSError):
2550                                                                                         pass
2551                                                                 fetched = 1
2552                                                                 continue
2553                                                         if not fetchonly:
2554                                                                 fetched=2
2555                                                                 break
2556                                                         else:
2557                                                                 # File is the correct size--check the checksums for the fetched
2558                                                                 # file NOW, for those users who don't have a stable/continuous
2559                                                                 # net connection. This way we have a chance to try to download
2560                                                                 # from another mirror...
2561                                                                 verified_ok,reason = portage_checksum.verify_all(mysettings["DISTDIR"]+"/"+myfile, mydigests[myfile])
2562                                                                 if not verified_ok:
2563                                                                         print reason
2564                                                                         writemsg("!!! Fetched file: "+str(myfile)+" VERIFY FAILED!\n",
2565                                                                                 noiselevel=-1)
2566                                                                         writemsg("!!! Reason: "+reason[0]+"\n",
2567                                                                                 noiselevel=-1)
2568                                                                         writemsg("!!! Got:      %s\n!!! Expected: %s\n" % \
2569                                                                                 (reason[1], reason[2]), noiselevel=-1)
2570                                                                         writemsg("Removing corrupt distfile...\n", noiselevel=-1)
2571                                                                         os.unlink(mysettings["DISTDIR"]+"/"+myfile)
2572                                                                         fetched=0
2573                                                                 else:
2574                                                                         eout = output.EOutput()
2575                                                                         eout.quiet = mysettings.get("PORTAGE_QUIET", None) == "1"
2576                                                                         for x_key in mydigests[myfile].keys():
2577                                                                                 eout.ebegin("%s %s ;-)" % (myfile, x_key))
2578                                                                                 eout.eend(0)
2579                                                                         fetched=2
2580                                                                         break
2581                                         else:
2582                                                 if not myret:
2583                                                         fetched=2
2584                                                         break
2585                                                 elif mydigests!=None:
2586                                                         writemsg("No digest file available and download failed.\n\n",
2587                                                                 noiselevel=-1)
2588                 finally:
2589                         if use_locks and file_lock:
2590                                 portage_locks.unlockfile(file_lock)
2591
2592                 if listonly:
2593                         writemsg_stdout("\n", noiselevel=-1)
2594                 if fetched != 2:
2595                         if restrict_fetch:
2596                                 print "\n!!!", mysettings["CATEGORY"] + "/" + \
2597                                         mysettings["PF"], "has fetch restriction turned on."
2598                                 print "!!! This probably means that this " + \
2599                                         "ebuild's files must be downloaded"
2600                                 print "!!! manually.  See the comments in" + \
2601                                         " the ebuild for more information.\n"
2602                                 spawn(EBUILD_SH_BINARY + " nofetch", mysettings)
2603                         elif listonly:
2604                                 continue
2605                         elif not filedict[myfile]:
2606                                 writemsg("Warning: No mirrors available for file" + \
2607                                         " '%s'\n" % (myfile), noiselevel=-1)
2608                         else:
2609                                 writemsg("!!! Couldn't download '%s'. Aborting.\n" % myfile,
2610                                         noiselevel=-1)
2611                         return 0
2612         return 1
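
# Illustrative usage of fetch() (a sketch; assumes `mysettings` is a config
# instance whose "O" entry points at the ebuild's package directory, so that
# the Manifest digests can be consulted, and that the uris come from SRC_URI):
#
#     myuris = ["http://ftp.example.org/pub/foo-1.0.tar.gz"]
#     if not fetch(myuris, mysettings):
#         writemsg("!!! Could not fetch all required distfiles.\n", noiselevel=-1)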
2613
2614 def digestgen(myarchives, mysettings, overwrite=1, manifestonly=0, myportdb=None):
2615         """
2616         Generates a digest file if missing.  Assumes all files are available.
2617         DEPRECATED: this is now only a compatibility wrapper for
2618                     portage_manifest.Manifest()
2619         NOTE: manifestonly and overwrite are useless with manifest2 and
2620               are therefore ignored."""
2621         if myportdb is None:
2622                 writemsg("Warning: myportdb not specified to digestgen\n")
2623                 global portdb
2624                 myportdb = portdb
2625         global _doebuild_manifest_exempt_depend
2626         try:
2627                 _doebuild_manifest_exempt_depend += 1
2628                 distfiles_map = {}
2629                 fetchlist_dict = FetchlistDict(mysettings["O"], mysettings, myportdb)
2630                 for cpv, fetchlist in fetchlist_dict.iteritems():
2631                         for myfile in fetchlist:
2632                                 distfiles_map.setdefault(myfile, []).append(cpv)
2633                 mf = Manifest(mysettings["O"], mysettings["DISTDIR"],
2634                         fetchlist_dict=fetchlist_dict)
2635                 required_hash_types = set(portage_const.MANIFEST1_HASH_FUNCTIONS)
2636                 required_hash_types.update(portage_const.MANIFEST2_HASH_FUNCTIONS)
2637                 required_hash_types.add("size")
2638                 dist_hashes = mf.fhashdict.get("DIST", {})
2639                 missing_hashes = set()
2640                 for myfile in distfiles_map:
2641                         myhashes = dist_hashes.get(myfile)
2642                         if not myhashes:
2643                                 missing_hashes.add(myfile)
2644                                 continue
2645                         if required_hash_types.difference(myhashes):
2646                                 missing_hashes.add(myfile)
2647                 if missing_hashes:
2648                         missing_files = []
2649                         for myfile in missing_hashes:
2650                                 try:
2651                                         os.stat(os.path.join(mysettings["DISTDIR"], myfile))
2652                                 except OSError, e:
2653                                         if e.errno != errno.ENOENT:
2654                                                 raise
2655                                         del e
2656                                         missing_files.append(myfile)
2657                         if missing_files:
2658                                 mytree = os.path.realpath(os.path.dirname(
2659                                         os.path.dirname(mysettings["O"])))
2660                                 myuris = []
2661                                 for myfile in missing_files:
2662                                         for cpv in distfiles_map[myfile]:
2663                                                 alluris, aalist = myportdb.getfetchlist(
2664                                                         cpv, mytree=mytree, all=True,
2665                                                         mysettings=mysettings)
2666                                                 for uri in alluris:
2667                                                         if os.path.basename(uri) == myfile:
2668                                                                 myuris.append(uri)
2669                                 if not fetch(myuris, mysettings):
2670                                         writemsg(("!!! File %s doesn't exist, can't update " + \
2671                                                 "Manifest\n") % myfile, noiselevel=-1)
2672                                         return 0
2673                 writemsg_stdout(">>> Creating Manifest for %s\n" % mysettings["O"])
2674                 try:
2675                         mf.create(requiredDistfiles=myarchives,
2676                                 assumeDistHashesSometimes=True,
2677                                 assumeDistHashesAlways=(
2678                                 "assume-digests" in mysettings.features))
2679                 except portage_exception.FileNotFound, e:
2680                         writemsg(("!!! File %s doesn't exist, can't update " + \
2681                                 "Manifest\n") % e, noiselevel=-1)
2682                         return 0
2683                 mf.write(sign=False)
2684                 if "assume-digests" not in mysettings.features:
2685                         distlist = mf.fhashdict.get("DIST", {}).keys()
2686                         distlist.sort()
2687                         auto_assumed = []
2688                         for filename in distlist:
2689                                 if not os.path.exists(
2690                                         os.path.join(mysettings["DISTDIR"], filename)):
2691                                         auto_assumed.append(filename)
2692                         if auto_assumed:
2693                                 mytree = os.path.realpath(
2694                                         os.path.dirname(os.path.dirname(mysettings["O"])))
2695                                 cp = os.path.sep.join(mysettings["O"].split(os.path.sep)[-2:])
2696                                 pkgs = myportdb.cp_list(cp, mytree=mytree)
2697                                 pkgs.sort()
2698                                 writemsg_stdout("  digest.assumed" + output.colorize("WARN",
2699                                         str(len(auto_assumed)).rjust(18)) + "\n")
2700                                 for pkg_key in pkgs:
2701                                         fetchlist = myportdb.getfetchlist(pkg_key,
2702                                                 mysettings=mysettings, all=True, mytree=mytree)[1]
2703                                         pv = pkg_key.split("/")[1]
2704                                         for filename in auto_assumed:
2705                                                 if filename in fetchlist:
2706                                                         writemsg_stdout(
2707                                                                 "   digest-%s::%s\n" % (pv, filename))
2708                 return 1
2709         finally:
2710                 _doebuild_manifest_exempt_depend -= 1
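
# Illustrative usage of digestgen() (a sketch; `mysettings["O"]` is assumed to
# point at the package directory and `myarchives` to list its distfiles):
#
#     if not digestgen(myarchives, mysettings, myportdb=portdb):
#         writemsg("!!! Digest/Manifest generation failed.\n", noiselevel=-1)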
2711
2712 def digestParseFile(myfilename, mysettings=None):
2713         """(filename) -- Parses a given file for entries matching:
2714         <checksumkey> <checksum_hex_string> <filename> <filesize>
2715         Ignores lines that don't start with a valid checksum identifier
2716         and returns a dict with the filenames as keys and {checksumkey:checksum}
2717         as the values.
2718         DEPRECATED: this function is now only a compatibility wrapper for
2719                     portage_manifest.Manifest()."""
2720
2721         mysplit = myfilename.split(os.sep)
2722         if mysplit[-2] == "files" and mysplit[-1].startswith("digest-"):
2723                 pkgdir = os.sep + os.sep.join(mysplit[:-2]).strip(os.sep)
2724         elif mysplit[-1] == "Manifest":
2725                 pkgdir = os.sep + os.sep.join(mysplit[:-1]).strip(os.sep)
2726
2727         if mysettings is None:
2728                 global settings
2729                 mysettings = config(clone=settings)
2730
2731         return Manifest(pkgdir, mysettings["DISTDIR"]).getDigests()
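
# Example of the mapping returned above (illustrative file name and values),
# matching the format described in the docstring:
#
#     {"foo-1.0.tar.gz": {"MD5": "<checksum hex string>", "size": 294912, ...}}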
2732
2733 def digestcheck(myfiles, mysettings, strict=0, justmanifest=0):
2734         """Verifies checksums.  Assumes all files have been downloaded.
2735         DEPRECATED: this is now only a compatibility wrapper for
2736                     portage_manifest.Manifest()."""
2737         if not strict:
2738                 return 1
2739         pkgdir = mysettings["O"]
2740         manifest_path = os.path.join(pkgdir, "Manifest")
2741         if not os.path.exists(manifest_path):
2742                 writemsg("!!! Manifest file not found: '%s'\n" % manifest_path,
2743                         noiselevel=-1)
2744                 if strict:
2745                         return 0
2746         mf = Manifest(pkgdir, mysettings["DISTDIR"])
2747         eout = output.EOutput()
2748         eout.quiet = mysettings.get("PORTAGE_QUIET", None) == "1"
2749         try:
2750                 eout.ebegin("checking ebuild checksums ;-)")
2751                 mf.checkTypeHashes("EBUILD")
2752                 eout.eend(0)
2753                 eout.ebegin("checking auxfile checksums ;-)")
2754                 mf.checkTypeHashes("AUX")
2755                 eout.eend(0)
2756                 eout.ebegin("checking miscfile checksums ;-)")
2757                 mf.checkTypeHashes("MISC", ignoreMissingFiles=True)
2758                 eout.eend(0)
2759                 for f in myfiles:
2760                         eout.ebegin("checking %s ;-)" % f)
2761                         mf.checkFileHashes(mf.findFile(f), f)
2762                         eout.eend(0)
2763         except KeyError, e:
2764                 eout.eend(1)
2765                 writemsg("\n!!! Missing digest for %s\n" % str(e), noiselevel=-1)
2766                 return 0
2767         except portage_exception.FileNotFound, e:
2768                 eout.eend(1)
2769                 writemsg("\n!!! A file listed in the Manifest could not be found: %s\n" % str(e),
2770                         noiselevel=-1)
2771                 return 0
2772         except portage_exception.DigestException, e:
2773                 eout.eend(1)
2774                 writemsg("\n!!! Digest verification failed:\n", noiselevel=-1)
2775                 writemsg("!!! %s\n" % e.value[0], noiselevel=-1)
2776                 writemsg("!!! Reason: %s\n" % e.value[1], noiselevel=-1)
2777                 writemsg("!!! Got: %s\n" % e.value[2], noiselevel=-1)
2778                 writemsg("!!! Expected: %s\n" % e.value[3], noiselevel=-1)
2779                 return 0
2780         # Make sure that all of the ebuilds are actually listed in the Manifest.
2781         for f in os.listdir(pkgdir):
2782                 if f.endswith(".ebuild") and not mf.hasFile("EBUILD", f):
2783                         writemsg("!!! A file is not listed in the Manifest: '%s'\n" % \
2784                                 os.path.join(pkgdir, f), noiselevel=-1)
2785                         return 0
2786         """ epatch will just grab all the patches out of a directory, so we have to
2787         make sure there aren't any foreign files that it might grab."""
2788         filesdir = os.path.join(pkgdir, "files")
2789         for parent, dirs, files in os.walk(filesdir):
2790                 for d in dirs:
2791                         if d.startswith(".") or d == "CVS":
2792                                 dirs.remove(d)
2793                 for f in files:
2794                         if f.startswith("."):
2795                                 continue
2796                         f = os.path.join(parent, f)[len(filesdir) + 1:]
2797                         file_type = mf.findFile(f)
2798                         if file_type != "AUX" and not f.startswith("digest-"):
2799                                 writemsg("!!! A file is not listed in the Manifest: '%s'\n" % \
2800                                         os.path.join(filesdir, f), noiselevel=-1)
2801                                 return 0
2802         return 1
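
# Illustrative usage of digestcheck() (a sketch; with strict=0 it returns 1
# without verifying anything, so strict=1 is what actually checks the Manifest):
#
#     if not digestcheck(myarchives, mysettings, strict=1):
#         writemsg("!!! Manifest verification failed.\n", noiselevel=-1)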
2803
2804 # parse actionmap to spawn ebuild with the appropriate args
2805 def spawnebuild(mydo,actionmap,mysettings,debug,alwaysdep=0,logfile=None):
2806         if alwaysdep or "noauto" not in mysettings.features:
2807                 # process dependency first
2808                 if "dep" in actionmap[mydo].keys():
2809                         retval=spawnebuild(actionmap[mydo]["dep"],actionmap,mysettings,debug,alwaysdep=alwaysdep,logfile=logfile)
2810                         if retval:
2811                                 return retval
2812         kwargs = actionmap[mydo]["args"]
2813         mysettings["EBUILD_PHASE"] = mydo
2814         phase_retval = spawn(actionmap[mydo]["cmd"] % mydo, mysettings, debug=debug, logfile=logfile, **kwargs)
2815         mysettings["EBUILD_PHASE"] = ""
2816
2817         if not kwargs["droppriv"] and secpass >= 2:
2818                 """ Privileged phases may have left files that need to be made
2819                 writable to a less privileged user."""
2820                 apply_recursive_permissions(mysettings["T"],
2821                         uid=portage_uid, gid=portage_gid, dirmode=070, dirmask=0,
2822                         filemode=060, filemask=0)
2823
2824         if phase_retval == os.EX_OK:
2825                 if mydo == "install":
2826                         # User and group bits that match the "portage" user or group are
2827                         # automatically mapped to PORTAGE_INST_UID and PORTAGE_INST_GID if
2828                         # necessary.  The chown system call may clear S_ISUID and S_ISGID
2829                         # bits, so those bits are restored if necessary.
2830                         inst_uid = int(mysettings["PORTAGE_INST_UID"])
2831                         inst_gid = int(mysettings["PORTAGE_INST_GID"])
2832                         for parent, dirs, files in os.walk(mysettings["D"]):
2833                                 for fname in chain(dirs, files):
2834                                         fpath = os.path.join(parent, fname)
2835                                         mystat = os.lstat(fpath)
2836                                         if mystat.st_uid != portage_uid and \
2837                                                 mystat.st_gid != portage_gid:
2838                                                 continue
2839                                         myuid = -1
2840                                         mygid = -1
2841                                         if mystat.st_uid == portage_uid:
2842                                                 myuid = inst_uid
2843                                         if mystat.st_gid == portage_gid:
2844                                                 mygid = inst_gid
2845                                         apply_secpass_permissions(fpath, uid=myuid, gid=mygid,
2846                                                 mode=mystat.st_mode, stat_cached=mystat,
2847                                                 follow_links=False)
2848                         mycommand = " ".join([MISC_SH_BINARY, "install_qa_check", "install_symlink_html_docs"])
2849                         qa_retval = spawn(mycommand, mysettings, debug=debug, logfile=logfile, **kwargs)
2850                         if qa_retval:
2851                                 writemsg("!!! install_qa_check failed; exiting.\n",
2852                                         noiselevel=-1)
2853                         return qa_retval
2854         return phase_retval
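
# Illustrative shape of an actionmap entry consumed by spawnebuild() (a sketch;
# the real actionmap is assembled by the caller, and only the "cmd", "args",
# "dep" and "droppriv" keys shown here are read in this function):
#
#     actionmap = {
#         "unpack": {
#             "cmd": EBUILD_SH_BINARY + " %s",
#             "args": {"droppriv": 1, "free": 0, "sesandbox": 0},
#             "dep": "setup",
#         },
#     }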
2855
2856
2857 def eapi_is_supported(eapi):
2858         return str(eapi).strip() == str(portage_const.EAPI).strip()
2859
2860 def doebuild_environment(myebuild, mydo, myroot, mysettings, debug, use_cache, mydbapi):
2861
2862         ebuild_path = os.path.abspath(myebuild)
2863         pkg_dir     = os.path.dirname(ebuild_path)
2864
2865         if mysettings.configdict["pkg"].has_key("CATEGORY"):
2866                 cat = mysettings.configdict["pkg"]["CATEGORY"]
2867         else:
2868                 cat = os.path.basename(normalize_path(os.path.join(pkg_dir, "..")))
2869         mypv = os.path.basename(ebuild_path)[:-7]       
2870         mycpv = cat+"/"+mypv
2871         mysplit=pkgsplit(mypv,silent=0)
2872         if mysplit is None:
2873                 raise portage_exception.IncorrectParameter(
2874                         "Invalid ebuild path: '%s'" % myebuild)
2875
2876         if mydo != "depend":
2877                 """For performance reasons, setcpv only triggers reset when it
2878                 detects a package-specific change in config.  For the ebuild
2879                 environment, a reset call is forced in order to ensure that the
2880                 latest env.d variables are used."""
2881                 mysettings.reset(use_cache=use_cache)
2882                 mysettings.setcpv(mycpv, use_cache=use_cache, mydb=mydbapi)
2883
2884         mysettings["EBUILD_PHASE"] = mydo
2885
2886         mysettings["PORTAGE_MASTER_PID"] = str(os.getpid())
2887
2888         # We are disabling user-specific bashrc files.
2889         mysettings["BASH_ENV"] = INVALID_ENV_FILE
2890
2891         if debug: # Otherwise it overrides emerge's settings.
2892                 # We have no other way to set debug... debug can't be passed in
2893                 # due to how it's coded... Don't overwrite this so we can use it.
2894                 mysettings["PORTAGE_DEBUG"] = "1"
2895
2896         mysettings["ROOT"]     = myroot
2897         mysettings["STARTDIR"] = getcwd()
2898
2899         mysettings["EBUILD"]   = ebuild_path
2900         mysettings["O"]        = pkg_dir
2901         mysettings.configdict["pkg"]["CATEGORY"] = cat
2902         mysettings["FILESDIR"] = pkg_dir+"/files"
2903         mysettings["PF"]       = mypv
2904
2905         mysettings["ECLASSDIR"]   = mysettings["PORTDIR"]+"/eclass"
2906         mysettings["SANDBOX_LOG"] = mycpv.replace("/", "_-_")
2907
2908         mysettings["PROFILE_PATHS"] = "\n".join(mysettings.profiles)+"\n"+CUSTOM_PROFILE_PATH
2909         mysettings["P"]  = mysplit[0]+"-"+mysplit[1]
2910         mysettings["PN"] = mysplit[0]
2911         mysettings["PV"] = mysplit[1]
2912         mysettings["PR"] = mysplit[2]
2913
2914         if portage_util.noiselimit < 0:
2915                 mysettings["PORTAGE_QUIET"] = "1"
2916
2917         if mydo != "depend":
2918                 eapi, mysettings["INHERITED"], mysettings["SLOT"], mysettings["RESTRICT"]  = \
2919                         mydbapi.aux_get(mycpv, ["EAPI", "INHERITED", "SLOT", "RESTRICT"])
2920                 if not eapi_is_supported(eapi):
2921                         # can't do anything with this.
2922                         raise portage_exception.UnsupportedAPIException(mycpv, eapi)
2923                 mysettings["PORTAGE_RESTRICT"] = " ".join(flatten(
2924                         portage_dep.use_reduce(portage_dep.paren_reduce(
2925                         mysettings["RESTRICT"]), uselist=mysettings["USE"].split())))
2926
2927         if mysplit[2] == "r0":
2928                 mysettings["PVR"]=mysplit[1]
2929         else:
2930                 mysettings["PVR"]=mysplit[1]+"-"+mysplit[2]
2931
2932         if mysettings.has_key("PATH"):
2933                 mysplit=mysettings["PATH"].split(":")
2934         else:
2935                 mysplit=[]
2936         if PORTAGE_BIN_PATH not in mysplit:
2937                 mysettings["PATH"]=PORTAGE_BIN_PATH+":"+mysettings["PATH"]
2938
2939         # Sandbox needs canonical paths.
2940         mysettings["PORTAGE_TMPDIR"] = os.path.realpath(
2941                 mysettings["PORTAGE_TMPDIR"])
2942         mysettings["BUILD_PREFIX"] = mysettings["PORTAGE_TMPDIR"]+"/portage"
2943         mysettings["PKG_TMPDIR"]   = mysettings["PORTAGE_TMPDIR"]+"/binpkgs"
2944         
2945         # Package {pre,post}inst and {pre,post}rm may overlap, so they must have separate
2946         # locations in order to prevent interference.
2947         if mydo in ("unmerge", "prerm", "postrm", "cleanrm"):
2948                 mysettings["PORTAGE_BUILDDIR"] = os.path.join(
2949                         mysettings["PKG_TMPDIR"],
2950                         mysettings["CATEGORY"], mysettings["PF"])
2951         else:
2952                 mysettings["PORTAGE_BUILDDIR"] = os.path.join(
2953                         mysettings["BUILD_PREFIX"],
2954                         mysettings["CATEGORY"], mysettings["PF"])
2955
2956         mysettings["HOME"] = os.path.join(mysettings["PORTAGE_BUILDDIR"], "homedir")
2957         mysettings["WORKDIR"] = os.path.join(mysettings["PORTAGE_BUILDDIR"], "work")
2958         mysettings["D"] = os.path.join(mysettings["PORTAGE_BUILDDIR"], "image") + os.sep
2959         mysettings["T"] = os.path.join(mysettings["PORTAGE_BUILDDIR"], "temp")
2960
2961         mysettings["PORTAGE_BASHRC"] = os.path.join(
2962                 mysettings["PORTAGE_CONFIGROOT"], EBUILD_SH_ENV_FILE.lstrip(os.path.sep))
2963
2964         #set up KV variable -- DEP SPEEDUP :: Don't waste time. Keep var persistent.
2965         if (mydo!="depend") or not mysettings.has_key("KV"):
2966                 mykv,err1=ExtractKernelVersion(os.path.join(myroot, "usr/src/linux"))
2967                 if mykv:
2968                         # Regular source tree
2969                         mysettings["KV"]=mykv
2970                 else:
2971                         mysettings["KV"]=""
2972
2973         if (mydo!="depend") or not mysettings.has_key("KVERS"):
2974                 myso=os.uname()[2]
2975                 mysettings["KVERS"]=myso[1]
2976
2977         # Allow color.map to control colors associated with einfo, ewarn, etc...
2978         mycolors = []
2979         for c in ("GOOD", "WARN", "BAD", "HILITE", "BRACKET"):
2980                 mycolors.append("%s=$'%s'" % (c, output.codes[c]))
2981         mysettings["PORTAGE_COLORMAP"] = "\n".join(mycolors)
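
# Illustrative values produced by doebuild_environment() for a hypothetical
# ebuild path (derived directly from pkgsplit() and the assignments above):
#
#     myebuild = ".../app-shells/bash/bash-3.1-r1.ebuild"
#     => CATEGORY="app-shells", PF="bash-3.1-r1", P="bash-3.1",
#        PN="bash", PV="3.1", PR="r1", PVR="3.1-r1"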
2982
2983 def prepare_build_dirs(myroot, mysettings, cleanup):
2984
2985         clean_dirs = [mysettings["HOME"]]
2986
2987         # We enable cleanup when we want to make sure old cruft (such as the old
2988         # environment) doesn't interfere with the current phase.
2989         if cleanup:
2990                 clean_dirs.append(mysettings["T"])
2991
2992         for clean_dir in clean_dirs:
2993                 try:
2994                         shutil.rmtree(clean_dir)
2995                 except OSError, oe:
2996                         if errno.ENOENT == oe.errno:
2997                                 pass
2998                         elif errno.EPERM == oe.errno:
2999                                 writemsg("%s\n" % oe, noiselevel=-1)
3000                                 writemsg("Operation Not Permitted: rmtree('%s')\n" % \
3001                                         clean_dir, noiselevel=-1)
3002                                 return 1
3003                         else:
3004                                 raise
3005
3006         def makedirs(dir_path):
3007                 try:
3008                         os.makedirs(dir_path)
3009                 except OSError, oe:
3010                         if errno.EEXIST == oe.errno:
3011                                 pass
3012                         elif errno.EPERM == oe.errno:
3013                                 writemsg("%s\n" % oe, noiselevel=-1)
3014                                 writemsg("Operation Not Permitted: makedirs('%s')\n" % \
3015                                         dir_path, noiselevel=-1)
3016                                 return False
3017                         else:
3018                                 raise
3019                 return True
3020
3021         mysettings["PKG_LOGDIR"] = os.path.join(mysettings["T"], "logging")
3022
3023         mydirs = [os.path.dirname(mysettings["PORTAGE_BUILDDIR"])]
3024         mydirs.append(os.path.dirname(mydirs[-1]))
3025
3026         try:
3027                 for mydir in mydirs:
3028                         portage_util.ensure_dirs(mydir)
3029                         portage_util.apply_secpass_permissions(mydir,
3030                                 gid=portage_gid, uid=portage_uid, mode=070, mask=0)
3031                 for dir_key in ("PORTAGE_BUILDDIR", "HOME", "PKG_LOGDIR", "T"):
3032                         """These directories don't necessarily need to be group writable.
3033                         However, the setup phase is commonly run as a privileged user prior
3034                         to the other phases being run by an unprivileged user.  Currently,
3035                         we use the portage group to ensure that the unprivileged user still
3036                         has write access to these directories in any case."""
3037                         portage_util.ensure_dirs(mysettings[dir_key], mode=0775)
3038                         portage_util.apply_secpass_permissions(mysettings[dir_key],
3039                                 uid=portage_uid, gid=portage_gid)
3040         except portage_exception.PermissionDenied, e:
3041                 writemsg("Permission Denied: %s\n" % str(e), noiselevel=-1)
3042                 return 1
3043         except portage_exception.OperationNotPermitted, e:
3044                 writemsg("Operation Not Permitted: %s\n" % str(e), noiselevel=-1)
3045                 return 1
3046         except portage_exception.FileNotFound, e:
3047                 writemsg("File Not Found: '%s'\n" % str(e), noiselevel=-1)
3048                 return 1
3049
3050         features_dirs = {
3051                 "ccache":{
3052                         "basedir_var":"CCACHE_DIR",
3053                         "default_dir":os.path.join(mysettings["PORTAGE_TMPDIR"], "ccache"),
3054                         "always_recurse":False},
3055                 "confcache":{
3056                         "basedir_var":"CONFCACHE_DIR",
3057                         "default_dir":os.path.join(mysettings["PORTAGE_TMPDIR"], "confcache"),
3058                         "always_recurse":True},
3059                 "distcc":{
3060                         "basedir_var":"DISTCC_DIR",
3061                         "default_dir":os.path.join(mysettings["BUILD_PREFIX"], ".distcc"),
3062                         "subdirs":("lock", "state"),
3063                         "always_recurse":True}
3064         }
3065         dirmode  = 02070
3066         filemode =   060
3067         modemask =    02
3068         for myfeature, kwargs in features_dirs.iteritems():
3069                 if myfeature in mysettings.features:
3070                         basedir = mysettings[kwargs["basedir_var"]]
3071                         if basedir == "":
3072                                 basedir = kwargs["default_dir"]
3073                                 mysettings[kwargs["basedir_var"]] = basedir
3074                         try:
3075                                 mydirs = [mysettings[kwargs["basedir_var"]]]
3076                                 if "subdirs" in kwargs:
3077                                         for subdir in kwargs["subdirs"]:
3078                                                 mydirs.append(os.path.join(basedir, subdir))
3079                                 for mydir in mydirs:
3080                                         modified = portage_util.ensure_dirs(mydir,
3081                                                 gid=portage_gid, mode=dirmode, mask=modemask)
3082                                         # To avoid excessive recursive stat calls, we trigger
3083                                         # recursion when the top level directory does not initially
3084                                         # match our permission requirements.
3085                                         if modified or kwargs["always_recurse"]:
3086                                                 if modified:
3087                                                         writemsg("Adjusting permissions recursively: '%s'\n" % mydir,
3088                                                                 noiselevel=-1)
3089                                                 def onerror(e):
3090                                                         raise   # The feature is disabled if a single error
3091                                                                         # occurs during permissions adjustment.
3092                                                 if not apply_recursive_permissions(mydir,
3093                                                 gid=portage_gid, dirmode=dirmode, dirmask=modemask,
3094                                                 filemode=filemode, filemask=modemask, onerror=onerror):
3095                                                         raise portage_exception.OperationNotPermitted(
3096                                                                 "Failed to apply recursive permissions for the portage group.")
3097                         except portage_exception.PortageException, e:
3098                                 mysettings.features.remove(myfeature)
3099                                 mysettings["FEATURES"] = " ".join(mysettings.features)
3100                                 writemsg("!!! %s\n" % str(e), noiselevel=-1)
3101                                 writemsg("!!! Failed resetting perms on %s='%s'\n" % \
3102                                         (kwargs["basedir_var"], basedir), noiselevel=-1)
3103                                 writemsg("!!! Disabled FEATURES='%s'\n" % myfeature,
3104                                         noiselevel=-1)
3105                                 time.sleep(5)
3106
3107         workdir_mode = 0700
3108         try:
3109                 mode = mysettings["PORTAGE_WORKDIR_MODE"]
3110                 if mode.isdigit():
3111                         parsed_mode = int(mode, 8)
3112                 elif mode == "":
3113                         raise KeyError()
3114                 else:
3115                         raise ValueError()
3116                 if parsed_mode & 07777 != parsed_mode:
3117                         raise ValueError("Invalid file mode: %s" % mode)
3118                 else:
3119                         workdir_mode = parsed_mode
3120         except KeyError, e:
3121                 writemsg("!!! PORTAGE_WORKDIR_MODE is unset, using %s.\n" % oct(workdir_mode))
3122         except ValueError, e:
3123                 if len(str(e)) > 0:
3124                         writemsg("%s\n" % e)
3125                 writemsg("!!! Unable to parse PORTAGE_WORKDIR_MODE='%s', using %s.\n" % \
3126                 (mysettings["PORTAGE_WORKDIR_MODE"], oct(workdir_mode)))
3127         mysettings["PORTAGE_WORKDIR_MODE"] = oct(workdir_mode)
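             # For example, setting PORTAGE_WORKDIR_MODE="0700" parses to mode 0700 here;
             # an empty or unparseable value falls back to the 0700 default above and a
             # warning is printed.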
3128         try:
3129                 apply_secpass_permissions(mysettings["WORKDIR"],
3130                 uid=portage_uid, gid=portage_gid, mode=workdir_mode)
3131         except portage_exception.FileNotFound:
3132                 pass # ebuild.sh will create it
3133
3134         if mysettings.get("PORT_LOGDIR", "") == "":
3135                 while "PORT_LOGDIR" in mysettings:
3136                         del mysettings["PORT_LOGDIR"]
3137         if "PORT_LOGDIR" in mysettings:
3138                 try:
3139                         portage_util.ensure_dirs(mysettings["PORT_LOGDIR"],
3140                                 uid=portage_uid, gid=portage_gid, mode=02770)
3141                 except portage_exception.PortageException, e:
3142                         writemsg("!!! %s\n" % str(e), noiselevel=-1)
3143                         writemsg("!!! Permission issues with PORT_LOGDIR='%s'\n" % \
3144                                 mysettings["PORT_LOGDIR"], noiselevel=-1)
3145                         writemsg("!!! Disabling logging.\n", noiselevel=-1)
3146                         while "PORT_LOGDIR" in mysettings:
3147                                 del mysettings["PORT_LOGDIR"]
3148         if "PORT_LOGDIR" in mysettings:
3149                 logid_path = os.path.join(mysettings["PORTAGE_BUILDDIR"], ".logid")
3150                 if not os.path.exists(logid_path):
3151                         f = open(logid_path, "w")
3152                         f.close()
3153                         del f
3154                 logid_time = time.strftime("%Y%m%d-%H%M%S",
3155                         time.gmtime(os.stat(logid_path).st_mtime))
3156                 mysettings["PORTAGE_LOG_FILE"] = os.path.join(
3157                         mysettings["PORT_LOGDIR"], "%s:%s:%s.log" % \
3158                         (mysettings["CATEGORY"], mysettings["PF"], logid_time))
3159                 del logid_path, logid_time
3160         else:
3161                 mysettings["PORTAGE_LOG_FILE"] = os.path.join(mysettings["T"], "build.log")
3162
3163 _doebuild_manifest_exempt_depend = 0
3164 _doebuild_manifest_checked = None
3165
3166 def doebuild(myebuild, mydo, myroot, mysettings, debug=0, listonly=0,
3167         fetchonly=0, cleanup=0, dbkey=None, use_cache=1, fetchall=0, tree=None,
3168         mydbapi=None, vartree=None, prev_mtimes=None):
3169         
3170         """
3171         Wrapper function that invokes specific ebuild phases through the spawning
3172         of ebuild.sh
3173         
3174         @param myebuild: name of the ebuild to invoke the phase on (CPV)
3175         @type myebuild: String
3176         @param mydo: Phase to run
3177         @type mydo: String
3178         @param myroot: $ROOT (usually '/', see man make.conf)
3179         @type myroot: String
3180         @param mysettings: Portage Configuration
3181         @type mysettings: instance of portage.config
3182         @param debug: Turns on various debug information (eg, debug for spawn)
3183         @type debug: Boolean
3184         @param listonly: Used to wrap fetch(); passed such that fetch only lists files required.
3185         @type listonly: Boolean
3186         @param fetchonly: Used to wrap fetch(); passed such that files are only fetched (no other actions)
3187         @type fetchonly: Boolean
3188         @param cleanup: Passed to prepare_build_dirs; when true, the T directory is also cleaned of stale files from a previous run
3189         @type cleanup: Boolean
3190         @param dbkey: A dict (usually keys and values from the depend phase, such as KEYWORDS, USE, etc..)
3191         @type dbkey: Dict or String
3192         @param use_cache: Enables the cache
3193         @type use_cache: Boolean
3194         @param fetchall: Used to wrap fetch(); fetches all URIs (even ones invalid due to USE conditionals)
3195         @type fetchall: Boolean
3196         @param tree: Which tree to use ('vartree','porttree','bintree', etc..), defaults to 'porttree'
3197         @type tree: String
3198         @param mydbapi: a dbapi instance to pass to various functions; this should be a portdbapi instance.
3199         @type mydbapi: portdbapi instance
3200         @param vartree: An instance of vartree; used for aux_get calls, defaults to db[myroot]['vartree']
3201         @type vartree: vartree instance
3202         @param prev_mtimes: A dict of { filename:mtime } keys used by merge() to do config_protection
3203         @type prev_mtimes: dictionary
3204         @rtype: int
3205         @returns:
3206         1. 0 for success
3207         2. 1 for error
3208         
3209         Most errors have an accompanying error message.
3210         
3211         listonly and fetchonly are only really necessary for operations involving 'fetch'
3212         prev_mtimes are only necessary for merge operations.
3213         Other variables may not be strictly required, many have defaults that are set inside of doebuild.
3214         
3215         """
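             # Illustrative call sketch (comments only, not executed): with an existing
             # config instance "settings" and a portdbapi instance "portdb", the fetch
             # phase could be invoked roughly like this (the ebuild path is hypothetical):
             #
             #   retval = doebuild("/usr/portage/app-misc/foo/foo-1.0.ebuild",
             #           "fetch", "/", settings, tree="porttree", mydbapi=portdb)
             #   if retval != os.EX_OK:
             #           writemsg("!!! fetch failed\n", noiselevel=-1)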
3216         
3217         if not tree:
3218                 writemsg("Warning: tree not specified to doebuild\n")
3219                 tree = "porttree"
3220         global db
3221         
3222         # chunked out deps for each phase, so that ebuild binary can use it 
3223         # to collapse targets down.
3224         actionmap_deps={
3225         "depend": [],
3226         "setup":  [],
3227         "unpack": ["setup"],
3228         "compile":["unpack"],
3229         "test":   ["compile"],
3230         "install":["test"],
3231         "rpm":    ["install"],
3232         "package":["install"],
3233         }
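             # Reading the map above: each later phase lists the phases it depends on,
             # e.g. "install" pulls in "test", which pulls in "compile", "unpack" and
             # "setup"; these entries are merged into each actionmap entry's "dep"
             # field further below.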
3234         
3235         if mydbapi is None:
3236                 mydbapi = db[myroot][tree].dbapi
3237
3238         if vartree is None and mydo in ("merge", "qmerge", "unmerge"):
3239                 vartree = db[myroot]["vartree"]
3240
3241         features = mysettings.features
3242
3243         validcommands = ["help","clean","prerm","postrm","cleanrm","preinst","postinst",
3244                         "config","setup","depend","fetch","digest",
3245                         "unpack","compile","test","install","rpm","qmerge","merge",
3246                         "package","unmerge", "manifest"]
3247
3248         if mydo not in validcommands:
3249                 validcommands.sort()
3250                 writemsg("!!! doebuild: '%s' is not one of the following valid commands:" % mydo,
3251                         noiselevel=-1)
3252                 for vcount in range(len(validcommands)):
3253                         if vcount%6 == 0:
3254                                 writemsg("\n!!! ", noiselevel=-1)
3255                         writemsg(validcommands[vcount].ljust(11), noiselevel=-1)
3256                 writemsg("\n", noiselevel=-1)
3257                 return 1
3258
3259         if not os.path.exists(myebuild):
3260                 writemsg("!!! doebuild: %s not found for %s\n" % (myebuild, mydo),
3261                         noiselevel=-1)
3262                 return 1
3263
3264         global _doebuild_manifest_exempt_depend
3265
3266         if "strict" in features and \
3267                 "digest" not in features and \
3268                 tree == "porttree" and \
3269                 mydo not in ("digest", "manifest", "help") and \
3270                 not _doebuild_manifest_exempt_depend:
3271                 # Always verify the ebuild checksums before executing it.
3272                 pkgdir = os.path.dirname(myebuild)
3273                 manifest_path = os.path.join(pkgdir, "Manifest")
3274                 global _doebuild_manifest_checked
3275                 # Avoid checking the same Manifest several times in a row during a
3276                 # regen with an empty cache.
3277                 if _doebuild_manifest_checked != manifest_path:
3278                         if not os.path.exists(manifest_path):
3279                                 writemsg("!!! Manifest file not found: '%s'\n" % manifest_path,
3280                                         noiselevel=-1)
3281                                 return 1
3282                         mf = Manifest(pkgdir, mysettings["DISTDIR"])
3283                         try:
3284                                 mf.checkTypeHashes("EBUILD")
3285                         except portage_exception.FileNotFound, e:
3286                                 writemsg("!!! A file listed in the Manifest " + \
3287                                         "could not be found: %s\n" % str(e), noiselevel=-1)
3288                                 return 1
3289                         except portage_exception.DigestException, e:
3290                                 writemsg("!!! Digest verification failed:\n", noiselevel=-1)
3291                                 writemsg("!!! %s\n" % e.value[0], noiselevel=-1)
3292                                 writemsg("!!! Reason: %s\n" % e.value[1], noiselevel=-1)
3293                                 writemsg("!!! Got: %s\n" % e.value[2], noiselevel=-1)
3294                                 writemsg("!!! Expected: %s\n" % e.value[3], noiselevel=-1)
3295                                 return 1
3296                         # Make sure that all of the ebuilds are actually listed in the
3297                         # Manifest.
3298                         for f in os.listdir(pkgdir):
3299                                 if f.endswith(".ebuild") and not mf.hasFile("EBUILD", f):
3300                                         writemsg("!!! A file is not listed in the " + \
3301                                         "Manifest: '%s'\n" % os.path.join(pkgdir, f),
3302                                         noiselevel=-1)
3303                                         return 1
3304                         _doebuild_manifest_checked = manifest_path
3305
3306         logfile=None
3307         builddir_lock = None
3308         try:
3309                 if mydo in ("digest", "manifest", "help"):
3310                         # Temporarily exempt the depend phase from manifest checks, in case
3311                         # aux_get calls trigger cache generation.
3312                         _doebuild_manifest_exempt_depend += 1
3313
3314                 doebuild_environment(myebuild, mydo, myroot, mysettings, debug,
3315                         use_cache, mydbapi)
3316
3317                 # get possible slot information from the deps file
3318                 if mydo == "depend":
3319                         writemsg("!!! DEBUG: dbkey: %s\n" % str(dbkey), 2)
3320                         if isinstance(dbkey, dict):
3321                                 mysettings["dbkey"] = ""
3322                                 pr, pw = os.pipe()
3323                                 fd_pipes = {0:0, 1:1, 2:2, 9:pw}
3324                                 mypids = spawn(EBUILD_SH_BINARY + " depend", mysettings,
3325                                         fd_pipes=fd_pipes, returnpid=True)
3326                                 os.close(pw) # belongs exclusively to the child process now
3327                                 maxbytes = 1024
3328                                 mybytes = []
3329                                 while True:
3330                                         mybytes.append(os.read(pr, maxbytes))
3331                                         if not mybytes[-1]:
3332                                                 break
3333                                 os.close(pr)
3334                                 mybytes = "".join(mybytes)
3335                                 global auxdbkeys
3336                                 for k, v in izip(auxdbkeys, mybytes.splitlines()):
3337                                         dbkey[k] = v
3338                                 retval = os.waitpid(mypids[0], 0)[1]
3339                                 portage_exec.spawned_pids.remove(mypids[0])
3340                                 # If it got a signal, return the signal that was sent, but
3341                                 # shift in order to distinguish it from a return value. (just
3342                                 # like portage_exec.spawn() would do).
3343                                 if retval & 0xff:
3344                                         return (retval & 0xff) << 8
3345                                 # Otherwise, return its exit code.
3346                                 return retval >> 8
3347                         elif dbkey:
3348                                 mysettings["dbkey"] = dbkey
3349                         else:
3350                                 mysettings["dbkey"] = \
3351                                         os.path.join(mysettings.depcachedir, "aux_db_key_temp")
3352
3353                         return spawn(EBUILD_SH_BINARY + " depend", mysettings)
3354
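                     # Sanity check the dependency strings before doing any real work:
                     # run dep_check over DEPEND, RDEPEND and PDEPEND and bail out with
                     # an error message if any of them fail to parse.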
3355                 mycpv = mysettings["CATEGORY"] + "/" + mysettings["PF"]
3356                 dep_keys = ["DEPEND", "RDEPEND", "PDEPEND"]
3357                 metadata = dict(izip(dep_keys, mydbapi.aux_get(mycpv, dep_keys)))
3358                 class FakeTree(object):
3359                         def __init__(self, mydb):
3360                                 self.dbapi = mydb
3361                 dep_check_trees = {myroot:{}}
3362                 dep_check_trees[myroot]["porttree"] = \
3363                         FakeTree(fakedbapi(settings=mysettings))
3364                 for dep_type in dep_keys:
3365                         mycheck = dep_check(metadata[dep_type], None, mysettings,
3366                                 myuse="all", myroot=myroot, trees=dep_check_trees)
3367                         if not mycheck[0]:
3368                                 writemsg("%s: %s\n%s\n" % (
3369                                         dep_type, metadata[dep_type], mycheck[1]), noiselevel=-1)
3370                                 return 1
3371
3372                 if "PORTAGE_TMPDIR" not in mysettings or \
3373                         not os.path.isdir(mysettings["PORTAGE_TMPDIR"]):
3374                         writemsg("The directory specified in your " + \
3375                                 "PORTAGE_TMPDIR variable, '%s',\n" % \
3376                                 mysettings.get("PORTAGE_TMPDIR", ""), noiselevel=-1)
3377                         writemsg("does not exist.  Please create this directory or " + \
3378                                 "correct your PORTAGE_TMPDIR setting.\n", noiselevel=-1)
3379                         return 1
3380
3381                 # Build directory creation isn't required for any of these.
3382                 if mydo not in ("digest", "fetch", "help", "manifest"):
3383                         mystatus = prepare_build_dirs(myroot, mysettings, cleanup)
3384                         if mystatus:
3385                                 return mystatus
3386                         # PORTAGE_LOG_FILE is set above by the prepare_build_dirs() call.
3387                         logfile = mysettings.get("PORTAGE_LOG_FILE", None)
3388                 if mydo == "unmerge":
3389                         return unmerge(mysettings["CATEGORY"],
3390                                 mysettings["PF"], myroot, mysettings, vartree=vartree)
3391
3392                 # if any of these are being called, handle them -- running them out of
3393                 # the sandbox -- and stop now.
3394                 if mydo in ["clean","cleanrm"]:
3395                         return spawn(EBUILD_SH_BINARY + " clean", mysettings,
3396                                 debug=debug, free=1, logfile=None)
3397                 elif mydo == "help":
3398                         return spawn(EBUILD_SH_BINARY + " " + mydo, mysettings,
3399                                 debug=debug, free=1, logfile=logfile)
3400                 elif mydo == "setup":
3401                         infodir = os.path.join(
3402                                 mysettings["PORTAGE_BUILDDIR"], "build-info")
3403                         if os.path.isdir(infodir):
3404                                 """Load USE flags for setup phase of a binary package.
3405                                 Ideally, the environment.bz2 would be used instead."""
3406                                 mysettings.load_infodir(infodir)
3407                         retval = spawn(EBUILD_SH_BINARY + " " + mydo, mysettings,
3408                                 debug=debug, free=1, logfile=logfile)
3409                         if secpass >= 2:
3410                                 """ Privileged phases may have left files that need to be made
3411                                 writable to a less privileged user."""
3412                                 apply_recursive_permissions(mysettings["T"],
3413                                         uid=portage_uid, gid=portage_gid, dirmode=070, dirmask=0,
3414                                         filemode=060, filemask=0)
3415                         return retval
3416                 elif mydo == "preinst":
3417                         mysettings["IMAGE"] = mysettings["D"]
3418                         phase_retval = spawn(" ".join((EBUILD_SH_BINARY, mydo)),
3419                                 mysettings, debug=debug, free=1, logfile=logfile)
3420                         if phase_retval == os.EX_OK:
3421                                 # Post phase logic and tasks that have been factored out of
3422                                 # ebuild.sh.
3423                                 myargs = [MISC_SH_BINARY, "preinst_bsdflags", "preinst_mask",
3424                                         "preinst_sfperms", "preinst_selinux_labels",
3425                                         "preinst_suid_scan"]
3426                                 mysettings["EBUILD_PHASE"] = ""
3427                                 phase_retval = spawn(" ".join(myargs),
3428                                         mysettings, debug=debug, free=1, logfile=logfile)
3429                                 if phase_retval != os.EX_OK:
3430                                         writemsg("!!! post preinst failed; exiting.\n",
3431                                                 noiselevel=-1)
3432                         del mysettings["IMAGE"]
3433                         return phase_retval
3434                 elif mydo == "postinst":
3435                         mysettings.load_infodir(mysettings["O"])
3436                         phase_retval = spawn(" ".join((EBUILD_SH_BINARY, mydo)),
3437                                 mysettings, debug=debug, free=1, logfile=logfile)
3438                         if phase_retval == os.EX_OK:
3439                                 # Post phase logic and tasks that have been factored out of
3440                                 # ebuild.sh.
3441                                 myargs = [MISC_SH_BINARY, "postinst_bsdflags"]
3442                                 mysettings["EBUILD_PHASE"] = ""
3443                                 phase_retval = spawn(" ".join(myargs),
3444                                         mysettings, debug=debug, free=1, logfile=logfile)
3445                                 if phase_retval != os.EX_OK:
3446                                         writemsg("!!! post postinst failed; exiting.\n",
3447                                                 noiselevel=-1)
3448                         return phase_retval
3449                 elif mydo in ["prerm","postrm","config"]:
3450                         mysettings.load_infodir(mysettings["O"])
3451                         return spawn(EBUILD_SH_BINARY + " " + mydo,
3452                                 mysettings, debug=debug, free=1, logfile=logfile)
3453
3454                 mycpv = "/".join((mysettings["CATEGORY"], mysettings["PF"]))
3455
3456                 # Make sure we get the correct tree in case there are overlays.
3457                 mytree = os.path.realpath(
3458                         os.path.dirname(os.path.dirname(mysettings["O"])))
3459                 newuris, alist = mydbapi.getfetchlist(
3460                         mycpv, mytree=mytree, mysettings=mysettings)
3461                 alluris, aalist = mydbapi.getfetchlist(
3462                         mycpv, mytree=mytree, all=True, mysettings=mysettings)
3463                 mysettings["A"] = " ".join(alist)
3464                 mysettings["AA"] = " ".join(aalist)
3465                 if ("mirror" in features) or fetchall:
3466                         fetchme = alluris[:]
3467                         checkme = aalist[:]
3468                 elif mydo == "digest":
3469                         fetchme = alluris[:]
3470                         checkme = aalist[:]
3471                         # Skip files that we already have digests for.
3472                         mf = Manifest(mysettings["O"], mysettings["DISTDIR"])
3473                         mydigests = mf.getTypeDigests("DIST")
3474                         for filename, hashes in mydigests.iteritems():
3475                                 if len(hashes) == len(mf.hashes):
3476                                         checkme = [i for i in checkme if i != filename]
3477                                         fetchme = [i for i in fetchme \
3478                                                 if os.path.basename(i) != filename]
3479                                 del filename, hashes
3480                 else:
3481                         fetchme = newuris[:]
3482                         checkme = alist[:]
3483
3484                 # Only try and fetch the files if we are going to need them ...
3485                 # otherwise, if user has FEATURES=noauto and they run `ebuild clean
3486                 # unpack compile install`, we will try and fetch 4 times :/
3487                 need_distfiles = (mydo in ("fetch", "unpack") or \
3488                         mydo not in ("digest", "manifest") and "noauto" not in features)
3489                 if need_distfiles and not fetch(
3490                         fetchme, mysettings, listonly=listonly, fetchonly=fetchonly):
3491                         return 1
3492
3493                 if mydo == "fetch" and listonly:
3494                         return 0
3495
3496                 try:
3497                         if mydo == "manifest":
3498                                 return not digestgen(aalist, mysettings, overwrite=1,
3499                                         manifestonly=1, myportdb=mydbapi)
3500                         elif mydo == "digest":
3501                                 return not digestgen(aalist, mysettings, overwrite=1,
3502                                         myportdb=mydbapi)
3503                         elif "digest" in mysettings.features:
3504                                 digestgen(aalist, mysettings, overwrite=0, myportdb=mydbapi)
3505                 except portage_exception.PermissionDenied, e:
3506                         writemsg("!!! %s\n" % str(e), noiselevel=-1)
3507                         if mydo in ("digest", "manifest"):
3508                                 return 1
3509
3510                 # See above comment about fetching only when needed
3511                 if not digestcheck(checkme, mysettings, ("strict" in features),
3512                         (mydo not in ["digest","fetch","unpack"] and \
3513                         mysettings.get("PORTAGE_CALLER", None) == "ebuild" and \
3514                         "noauto" in features)):
3515                         return 1
3516
3517                 if mydo == "fetch":
3518                         return 0
3519
3520                 # remove PORTAGE_ACTUAL_DISTDIR once cvs/svn is supported via SRC_URI
3521                 if (mydo != "setup" and "noauto" not in features) or mydo == "unpack":
3522                         orig_distdir = mysettings["DISTDIR"]
3523                         mysettings["PORTAGE_ACTUAL_DISTDIR"] = orig_distdir
3524                         edpath = mysettings["DISTDIR"] = \
3525                                 os.path.join(mysettings["PORTAGE_BUILDDIR"], "distdir")
3526                         if os.path.exists(edpath):
3527                                 try:
3528                                         if os.path.isdir(edpath) and not os.path.islink(edpath):
3529                                                 shutil.rmtree(edpath)
3530                                         else:
3531                                                 os.unlink(edpath)
3532                                 except OSError:
3533                                         print "!!! Failed resetting ebuild distdir path, " + edpath
3534                                         raise
3535                         os.mkdir(edpath)
3536                         apply_secpass_permissions(edpath, uid=portage_uid, mode=0755)
3537                         try:
3538                                 for file in aalist:
3539                                         os.symlink(os.path.join(orig_distdir, file),
3540                                                 os.path.join(edpath, file))
3541                         except OSError:
3542                                 print "!!! Failed symlinking in '%s' to ebuild distdir" % file
3543                                 raise
3544
3545                 #initial dep checks complete; time to process main commands
3546
3547                 nosandbox = (("userpriv" in features) and \
3548                         ("usersandbox" not in features) and \
3549                         ("userpriv" not in mysettings["RESTRICT"]) and \
3550                         ("nouserpriv" not in mysettings["RESTRICT"]))
3551                 if nosandbox and ("userpriv" not in features or \
3552                         "userpriv" in mysettings["RESTRICT"] or \
3553                         "nouserpriv" in mysettings["RESTRICT"]):
3554                         nosandbox = ("sandbox" not in features and \
3555                                 "usersandbox" not in features)
3556
3557                 sesandbox = mysettings.selinux_enabled() and \
3558                         "sesandbox" in mysettings.features
3559                 ebuild_sh = EBUILD_SH_BINARY + " %s"
3560                 misc_sh = MISC_SH_BINARY + " dyn_%s"
3561
3562                 # args are passed to the spawn function
3563                 actionmap = {
3564 "depend": {"cmd":ebuild_sh, "args":{"droppriv":1, "free":0,         "sesandbox":0}},
3565 "setup":  {"cmd":ebuild_sh, "args":{"droppriv":0, "free":1,         "sesandbox":0}},
3566 "unpack": {"cmd":ebuild_sh, "args":{"droppriv":1, "free":0,         "sesandbox":sesandbox}},
3567 "compile":{"cmd":ebuild_sh, "args":{"droppriv":1, "free":nosandbox, "sesandbox":sesandbox}},
3568 "test":   {"cmd":ebuild_sh, "args":{"droppriv":1, "free":nosandbox, "sesandbox":sesandbox}},
3569 "install":{"cmd":ebuild_sh, "args":{"droppriv":0, "free":0,         "sesandbox":sesandbox}},
3570 "rpm":    {"cmd":misc_sh,   "args":{"droppriv":0, "free":0,         "sesandbox":0}},
3571 "package":{"cmd":misc_sh,   "args":{"droppriv":0, "free":0,         "sesandbox":0}},
3572                 }
3573
3574                 # merge the deps in so we have again a 'full' actionmap
3575                 # be glad when this can die.
3576                 for x in actionmap.keys():
3577                         if len(actionmap_deps.get(x, [])):
3578                                 actionmap[x]["dep"] = ' '.join(actionmap_deps[x])
3579
3580                 if mydo in actionmap.keys():
3581                         if mydo=="package":
3582                                 portage_util.ensure_dirs(
3583                                         os.path.join(mysettings["PKGDIR"], mysettings["CATEGORY"]))
3584                                 portage_util.ensure_dirs(
3585                                         os.path.join(mysettings["PKGDIR"], "All"))
3586                         retval = spawnebuild(mydo,
3587                                 actionmap, mysettings, debug, logfile=logfile)
3588                 elif mydo=="qmerge":
3589                         # check to ensure install was run.  this *only* pops up when users
3590                         # forget it and are using ebuild
3591                         if not os.path.exists(
3592                                 os.path.join(mysettings["PORTAGE_BUILDDIR"], ".installed")):
3593                                 writemsg("!!! mydo=qmerge, but the install phase has not been run\n",
3594                                         noiselevel=-1)
3595                                 return 1
3596                         # qmerge is a special phase that implies noclean.
3597                         if "noclean" not in mysettings.features:
3598                                 mysettings.features.append("noclean")
3599                         #qmerge is specifically not supposed to do a runtime dep check
3600                         retval = merge(
3601                                 mysettings["CATEGORY"], mysettings["PF"], mysettings["D"],
3602                                 os.path.join(mysettings["PORTAGE_BUILDDIR"], "build-info"),
3603                                 myroot, mysettings, myebuild=mysettings["EBUILD"], mytree=tree,
3604                                 mydbapi=mydbapi, vartree=vartree, prev_mtimes=prev_mtimes)
3605                 elif mydo=="merge":
3606                         retval = spawnebuild("install", actionmap, mysettings, debug,
3607                                 alwaysdep=1, logfile=logfile)
3608                         if retval == os.EX_OK:
3609                                 retval = merge(mysettings["CATEGORY"], mysettings["PF"],
3610                                         mysettings["D"], os.path.join(mysettings["PORTAGE_BUILDDIR"],
3611                                         "build-info"), myroot, mysettings,
3612                                         myebuild=mysettings["EBUILD"], mytree=tree, mydbapi=mydbapi,
3613                                         vartree=vartree, prev_mtimes=prev_mtimes)
3614                 else:
3615                         print "!!! Unknown mydo:",mydo
3616                         return 1
3617
3618                 if retval != os.EX_OK and tree == "porttree":
3619                         for i in xrange(len(mydbapi.porttrees)-1):
3620                                 t = mydbapi.porttrees[i+1]
3621                                 if myebuild.startswith(t):
3622                                         # Display the non-canonical path, in case it's different, to
3623                                         # prevent confusion.
3624                                         overlays = mysettings["PORTDIR_OVERLAY"].split()
3625                                         try:
3626                                                 writemsg("!!! This ebuild is from an overlay: '%s'\n" % \
3627                                                         overlays[i], noiselevel=-1)
3628                                         except IndexError:
3629                                                 pass
3630                                         break
3631                 return retval
3632
3633         finally:
3634                 if builddir_lock:
3635                         portage_locks.unlockdir(builddir_lock)
3636
3637                 # Make sure that DISTDIR is restored to its normal value before we return!
3638                 if "PORTAGE_ACTUAL_DISTDIR" in mysettings:
3639                         mysettings["DISTDIR"] = mysettings["PORTAGE_ACTUAL_DISTDIR"]
3640                         del mysettings["PORTAGE_ACTUAL_DISTDIR"]
3641
3642                 if logfile:
3643                         try:
3644                                 if os.stat(logfile).st_size == 0:
3645                                         os.unlink(logfile)
3646                         except OSError:
3647                                 pass
3648
3649                 if mydo in ("digest", "manifest", "help"):
3650                         # If necessary, depend phase has been triggered by aux_get calls
3651                         # and the exemption is no longer needed.
3652                         _doebuild_manifest_exempt_depend -= 1
3653
3654 expandcache={}
3655
3656 def movefile(src,dest,newmtime=None,sstat=None,mysettings=None):
3657         """moves a file from src to dest, preserving all permissions and attributes; mtime will
3658         be preserved even when moving across filesystems.  Returns the destination mtime on
3659         success and None on failure.  Move is atomic."""
3660         #print "movefile("+str(src)+","+str(dest)+","+str(newmtime)+","+str(sstat)+")"
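             # Illustrative usage (hypothetical paths, comments only):
             #   newmtime = movefile("/path/to/image/usr/bin/foo", "/usr/bin/foo")
             #   if newmtime is None:
             #           the move failed and an error message was already printed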
3661         global lchown
3662         if mysettings is None:
3663                 global settings
3664                 mysettings = settings
3665         selinux_enabled = mysettings.selinux_enabled()
3666         try:
3667                 if not sstat:
3668                         sstat=os.lstat(src)
3669
3670         except SystemExit, e:
3671                 raise
3672         except Exception, e:
3673                 print "!!! Stating source file failed... movefile()"
3674                 print "!!!",e
3675                 return None
3676
3677         destexists=1
3678         try:
3679                 dstat=os.lstat(dest)
3680         except (OSError, IOError):
3681                 dstat=os.lstat(os.path.dirname(dest))
3682                 destexists=0
3683
3684         if bsd_chflags:
3685                 # Check that we can actually unset schg etc flags...
3686                 # Clear the flags on source and destination; we'll reinstate them after merging
3687                 if destexists and dstat.st_flags != 0:
3688                         if bsd_chflags.lchflags(dest, 0) < 0:
3689                                 writemsg("!!! Couldn't clear flags on file being merged: \n ",
3690                                         noiselevel=-1)
3691                 # We might have an immutable flag on the parent dir; save and clear.
3692                 pflags=bsd_chflags.lgetflags(os.path.dirname(dest))
3693                 if pflags != 0:
3694                         bsd_chflags.lchflags(os.path.dirname(dest), 0)
3695
3696                 if (destexists and bsd_chflags.lhasproblems(dest) > 0) or \
3697                         bsd_chflags.lhasproblems(os.path.dirname(dest)) > 0:
3698                         # This is bad: we can't merge the file with these flags set.
3699                         writemsg("!!! Can't merge file "+dest+" because of flags set\n",
3700                                 noiselevel=-1)
3701                         return None
3702
3703         if destexists:
3704                 if stat.S_ISLNK(dstat[stat.ST_MODE]):
3705                         try:
3706                                 os.unlink(dest)
3707                                 destexists=0
3708                         except SystemExit, e:
3709                                 raise
3710                         except Exception, e:
3711                                 pass
3712
3713         if stat.S_ISLNK(sstat[stat.ST_MODE]):
3714                 try:
3715                         target=os.readlink(src)
3716                         if mysettings and mysettings["D"]:
3717                                 if target.find(mysettings["D"])==0:
3718                                         target=target[len(mysettings["D"]):]
3719                         if destexists and not stat.S_ISDIR(dstat[stat.ST_MODE]):
3720                                 os.unlink(dest)
3721                         if selinux_enabled:
3722                                 sid = selinux.get_lsid(src)
3723                                 selinux.secure_symlink(target,dest,sid)
3724                         else:
3725                                 os.symlink(target,dest)
3726                         lchown(dest,sstat[stat.ST_UID],sstat[stat.ST_GID])
3727                         return os.lstat(dest)[stat.ST_MTIME]
3728                 except SystemExit, e:
3729                         raise
3730                 except Exception, e:
3731                         print "!!! failed to properly create symlink:"
3732                         print "!!!",dest,"->",target
3733                         print "!!!",e
3734                         return None
3735
3736         renamefailed=1
3737         if sstat[stat.ST_DEV]==dstat[stat.ST_DEV] or selinux_enabled:
3738                 try:
3739                         if selinux_enabled:
3740                                 ret=selinux.secure_rename(src,dest)
3741                         else:
3742                                 ret=os.rename(src,dest)
3743                         renamefailed=0
3744                 except SystemExit, e:
3745                         raise
3746                 except Exception, e:
3747                         if e[0]!=errno.EXDEV:
3748                                 # Some random error.
3749                                 print "!!! Failed to move",src,"to",dest
3750                                 print "!!!",e
3751                                 return None
3752                         # EXDEV: dest is on a different device ('bind' mounted or truly cross-device), so copy instead.
3753         if renamefailed:
3754                 didcopy=0
3755                 if stat.S_ISREG(sstat[stat.ST_MODE]):
3756                         try: # For safety copy then move it over.
3757                                 if selinux_enabled:
3758                                         selinux.secure_copy(src,dest+"#new")
3759                                         selinux.secure_rename(dest+"#new",dest)
3760                                 else:
3761                                         shutil.copyfile(src,dest+"#new")
3762                                         os.rename(dest+"#new",dest)
3763                                 didcopy=1
3764                         except SystemExit, e:
3765                                 raise
3766                         except Exception, e:
3767                                 print '!!! copy',src,'->',dest,'failed.'
3768                                 print "!!!",e
3769                                 return None
3770                 else:
3771                         #we don't yet handle special, so we need to fall back to /bin/mv
3772                         if selinux_enabled:
3773                                 a=commands.getstatusoutput(MOVE_BINARY+" -c -f "+"'"+src+"' '"+dest+"'")
3774                         else:
3775                                 a=commands.getstatusoutput(MOVE_BINARY+" -f "+"'"+src+"' '"+dest+"'")
3776                         if a[0]!=0:
3777                                 print "!!! Failed to move special file:"
3778                                 print "!!! '"+src+"' to '"+dest+"'"
3779                                 print "!!!",a
3780                                 return None # failure
3781                 try:
3782                         if didcopy:
3783                                 if stat.S_ISLNK(sstat[stat.ST_MODE]):
3784                                         lchown(dest,sstat[stat.ST_UID],sstat[stat.ST_GID])
3785                                 else:
3786                                         os.chown(dest,sstat[stat.ST_UID],sstat[stat.ST_GID])
3787                                 os.chmod(dest, stat.S_IMODE(sstat[stat.ST_MODE])) # Sticky is reset on chown
3788                                 os.unlink(src)
3789                 except SystemExit, e:
3790                         raise
3791                 except Exception, e:
3792                         print "!!! Failed to chown/chmod/unlink in movefile()"
3793                         print "!!!",dest
3794                         print "!!!",e
3795                         return None
3796
3797         if newmtime:
3798                 os.utime(dest,(newmtime,newmtime))
3799         else:
3800                 os.utime(dest, (sstat[stat.ST_ATIME], sstat[stat.ST_MTIME]))
3801                 newmtime=sstat[stat.ST_MTIME]
3802
3803         if bsd_chflags:
3804                 # Restore the flags we saved before moving
3805                 if pflags and bsd_chflags.lchflags(os.path.dirname(dest), pflags) < 0:
3806                         writemsg("!!! Couldn't restore flags (%s) on '%s'\n" % \
3807                                 (str(pflags), os.path.dirname(dest)), noiselevel=-1)
3808                         return None
3809
3810         return newmtime
3811
3812 def merge(mycat, mypkg, pkgloc, infloc, myroot, mysettings, myebuild=None,
3813         mytree=None, mydbapi=None, vartree=None, prev_mtimes=None):
3814         if not os.access(myroot, os.W_OK):
3815                 writemsg("Permission denied: access('%s', W_OK)\n" % myroot,
3816                         noiselevel=-1)
3817                 return errno.EACCES
3818         mylink = dblink(mycat, mypkg, myroot, mysettings, treetype=mytree,
3819                 vartree=vartree)
3820         return mylink.merge(pkgloc, infloc, myroot, myebuild,
3821                 mydbapi=mydbapi, prev_mtimes=prev_mtimes)
3822
3823 def unmerge(cat, pkg, myroot, mysettings, mytrimworld=1, vartree=None, ldpath_mtimes=None):
3824         mylink = dblink(
3825                 cat, pkg, myroot, mysettings, treetype="vartree", vartree=vartree)
3826         try:
3827                 mylink.lockdb()
3828                 if mylink.exists():
3829                         retval = mylink.unmerge(trimworld=mytrimworld, cleanup=1,
3830                                 ldpath_mtimes=ldpath_mtimes)
3831                         if retval == os.EX_OK:
3832                                 mylink.delete()
3833                         return retval
3834                 return os.EX_OK
3835         finally:
3836                 mylink.unlockdb()
3837
3838 def getCPFromCPV(mycpv):
3839         """Calls pkgsplit on a cpv and returns only the cp."""
3840         return pkgsplit(mycpv)[0]
3841
3842 def dep_virtual(mysplit, mysettings):
3843         "Does virtual dependency conversion"
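             # For example (package names illustrative): a dependency on "virtual/foo"
             # provided by two packages becomes "|| ( app-misc/foo-a app-misc/foo-b )",
             # while a blocker such as "!virtual/foo" expands to a plain list so that
             # every provider is blocked.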
3844         newsplit=[]
3845         myvirtuals = mysettings.getvirtuals()
3846         for x in mysplit:
3847                 if type(x)==types.ListType:
3848                         newsplit.append(dep_virtual(x, mysettings))
3849                 else:
3850                         mykey=dep_getkey(x)
3851                         mychoices = myvirtuals.get(mykey, None)
3852                         if mychoices:
3853                                 if len(mychoices) == 1:
3854                                         a = x.replace(mykey, mychoices[0])
3855                                 else:
3856                                         if x[0]=="!":
3857                                                 # blocker needs "and" not "or(||)".
3858                                                 a=[]
3859                                         else:
3860                                                 a=['||']
3861                                         for y in mychoices:
3862                                                 a.append(x.replace(mykey, y))
3863                                 newsplit.append(a)
3864                         else:
3865                                 newsplit.append(x)
3866         return newsplit
3867
3868 def _expand_new_virtuals(mysplit, edebug, mydbapi, mysettings, myroot="/",
3869         trees=None, **kwargs):
3870         """Recursively expand new-style virtuals so as to collapse one or more
3871         levels of indirection.  In dep_zapdeps, new-style virtuals will be assigned
3872         zero cost regardless of whether or not they are currently installed. Virtual
3873         blockers are supported but only when the virtual expands to a single
3874         atom because it wouldn't necessarily make sense to block all the components
3875         of a compound virtual.  When more than one new-style virtual is matched,
3876         the matches are sorted from highest to lowest versions and the atom is
3877         expanded to || ( highest match ... lowest match )."""
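             # Rough illustration (hypothetical virtual): if "virtual/foo" is matched by
             # the new-style packages virtual/foo-2 and virtual/foo-1, the atom becomes
             #   || ( ( <foo-2 dep atoms> =virtual/foo-2 )
             #        ( <foo-1 dep atoms> =virtual/foo-1 )
             #        <any old-style PROVIDE choices> )
             # with higher versions listed first.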
3878         newsplit = []
3879         # According to GLEP 37, RDEPEND is the only dependency type that is valid
3880         # for new-style virtuals.  Repoman should enforce this.
3881         dep_keys = ["RDEPEND", "DEPEND", "PDEPEND"]
3882         def compare_pkgs(a, b):
3883                 return pkgcmp(b[1], a[1])
3884         portdb = trees[myroot]["porttree"].dbapi
3885         myvirtuals = mysettings.getvirtuals()
3886         for x in mysplit:
3887                 if x == "||":
3888                         newsplit.append(x)
3889                         continue
3890                 elif isinstance(x, list):
3891                         newsplit.append(_expand_new_virtuals(x, edebug, mydbapi,
3892                                 mysettings, myroot=myroot, trees=trees, **kwargs))
3893                         continue
3894                 if portage_dep._dep_check_strict and \
3895                         not isvalidatom(x, allow_blockers=True):
3896                         raise portage_exception.ParseError(
3897                                 "invalid atom: '%s'" % x)
3898                 mykey = dep_getkey(x)
3899                 if not mykey.startswith("virtual/"):
3900                         newsplit.append(x)
3901                         continue
3902                 mychoices = myvirtuals.get(mykey, [])
3903                 isblocker = x.startswith("!")
3904                 match_atom = x
3905                 if isblocker:
3906                         match_atom = x[1:]
3907                 pkgs = []
3908                 for cpv in portdb.match(match_atom):
3909                         # only use new-style matches
3910                         if cpv.startswith("virtual/"):
3911                                 pkgs.append((cpv, pkgsplit(cpv)))
3912                 if not (pkgs or mychoices):
3913                         # This one couldn't be expanded as a new-style virtual.  Old-style
3914                         # virtuals have already been expanded by dep_virtual, so this one
3915                         # is unavailable and dep_zapdeps will identify it as such.  The
3916                         # atom is not eliminated here since it may still represent a
3917                         # dependency that needs to be satisfied.
3918                         newsplit.append(x)
3919                         continue
3920                 if not pkgs and len(mychoices) == 1:
3921                         newsplit.append(x.replace(mykey, mychoices[0]))
3922                         continue
3923                 pkgs.sort(compare_pkgs) # Prefer higher versions.
3924                 if isblocker:
3925                         a = []
3926                 else:
3927                         a = ['||']
3928                 for y in pkgs:
3929                         depstring = " ".join(portdb.aux_get(y[0], dep_keys))
3930                         if edebug:
3931                                 print "Virtual Parent:   ", y[0]
3932                                 print "Virtual Depstring:", depstring
3933                         mycheck = dep_check(depstring, mydbapi, mysettings, myroot=myroot,
3934                                 trees=trees, **kwargs)
3935                         if not mycheck[0]:
3936                                 raise portage_exception.ParseError(
3937                                         "%s: %s '%s'" % (y[0], mycheck[1], depstring))
3938                         if isblocker:
3939                                 virtual_atoms = [atom for atom in mycheck[1] \
3940                                         if not atom.startswith("!")]
3941                                 if len(virtual_atoms) == 1:
3942                                         # It wouldn't make sense to block all the components of a
3943                                         # compound virtual, so only a single atom block is allowed.
3944                                         a.append("!" + virtual_atoms[0])
3945                         else:
3946                                 mycheck[1].append("="+y[0]) # pull in the new-style virtual
3947                                 a.append(mycheck[1])
3948                 # Plain old-style virtuals.  New-style virtuals are preferred.
3949                 for y in mychoices:
3950                         a.append(x.replace(mykey, y))
3951                 if isblocker and not a:
3952                         # Probably a compound virtual.  Pass the atom through unprocessed.
3953                         newsplit.append(x)
3954                         continue
3955                 newsplit.append(a)
3956         return newsplit
3957
3958 def dep_eval(deplist):
3959         if not deplist:
3960                 return 1
3961         if deplist[0]=="||":
3962                 #or list; we just need one "1"
3963                 for x in deplist[1:]:
3964                         if type(x)==types.ListType:
3965                                 if dep_eval(x)==1:
3966                                         return 1
3967                         elif x==1:
3968                                 return 1
3969                 #XXX: unless there are no available atoms in the list
3970                 #in which case we need to assume that everything is
3971                 #okay as some ebuilds are relying on an old bug.
3972                 if len(deplist) == 1:
3973                         return 1
3974                 return 0
3975         else:
3976                 for x in deplist:
3977                         if type(x)==types.ListType:
3978                                 if dep_eval(x)==0:
3979                                         return 0
3980                         elif x==0 or x==2:
3981                                 return 0
3982                 return 1
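     # Illustrative usage of dep_eval (not in the original source): an or-list
     # such as ["||", 0, 1] evaluates to 1 because a single satisfied
     # alternative is enough, while a plain and-list such as [1, 0] evaluates
     # to 0 because every element must be satisfied.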
3983
3984 def dep_zapdeps(unreduced, reduced, myroot, use_binaries=0, trees=None):
3985         """Takes an unreduced and reduced deplist and removes satisfied dependencies.
3986         Returned deplist contains steps that must be taken to satisfy dependencies."""
3987         if trees is None:
3988                 global db
3989                 trees = db
3990         writemsg("ZapDeps -- %s\n" % (use_binaries), 2)
3991         if not reduced or unreduced == ["||"] or dep_eval(reduced):
3992                 return []
3993
3994         if unreduced[0] != "||":
3995                 unresolved = []
3996                 for dep, satisfied in izip(unreduced, reduced):
3997                         if isinstance(dep, list):
3998                                 unresolved += dep_zapdeps(dep, satisfied, myroot,
3999                                         use_binaries=use_binaries, trees=trees)
4000                         elif not satisfied:
4001                                 unresolved.append(dep)
4002                 return unresolved
4003
4004         # We're at a ( || atom ... ) type level and need to make a choice
4005         deps = unreduced[1:]
4006         satisfieds = reduced[1:]
4007
4008                 # Our preference order is for the first item that:
4009         # a) contains all unmasked packages with the same key as installed packages
4010         # b) contains all unmasked packages
4011         # c) contains masked installed packages
4012         # d) is the first item
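                     # Illustrative example (not from the original source): for
                     # || ( app-editors/vim app-editors/emacs ) with vim already
                     # installed and both packages unmasked, the vim alternative
                     # falls into class (a) and the emacs alternative into (b).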
4013
4014         preferred = []
4015         possible_upgrades = []
4016         other = []
4017
4018         # Alias the trees we'll be checking availability against
4019         vardb = None
4020         if "vartree" in trees[myroot]:
4021                 vardb = trees[myroot]["vartree"].dbapi
4022         if use_binaries:
4023                 mydbapi = trees[myroot]["bintree"].dbapi
4024         else:
4025                 mydbapi = trees[myroot]["porttree"].dbapi
4026
4027         # Sort the deps into preferred (installed) and other
4028         # with values of [[required_atom], availability]
4029         for dep, satisfied in izip(deps, satisfieds):
4030                 if isinstance(dep, list):
4031                         atoms = dep_zapdeps(dep, satisfied, myroot,
4032                                 use_binaries=use_binaries, trees=trees)
4033                 else:
4034                         atoms = [dep]
4035
4036                 all_available = True
4037                 for atom in atoms:
4038                         if not mydbapi.match(atom):
4039                                 all_available = False
4040                                 break
4041
4042                 if not vardb:
4043                         # called by repoman
4044                         preferred.append((atoms, None, all_available))
4045                         continue
4046
4047                 """ The package names rather than the exact atoms are used for an
4048                 initial rough match against installed packages.  More specific
4049                 preference selection is handled later via slot and version comparison."""
4050                 all_installed = True
4051                 for atom in set([dep_getkey(atom) for atom in atoms]):
4052                         # New-style virtuals have zero cost to install.
4053                         if not vardb.match(atom) and not atom.startswith("virtual/"):
4054                                 all_installed = False
4055                                 break
4056
4057                 # Check if the set of atoms will result in a downgrade of
4058                 # an installed package. If they will then don't prefer them
4059                 # over other atoms.
4060                 has_downgrade = False
4061                 versions = {}
4062                 if all_installed or all_available:
4063                         for atom in atoms:
4064                                 mykey = dep_getkey(atom)
4065                                 avail_pkg = best(mydbapi.match(atom))
4066                                 if not avail_pkg:
4067                                         continue
4068                                 avail_slot = "%s:%s" % (mykey,
4069                                         mydbapi.aux_get(avail_pkg, ["SLOT"])[0])
4070                                 versions[avail_slot] = avail_pkg
4071                                 inst_pkg = vardb.match(avail_slot)
4072                                 if not inst_pkg:
4073                                         continue
4074                                 # emerge guarantees 1 package per slot here (highest counter)
4075                                 inst_pkg = inst_pkg[0]
4076                                 if avail_pkg != inst_pkg and \
4077                                         avail_pkg != best([avail_pkg, inst_pkg]):
4078                                         has_downgrade = True
4079                                         break
4080
4081                 this_choice = (atoms, versions, all_available)
4082                 if not has_downgrade:
4083                         if all_installed:
4084                                 preferred.append(this_choice)
4085                                 continue
4086                         elif all_available:
4087                                 possible_upgrades.append(this_choice)
4088                                 continue
4089                 other.append(this_choice)
4090
4091         # Compare the "all_installed" choices against the "all_available" choices
4092         # for possible missed upgrades.  The main purpose of this code is to find
4093         # upgrades of new-style virtuals since _expand_new_virtuals() expands them
4094         # into || ( highest version ... lowest version ).  We want to prefer the
4095         # highest all_available version of the new-style virtual when there is a
4096         # lower all_installed version.
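             # Illustrative case: when a new-style virtual such as virtual/jdk
             # has been expanded into choices for its 1.5 and 1.4 providers and
             # only the 1.4 provider is installed while the 1.5 provider is
             # available and unmasked, the 1.5 choice is moved ahead of the
             # installed choice below.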
4097         for possible_upgrade in list(possible_upgrades):
4098                 atoms, versions, all_available = possible_upgrade
4099                 myslots = set(versions)
4100                 for other_choice in preferred:
4101                         o_atoms, o_versions, o_all_available = other_choice
4102                         intersecting_slots = myslots.intersection(o_versions)
4103                         if not intersecting_slots:
4104                                 continue
4105                         has_upgrade = False
4106                         has_downgrade = False
4107                         for myslot in intersecting_slots:
4108                                 myversion = versions[myslot]
4109                                 o_version = o_versions[myslot]
4110                                 if myversion != o_version:
4111                                         if myversion == best([myversion, o_version]):
4112                                                 has_upgrade = True
4113                                         else:
4114                                                 has_downgrade = True
4115                                                 break
4116                         if has_upgrade and not has_downgrade:
4117                                 o_index = preferred.index(other_choice)
4118                                 preferred.insert(o_index, possible_upgrade)
4119                                 possible_upgrades.remove(possible_upgrade)
4120                                 break
4121         preferred.extend(possible_upgrades)
4122
4123         # preferred now contains a) and c) from the order above with
4124         # the masked flag differentiating the two. other contains b)
4125         # and d) so adding other to preferred will give us a suitable
4126         # list to iterate over.
4127         preferred.extend(other)
4128
4129         for allow_masked in (False, True):
4130                 for atoms, versions, all_available in preferred:
4131                         if all_available or allow_masked:
4132                                 return atoms
4133
4134         assert(False) # This point should not be reachable
4135
4136
4137 def dep_expand(mydep, mydb=None, use_cache=1, settings=None):
4138         if not len(mydep):
4139                 return mydep
4140         if mydep[0]=="*":
4141                 mydep=mydep[1:]
4142         orig_dep = mydep
4143         mydep = dep_getcpv(orig_dep)
4144         myindex = orig_dep.index(mydep)
4145         prefix = orig_dep[:myindex]
4146         postfix = orig_dep[myindex+len(mydep):]
4147         return prefix + cpv_expand(
4148                 mydep, mydb=mydb, use_cache=use_cache, settings=settings) + postfix
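     # Illustrative example of dep_expand (names assumed): dep_expand(">=gtk+-2.8")
     # yields ">=x11-libs/gtk+-2.8" when the configured tree resolves the bare
     # name to the x11-libs category; the operator prefix and version suffix
     # are preserved around the expanded cpv.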
4149
4150 def dep_check(depstring, mydbapi, mysettings, use="yes", mode=None, myuse=None,
4151         use_cache=1, use_binaries=0, myroot="/", trees=None):
4152         """Takes a depend string and parses the condition."""
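             # Return format: [1, atom_list] on success (the list may be empty
             # when everything is already satisfied) or [0, error_message] when
             # the dependency string cannot be parsed or reduced.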
4153         edebug = mysettings.get("PORTAGE_DEBUG", None) == "1"
4154         #check_config_instance(mysettings)
4155         if trees is None:
4156                 trees = globals()["db"]
4157         if use=="yes":
4158                 if myuse is None:
4159                         #default behavior
4160                         myusesplit = mysettings["USE"].split()
4161                 else:
4162                         myusesplit = myuse
4163                         # We've been given useflags to use.
4164                         #print "USE FLAGS PASSED IN."
4165                         #print myuse
4166                         #if "bindist" in myusesplit:
4167                         #       print "BINDIST is set!"
4168                         #else:
4169                         #       print "BINDIST NOT set."
4170         else:
4171                 #we are being run by autouse(), don't consult USE vars yet.
4172                 # WE ALSO CANNOT USE SETTINGS
4173                 myusesplit=[]
4174
4175         #convert parenthesis to sublists
4176         mysplit = portage_dep.paren_reduce(depstring)
4177
4178         mymasks = set()
4179         useforce = set()
4180         useforce.add(mysettings["ARCH"])
4181         if use == "all":
4182                 # This masking/forcing is only for repoman.  In other cases, relevant
4183                 # masking/forcing should have already been applied via
4184                 # config.regenerate().  Also, binary or installed packages may have
4185                 # been built with flags that are now masked, and it would be
4186                 # inconsistent to mask them now.  Additionally, myuse may consist of
4187                 # flags from a parent package that is being merged to a $ROOT that is
4188                 # different from the one that mysettings represents.
4189                 mymasks.update(mysettings.usemask)
4190                 mymasks.update(mysettings.archlist())
4191                 mymasks.discard(mysettings["ARCH"])
4192                 useforce.update(mysettings.useforce)
4193                 useforce.difference_update(mymasks)
4194         try:
4195                 mysplit = portage_dep.use_reduce(mysplit, uselist=myusesplit,
4196                         masklist=mymasks, matchall=(use=="all"), excludeall=useforce)
4197         except portage_exception.InvalidDependString, e:
4198                 return [0, str(e)]
4199
4200         # Do the || conversions
4201         mysplit=portage_dep.dep_opconvert(mysplit)
4202
4203         if mysplit == []:
4204                 #dependencies were reduced to nothing
4205                 return [1,[]]
4206
4207         # Recursively expand new-style virtuals so as to
4208         # collapse one or more levels of indirection.
4209         try:
4210                 mysplit = _expand_new_virtuals(mysplit, edebug, mydbapi, mysettings,
4211                         use=use, mode=mode, myuse=myuse, use_cache=use_cache,
4212                         use_binaries=use_binaries, myroot=myroot, trees=trees)
4213         except portage_exception.ParseError, e:
4214                 return [0, str(e)]
4215
4216         mysplit2=mysplit[:]
4217         mysplit2=dep_wordreduce(mysplit2,mysettings,mydbapi,mode,use_cache=use_cache)
4218         if mysplit2 is None:
4219                 return [0,"Invalid token"]
4220
4221         writemsg("\n\n\n", 1)
4222         writemsg("mysplit:  %s\n" % (mysplit), 1)
4223         writemsg("mysplit2: %s\n" % (mysplit2), 1)
4224
4225         myzaps = dep_zapdeps(mysplit, mysplit2, myroot,
4226                 use_binaries=use_binaries, trees=trees)
4227         mylist = flatten(myzaps)
4228         writemsg("myzaps:   %s\n" % (myzaps), 1)
4229         writemsg("mylist:   %s\n" % (mylist), 1)
4230         #remove duplicates
4231         mydict={}
4232         for x in mylist:
4233                 mydict[x]=1
4234         writemsg("mydict:   %s\n" % (mydict), 1)
4235         return [1,mydict.keys()]
4236
4237 def dep_wordreduce(mydeplist,mysettings,mydbapi,mode,use_cache=1):
4238         "Reduces the deplist to ones and zeros"
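             # Each atom is replaced by True/False depending on whether it is
             # satisfied by package.provided or by the given dbapi (xmatch is
             # used when a mode is supplied); "||" markers are passed through,
             # blocker ("!") atoms are marked False, and None is returned if an
             # invalid token is encountered.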
4239         deplist=mydeplist[:]
4240         for mypos in xrange(len(deplist)):
4241                 if type(deplist[mypos])==types.ListType:
4242                         #recurse
4243                         deplist[mypos]=dep_wordreduce(deplist[mypos],mysettings,mydbapi,mode,use_cache=use_cache)
4244                 elif deplist[mypos]=="||":
4245                         pass
4246                 else:
4247                         mykey = dep_getkey(deplist[mypos])
4248                         if mysettings and mysettings.pprovideddict.has_key(mykey) and \
4249                                 match_from_list(deplist[mypos], mysettings.pprovideddict[mykey]):
4250                                 deplist[mypos]=True
4251                         elif mydbapi is None:
4252                                 # Assume nothing is satisfied.  This forces dep_zapdeps to
4253                                 # return all of the deps that have been selected
4254                                 # (excluding those satisfied by package.provided).
4255                                 deplist[mypos] = False
4256                         else:
4257                                 if mode:
4258                                         mydep=mydbapi.xmatch(mode,deplist[mypos])
4259                                 else:
4260                                         mydep=mydbapi.match(deplist[mypos],use_cache=use_cache)
4261                                 if mydep is not None:
4262                                         tmp=(len(mydep)>=1)
4263                                         if deplist[mypos][0]=="!":
4264                                                 tmp=False
4265                                         deplist[mypos]=tmp
4266                                 else:
4267                                         #encountered invalid string
4268                                         return None
4269         return deplist
4270
4271 def cpv_getkey(mycpv):
4272         myslash=mycpv.split("/")
4273         mysplit=pkgsplit(myslash[-1])
4274         mylen=len(myslash)
4275         if mylen==2:
4276                 return myslash[0]+"/"+mysplit[0]
4277         elif mylen==1:
4278                 return mysplit[0]
4279         else:
4280                 return mysplit
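     # Illustrative example: cpv_getkey("sys-apps/portage-2.1.2") returns
     # "sys-apps/portage", while a bare "portage-2.1.2" returns just "portage".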
4281
4282 def key_expand(mykey, mydb=None, use_cache=1, settings=None):
4283         mysplit=mykey.split("/")
4284         if settings is None:
4285                 settings = globals()["settings"]
4286         virts = settings.getvirtuals("/")
4287         virts_p = settings.get_virts_p("/")
4288         if len(mysplit)==1:
4289                 if mydb and type(mydb)==types.InstanceType:
4290                         for x in settings.categories:
4291                                 if mydb.cp_list(x+"/"+mykey,use_cache=use_cache):
4292                                         return x+"/"+mykey
4293                         if virts_p.has_key(mykey):
4294                                 return(virts_p[mykey][0])
4295                 return "null/"+mykey
4296         elif mydb:
4297                 if type(mydb)==types.InstanceType:
4298                         if (not mydb.cp_list(mykey,use_cache=use_cache)) and virts and virts.has_key(mykey):
4299                                 return virts[mykey][0]
4300                 return mykey
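     # key_expand example (illustrative, assuming the tree provides
     # sys-apps/portage): key_expand("portage", mydb=portdb) returns
     # "sys-apps/portage"; unresolvable names fall back to the virtuals
     # mappings and finally to "null/<name>".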
4301
4302 def cpv_expand(mycpv, mydb=None, use_cache=1, settings=None):
4303         """Given a string (packagename or virtual) expand it into a valid
4304         cat/package string. Virtuals use the mydb to determine which provided
4305         virtual is a valid choice and defaults to the first element when there
4306         are no installed/available candidates."""
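             # Illustrative example (category assumed): cpv_expand("foo-1.0",
             # mydb=portdb) returns "app-misc/foo-1.0" when exactly one category
             # provides foo, and raises ValueError with the candidate list when
             # the short name is ambiguous.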
4307         myslash=mycpv.split("/")
4308         mysplit=pkgsplit(myslash[-1])
4309         if settings is None:
4310                 settings = globals()["settings"]
4311         virts = settings.getvirtuals("/")
4312         virts_p = settings.get_virts_p("/")
4313         if len(myslash)>2:
4314                 # this is illegal case.
4315                 mysplit=[]
4316                 mykey=mycpv
4317         elif len(myslash)==2:
4318                 if mysplit:
4319                         mykey=myslash[0]+"/"+mysplit[0]
4320                 else:
4321                         mykey=mycpv
4322                 if mydb and virts and mykey in virts:
4323                         writemsg("mydb.__class__: %s\n" % (mydb.__class__), 1)
4324                         if type(mydb)==types.InstanceType:
4325                                 if not mydb.cp_list(mykey, use_cache=use_cache):
4326                                         writemsg("virts[%s]: %s\n" % (str(mykey),virts[mykey]), 1)
4327                                         mykey_orig = mykey[:]
4328                                         for vkey in virts[mykey]:
4329                                                 if mydb.cp_list(vkey,use_cache=use_cache):
4330                                                         mykey = vkey
4331                                                         writemsg("virts chosen: %s\n" % (mykey), 1)
4332                                                         break
4333                                         if mykey == mykey_orig:
4334                                                 mykey=virts[mykey][0]
4335                                                 writemsg("virts defaulted: %s\n" % (mykey), 1)
4336                         #we only perform virtual expansion if we are passed a dbapi
4337         else:
4338                 #specific cpv, no category, ie. "foo-1.0"
4339                 if mysplit:
4340                         myp=mysplit[0]
4341                 else:
4342                         # "foo" ?
4343                         myp=mycpv
4344                 mykey=None
4345                 matches=[]
4346                 if mydb:
4347                         for x in settings.categories:
4348                                 if mydb.cp_list(x+"/"+myp,use_cache=use_cache):
4349                                         matches.append(x+"/"+myp)
4350                 if (len(matches)>1):
4351                         raise ValueError, matches
4352                 elif matches:
4353                         mykey=matches[0]
4354
4355                 if not mykey and type(mydb)!=types.ListType:
4356                         if virts_p.has_key(myp):
4357                                 mykey=virts_p[myp][0]
4358                         #again, we only perform virtual expansion if we have a dbapi (not a list)
4359                 if not mykey:
4360                         mykey="null/"+myp
4361         if mysplit:
4362                 if mysplit[2]=="r0":
4363                         return mykey+"-"+mysplit[1]
4364                 else:
4365                         return mykey+"-"+mysplit[1]+"-"+mysplit[2]
4366         else:
4367                 return mykey
4368
4369 def getmaskingreason(mycpv, settings=None, portdb=None):
4370         from portage_util import grablines
4371         if settings is None:
4372                 settings = globals()["settings"]
4373         if portdb is None:
4374                 portdb = globals()["portdb"]
4375         mysplit = catpkgsplit(mycpv)
4376         if not mysplit:
4377                 raise ValueError("invalid CPV: %s" % mycpv)
4378         if not portdb.cpv_exists(mycpv):
4379                 raise KeyError("CPV %s does not exist" % mycpv)
4380         mycp=mysplit[0]+"/"+mysplit[1]
4381
4382         # XXX- This is a temporary duplicate of code from the config constructor.
4383         locations = [os.path.join(settings["PORTDIR"], "profiles")]
4384         locations.extend(settings.profiles)
4385         for ov in settings["PORTDIR_OVERLAY"].split():
4386                 profdir = os.path.join(normalize_path(ov), "profiles")
4387                 if os.path.isdir(profdir):
4388                         locations.append(profdir)
4389         locations.append(os.path.join(settings["PORTAGE_CONFIGROOT"],
4390                 USER_CONFIG_PATH.lstrip(os.path.sep)))
4391         locations.reverse()
4392         pmasklists = [grablines(os.path.join(x, "package.mask"), recursive=1) for x in locations]
4393         pmasklines = []
4394         while pmasklists: # stack_lists doesn't preserve order so it can't be used
4395                 pmasklines.extend(pmasklists.pop(0))
4396         del pmasklists
4397
4398         if settings.pmaskdict.has_key(mycp):
4399                 for x in settings.pmaskdict[mycp]:
4400                         if mycpv in portdb.xmatch("match-all", x):
4401                                 comment = ""
4402                                 l = "\n"
4403                                 comment_valid = -1
4404                                 for i in xrange(len(pmasklines)):
4405                                         l = pmasklines[i].strip()
4406                                         if l == "":
4407                                                 comment = ""
4408                                                 comment_valid = -1
4409                                         elif l[0] == "#":
4410                                                 comment += (l+"\n")
4411                                                 comment_valid = i + 1
4412                                         elif l == x:
4413                                                 if comment_valid != i:
4414                                                         comment = ""
4415                                                 return comment
4416                                         elif comment_valid != -1:
4417                                         # Apparently this comment applies to multiple masks, so
4418                                                 # it remains valid until a blank line is encountered.
4419                                                 comment_valid += 1
4420         return None
4421
4422 def getmaskingstatus(mycpv, settings=None, portdb=None):
4423         if settings is None:
4424                 settings = globals()["settings"]
4425         if portdb is None:
4426                 portdb = globals()["portdb"]
4427         mysplit = catpkgsplit(mycpv)
4428         if not mysplit:
4429                 raise ValueError("invalid CPV: %s" % mycpv)
4430         if not portdb.cpv_exists(mycpv):
4431                 raise KeyError("CPV %s does not exist" % mycpv)
4432         mycp=mysplit[0]+"/"+mysplit[1]
4433
4434         rValue = []
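             # rValue collects human-readable masking reasons such as "profile",
             # "package.mask" or "~x86 keyword"; an empty list means the package
             # is not masked.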
4435
4436         # profile checking
4437         revmaskdict=settings.prevmaskdict
4438         if revmaskdict.has_key(mycp):
4439                 for x in revmaskdict[mycp]:
4440                         if x[0]=="*":
4441                                 myatom = x[1:]
4442                         else:
4443                                 myatom = x
4444                         if not match_to_list(mycpv, [myatom]):
4445                                 rValue.append("profile")
4446                                 break
4447
4448         # package.mask checking
4449         maskdict=settings.pmaskdict
4450         unmaskdict=settings.punmaskdict
4451         if maskdict.has_key(mycp):
4452                 for x in maskdict[mycp]:
4453                         if mycpv in portdb.xmatch("match-all", x):
4454                                 unmask=0
4455                                 if unmaskdict.has_key(mycp):
4456                                         for z in unmaskdict[mycp]:
4457                                                 if mycpv in portdb.xmatch("match-all",z):
4458                                                         unmask=1
4459                                                         break
4460                                 if unmask==0:
4461                                         rValue.append("package.mask")
4462
4463         # keywords checking
4464         try:
4465                 mygroups, eapi = portdb.aux_get(mycpv, ["KEYWORDS", "EAPI"])
4466         except KeyError:
4467                 # The "depend" phase apparently failed for some reason.  An associated
4468                 # error message will have already been printed to stderr.
4469                 return ["corruption"]
4470         if not eapi_is_supported(eapi):
4471                 return ["required EAPI %s, supported EAPI %s" % (eapi, portage_const.EAPI)]
4472         mygroups = mygroups.split()
4473         pgroups = settings["ACCEPT_KEYWORDS"].split()
4474         myarch = settings["ARCH"]
4475         if pgroups and myarch not in pgroups:
4476                 """For operating systems other than Linux, ARCH is not necessarily a
4477                 valid keyword."""
4478                 myarch = pgroups[0].lstrip("~")
4479         pkgdict = settings.pkeywordsdict
4480
4481         cp = dep_getkey(mycpv)
4482         if pkgdict.has_key(cp):
4483                 matches = match_to_list(mycpv, pkgdict[cp].keys())
4484                 for match in matches:
4485                         pgroups.extend(pkgdict[cp][match])
4486                 if matches:
4487                         inc_pgroups = []
4488                         for x in pgroups:
4489                                 if x != "-*" and x.startswith("-"):
4490                                         try:
4491                                                 inc_pgroups.remove(x[1:])
4492                                         except ValueError:
4493                                                 pass
4494                                 if x not in inc_pgroups:
4495                                         inc_pgroups.append(x)
4496                         pgroups = inc_pgroups
4497                         del inc_pgroups
4498
4499         kmask = "missing"
4500
4501         for keyword in pgroups:
4502                 if keyword in mygroups:
4503                         kmask=None
4504
4505         if kmask:
4506                 fallback = None
4507                 for gp in mygroups:
4508                         if gp=="*":
4509                                 kmask=None
4510                                 break
4511                         elif gp=="-"+myarch:
4512                                 kmask="-"+myarch
4513                                 break
4514                         elif gp=="~"+myarch:
4515                                 kmask="~"+myarch
4516                                 break
4517
4518         if kmask:
4519                 rValue.append(kmask+" keyword")
4520         return rValue
4521
4522 class portagetree:
4523         def __init__(self, root="/", virtual=None, clone=None, settings=None):
4524
4525                 if clone:
4526                         self.root=clone.root
4527                         self.portroot=clone.portroot
4528                         self.pkglines=clone.pkglines
4529                 else:
4530                         self.root=root
4531                         if settings is None:
4532                                 settings = globals()["settings"]
4533                         self.settings = settings
4534                         self.portroot=settings["PORTDIR"]
4535                         self.virtual=virtual
4536                         self.dbapi = portdbapi(
4537                                 settings["PORTDIR"], mysettings=settings)
4538
4539         def dep_bestmatch(self,mydep):
4540                 "compatibility method"
4541                 mymatch=self.dbapi.xmatch("bestmatch-visible",mydep)
4542                 if mymatch is None:
4543                         return ""
4544                 return mymatch
4545
4546         def dep_match(self,mydep):
4547                 "compatibility method"
4548                 mymatch=self.dbapi.xmatch("match-visible",mydep)
4549                 if mymatch is None:
4550                         return []
4551                 return mymatch
4552
4553         def exists_specific(self,cpv):
4554                 return self.dbapi.cpv_exists(cpv)
4555
4556         def getallnodes(self):
4557                 """new behavior: these are all *unmasked* nodes.  There may or may not be
4558                 masked packages available for the nodes in this list."""
4559                 return self.dbapi.cp_all()
4560
4561         def getname(self,pkgname):
4562                 "returns file location for this particular package (DEPRECATED)"
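                     # Illustrative example: getname("sys-apps/portage-2.1.2")
                     # returns <portroot>/sys-apps/portage/portage-2.1.2.ebuild.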
4563                 if not pkgname:
4564                         return ""
4565                 mysplit=pkgname.split("/")
4566                 psplit=pkgsplit(mysplit[1])
4567                 return self.portroot+"/"+mysplit[0]+"/"+psplit[0]+"/"+mysplit[1]+".ebuild"
4568
4569         def resolve_specific(self,myspec):
4570                 cps=catpkgsplit(myspec)
4571                 if not cps:
4572                         return None
4573                 mykey = key_expand(cps[0]+"/"+cps[1], mydb=self.dbapi,
4574                         settings=self.settings)
4575                 mykey=mykey+"-"+cps[2]
4576                 if cps[3]!="r0":
4577                         mykey=mykey+"-"+cps[3]
4578                 return mykey
4579
4580         def depcheck(self,mycheck,use="yes",myusesplit=None):
4581                 return dep_check(mycheck,self.dbapi,self.settings,use=use,myuse=myusesplit)
4582
4583         def getslot(self,mycatpkg):
4584                 "Get a slot for a catpkg; assume it exists."
4585                 myslot = ""
4586                 try:
4587                         myslot=self.dbapi.aux_get(mycatpkg,["SLOT"])[0]
4588                 except SystemExit, e:
4589                         raise
4590                 except Exception, e:
4591                         pass
4592                 return myslot
4593
4594
4595 class dbapi:
4596         def __init__(self):
4597                 pass
4598
4599         def close_caches(self):
4600                 pass
4601
4602         def cp_list(self,cp,use_cache=1):
4603                 return
4604
4605         def cpv_all(self):
4606                 cpv_list = []
4607                 for cp in self.cp_all():
4608                         cpv_list.extend(self.cp_list(cp))
4609                 return cpv_list
4610
4611         def aux_get(self,mycpv,mylist):
4612                 "stub code for returning auxiliary db information, such as SLOT, DEPEND, etc."
4613                 'input: "sys-apps/foo-1.0",["SLOT","DEPEND","HOMEPAGE"]'
4614                 'return: ["0",">=sys-libs/bar-1.0","http://www.foo.com"] or [] if mycpv not found'
4615                 raise NotImplementedError
4616
4617         def match(self,origdep,use_cache=1):
4618                 mydep = dep_expand(origdep, mydb=self, settings=self.settings)
4619                 mykey=dep_getkey(mydep)
4620                 mylist = match_from_list(mydep,self.cp_list(mykey,use_cache=use_cache))
4621                 myslot = portage_dep.dep_getslot(mydep)
4622                 if myslot is not None:
4623                         mylist = [cpv for cpv in mylist \
4624                                 if self.aux_get(cpv, ["SLOT"])[0] == myslot]
4625                 return mylist
4626
4627         def match2(self,mydep,mykey,mylist):
4628                 writemsg("DEPRECATED: dbapi.match2\n")
4629                 match_from_list(mydep,mylist)
4630
4631         def invalidentry(self, mypath):
4632                 if re.search("portage_lockfile$",mypath):
4633                         if not os.environ.has_key("PORTAGE_MASTER_PID"):
4634                                 writemsg("Lockfile removed: %s\n" % mypath, 1)
4635                                 portage_locks.unlockfile((mypath,None,None))
4636                         else:
4637                                 # Nothing we can do about it. We're probably sandboxed.
4638                                 pass
4639                 elif re.search(".*/-MERGING-(.*)",mypath):
4640                         if os.path.exists(mypath):
4641                                 writemsg(red("INCOMPLETE MERGE:")+" "+mypath+"\n", noiselevel=-1)
4642                 else:
4643                         writemsg("!!! Invalid db entry: %s\n" % mypath, noiselevel=-1)
4644
4645
4646
4647 class fakedbapi(dbapi):
4648         "This is a dbapi to use for the emptytree function.  It's empty, but things can be added to it."
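             # Typical usage (illustrative, not from the original source):
             #   fakedb = fakedbapi(settings=mysettings)
             #   fakedb.cpv_inject("sys-apps/foo-1.0", metadata={"SLOT": "0"})
             #   fakedb.match("sys-apps/foo")  ->  ["sys-apps/foo-1.0"]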
4649         def __init__(self, settings=None):
4650                 self.cpvdict={}
4651                 self.cpdict={}
4652                 if settings is None:
4653                         settings = globals()["settings"]
4654                 self.settings = settings
4655                 self._match_cache = {}
4656
4657         def _clear_cache(self):
4658                 if self._match_cache:
4659                         self._match_cache = {}
4660
4661         def match(self, origdep, use_cache=1):
4662                 result = self._match_cache.get(origdep, None)
4663                 if result is not None:
4664                         return result[:]
4665                 result = dbapi.match(self, origdep, use_cache=use_cache)
4666                 self._match_cache[origdep] = result
4667                 return result[:]
4668
4669         def cpv_exists(self,mycpv):
4670                 return self.cpvdict.has_key(mycpv)
4671
4672         def cp_list(self,mycp,use_cache=1):
4673                 if not self.cpdict.has_key(mycp):
4674                         return []
4675                 else:
4676                         return self.cpdict[mycp]
4677
4678         def cp_all(self):
4679                 returnme=[]
4680                 for x in self.cpdict.keys():
4681                         returnme.extend(self.cpdict[x])
4682                 return returnme
4683
4684         def cpv_all(self):
4685                 return self.cpvdict.keys()
4686
4687         def cpv_inject(self, mycpv, metadata=None):
4688                 """Adds a cpv to the list of available packages."""
4689                 self._clear_cache()
4690                 mycp=cpv_getkey(mycpv)
4691                 self.cpvdict[mycpv] = metadata
4692                 myslot = None
4693                 if metadata:
4694                         myslot = metadata.get("SLOT", None)
4695                 if myslot and mycp in self.cpdict:
4696                         # If necessary, remove another package in the same SLOT.
4697                         for cpv in self.cpdict[mycp]:
4698                                 if mycpv != cpv:
4699                                         other_metadata = self.cpvdict[cpv]
4700                                         if other_metadata:
4701                                                 if myslot == other_metadata.get("SLOT", None):
4702                                                         self.cpv_remove(cpv)
4703                                                         break
4704                 if mycp not in self.cpdict:
4705                         self.cpdict[mycp] = []
4706                 if not mycpv in self.cpdict[mycp]:
4707                         self.cpdict[mycp].append(mycpv)
4708
4709         def cpv_remove(self,mycpv):
4710                 """Removes a cpv from the list of available packages."""
4711                 self._clear_cache()
4712                 mycp=cpv_getkey(mycpv)
4713                 if self.cpvdict.has_key(mycpv):
4714                         del self.cpvdict[mycpv]
4715                 if not self.cpdict.has_key(mycp):
4716                         return
4717                 while mycpv in self.cpdict[mycp]:
4718                         del self.cpdict[mycp][self.cpdict[mycp].index(mycpv)]
4719                 if not len(self.cpdict[mycp]):
4720                         del self.cpdict[mycp]
4721
4722         def aux_get(self, mycpv, wants):
4723                 if not self.cpv_exists(mycpv):
4724                         raise KeyError(mycpv)
4725                 metadata = self.cpvdict[mycpv]
4726                 if not metadata:
4727                         return ["" for x in wants]
4728                 return [metadata.get(x, "") for x in wants]
4729
4730         def aux_update(self, cpv, values):
4731                 self._clear_cache()
4732                 self.cpvdict[cpv].update(values)
4733
4734 class bindbapi(fakedbapi):
4735         def __init__(self, mybintree=None, settings=None):
4736                 self.bintree = mybintree
4737                 self.cpvdict={}
4738                 self.cpdict={}
4739                 if settings is None:
4740                         settings = globals()["settings"]
4741                 self.settings = settings
4742                 self._match_cache = {}
4743                 # Selectively cache metadata in order to optimize dep matching.
4744                 self._aux_cache_keys = set(["SLOT"])
4745                 self._aux_cache = {}
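                     # _aux_cache maps each cpv to a dict holding just the keys in
                     # _aux_cache_keys (currently only SLOT), so repeated dep-match
                     # lookups avoid re-reading the tbz2's xpak data.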
4746
4747         def match(self, *pargs, **kwargs):
4748                 if self.bintree and not self.bintree.populated:
4749                         self.bintree.populate()
4750                 return fakedbapi.match(self, *pargs, **kwargs)
4751
4752         def aux_get(self,mycpv,wants):
4753                 if self.bintree and not self.bintree.populated:
4754                         self.bintree.populate()
4755                 cache_me = False
4756                 if not set(wants).difference(self._aux_cache_keys):
4757                         aux_cache = self._aux_cache.get(mycpv)
4758                         if aux_cache is not None:
4759                                 return [aux_cache[x] for x in wants]
4760                         cache_me = True
4761                 mysplit = mycpv.split("/")
4762                 mylist  = []
4763                 tbz2name = mysplit[1]+".tbz2"
4764                 if self.bintree and not self.bintree.isremote(mycpv):
4765                         tbz2 = xpak.tbz2(self.bintree.getname(mycpv))
4766                         getitem = tbz2.getfile
4767                 else:
4768                         getitem = self.bintree.remotepkgs[tbz2name].get
4769                 mydata = {}
4770                 mykeys = wants
4771                 if cache_me:
4772                         mykeys = self._aux_cache_keys.union(wants)
4773                 for x in mykeys:
4774                         myval = getitem(x)
4775                         # myval is None if the key doesn't exist
4776                         # or the tbz2 is corrupt.
4777                         if myval:
4778                                 mydata[x] = " ".join(myval.split())
4779                 if "EAPI" in mykeys:
4780                         if not mydata.setdefault("EAPI", "0"):
4781                                 mydata["EAPI"] = "0"
4782                 if cache_me:
4783                         aux_cache = {}
4784                         for x in self._aux_cache_keys:
4785                                 aux_cache[x] = mydata.get(x, "")
4786                         self._aux_cache[mycpv] = aux_cache
4787                 return [mydata.get(x, "") for x in wants]
4788
4789         def aux_update(self, cpv, values):
4790                 if not self.bintree.populated:
4791                         self.bintree.populate()
4792                 tbz2path = self.bintree.getname(cpv)
4793                 if not os.path.exists(tbz2path):
4794                         raise KeyError(cpv)
4795                 mytbz2 = xpak.tbz2(tbz2path)
4796                 mydata = mytbz2.get_data()
4797                 mydata.update(values)
4798                 mytbz2.recompose_mem(xpak.xpak_mem(mydata))
4799
4800         def cp_list(self, *pargs, **kwargs):
4801                 if not self.bintree.populated:
4802                         self.bintree.populate()
4803                 return fakedbapi.cp_list(self, *pargs, **kwargs)
4804
4805         def cpv_all(self):
4806                 if not self.bintree.populated:
4807                         self.bintree.populate()
4808                 return fakedbapi.cpv_all(self)
4809
4810 class vardbapi(dbapi):
4811         def __init__(self, root, categories=None, settings=None, vartree=None):
4812                 self.root       = root[:]
4813                 #cache for category directory mtimes
4814                 self.mtdircache = {}
4815                 #cache for dependency checks
4816                 self.matchcache = {}
4817                 #cache for cp_list results
4818                 self.cpcache    = {}
4819                 self.blockers   = None
4820                 if settings is None:
4821                         settings = globals()["settings"]
4822                 self.settings = settings
4823                 if categories is None:
4824                         categories = settings.categories
4825                 self.categories = categories[:]
4826                 if vartree is None:
4827                         vartree = globals()["db"][root]["vartree"]
4828                 self.vartree = vartree
4829                 self._aux_cache_keys = set(["SLOT", "COUNTER", "PROVIDE", "USE",
4830                         "IUSE", "DEPEND", "RDEPEND", "PDEPEND"])
4831                 self._aux_cache = None
4832                 self._aux_cache_version = "1"
4833                 self._aux_cache_filename = os.path.join(self.root,
4834                         CACHE_PATH.lstrip(os.path.sep), "vdb_metadata.pickle")
4835
4836         def cpv_exists(self,mykey):
4837                 "Tells us whether the given cpv is actually installed (a vdb entry exists); masking is not considered"
4838                 return os.path.exists(self.root+VDB_PATH+"/"+mykey)
4839
4840         def cpv_counter(self,mycpv):
4841                 "This method will grab the COUNTER. Returns a counter value."
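                     # Tries the COUNTER value from aux_get first and falls back
                     # to reading the package's COUNTER file directly, resetting
                     # a corrupt or missing value where it safely can.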
4842                 try:
4843                         return long(self.aux_get(mycpv, ["COUNTER"])[0])
4844                 except (KeyError, ValueError):
4845                         pass
4846                 cdir=self.root+VDB_PATH+"/"+mycpv
4847                 cpath=self.root+VDB_PATH+"/"+mycpv+"/COUNTER"
4848
4849                 # We write our new counter value to a new file that gets moved into
4850                 # place to avoid filesystem corruption on XFS (unexpected reboot.)
4851                 corrupted=0
4852                 if os.path.exists(cpath):
4853                         cfile=open(cpath, "r")
4854                         try:
4855                                 counter=long(cfile.readline())
4856                         except ValueError:
4857                                 print "portage: COUNTER for",mycpv,"was corrupted; resetting to value of 0"
4858                                 counter=long(0)
4859                                 corrupted=1
4860                         cfile.close()
4861                 elif os.path.exists(cdir):
4862                         mys = pkgsplit(mycpv)
4863                         myl = self.match(mys[0],use_cache=0)
4864                         print mys,myl
4865                         if len(myl) == 1:
4866                                 try:
4867                                         # Only one package... Counter doesn't matter.
4868                                         write_atomic(cpath, "1")
4869                                         counter = 1
4870                                 except SystemExit, e:
4871                                         raise
4872                                 except Exception, e:
4873                                         writemsg("!!! COUNTER file is missing for "+str(mycpv)+" in /var/db.\n",
4874                                                 noiselevel=-1)
4875                                         writemsg("!!! Please run /usr/lib/portage/bin/fix-db.py or\n",
4876                                                 noiselevel=-1)
4877                                         writemsg("!!! unmerge this exact version.\n", noiselevel=-1)
4878                                         writemsg("!!! %s\n" % e, noiselevel=-1)
4879                                         sys.exit(1)
4880                         else:
4881                                 writemsg("!!! COUNTER file is missing for "+str(mycpv)+" in /var/db.\n",
4882                                         noiselevel=-1)
4883                                 writemsg("!!! Please run /usr/lib/portage/bin/fix-db.py or\n",
4884                                         noiselevel=-1)
4885                                 writemsg("!!! remerge the package.\n", noiselevel=-1)
4886                                 sys.exit(1)
4887                 else:
4888                         counter=long(0)
4889                 if corrupted:
4890                         # update new global counter file
4891                         write_atomic(cpath, str(counter))
4892                 return counter
4893
4894         def cpv_inject(self,mycpv):
4895                 "injects a real package into our on-disk database; assumes mycpv is valid and doesn't already exist"
4896                 os.makedirs(self.root+VDB_PATH+"/"+mycpv)
4897                 counter = self.counter_tick(self.root, mycpv=mycpv)
4898                 # write local package counter so that emerge clean does the right thing
4899                 write_atomic(os.path.join(self.root, VDB_PATH, mycpv, "COUNTER"), str(counter))
4900
4901         def isInjected(self,mycpv):
4902                 if self.cpv_exists(mycpv):
4903                         if os.path.exists(self.root+VDB_PATH+"/"+mycpv+"/INJECTED"):
4904                                 return True
4905                         if not os.path.exists(self.root+VDB_PATH+"/"+mycpv+"/CONTENTS"):
4906                                 return True
4907                 return False
4908
4909         def move_ent(self,mylist):
4910                 origcp=mylist[1]
4911                 newcp=mylist[2]
4912
4913                 # sanity check
4914                 for cp in [origcp,newcp]:
4915                         if not (isvalidatom(cp) and isjustname(cp)):
4916                                 raise portage_exception.InvalidPackageName(cp)
4917                 origmatches=self.match(origcp,use_cache=0)
4918                 if not origmatches:
4919                         return
4920                 for mycpv in origmatches:
4921                         mycpsplit=catpkgsplit(mycpv)
4922                         mynewcpv=newcp+"-"+mycpsplit[2]
4923                         mynewcat=newcp.split("/")[0]
4924                         if mycpsplit[3]!="r0":
4925                                 mynewcpv += "-"+mycpsplit[3]
4926                         mycpsplit_new = catpkgsplit(mynewcpv)
4927                         origpath=self.root+VDB_PATH+"/"+mycpv
4928                         if not os.path.exists(origpath):
4929                                 continue
4930                         writemsg_stdout("@")
4931                         if not os.path.exists(self.root+VDB_PATH+"/"+mynewcat):
4932                                 #create the directory
4933                                 os.makedirs(self.root+VDB_PATH+"/"+mynewcat)
4934                         newpath=self.root+VDB_PATH+"/"+mynewcpv
4935                         if os.path.exists(newpath):
4936                                 #dest already exists; keep this puppy where it is.
4937                                 continue
4938                         os.rename(origpath, newpath)
4939
4940                         # We need to rename the ebuild now.
4941                         old_pf = catsplit(mycpv)[1]
4942                         new_pf = catsplit(mynewcpv)[1]
4943                         if new_pf != old_pf:
4944                                 try:
4945                                         os.rename(os.path.join(newpath, old_pf + ".ebuild"),
4946                                                 os.path.join(newpath, new_pf + ".ebuild"))
4947                                 except OSError, e:
4948                                         if e.errno != errno.ENOENT:
4949                                                 raise
4950                                         del e
4951                                 write_atomic(os.path.join(newpath, "PF"), new_pf+"\n")
4952
4953                         write_atomic(os.path.join(newpath, "CATEGORY"), mynewcat+"\n")
4954                         fixdbentries([mylist], newpath)
4955
4956         def update_ents(self, update_iter):
4957                 """Run fixdbentries on all installed packages (time consuming).  Like
4958                 fixpackages, this should be run from a helper script and display
4959                 a progress indicator."""
4960                 dbdir = os.path.join(self.root, VDB_PATH)
4961                 for catdir in listdir(dbdir):
4962                         catdir = dbdir+"/"+catdir
4963                         if os.path.isdir(catdir):
4964                                 for pkgdir in listdir(catdir):
4965                                         pkgdir = catdir+"/"+pkgdir
4966                                         if os.path.isdir(pkgdir):
4967                                                 fixdbentries(update_iter, pkgdir)
4968
4969         def move_slot_ent(self,mylist):
4970                 pkg=mylist[1]
4971                 origslot=mylist[2]
4972                 newslot=mylist[3]
4973
4974                 if not isvalidatom(pkg):
4975                         raise portage_exception.InvalidAtom(pkg)
4976
4977                 origmatches=self.match(pkg,use_cache=0)
4978                 
4979                 if not origmatches:
4980                         return
4981                 for mycpv in origmatches:
4982                         origpath=self.root+VDB_PATH+"/"+mycpv
4983                         if not os.path.exists(origpath):
4984                                 continue
4985
4986                         slot=grabfile(origpath+"/SLOT")
4987                         if (not slot):
4988                                 continue
4989
4990                         if (slot[0]!=origslot):
4991                                 continue
4992
4993                         writemsg_stdout("s")
4994                         write_atomic(os.path.join(origpath, "SLOT"), newslot+"\n")
4995
4996         def cp_list(self,mycp,use_cache=1):
4997                 mysplit=mycp.split("/")
4998                 if mysplit[0] == '*':
4999                         mysplit[0] = mysplit[0][1:]
5000                 try:
5001                         mystat=os.stat(self.root+VDB_PATH+"/"+mysplit[0])[stat.ST_MTIME]
5002                 except OSError:
5003                         mystat=0
5004                 if use_cache and self.cpcache.has_key(mycp):
5005                         cpc=self.cpcache[mycp]
5006                         if cpc[0]==mystat:
5007                                 return cpc[1]
5008                 mylist=listdir(self.root+VDB_PATH+"/"+mysplit[0],EmptyOnError=1)
5009
5010                 if (mylist is None):
5011                         return []
5012                 returnme=[]
5013                 for x in mylist:
5014                         if x.startswith("."):
5015                                 continue
5016                         if x[0] == '-':
5017                                 #writemsg(red("INCOMPLETE MERGE:")+str(x[len("-MERGING-"):])+"\n")
5018                                 continue
5019                         ps=pkgsplit(x)
5020                         if not ps:
5021                                 self.invalidentry(self.root+VDB_PATH+"/"+mysplit[0]+"/"+x)
5022                                 continue
5023                         if len(mysplit) > 1:
5024                                 if ps[0]==mysplit[1]:
5025                                         returnme.append(mysplit[0]+"/"+x)
5026                 if use_cache:
5027                         self.cpcache[mycp]=[mystat,returnme]
5028                 elif self.cpcache.has_key(mycp):
5029                         del self.cpcache[mycp]
5030                 return returnme
5031
5032         def cpv_all(self,use_cache=1):
5033                 returnme=[]
5034                 basepath = self.root+VDB_PATH+"/"
5035
5036                 for x in self.categories:
5037                         for y in listdir(basepath+x,EmptyOnError=1):
5038                                 if y.startswith("."):
5039                                         continue
5040                                 subpath = x+"/"+y
5041                                 # -MERGING- should never be a cpv, nor should files.
5042                                 if os.path.isdir(basepath+subpath) and (pkgsplit(y) is not None):
5043                                         returnme += [subpath]
5044                 return returnme
5045
5046         def cp_all(self,use_cache=1):
5047                 mylist = self.cpv_all(use_cache=use_cache)
5048                 d={}
5049                 for y in mylist:
5050                         if y[0] == '*':
5051                                 y = y[1:]
5052                         mysplit=catpkgsplit(y)
5053                         if not mysplit:
5054                                 self.invalidentry(self.root+VDB_PATH+"/"+y)
5055                                 continue
5056                         d[mysplit[0]+"/"+mysplit[1]] = None
5057                 return d.keys()
5058
5059         def checkblockers(self,origdep):
5060                 pass
5061
5062         def match(self,origdep,use_cache=1):
5063                 "caching match function"
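                     # Results are cached per category and keyed by the expanded
                     # dep; a cached entry is discarded whenever the category
                     # directory's mtime changes, and use_cache=0 bypasses and
                     # clears the cache for that category.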
5064                 mydep = dep_expand(
5065                         origdep, mydb=self, use_cache=use_cache, settings=self.settings)
5066                 mykey=dep_getkey(mydep)
5067                 mycat=mykey.split("/")[0]
5068                 if not use_cache:
5069                         if self.matchcache.has_key(mycat):
5070                                 del self.mtdircache[mycat]
5071                                 del self.matchcache[mycat]
5072                         mymatch = match_from_list(mydep,
5073                                 self.cp_list(mykey, use_cache=use_cache))
5074                         myslot = portage_dep.dep_getslot(mydep)
5075                         if myslot is not None:
5076                                 mymatch = [cpv for cpv in mymatch \
5077                                         if self.aux_get(cpv, ["SLOT"])[0] == myslot]
5078                         return mymatch
5079                 try:
5080                         curmtime=os.stat(self.root+VDB_PATH+"/"+mycat)[stat.ST_MTIME]
5081                 except (IOError, OSError):
5082                         curmtime=0
5083
5084                 if not self.matchcache.has_key(mycat) or not self.mtdircache[mycat]==curmtime:
5085                         # clear cache entry
5086                         self.mtdircache[mycat]=curmtime
5087                         self.matchcache[mycat]={}
5088                 if not self.matchcache[mycat].has_key(mydep):
5089                         mymatch=match_from_list(mydep,self.cp_list(mykey,use_cache=use_cache))
5090                         myslot = portage_dep.dep_getslot(mydep)
5091                         if myslot is not None:
5092                                 mymatch = [cpv for cpv in mymatch \
5093                                         if self.aux_get(cpv, ["SLOT"])[0] == myslot]
5094                         self.matchcache[mycat][mydep]=mymatch
5095                 return self.matchcache[mycat][mydep][:]
5096
5097         def findname(self, mycpv):
5098                 return self.root+VDB_PATH+"/"+str(mycpv)+"/"+mycpv.split("/")[1]+".ebuild"
5099
5100         def flush_cache(self):
5101                 """If the current user has permission and the internal aux_get cache has
5102                 been updated, save it to disk and mark it unmodified.  This is called
5103                 by emerge after it has loaded the full vdb for use in dependency
5104                 calculations.  Currently, the cache is only written if the user has
5105                 superuser privileges (since that's required to obtain a lock), but all
5106                 users have read access and benefit from faster metadata lookups (as
5107                 long as at least part of the cache is still valid)."""
5108                 if self._aux_cache is not None and \
5109                         self._aux_cache["modified"] and \
5110                         secpass >= 2:
5111                         valid_nodes = set(self.cpv_all())
5112                         for cpv in self._aux_cache["packages"].keys():
5113                                 if cpv not in valid_nodes:
5114                                         del self._aux_cache["packages"][cpv]
5115                         del self._aux_cache["modified"]
5116                         try:
5117                                 f = atomic_ofstream(self._aux_cache_filename)
5118                                 cPickle.dump(self._aux_cache, f, -1)
5119                                 f.close()
5120                                 portage_util.apply_secpass_permissions(
5121                                         self._aux_cache_filename, gid=portage_gid, mode=0644)
5122                         except (IOError, OSError), e:
5123                                 pass
5124                         self._aux_cache["modified"] = False
5125
5126         def aux_get(self, mycpv, wants):
5127                 """This automatically caches selected keys that are frequently needed
5128                 by emerge for dependency calculations.  The cached metadata is
5129                 considered valid if the mtime of the package directory has not changed
5130                 since the data was cached.  The cache is stored in a pickled dict
5131                 object with the following format:
5132
5133                 {"version":"1", "packages":{cpv1:(mtime,{k1:v1, k2:v2, ...}), cpv2...}}
5134
5135                 If an error occurs while loading the cache pickle or the version is
5136                 unrecognized, the cache will simply be recreated from scratch (it is
5137                 completely disposable).
5138                 """
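                     # Illustrative example of one cached entry (hypothetical cpv and
                     # values), following the format documented above; the inner dict
                     # holds one value for each key in self._aux_cache_keys:
                     #
                     #   self._aux_cache["packages"]["sys-apps/foo-1.0"] = \
                     #           (1167609600L, {"SLOT": "0", "KEYWORDS": "x86", ...})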
5139                 if not self._aux_cache_keys.intersection(wants):
5140                         return self._aux_get(mycpv, wants)
5141                 if self._aux_cache is None:
5142                         try:
5143                                 f = open(self._aux_cache_filename)
5144                                 mypickle = cPickle.Unpickler(f)
5145                                 mypickle.find_global = None
5146                                 self._aux_cache = mypickle.load()
5147                                 f.close()
5148                                 del f
5149                         except (IOError, OSError, EOFError, cPickle.UnpicklingError):
5150                                 pass
5151                         if not self._aux_cache or \
5152                                 not isinstance(self._aux_cache, dict) or \
5153                                 self._aux_cache.get("version") != self._aux_cache_version or \
5154                                 not self._aux_cache.get("packages"):
5155                                 self._aux_cache = {"version":self._aux_cache_version}
5156                                 self._aux_cache["packages"] = {}
5157                         self._aux_cache["modified"] = False
5158                 mydir = os.path.join(self.root, VDB_PATH, mycpv)
5159                 mydir_stat = None
5160                 try:
5161                         mydir_stat = os.stat(mydir)
5162                 except OSError, e:
5163                         if e.errno != errno.ENOENT:
5164                                 raise
5165                         raise KeyError(mycpv)
5166                 mydir_mtime = long(mydir_stat.st_mtime)
5167                 pkg_data = self._aux_cache["packages"].get(mycpv)
5168                 mydata = {}
5169                 cache_valid = False
5170                 if pkg_data:
5171                         cache_mtime, metadata = pkg_data
5172                         cache_valid = cache_mtime == mydir_mtime
5173                         if cache_valid and set(metadata) != self._aux_cache_keys:
5174                                 # Allow self._aux_cache_keys to change without a cache version
5175                                 # bump.
5176                                 cache_valid = False
5177                 if cache_valid:
5178                         mydata.update(metadata)
5179                         pull_me = set(wants).difference(self._aux_cache_keys)
5180                 else:
5181                         pull_me = self._aux_cache_keys.union(wants)
5182                 if pull_me:
5183                         # pull any needed data and cache it
5184                         aux_keys = list(pull_me)
5185                         for k, v in izip(aux_keys, self._aux_get(mycpv, aux_keys)):
5186                                 mydata[k] = v
5187                         if not cache_valid:
5188                                 cache_data = {}
5189                                 for aux_key in self._aux_cache_keys:
5190                                         cache_data[aux_key] = mydata[aux_key]
5191                                 self._aux_cache["packages"][mycpv] = (mydir_mtime, cache_data)
5192                                 self._aux_cache["modified"] = True
5193                 return [mydata[x] for x in wants]
5194
5195         def _aux_get(self, mycpv, wants):
5196                 mydir = os.path.join(self.root, VDB_PATH, mycpv)
5197                 if not os.path.isdir(mydir):
5198                         raise KeyError(mycpv)
5199                 results = []
5200                 for x in wants:
5201                         try:
5202                                 myf = open(os.path.join(mydir, x), "r")
5203                                 try:
5204                                         myd = myf.read()
5205                                 finally:
5206                                         myf.close()
5207                                 myd = " ".join(myd.split())
5208                         except IOError:
5209                                 myd = ""
5210                         if x == "EAPI" and not myd:
5211                                 results.append("0")
5212                         else:
5213                                 results.append(myd)
5214                 return results
5215
5216         def aux_update(self, cpv, values):
5217                 cat, pkg = cpv.split("/")
5218                 mylink = dblink(cat, pkg, self.root, self.settings,
5219                         treetype="vartree", vartree=self.vartree)
5220                 if not mylink.exists():
5221                         raise KeyError(cpv)
5222                 for k, v in values.iteritems():
5223                         mylink.setfile(k, v)
5224
5225         def counter_tick(self,myroot,mycpv=None):
5226                 return self.counter_tick_core(myroot,incrementing=1,mycpv=mycpv)
5227
5228         def get_counter_tick_core(self,myroot,mycpv=None):
5229                 return self.counter_tick_core(myroot,incrementing=0,mycpv=mycpv)+1
5230
5231         def counter_tick_core(self,myroot,incrementing=1,mycpv=None):
5232                 "This method will grab the next COUNTER value and record it back to the global file.  Returns new counter value."
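                     # Flow, as implemented below: read the global counter file; if it
                     # is missing or corrupt, regenerate a value by scanning the
                     # per-package COUNTER files under VDB_PATH; make sure the result
                     # is not below the largest COUNTER of any other installed version
                     # of mycpv; finally, when incrementing (or when the value had to
                     # be repaired), bump the counter and write it back atomically.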
5233                 cpath=myroot+"var/cache/edb/counter"
5234                 changed=0
5235                 min_counter = 0
5236                 if mycpv:
5237                         mysplit = pkgsplit(mycpv)
5238                         for x in self.match(mysplit[0],use_cache=0):
5239                                 if x==mycpv:
5240                                         continue
5241                                 try:
5242                                         old_counter = long(self.aux_get(x,["COUNTER"])[0])
5243                                         writemsg("COUNTER '%d' '%s'\n" % (old_counter, x),1)
5244                                 except (ValueError, KeyError): # ValueError from long(), KeyError from aux_get
5245                                         old_counter = 0
5246                                         writemsg("!!! BAD COUNTER in '%s'\n" % (x), noiselevel=-1)
5247                                 if old_counter > min_counter:
5248                                         min_counter = old_counter
5249
5250                 # We write our new counter value to a new file that gets moved into
5251                 # place to avoid filesystem corruption.
5252                 find_counter = ("find '%s' -type f -name COUNTER | " + \
5253                         "while read f; do echo $(<\"${f}\"); done | " + \
5254                         "sort -n | tail -n1") % os.path.join(self.root, VDB_PATH)
5255                 if os.path.exists(cpath):
5256                         cfile=open(cpath, "r")
5257                         try:
5258                                 counter=long(cfile.readline())
5259                         except (ValueError,OverflowError):
5260                                 try:
5261                                         counter = long(commands.getoutput(find_counter).strip())
5262                                         writemsg("!!! COUNTER was corrupted; resetting to value of %d\n" % counter,
5263                                                 noiselevel=-1)
5264                                         changed=1
5265                                 except (ValueError,OverflowError):
5266                                         writemsg("!!! COUNTER data is corrupt in pkg db. The values need to be\n",
5267                                                 noiselevel=-1)
5268                                         writemsg("!!! corrected/normalized so that portage can operate properly.\n",
5269                                                 noiselevel=-1)
5270                                         writemsg("!!! A simple solution is not yet available so try #gentoo on IRC.\n", noiselevel=-1)
5271                                         sys.exit(2)
5272                         cfile.close()
5273                 else:
5274                         try:
5275                                 counter = long(commands.getoutput(find_counter).strip())
5276                                 writemsg("!!! Global counter missing. Regenerated from counter files to: %s\n" % counter,
5277                                         noiselevel=-1)
5278                         except ValueError: # ValueError from long(), probably others from commands.getoutput
5279                                 writemsg("!!! Initializing global counter.\n", noiselevel=-1)
5280                                 counter=long(0)
5281                         changed=1
5282
5283                 if counter < min_counter:
5284                         counter = min_counter+1000
5285                         changed = 1
5286
5287                 if incrementing or changed:
5288
5289                         #increment counter
5290                         counter += 1
5291                         # update new global counter file
5292                         write_atomic(cpath, str(counter))
5293                 return counter
5294
5295 class vartree(object):
5296         "this tree will scan a var/db/pkg database located at root (passed to init)"
5297         def __init__(self, root="/", virtual=None, clone=None, categories=None,
5298                 settings=None):
5299                 if clone:
5300                         self.root       = clone.root[:]
5301                         self.dbapi      = copy.deepcopy(clone.dbapi)
5302                         self.populated  = 1
5303                         self.settings   = config(clone=clone.settings)
5304                 else:
5305                         self.root       = root[:]
5306                         if settings is None:
5307                                 settings = globals()["settings"]
5308                         self.settings = settings # for key_expand calls
5309                         if categories is None:
5310                                 categories = settings.categories
5311                         self.dbapi = vardbapi(self.root, categories=categories,
5312                                 settings=settings, vartree=self)
5313                         self.populated  = 1
5314
5315         def zap(self,mycpv):
5316                 return
5317
5318         def inject(self,mycpv):
5319                 return
5320
5321         def get_provide(self,mycpv):
5322                 myprovides=[]
5323                 mylines = None
5324                 try:
5325                         mylines, myuse = self.dbapi.aux_get(mycpv, ["PROVIDE","USE"])
5326                         if mylines:
5327                                 myuse = myuse.split()
5328                                 mylines = flatten(portage_dep.use_reduce(portage_dep.paren_reduce(mylines), uselist=myuse))
5329                                 for myprovide in mylines:
5330                                         mys = catpkgsplit(myprovide)
5331                                         if not mys:
5332                                                 mys = myprovide.split("/")
5333                                         myprovides += [mys[0] + "/" + mys[1]]
5334                         return myprovides
5335                 except SystemExit, e:
5336                         raise
5337                 except Exception, e:
5338                         mydir = os.path.join(self.root, VDB_PATH, mycpv)
5339                         writemsg("\nParse Error reading PROVIDE and USE in '%s'\n" % mydir,
5340                                 noiselevel=-1)
5341                         if mylines:
5342                                 writemsg("Possibly Invalid: '%s'\n" % str(mylines),
5343                                         noiselevel=-1)
5344                         writemsg("Exception: %s\n\n" % str(e), noiselevel=-1)
5345                         return []
5346
5347         def get_all_provides(self):
5348                 myprovides = {}
5349                 for node in self.getallcpv():
5350                         for mykey in self.get_provide(node):
5351                                 if myprovides.has_key(mykey):
5352                                         myprovides[mykey] += [node]
5353                                 else:
5354                                         myprovides[mykey]  = [node]
5355                 return myprovides
5356
5357         def dep_bestmatch(self,mydep,use_cache=1):
5358                 "compatibility method -- all matches, not just visible ones"
5359                 #mymatch=best(match(dep_expand(mydep,self.dbapi),self.dbapi))
5360                 mymatch = best(self.dbapi.match(
5361                         dep_expand(mydep, mydb=self.dbapi, settings=self.settings),
5362                         use_cache=use_cache))
5363                 if mymatch is None:
5364                         return ""
5365                 else:
5366                         return mymatch
5367
5368         def dep_match(self,mydep,use_cache=1):
5369                 "compatibility method -- we want to see all matches, not just visible ones"
5370                 #mymatch=match(mydep,self.dbapi)
5371                 mymatch=self.dbapi.match(mydep,use_cache=use_cache)
5372                 if mymatch is None:
5373                         return []
5374                 else:
5375                         return mymatch
5376
5377         def exists_specific(self,cpv):
5378                 return self.dbapi.cpv_exists(cpv)
5379
5380         def getallcpv(self):
5381                 """temporary function, probably to be renamed --- Gets a list of all
5382                 category/package-versions installed on the system."""
5383                 return self.dbapi.cpv_all()
5384
5385         def getallnodes(self):
5386                 """new behavior: these are all *unmasked* nodes.  There may or may not be
5387                 masked packages available for the nodes in this list."""
5388                 return self.dbapi.cp_all()
5389
5390         def exists_specific_cat(self,cpv,use_cache=1):
5391                 cpv = key_expand(cpv, mydb=self.dbapi, use_cache=use_cache,
5392                         settings=self.settings)
5393                 a=catpkgsplit(cpv)
5394                 if not a:
5395                         return 0
5396                 mylist=listdir(self.root+VDB_PATH+"/"+a[0],EmptyOnError=1)
5397                 for x in mylist:
5398                         b=pkgsplit(x)
5399                         if not b:
5400                                 self.dbapi.invalidentry(self.root+VDB_PATH+"/"+a[0]+"/"+x)
5401                                 continue
5402                         if a[1]==b[0]:
5403                                 return 1
5404                 return 0
5405
5406         def getebuildpath(self,fullpackage):
5407                 cat,package=fullpackage.split("/")
5408                 return self.root+VDB_PATH+"/"+fullpackage+"/"+package+".ebuild"
5409
5410         def getnode(self,mykey,use_cache=1):
5411                 mykey = key_expand(mykey, mydb=self.dbapi, use_cache=use_cache,
5412                         settings=self.settings)
5413                 if not mykey:
5414                         return []
5415                 mysplit=mykey.split("/")
5416                 mydirlist=listdir(self.root+VDB_PATH+"/"+mysplit[0],EmptyOnError=1)
5417                 returnme=[]
5418                 for x in mydirlist:
5419                         mypsplit=pkgsplit(x)
5420                         if not mypsplit:
5421                                 self.dbapi.invalidentry(self.root+VDB_PATH+"/"+mysplit[0]+"/"+x)
5422                                 continue
5423                         if mypsplit[0]==mysplit[1]:
5424                                 appendme=[mysplit[0]+"/"+x,[mysplit[0],mypsplit[0],mypsplit[1],mypsplit[2]]]
5425                                 returnme.append(appendme)
5426                 return returnme
5427
5428
5429         def getslot(self,mycatpkg):
5430                 "Get a slot for a catpkg; assume it exists."
5431                 try:
5432                         return self.dbapi.aux_get(mycatpkg, ["SLOT"])[0]
5433                 except KeyError:
5434                         return ""
5435
5436         def hasnode(self,mykey,use_cache):
5437                 """Does the particular node (cat/pkg key) exist?"""
5438                 mykey = key_expand(mykey, mydb=self.dbapi, use_cache=use_cache,
5439                         settings=self.settings)
5440                 mysplit=mykey.split("/")
5441                 mydirlist=listdir(self.root+VDB_PATH+"/"+mysplit[0],EmptyOnError=1)
5442                 for x in mydirlist:
5443                         mypsplit=pkgsplit(x)
5444                         if not mypsplit:
5445                                 self.dbapi.invalidentry(self.root+VDB_PATH+"/"+mysplit[0]+"/"+x)
5446                                 continue
5447                         if mypsplit[0]==mysplit[1]:
5448                                 return 1
5449                 return 0
5450
5451         def populate(self):
5452                 self.populated=1
5453
5454 auxdbkeys=[
5455         'DEPEND',    'RDEPEND',   'SLOT',      'SRC_URI',
5456         'RESTRICT',  'HOMEPAGE',  'LICENSE',   'DESCRIPTION',
5457         'KEYWORDS',  'INHERITED', 'IUSE',      'CDEPEND',
5458         'PDEPEND',   'PROVIDE',   'EAPI',
5459         'UNUSED_01', 'UNUSED_02', 'UNUSED_03', 'UNUSED_04',
5460         'UNUSED_05', 'UNUSED_06', 'UNUSED_07',
5461         ]
5462 auxdbkeylen=len(auxdbkeys)
5463
5464 def close_portdbapi_caches():
5465         for i in portdbapi.portdbapi_instances:
5466                 i.close_caches()
5467
5468
5469 class portdbapi(dbapi):
5470         """this tree will scan a portage directory located at root (passed to init)"""
5471         portdbapi_instances = []
5472
5473         def __init__(self,porttree_root,mysettings=None):
5474                 portdbapi.portdbapi_instances.append(self)
5475
5476                 if mysettings:
5477                         self.mysettings = mysettings
5478                 else:
5479                         global settings
5480                         self.mysettings = config(clone=settings)
5481
5482                 # This is strictly for use in aux_get() doebuild calls when metadata
5483                 # is generated by the depend phase.  It's safest to use a clone for
5484                 # this purpose because doebuild makes many changes to the config
5485                 # instance that is passed in.
5486                 self.doebuild_settings = config(clone=self.mysettings)
5487
5488                 self.manifestVerifyLevel  = None
5489                 self.manifestVerifier     = None
5490                 self.manifestCache        = {}    # {location: [stat, md5]}
5491                 self.manifestMissingCache = []
5492
5493                 if "gpg" in self.mysettings.features:
5494                         self.manifestVerifyLevel   = portage_gpg.EXISTS
5495                         if "strict" in self.mysettings.features:
5496                                 self.manifestVerifyLevel = portage_gpg.MARGINAL
5497                                 self.manifestVerifier = portage_gpg.FileChecker(self.mysettings["PORTAGE_GPG_DIR"], "gentoo.gpg", minimumTrust=self.manifestVerifyLevel)
5498                         elif "severe" in self.mysettings.features:
5499                                 self.manifestVerifyLevel = portage_gpg.TRUSTED
5500                                 self.manifestVerifier = portage_gpg.FileChecker(self.mysettings["PORTAGE_GPG_DIR"], "gentoo.gpg", requireSignedRing=True, minimumTrust=self.manifestVerifyLevel)
5501                         else:
5502                                 self.manifestVerifier = portage_gpg.FileChecker(self.mysettings["PORTAGE_GPG_DIR"], "gentoo.gpg", minimumTrust=self.manifestVerifyLevel)
5503
5504                 #self.root=settings["PORTDIR"]
5505                 self.porttree_root = os.path.realpath(porttree_root)
5506
5507                 self.depcachedir = self.mysettings.depcachedir[:]
5508
5509                 self.tmpfs = self.mysettings["PORTAGE_TMPFS"]
5510                 if self.tmpfs and not os.path.exists(self.tmpfs):
5511                         self.tmpfs = None
5512                 if self.tmpfs and not os.access(self.tmpfs, os.W_OK):
5513                         self.tmpfs = None
5514                 if self.tmpfs and not os.access(self.tmpfs, os.R_OK):
5515                         self.tmpfs = None
5516
5517                 self.eclassdb = eclass_cache.cache(self.porttree_root,
5518                         overlays=self.mysettings["PORTDIR_OVERLAY"].split())
5519
5520                 self.metadbmodule = self.mysettings.load_best_module("portdbapi.metadbmodule")
5521
5522                 #if the portdbapi is "frozen", then we assume that we can cache everything (that no updates to it are happening)
5523                 self.xcache={}
5524                 self.frozen=0
5525
5526                 self.porttrees = [self.porttree_root] + \
5527                         [os.path.realpath(t) for t in self.mysettings["PORTDIR_OVERLAY"].split()]
5528                 self.auxdbmodule  = self.mysettings.load_best_module("portdbapi.auxdbmodule")
5529                 self.auxdb        = {}
5530                 self._init_cache_dirs()
5531                 # XXX: REMOVE THIS ONCE UNUSED_0 IS YANKED FROM auxdbkeys
5532                 # ~harring
5533                 filtered_auxdbkeys = filter(lambda x: not x.startswith("UNUSED_0"), auxdbkeys)
5534                 if secpass < 1:
5535                         from cache import metadata_overlay, volatile
5536                         for x in self.porttrees:
5537                                 db_ro = self.auxdbmodule(self.depcachedir, x,
5538                                         filtered_auxdbkeys, gid=portage_gid, readonly=True)
5539                                 self.auxdb[x] = metadata_overlay.database(
5540                                         self.depcachedir, x, filtered_auxdbkeys,
5541                                         gid=portage_gid, db_rw=volatile.database,
5542                                         db_ro=db_ro)
5543                 else:
5544                         for x in self.porttrees:
5545                                 # location, label, auxdbkeys
5546                                 self.auxdb[x] = self.auxdbmodule(
5547                                         self.depcachedir, x, filtered_auxdbkeys, gid=portage_gid)
5548                 # Selectively cache metadata in order to optimize dep matching.
5549                 self._aux_cache_keys = set(["EAPI", "KEYWORDS", "SLOT"])
5550                 self._aux_cache = {}
5551
5552         def _init_cache_dirs(self):
5553                 """Create /var/cache/edb/dep and adjust permissions for the portage
5554                 group."""
5555
5556                 dirmode  = 02070
5557                 filemode =   060
5558                 modemask =    02
5559
5560                 try:
5561                         for mydir in (self.depcachedir,):
5562                                 if portage_util.ensure_dirs(mydir, gid=portage_gid, mode=dirmode, mask=modemask):
5563                                         writemsg("Adjusting permissions recursively: '%s'\n" % mydir,
5564                                                 noiselevel=-1)
5565                                         def onerror(e):
5566                                                 raise # bail out on the first error that occurs during recursion
5567                                         if not apply_recursive_permissions(mydir,
5568                                                 gid=portage_gid, dirmode=dirmode, dirmask=modemask,
5569                                                 filemode=filemode, filemask=modemask, onerror=onerror):
5570                                                 raise portage_exception.OperationNotPermitted(
5571                                                         "Failed to apply recursive permissions for the portage group.")
5572                 except portage_exception.PortageException, e:
5573                         pass
5574
5575         def close_caches(self):
5576                 for x in self.auxdb.keys():
5577                         self.auxdb[x].sync()
5578                 self.auxdb.clear()
5579
5580         def flush_cache(self):
5581                 for x in self.auxdb.values():
5582                         x.sync()
5583
5584         def finddigest(self,mycpv):
5585                 try:
5586                         mydig   = self.findname2(mycpv)[0]
5587                         if not mydig:
5588                                 return ""
5589                         mydigs  = mydig.split("/")[:-1]
5590                         mydig   = "/".join(mydigs)
5591                         mysplit = mycpv.split("/")
5592                 except OSError:
5593                         return ""
5594                 return mydig+"/files/digest-"+mysplit[-1]
5595
5596         def findname(self,mycpv):
5597                 return self.findname2(mycpv)[0]
5598
5599         def findname2(self, mycpv, mytree=None):
5600                 """
5601                 Returns the location of the CPV, and what overlay it was in.
5602                 Searches overlays first, then PORTDIR; this allows us to return the
5603                 first matching file we find.  If we started in PORTDIR and searched the
5604                 overlays second, we would have to search the overlays exhaustively
5605                 anyway, since an ebuild in an overlay overrides the one in PORTDIR.
5606                 """
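                     # Example with hypothetical trees: given PORTDIR=/usr/portage and
                     # PORTDIR_OVERLAY="/usr/local/overlay", self.porttrees is
                     # ["/usr/portage", "/usr/local/overlay"]; the reversed copy below
                     # checks /usr/local/overlay first and returns the first readable
                     # ebuild found, falling back to /usr/portage.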
5607                 if not mycpv:
5608                         return "",0
5609                 mysplit=mycpv.split("/")
5610                 psplit=pkgsplit(mysplit[1])
5611
5612                 if mytree:
5613                         mytrees = [mytree]
5614                 else:
5615                         mytrees = self.porttrees[:]
5616                         mytrees.reverse()
5617                 if psplit:
5618                         for x in mytrees:
5619                                 file=x+"/"+mysplit[0]+"/"+psplit[0]+"/"+mysplit[1]+".ebuild"
5620                                 if os.access(file, os.R_OK):
5621                                         return [file, x]
5622                 return None, 0
5623
5624         def aux_get(self, mycpv, mylist, mytree=None):
5625                 "stub code for returning auxiliary db information, such as SLOT, DEPEND, etc."
5626                 'input: "sys-apps/foo-1.0",["SLOT","DEPEND","HOMEPAGE"]'
5627                 'return: ["0",">=sys-libs/bar-1.0","http://www.foo.com"] or raise KeyError if error'
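                     # Overview of the logic below: dep-matching keys may be answered
                     # from the in-memory self._aux_cache; otherwise the on-disk auxdb
                     # for the matching tree is consulted.  An auxdb entry is treated
                     # as stale when the ebuild mtime differs or its recorded eclass
                     # data is no longer valid, in which case the metadata is
                     # regenerated by running the ebuild's "depend" phase via doebuild().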
5628                 cache_me = False
5629                 if not mytree and not set(mylist).difference(self._aux_cache_keys):
5630                         aux_cache = self._aux_cache.get(mycpv)
5631                         if aux_cache is not None:
5632                                 return [aux_cache[x] for x in mylist]
5633                         cache_me = True
5634                 global auxdbkeys,auxdbkeylen
5635                 cat,pkg = mycpv.split("/", 1)
5636
5637                 myebuild, mylocation = self.findname2(mycpv, mytree)
5638
5639                 if not myebuild:
5640                         writemsg("!!! aux_get(): ebuild path for '%(cpv)s' not specified:\n" % {"cpv":mycpv},
5641                                 noiselevel=1)
5642                         writemsg("!!!            %s\n" % myebuild, noiselevel=1)
5643                         raise KeyError(mycpv)
5644
5645                 myManifestPath = "/".join(myebuild.split("/")[:-1])+"/Manifest"
5646                 if "gpg" in self.mysettings.features:
5647                         try:
5648                                 mys = portage_gpg.fileStats(myManifestPath)
5649                                 if (myManifestPath in self.manifestCache) and \
5650                                    (self.manifestCache[myManifestPath] == mys):
5651                                         pass
5652                                 elif self.manifestVerifier:
5653                                         if not self.manifestVerifier.verify(myManifestPath):
5654                                                 # Verification failed the desired level.
5655                                                 raise portage_exception.UntrustedSignature, "Untrusted Manifest: %(manifest)s" % {"manifest":myManifestPath}
5656
5657                                 if ("severe" in self.mysettings.features) and \
5658                                    (mys != portage_gpg.fileStats(myManifestPath)):
5659                                         raise portage_exception.SecurityViolation, "Manifest changed: %(manifest)s" % {"manifest":myManifestPath}
5660
5661                         except portage_exception.InvalidSignature, e:
5662                                 if ("strict" in self.mysettings.features) or \
5663                                    ("severe" in self.mysettings.features):
5664                                         raise
5665                                 writemsg("!!! INVALID MANIFEST SIGNATURE DETECTED: %(manifest)s\n" % {"manifest":myManifestPath})
5666                         except portage_exception.MissingSignature, e:
5667                                 if ("severe" in self.mysettings.features):
5668                                         raise
5669                                 if ("strict" in self.mysettings.features):
5670                                         if myManifestPath not in self.manifestMissingCache:
5671                                                 writemsg("!!! WARNING: Missing signature in: %(manifest)s\n" % {"manifest":myManifestPath})
5672                                                 self.manifestMissingCache.insert(0,myManifestPath)
5673                         except (OSError,portage_exception.FileNotFound), e:
5674                                 if ("strict" in self.mysettings.features) or \
5675                                    ("severe" in self.mysettings.features):
5676                                         raise portage_exception.SecurityViolation, "Error in verification of signatures: %(errormsg)s" % {"errormsg":str(e)}
5677                                 writemsg("!!! Manifest is missing or inaccessible: %(manifest)s\n" % {"manifest":myManifestPath},
5678                                         noiselevel=-1)
5679
5680
5681                 if os.access(myebuild, os.R_OK):
5682                         emtime=os.stat(myebuild)[stat.ST_MTIME]
5683                 else:
5684                         writemsg("!!! aux_get(): ebuild for '%(cpv)s' does not exist at:\n" % {"cpv":mycpv},
5685                                 noiselevel=-1)
5686                         writemsg("!!!            %s\n" % myebuild,
5687                                 noiselevel=-1)
5688                         raise KeyError(mycpv)
5689
5690                 try:
5691                         mydata = self.auxdb[mylocation][mycpv]
5692                         if emtime != long(mydata.get("_mtime_", 0)):
5693                                 doregen = True
5694                         elif len(mydata.get("_eclasses_", [])) > 0:
5695                                 doregen = not self.eclassdb.is_eclass_data_valid(mydata["_eclasses_"])
5696                         else:
5697                                 doregen = False
5698                                 
5699                 except KeyError:
5700                         doregen = True
5701                 except CacheError:
5702                         doregen = True
5703                         try: del self.auxdb[mylocation][mycpv]
5704                         except KeyError: pass
5705
5706                 writemsg("auxdb is valid: "+str(not doregen)+" "+str(pkg)+"\n", 2)
5707
5708                 if doregen:
5709                         writemsg("doregen: %s %s\n" % (doregen,mycpv), 2)
5710                         writemsg("Generating cache entry(0) for: "+str(myebuild)+"\n",1)
5711
5712                         self.doebuild_settings.reset()
5713                         mydata = {}
5714                         myret = doebuild(myebuild, "depend",
5715                                 self.doebuild_settings["ROOT"], self.doebuild_settings,
5716                                 dbkey=mydata, tree="porttree", mydbapi=self)
5717                         if myret != os.EX_OK:
5718                                 raise KeyError(mycpv)
5719
5720                         if "EAPI" not in mydata or not mydata["EAPI"].strip():
5721                                 mydata["EAPI"] = "0"
5722
5723                         if not eapi_is_supported(mydata["EAPI"]):
5724                                 # if newer version, wipe everything and negate eapi
5725                                 eapi = mydata["EAPI"]
5726                                 mydata = {}
5727                                 map(lambda x:mydata.setdefault(x, ""), auxdbkeys)
5728                                 mydata["EAPI"] = "-"+eapi
5729
5730                         if mydata.get("INHERITED", False):
5731                                 mydata["_eclasses_"] = self.eclassdb.get_eclass_data(mydata["INHERITED"].split())
5732                         else:
5733                                 mydata["_eclasses_"] = {}
5734                         
5735                         mydata.pop("INHERITED", None) # already converted to _eclasses_ above; may be absent
5736
5737                         mydata["_mtime_"] = emtime
5738
5739                         self.auxdb[mylocation][mycpv] = mydata
5740
5741                 if not mydata.setdefault("EAPI", "0"):
5742                         mydata["EAPI"] = "0"
5743
5744                 #finally, we look at our internal cache entry and return the requested data.
5745                 returnme = []
5746                 for x in mylist:
5747                         if x == "INHERITED":
5748                                 returnme.append(' '.join(mydata.get("_eclasses_", {}).keys()))
5749                         else:
5750                                 returnme.append(mydata.get(x,""))
5751
5752                 if cache_me:
5753                         aux_cache = {}
5754                         for x in self._aux_cache_keys:
5755                                 aux_cache[x] = mydata.get(x, "")
5756                         self._aux_cache[mycpv] = aux_cache
5757
5758                 return returnme
5759
5760         def getfetchlist(self, mypkg, useflags=None, mysettings=None, all=0, mytree=None):
5761                 if mysettings is None:
5762                         mysettings = self.mysettings
5763                 try:
5764                         myuris = self.aux_get(mypkg, ["SRC_URI"], mytree=mytree)[0]
5765                 except KeyError:
5766                         print red("getfetchlist():")+" aux_get() error reading "+mypkg+"; aborting."
5767                         sys.exit(1)
5768
5769                 if useflags is None:
5770                         useflags = mysettings["USE"].split()
5771
5772                 myurilist = portage_dep.paren_reduce(myuris)
5773                 myurilist = portage_dep.use_reduce(myurilist,uselist=useflags,matchall=all)
5774                 newuris = flatten(myurilist)
5775
5776                 myfiles = []
5777                 for x in newuris:
5778                         mya = os.path.basename(x)
5779                         if not mya in myfiles:
5780                                 myfiles.append(mya)
5781                 return [newuris, myfiles]
5782
5783         def getfetchsizes(self,mypkg,useflags=None,debug=0):
5784                 # returns a filename:size dictionary of remaining downloads
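                     # For each distfile in the Manifest, the remaining size is the
                     # Manifest size minus whatever already exists in DISTDIR; a
                     # negative remainder means the existing file is larger than
                     # expected (assumed corrupt), so the full size is reported for a
                     # fresh download.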
5785                 myebuild = self.findname(mypkg)
5786                 pkgdir = os.path.dirname(myebuild)
5787                 mf = Manifest(pkgdir, self.mysettings["DISTDIR"])
5788                 checksums = mf.getDigests()
5789                 if not checksums:
5790                         if debug: print "[empty/missing/bad digest]: "+mypkg
5791                         return None
5792                 filesdict={}
5793                 if useflags is None:
5794                         myuris, myfiles = self.getfetchlist(mypkg,all=1)
5795                 else:
5796                         myuris, myfiles = self.getfetchlist(mypkg,useflags=useflags)
5797                 #XXX: maybe this should be improved: take partial downloads
5798                 # into account? check checksums?
5799                 for myfile in myfiles:
5800                         if myfile not in checksums:
5801                                 if debug:
5802                                         writemsg("[bad digest]: missing %s for %s\n" % (myfile, mypkg))
5803                                 continue
5804                         file_path = os.path.join(self.mysettings["DISTDIR"], myfile)
5805                         mystat = None
5806                         try:
5807                                 mystat = os.stat(file_path)
5808                         except OSError, e:
5809                                 pass
5810                         if mystat is None:
5811                                 existing_size = 0
5812                         else:
5813                                 existing_size = mystat.st_size
5814                         remaining_size = int(checksums[myfile]["size"]) - existing_size
5815                         if remaining_size > 0:
5816                                 # Assume the download is resumable.
5817                                 filesdict[myfile] = remaining_size
5818                         elif remaining_size < 0:
5819                                 # The existing file is too large and therefore corrupt.
5820                                 filesdict[myfile] = int(checksums[myfile]["size"])
5821                 return filesdict
5822
5823         def fetch_check(self, mypkg, useflags=None, mysettings=None, all=False):
5824                 if not useflags:
5825                         if mysettings:
5826                                 useflags = mysettings["USE"].split()
5827                 myuri, myfiles = self.getfetchlist(mypkg, useflags=useflags, mysettings=mysettings, all=all)
5828                 myebuild = self.findname(mypkg)
5829                 pkgdir = os.path.dirname(myebuild)
5830                 mf = Manifest(pkgdir, self.mysettings["DISTDIR"])
5831                 mysums = mf.getDigests()
5832
5833                 failures = {}
5834                 for x in myfiles:
5835                         if not mysums or x not in mysums:
5836                                 ok     = False
5837                                 reason = "digest missing"
5838                         else:
5839                                 try:
5840                                         ok, reason = portage_checksum.verify_all(
5841                                                 os.path.join(self.mysettings["DISTDIR"], x), mysums[x])
5842                                 except portage_exception.FileNotFound, e:
5843                                         ok = False
5844                                         reason = "File Not Found: '%s'" % str(e)
5845                         if not ok:
5846                                 failures[x] = reason
5847                 if failures:
5848                         return False
5849                 return True
5850
5851         def getsize(self,mypkg,useflags=None,debug=0):
5852                 # returns the total size of remaining downloads
5853                 #
5854                 # we use getfetchsizes() now, so this function is obsolete
5855                 #
5856                 filesdict=self.getfetchsizes(mypkg,useflags=useflags,debug=debug)
5857                 if filesdict is None:
5858                         return "[empty/missing/bad digest]"
5859                 mysize=0
5860                 for myfile in filesdict.keys():
5861                         mysize+=filesdict[myfile]
5862                 return mysize
5863
5864         def cpv_exists(self,mykey):
5865                 "Tells us whether an actual ebuild exists on disk (no masking)"
5866                 cps2=mykey.split("/")
5867                 cps=catpkgsplit(mykey,silent=0)
5868                 if not cps:
5869                         #invalid cat/pkg-v
5870                         return 0
5871                 if self.findname(cps[0]+"/"+cps2[1]):
5872                         return 1
5873                 else:
5874                         return 0
5875
5876         def cp_all(self):
5877                 "returns a list of all keys in our tree"
5878                 d={}
5879                 for x in self.mysettings.categories:
5880                         for oroot in self.porttrees:
5881                                 for y in listdir(oroot+"/"+x,EmptyOnError=1,ignorecvs=1,dirsonly=1):
5882                                         d[x+"/"+y] = None
5883                 l = d.keys()
5884                 l.sort()
5885                 return l
5886
5887         def p_list(self,mycp):
5888                 d={}
5889                 for oroot in self.porttrees:
5890                         for x in listdir(oroot+"/"+mycp,EmptyOnError=1,ignorecvs=1):
5891                                 if x[-7:]==".ebuild":
5892                                         d[x[:-7]] = None
5893                 return d.keys()
5894
5895         def cp_list(self, mycp, use_cache=1, mytree=None):
5896                 mysplit=mycp.split("/")
5897                 d={}
5898                 if mytree:
5899                         mytrees = [mytree]
5900                 else:
5901                         mytrees = self.porttrees
5902                 for oroot in mytrees:
5903                         for x in listdir(oroot+"/"+mycp,EmptyOnError=1,ignorecvs=1):
5904                                 if x[-7:]==".ebuild":
5905                                         d[mysplit[0]+"/"+x[:-7]] = None
5906                 return d.keys()
5907
5908         def freeze(self):
5909                 for x in ["list-visible","bestmatch-visible","match-visible","match-all"]:
5910                         self.xcache[x]={}
5911                 self.frozen=1
5912
5913         def melt(self):
5914                 self.xcache={}
5915                 self.frozen=0
5916
5917         def xmatch(self,level,origdep,mydep=None,mykey=None,mylist=None):
5918                 "caching match function; very tricky stuff"
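                     # Supported levels (see the branches below): "list-visible",
                     # "bestmatch-visible", "bestmatch-list", "match-list",
                     # "match-visible" and "match-all".  While the dbapi is frozen,
                     # results are memoized in self.xcache, except for the *-list
                     # levels, whose results depend on the caller-supplied mylist.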
5919                 #if no updates are being made to the tree, we can consult our xcache...
5920                 if self.frozen:
5921                         try:
5922                                 return self.xcache[level][origdep][:]
5923                         except KeyError:
5924                                 pass
5925
5926                 if not mydep:
5927                         #this stuff only runs on first call of xmatch()
5928                         #create mydep, mykey from origdep
5929                         mydep = dep_expand(origdep, mydb=self, settings=self.mysettings)
5930                         mykey=dep_getkey(mydep)
5931
5932                 if level=="list-visible":
5933                         #a list of all visible packages, not called directly (just by xmatch())
5934                         #myval=self.visible(self.cp_list(mykey))
5935                         myval=self.gvisible(self.visible(self.cp_list(mykey)))
5936                 elif level=="bestmatch-visible":
5937                         #dep match -- best match of all visible packages
5938                         myval=best(self.xmatch("match-visible",None,mydep=mydep,mykey=mykey))
5939                         #get all visible matches (from xmatch()), then choose the best one
5940                 elif level=="bestmatch-list":
5941                         #dep match -- find best match but restrict search to sublist
5942                         myval=best(match_from_list(mydep,mylist))
5943                         #no point in calling xmatch again since we're not caching list deps
5944                 elif level=="match-list":
5945                         #dep match -- find all matches but restrict search to sublist (used in 2nd half of visible())
5946                         myval=match_from_list(mydep,mylist)
5947                 elif level=="match-visible":
5948                         #dep match -- find all visible matches
5949                         myval = match_from_list(mydep,
5950                                 self.xmatch("list-visible", mykey, mydep=mykey, mykey=mykey))
5951                         #get all visible packages, then get the matching ones
5952                 elif level=="match-all":
5953                         #match *all* visible *and* masked packages
5954                         myval=match_from_list(mydep,self.cp_list(mykey))
5955                 else:
5956                         print "ERROR: xmatch doesn't handle",level,"query!"
5957                         raise KeyError
5958                 myslot = portage_dep.dep_getslot(mydep)
5959                 if myslot is not None:
5960                         myval = [cpv for cpv in myval \
5961                                 if self.aux_get(cpv, ["SLOT"])[0] == myslot]
5962                 if self.frozen and (level not in ["match-list","bestmatch-list"]):
5963                         self.xcache[level][mydep]=myval
5964                         if origdep and origdep != mydep:
5965                                 self.xcache[level][origdep] = myval
5966                 return myval[:]
5967
5968         def match(self,mydep,use_cache=1):
5969                 return self.xmatch("match-visible",mydep)
5970
5971         def visible(self,mylist):
5972                 """two functions in one.  Accepts a list of cpv values and uses the package.mask *and*
5973                 packages file to remove invisible entries, returning remaining items.  This function assumes
5974                 that all entries in mylist have the same category and package name."""
5975                 if (mylist is None) or (len(mylist)==0):
5976                         return []
5977                 newlist=mylist[:]
5978                 #first, we mask out packages in the package.mask file
5979                 mykey=newlist[0]
5980                 cpv=catpkgsplit(mykey)
5981                 if not cpv:
5982                         #invalid cat/pkg-v
5983                         print "visible(): invalid cat/pkg-v:",mykey
5984                         return []
5985                 mycp=cpv[0]+"/"+cpv[1]
5986                 maskdict=self.mysettings.pmaskdict
5987                 unmaskdict=self.mysettings.punmaskdict
5988                 if maskdict.has_key(mycp):
5989                         for x in maskdict[mycp]:
5990                                 mymatches=self.xmatch("match-all",x)
5991                                 if mymatches is None:
5992                                         #error in package.mask file; print warning and continue:
5993                                         print "visible(): package.mask entry \""+x+"\" is invalid, ignoring..."
5994                                         continue
5995                                 for y in mymatches:
5996                                         unmask=0
5997                                         if unmaskdict.has_key(mycp):
5998                                                 for z in unmaskdict[mycp]:
5999                                                         mymatches_unmask=self.xmatch("match-all",z)
6000                                                         if y in mymatches_unmask:
6001                                                                 unmask=1
6002                                                                 break
6003                                         if unmask==0:
6004                                                 try:
6005                                                         newlist.remove(y)
6006                                                 except ValueError:
6007                                                         pass
6008
6009                 revmaskdict=self.mysettings.prevmaskdict
6010                 if revmaskdict.has_key(mycp):
6011                         for x in revmaskdict[mycp]:
6012                                 #important: only match against the still-unmasked entries...
6013                                 #notice how we pass "newlist" to the xmatch() call below....
6014                                 #Without this, ~ deps in the packages files are broken.
6015                                 mymatches=self.xmatch("match-list",x,mylist=newlist)
6016                                 if mymatches is None:
6017                                         #error in packages file; print warning and continue:
6018                                         print "emerge: visible(): profile packages entry \""+x+"\" is invalid, ignoring..."
6019                                         continue
6020                                 pos=0
6021                                 while pos<len(newlist):
6022                                         if newlist[pos] not in mymatches:
6023                                                 del newlist[pos]
6024                                         else:
6025                                                 pos += 1
6026                 return newlist
6027
6028         def gvisible(self,mylist):
6029                 "strip out group-masked (not in current group) entries"
6030
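                     # The accepted keyword set starts from ACCEPT_KEYWORDS; matching
                     # package.keywords entries (pkeywordsdict) are then stacked
                     # incrementally, where a "-kw" entry drops a previously accepted
                     # keyword.  A package survives if one of its KEYWORDS is accepted,
                     # if it carries the "*" keyword, or if "~*"/"*" in the accepted
                     # set covers its testing/stable keywords, provided its EAPI is
                     # supported.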
6031                 if mylist is None:
6032                         return []
6033                 newlist=[]
6034
6035                 accept_keywords = self.mysettings["ACCEPT_KEYWORDS"].split()
6036                 pkgdict = self.mysettings.pkeywordsdict
6037                 for mycpv in mylist:
6038                         try:
6039                                 keys, eapi = self.aux_get(mycpv, ["KEYWORDS", "EAPI"])
6040                         except KeyError:
6041                                 continue
6042                         except portage_exception.PortageException, e:
6043                                 writemsg("!!! Error: aux_get('%s', ['KEYWORDS', 'EAPI'])\n" % \
6044                                         mycpv, noiselevel=-1)
6045                                 writemsg("!!! %s\n" % str(e), noiselevel=-1)
6046                                 del e
6047                                 continue
6048                         mygroups=keys.split()
6049                         # Repoman may modify this attribute as necessary.
6050                         pgroups = accept_keywords[:]
6051                         match=0
6052                         cp = dep_getkey(mycpv)
6053                         if pkgdict.has_key(cp):
6054                                 matches = match_to_list(mycpv, pkgdict[cp].keys())
6055                                 for atom in matches:
6056                                         pgroups.extend(pkgdict[cp][atom])
6057                                 if matches:
6058                                         inc_pgroups = []
6059                                         for x in pgroups:
6060                                                 if x != "-*" and x.startswith("-"):
6061                                                         try:
6062                                                                 inc_pgroups.remove(x[1:])
6063                                                         except ValueError:
6064                                                                 pass
6065                                                 if x not in inc_pgroups:
6066                                                         inc_pgroups.append(x)
6067                                         pgroups = inc_pgroups
6068                                         del inc_pgroups
6069                         hasstable = False
6070                         hastesting = False
6071                         for gp in mygroups:
6072                                 if gp=="*":
6073                                         writemsg("--- WARNING: Package '%s' uses '*' keyword.\n" % mycpv,
6074                                                 noiselevel=-1)
6075                                         match=1
6076                                         break
6077                                 elif gp in pgroups:
6078                                         match=1
6079                                         break
6080                                 elif gp[0] == "~":
6081                                         hastesting = True
6082                                 elif gp[0] != "-":
6083                                         hasstable = True
6084                         if not match and ((hastesting and "~*" in pgroups) or (hasstable and "*" in pgroups)):
6085                                 match=1
6086                         if match and eapi_is_supported(eapi):
6087                                 newlist.append(mycpv)
6088                 return newlist
6089
6090 class binarytree(object):
6091         "this tree scans for a list of all packages available in PKGDIR"
6092         def __init__(self, root, pkgdir, virtual=None, settings=None, clone=None):
6093                 if clone:
6094                         # XXX This isn't cloning. It's an instance of the same thing.
6095                         self.root=clone.root
6096                         self.pkgdir=clone.pkgdir
6097                         self.dbapi=clone.dbapi
6098                         self.populated=clone.populated
6099                         self.tree=clone.tree
6100                         self.remotepkgs=clone.remotepkgs
6101                         self.invalids=clone.invalids
6102                         self.settings = clone.settings
6103                 else:
6104                         self.root=root
6105                         #self.pkgdir=settings["PKGDIR"]
6106                         self.pkgdir = normalize_path(pkgdir)
6107                         self.dbapi = bindbapi(self, settings=settings)
6108                         self.populated=0
6109                         self.tree={}
6110                         self.remotepkgs={}
6111                         self.invalids=[]
6112                         self.settings = settings
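                             # Maps each cpv to its tbz2 path relative to PKGDIR.  Packages normally
                             # live under All/, with a ${CATEGORY}/ subdirectory used only to resolve
                             # basename collisions (see getname()).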
6113                         self._pkg_paths = {}
6114
6115         def move_ent(self,mylist):
6116                 if not self.populated:
6117                         self.populate()
6118                 origcp=mylist[1]
6119                 newcp=mylist[2]
6120                 # sanity check
6121                 for cp in [origcp,newcp]:
6122                         if not (isvalidatom(cp) and isjustname(cp)):
6123                                 raise portage_exception.InvalidPackageName(cp)
6124                 origcat = origcp.split("/")[0]
6125                 mynewcat=newcp.split("/")[0]
6126                 origmatches=self.dbapi.cp_list(origcp)
6127                 if not origmatches:
6128                         return
6129                 for mycpv in origmatches:
6130
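                             # Rebuild the cpv under the new category/package name; the revision is
                             # re-appended only when it is not r0.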
6131                         mycpsplit=catpkgsplit(mycpv)
6132                         mynewcpv=newcp+"-"+mycpsplit[2]
6133                         if mycpsplit[3]!="r0":
6134                                 mynewcpv += "-"+mycpsplit[3]
6135                         myoldpkg=mycpv.split("/")[1]
6136                         mynewpkg=mynewcpv.split("/")[1]
6137
6138                         if (mynewpkg != myoldpkg) and os.path.exists(self.getname(mynewcpv)):
6139                                 writemsg("!!! Cannot update binary: Destination exists.\n",
6140                                         noiselevel=-1)
6141                                 writemsg("!!! "+mycpv+" -> "+mynewcpv+"\n", noiselevel=-1)
6142                                 continue
6143
6144                         tbz2path=self.getname(mycpv)
6145                         if os.path.exists(tbz2path) and not os.access(tbz2path,os.W_OK):
6146                                 writemsg("!!! Cannot update readonly binary: "+mycpv+"\n",
6147                                         noiselevel=-1)
6148                                 continue
6149
6150                         #print ">>> Updating data in:",mycpv
6151                         writemsg_stdout("%")
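                             # Rewrite the xpak metadata in place so that CATEGORY (and, when the
                             # package name changes, PF and the bundled .ebuild entry) reflect the
                             # new name.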
6152                         mytbz2 = xpak.tbz2(tbz2path)
6153                         mydata = mytbz2.get_data()
6154                         updated_items = update_dbentries([mylist], mydata)
6155                         mydata.update(updated_items)
6156                         mydata["CATEGORY"] = mynewcat+"\n"
6157                         if mynewpkg != myoldpkg:
6158                                 mydata[mynewpkg+".ebuild"] = mydata[myoldpkg+".ebuild"]
6159                                 del mydata[myoldpkg+".ebuild"]
6160                                 mydata["PF"] = mynewpkg + "\n"
6161                         mytbz2.recompose_mem(xpak.xpak_mem(mydata))
6162
6163                         self.dbapi.cpv_remove(mycpv)
6164                         del self._pkg_paths[mycpv]
6165                         new_path = self.getname(mynewcpv)
6166                         self._pkg_paths[mynewcpv] = os.path.join(
6167                                 *new_path.split(os.path.sep)[-2:])
6168                         if new_path != tbz2path:
6169                                 try:
6170                                         os.makedirs(os.path.dirname(new_path))
6171                                 except OSError, e:
6172                                         if e.errno != errno.EEXIST:
6173                                                 raise
6174                                         del e
6175                                 os.rename(tbz2path, new_path)
6176                                 self._remove_symlink(mycpv)
6177                                 if new_path.split(os.path.sep)[-2] == "All":
6178                                         self._create_symlink(mynewcpv)
6179                         self.dbapi.cpv_inject(mynewcpv)
6180
6181                 return 1
6182
6183         def _remove_symlink(self, cpv):
6184                 """Remove a ${PKGDIR}/${CATEGORY}/${PF}.tbz2 symlink and also remove
6185                 the ${PKGDIR}/${CATEGORY} directory if empty.  The file will not be
6186                 removed if os.path.islink() returns False."""
6187                 mycat, mypkg = catsplit(cpv)
6188                 mylink = os.path.join(self.pkgdir, mycat, mypkg + ".tbz2")
6189                 if os.path.islink(mylink):
6190                         """Only remove it if it's really a link so that this method never
6191                         removes a real package that was placed here to avoid a collision."""
6192                         os.unlink(mylink)
6193                 try:
6194                         os.rmdir(os.path.join(self.pkgdir, mycat))
6195                 except OSError, e:
6196                         if e.errno not in (errno.ENOENT, errno.ENOTEMPTY):
6197                                 raise
6198                         del e
6199
6200         def _create_symlink(self, cpv):
6201                 """Create a ${PKGDIR}/${CATEGORY}/${PF}.tbz2 symlink (and
6202                 ${PKGDIR}/${CATEGORY} directory, if necessary).  Any file that may
6203                 exist in the location of the symlink will first be removed."""
6204                 mycat, mypkg = catsplit(cpv)
6205                 full_path = os.path.join(self.pkgdir, mycat, mypkg + ".tbz2")
6206                 try:
6207                         os.makedirs(os.path.dirname(full_path))
6208                 except OSError, e:
6209                         if e.errno != errno.EEXIST:
6210                                 raise
6211                         del e
6212                 try:
6213                         os.unlink(full_path)
6214                 except OSError, e:
6215                         if e.errno != errno.ENOENT:
6216                                 raise
6217                         del e
6218                 os.symlink(os.path.join("..", "All", mypkg + ".tbz2"), full_path)
6219
6220         def move_slot_ent(self, mylist):
6221                 if not self.populated:
6222                         self.populate()
6223                 pkg=mylist[1]
6224                 origslot=mylist[2]
6225                 newslot=mylist[3]
6226                 
6227                 if not isvalidatom(pkg):
6228                         raise portage_exception.InvalidAtom(pkg)
6229                 
6230                 origmatches=self.dbapi.match(pkg)
6231                 if not origmatches:
6232                         return
6233                 for mycpv in origmatches:
6234                         mycpsplit=catpkgsplit(mycpv)
6235                         myoldpkg=mycpv.split("/")[1]
6236                         tbz2path=self.getname(mycpv)
6237                         if os.path.exists(tbz2path) and not os.access(tbz2path,os.W_OK):
6238                                 writemsg("!!! Cannot update readonly binary: "+mycpv+"\n",
6239                                         noiselevel=-1)
6240                                 continue
6241
6242                         #print ">>> Updating data in:",mycpv
6243                         mytbz2 = xpak.tbz2(tbz2path)
6244                         mydata = mytbz2.get_data()
6245
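                             # Only update binaries whose SLOT matches the slot being moved; packages
                             # without SLOT metadata are skipped.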
6246                         slot = mydata.get("SLOT")
6247                         if not slot:
6248                                 continue
6249
6250                         if slot.strip() != origslot:
6251                                 continue
6252
6253                         writemsg_stdout("S")
6254                         mydata["SLOT"] = newslot+"\n"
6255                         mytbz2.recompose_mem(xpak.xpak_mem(mydata))
6256                 return 1
6257
6258         def update_ents(self, update_iter):
6259                 if len(update_iter) == 0:
6260                         return
6261                 if not self.populated:
6262                         self.populate()
6263
6264                 for mycpv in self.dbapi.cp_all():
6265                         tbz2path=self.getname(mycpv)
6266                         if os.path.exists(tbz2path) and not os.access(tbz2path,os.W_OK):
6267                                 writemsg("!!! Cannot update readonly binary: "+mycpv+"\n",
6268                                         noiselevel=-1)
6269                                 continue
6270                         #print ">>> Updating binary data:",mycpv
6271                         writemsg_stdout("*")
6272                         mytbz2 = xpak.tbz2(tbz2path)
6273                         mydata = mytbz2.get_data()
6274                         updated_items = update_dbentries(update_iter, mydata)
6275                         if len(updated_items) > 0:
6276                                 mydata.update(updated_items)
6277                                 mytbz2.recompose_mem(xpak.xpak_mem(mydata))
6278                 return 1
6279
6280         def prevent_collision(self, cpv):
6281                 """Make sure that the file location ${PKGDIR}/All/${PF}.tbz2 is safe to
6282                 use for a given cpv.  If a collision will occur with an existing
6283                 package from another category, the existing package will be bumped to
6284                 ${PKGDIR}/${CATEGORY}/${PF}.tbz2 so that both can coexist."""
6285                 full_path = self.getname(cpv)
6286                 if "All" == full_path.split(os.path.sep)[-2]:
6287                         return
6288                 """Move a colliding package if it exists.  Code below this point only
6289                 executes in rare cases."""
6290                 mycat, mypkg = catsplit(cpv)
6291                 myfile = mypkg + ".tbz2"
6292                 mypath = os.path.join("All", myfile)
6293                 dest_path = os.path.join(self.pkgdir, mypath)
6294                 if os.path.exists(dest_path):
6295                         # For invalid packages, other_cat could be None.
6296                         other_cat = xpak.tbz2(dest_path).getfile("CATEGORY")
6297                         if other_cat:
6298                                 other_cat = other_cat.strip()
6299                                 self._move_from_all(other_cat + "/" + mypkg)
6300                 """The file may or may not exist. Move it if necessary and update
6301                 internal state for future calls to getname()."""
6302                 self._move_to_all(cpv)
6303
6304         def _move_to_all(self, cpv):
6305                 """If the file exists, move it.  Whether or not it exists, update state
6306                 for future getname() calls."""
6307                 mycat , mypkg = catsplit(cpv)
6308                 myfile = mypkg + ".tbz2"
6309                 src_path = os.path.join(self.pkgdir, mycat, myfile)
6310                 try:
6311                         mystat = os.lstat(src_path)
6312                 except OSError, e:
6313                         mystat = None
6314                 if mystat and stat.S_ISREG(mystat.st_mode):
6315                         try:
6316                                 os.makedirs(os.path.join(self.pkgdir, "All"))
6317                         except OSError, e:
6318                                 if e.errno != errno.EEXIST:
6319                                         raise
6320                                 del e
6321                         os.rename(src_path, os.path.join(self.pkgdir, "All", myfile))
6322                         self._create_symlink(cpv)
6323                 self._pkg_paths[cpv] = os.path.join("All", myfile)
6324
6325         def _move_from_all(self, cpv):
6326                 """Move a package from ${PKGDIR}/All/${PF}.tbz2 to
6327                 ${PKGDIR}/${CATEGORY}/${PF}.tbz2 and update state for future getname() calls."""
6328                 self._remove_symlink(cpv)
6329                 mycat , mypkg = catsplit(cpv)
6330                 myfile = mypkg + ".tbz2"
6331                 mypath = os.path.join(mycat, myfile)
6332                 dest_path = os.path.join(self.pkgdir, mypath)
6333                 try:
6334                         os.makedirs(os.path.dirname(dest_path))
6335                 except OSError, e:
6336                         if e.errno != errno.EEXIST:
6337                                 raise
6338                         del e
6339                 os.rename(os.path.join(self.pkgdir, "All", myfile), dest_path)
6340                 self._pkg_paths[cpv] = mypath
6341
6342         def populate(self, getbinpkgs=0,getbinpkgsonly=0):
6343                 "populates the binarytree"
6344                 if (not os.path.isdir(self.pkgdir) and not getbinpkgs):
6345                         return 0
6346                 if (not os.path.isdir(self.pkgdir+"/All") and not getbinpkgs):
6347                         return 0
6348
6349                 if not getbinpkgsonly:
6350                         pkg_paths = {}
6351                         dirs = listdir(self.pkgdir, dirsonly=True, EmptyOnError=True)
6352                         if "All" in dirs:
6353                                 dirs.remove("All")
6354                         dirs.sort()
6355                         dirs.insert(0, "All")
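                             # Scan All/ first so that its entries take precedence when the same cpv
                             # also exists under a category directory.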
6356                         for mydir in dirs:
6357                                 for myfile in listdir(os.path.join(self.pkgdir, mydir)):
6358                                         if not myfile.endswith(".tbz2"):
6359                                                 continue
6360                                         mypath = os.path.join(mydir, myfile)
6361                                         full_path = os.path.join(self.pkgdir, mypath)
6362                                         if os.path.islink(full_path):
6363                                                 continue
6364                                         mytbz2 = xpak.tbz2(full_path)
6365                                         # For invalid packages, mycat could be None.
6366                                         mycat = mytbz2.getfile("CATEGORY")
6367                                         mypkg = myfile[:-5]
6368                                         if not mycat:
6369                                                 #old-style or corrupt package
6370                                                 writemsg("!!! Invalid binary package: '%s'\n" % full_path,
6371                                                         noiselevel=-1)
6372                                                 writemsg("!!! This binary package is not " + \
6373                                                         "recoverable and should be deleted.\n",
6374                                                         noiselevel=-1)
6375                                                 self.invalids.append(mypkg)
6376                                                 continue
6377                                         mycat = mycat.strip()
6378                                         if mycat != mydir and mydir != "All":
6379                                                 continue
6380                                         if mypkg != (mytbz2.getfile("PF") or "").strip():
6381                                                 continue
6382                                         mycpv = mycat + "/" + mypkg
6383                                         if mycpv in pkg_paths:
6384                                                 # All is first, so it's preferred.
6385                                                 continue
6386                                         pkg_paths[mycpv] = mypath
6387                                         self.dbapi.cpv_inject(mycpv)
6388                         self._pkg_paths = pkg_paths
6389
6390                 if getbinpkgs and not self.settings["PORTAGE_BINHOST"]:
6391                         writemsg(red("!!! PORTAGE_BINHOST unset, but its use was requested.\n"),
6392                                 noiselevel=-1)
6393
6394                 if getbinpkgs and \
6395                         self.settings["PORTAGE_BINHOST"] and not self.remotepkgs:
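                             # PORTAGE_BINHOST_CHUNKSIZE sets the chunk size passed to
                             # getbinpkg.dir_get_metadata(); invalid or unset values fall back to
                             # 3000, and anything below 8 is raised to 8.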
6396                         try:
6397                                 chunk_size = long(self.settings["PORTAGE_BINHOST_CHUNKSIZE"])
6398                                 if chunk_size < 8:
6399                                         chunk_size = 8
6400                         except (ValueError, KeyError):
6401                                 chunk_size = 3000
6402
6403                         writemsg(green("Fetching binary packages info...\n"))
6404                         self.remotepkgs = getbinpkg.dir_get_metadata(
6405                                 self.settings["PORTAGE_BINHOST"], chunk_size=chunk_size)
6406                         writemsg(green("  -- DONE!\n\n"))
6407
6408                         for mypkg in self.remotepkgs.keys():
6409                                 if not self.remotepkgs[mypkg].has_key("CATEGORY"):
6410                                         #old-style or corrupt package
6411                                         writemsg("!!! Invalid remote binary package: "+mypkg+"\n",
6412                                                 noiselevel=-1)
6413                                         del self.remotepkgs[mypkg]
6414                                         continue
6415                                 mycat=self.remotepkgs[mypkg]["CATEGORY"].strip()
6416                                 fullpkg=mycat+"/"+mypkg[:-5]
6417                                 mykey=dep_getkey(fullpkg)
6418                                 try:
6419                                         # invalid tbz2's can hurt things.
6420                                         #print "cpv_inject("+str(fullpkg)+")"
6421                                         self.dbapi.cpv_inject(fullpkg)
6422                                         #print "  -- Injected"
6423                                 except SystemExit, e:
6424                                         raise
6425                                 except:
6426                                         writemsg("!!! Failed to inject remote binary package:"+str(fullpkg)+"\n",
6427                                                 noiselevel=-1)
6428                                         del self.remotepkgs[mypkg]
6429                                         continue
6430                 self.populated=1
6431
6432         def inject(self,cpv):
6433                 return self.dbapi.cpv_inject(cpv)
6434
6435         def exists_specific(self,cpv):
6436                 if not self.populated:
6437                         self.populate()
6438                 return self.dbapi.match(
6439                         dep_expand("="+cpv, mydb=self.dbapi, settings=self.settings))
6440
6441         def dep_bestmatch(self,mydep):
6442                 "compatibility method -- all matches, not just visible ones"
6443                 if not self.populated:
6444                         self.populate()
6445                 writemsg("\n\n", 1)
6446                 writemsg("mydep: %s\n" % mydep, 1)
6447                 mydep = dep_expand(mydep, mydb=self.dbapi, settings=self.settings)
6448                 writemsg("mydep: %s\n" % mydep, 1)
6449                 mykey=dep_getkey(mydep)
6450                 writemsg("mykey: %s\n" % mykey, 1)
6451                 mymatch=best(match_from_list(mydep,self.dbapi.cp_list(mykey)))
6452                 writemsg("mymatch: %s\n" % mymatch, 1)
6453                 if mymatch is None:
6454                         return ""
6455                 return mymatch
6456
6457         def getname(self,pkgname):
6458                 """Returns a file location for this package.  The default location is
6459                 ${PKGDIR}/All/${PF}.tbz2, but will be ${PKGDIR}/${CATEGORY}/${PF}.tbz2
6460                 in the rare event of a collision.  The prevent_collision() method can
6461                 be called to ensure that ${PKGDIR}/All/${PF}.tbz2 is available for a
6462                 specific cpv."""
6463                 if not self.populated:
6464                         self.populate()
6465                 mycpv = pkgname
6466                 mypath = self._pkg_paths.get(mycpv, None)
6467                 if mypath:
6468                         return os.path.join(self.pkgdir, mypath)
6469                 mycat, mypkg = catsplit(mycpv)
6470                 mypath = os.path.join("All", mypkg + ".tbz2")
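                     # If another package already claims All/${PF}.tbz2, fall back to the
                     # ${CATEGORY}/ subdirectory to avoid a basename collision.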
6471                 if mypath in self._pkg_paths.values():
6472                         mypath = os.path.join(mycat, mypkg + ".tbz2")
6473                 self._pkg_paths[mycpv] = mypath # cache for future lookups
6474                 return os.path.join(self.pkgdir, mypath)
6475
6476         def isremote(self,pkgname):
6477                 "Returns true if the package is kept remotely."
6478                 mysplit=pkgname.split("/")
6479                 remote = (not os.path.exists(self.getname(pkgname))) and self.remotepkgs.has_key(mysplit[1]+".tbz2")
6480                 return remote
6481
6482         def get_use(self,pkgname):
6483                 mysplit=pkgname.split("/")
6484                 if self.isremote(pkgname):
6485                         return self.remotepkgs[mysplit[1]+".tbz2"]["USE"][:].split()
6486                 tbz2=xpak.tbz2(self.getname(pkgname))
6487                 return tbz2.getfile("USE").split()
6488
6489         def gettbz2(self,pkgname):
6490                 "fetches the package from a remote site, if necessary."
6491                 print "Fetching '"+str(pkgname)+"'"
6492                 mysplit  = pkgname.split("/")
6493                 tbz2name = mysplit[1]+".tbz2"
6494                 if not self.isremote(pkgname):
6495                         if (tbz2name not in self.invalids):
6496                                 return
6497                         else:
6498                                 writemsg("Resuming download of this tbz2, but it is possible that it is corrupt.\n",
6499                                         noiselevel=-1)
6500                 mydest = self.pkgdir+"/All/"
6501                 try:
6502                         os.makedirs(mydest, 0775)
6503                 except (OSError, IOError):
6504                         pass
6505                 return getbinpkg.file_get(
6506                         self.settings["PORTAGE_BINHOST"] + "/" + tbz2name,
6507                         mydest, fcmd=self.settings["RESUMECOMMAND"])
6508
6509         def getslot(self,mycatpkg):
6510                 "Get a slot for a catpkg; assume it exists."
6511                 myslot = ""
6512                 try:
6513                         myslot=self.dbapi.aux_get(mycatpkg,["SLOT"])[0]
6514                 except SystemExit, e:
6515                         raise
6516                 except Exception, e:
6517                         pass
6518                 return myslot
6519
6520 class dblink:
6521         """
6522         This class provides an interface to the installed package database
6523         At present this is implemented as a text backend in /var/db/pkg.
6524         """
6525         def __init__(self, cat, pkg, myroot, mysettings, treetype=None,
6526                 vartree=None):
6527                 """
6528                 Creates a DBlink object for a given CPV.
6529                 The given CPV may not be present in the database already.
6530                 
6531                 @param cat: Category
6532                 @type cat: String
6533                 @param pkg: Package (PV)
6534                 @type pkg: String
6535                 @param myroot: Typically ${ROOT}
6536                 @type myroot: String (Path)
6537                 @param mysettings: Typically portage.config
6538                 @type mysettings: An instance of portage.config
6539                 @param treetype: one of ['porttree','bintree','vartree']
6540                 @type treetype: String
6541                 @param vartree: an instance of vartree corresponding to myroot.
6542                 @type vartree: vartree
6543                 """
6544                 
6545                 self.cat     = cat
6546                 self.pkg     = pkg
6547                 self.mycpv   = self.cat+"/"+self.pkg
6548                 self.mysplit = pkgsplit(self.mycpv)
6549                 self.treetype = treetype
6550                 if vartree is None:
6551                         global db
6552                         vartree = db[myroot]["vartree"]
6553                 self.vartree = vartree
6554
6555                 self.dbroot   = normalize_path(os.path.join(myroot, VDB_PATH))
6556                 self.dbcatdir = self.dbroot+"/"+cat
6557                 self.dbpkgdir = self.dbcatdir+"/"+pkg
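                     # The -MERGING- prefixed directory is used as a temporary staging
                     # location for the new vdb entry while a package is being merged.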
6558                 self.dbtmpdir = self.dbcatdir+"/-MERGING-"+pkg
6559                 self.dbdir    = self.dbpkgdir
6560
6561                 self._lock_vdb = None
6562
6563                 self.settings = mysettings
6564                 if self.settings==1:
6565                         raise ValueError
6566
6567                 self.myroot=myroot
6568                 protect_obj = portage_util.ConfigProtect(myroot,
6569                         mysettings.get("CONFIG_PROTECT","").split(),
6570                         mysettings.get("CONFIG_PROTECT_MASK","").split())
6571                 self.updateprotect = protect_obj.updateprotect
6572                 self._config_protect = protect_obj
6573                 self._installed_instance = None
6574                 self.contentscache=[]
6575                 self._contents_inodes = None
6576
6577         def lockdb(self):
6578                 if self._lock_vdb:
6579                         raise AssertionError("Lock already held.")
6580                 # At least the parent needs to exist for the lock file.
6581                 portage_util.ensure_dirs(self.dbroot)
6582                 self._lock_vdb = portage_locks.lockdir(self.dbroot)
6583
6584         def unlockdb(self):
6585                 if self._lock_vdb:
6586                         portage_locks.unlockdir(self._lock_vdb)
6587                         self._lock_vdb = None
6588
6589         def getpath(self):
6590                 "return path to location of db information (for >>> informational display)"
6591                 return self.dbdir
6592
6593         def exists(self):
6594                 "does the db entry exist?  boolean."
6595                 return os.path.exists(self.dbdir)
6596
6597         def create(self):
6598                 "create the skeleton db directory structure.  No contents, virtuals, provides or anything.  Also will create /var/db/pkg if necessary."
6599                 """
6600                 This function should never get called (there is no reason to use it).
6601                 """
6602                 # XXXXX Delete this eventually
6603                 raise Exception, "This is bad. Don't use it."
6604                 if not os.path.exists(self.dbdir):
6605                         os.makedirs(self.dbdir)
6606
6607         def delete(self):
6608                 """
6609                 Remove this entry from the database
6610                 """
6611                 if not os.path.exists(self.dbdir):
6612                         return
6613                 try:
6614                         for x in listdir(self.dbdir):
6615                                 os.unlink(self.dbdir+"/"+x)
6616                         os.rmdir(self.dbdir)
6617                 except OSError, e:
6618                         print "!!! Unable to remove db entry for this package."
6619                         print "!!! It is possible that a subdirectory exists inside this one. Portage will"
6620                         print "!!! still register this package as installed as long as this directory exists."
6621                         print "!!! You may delete this directory with 'rm -Rf "+self.dbdir+"'"
6622                         print "!!! "+str(e)
6623                         print
6624                         sys.exit(1)
6625
6626         def clearcontents(self):
6627                 """
6628                 For a given db entry (self), erase the CONTENTS values.
6629                 """
6630                 if os.path.exists(self.dbdir+"/CONTENTS"):
6631                         os.unlink(self.dbdir+"/CONTENTS")
6632
6633         def getcontents(self):
6634                 """
6635                 Get the installed files of a given package (aka what that package installed)
6636                 """
6637                 if not os.path.exists(self.dbdir+"/CONTENTS"):
6638                         return None
6639                 if self.contentscache != []:
6640                         return self.contentscache
6641                 pkgfiles={}
6642                 myc=open(self.dbdir+"/CONTENTS","r")
6643                 mylines=myc.readlines()
6644                 myc.close()
6645                 pos=1
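                     # CONTENTS entry formats: "obj <path> <md5> <mtime>",
                     # "sym <path> -> <target> <mtime>", and "dir"/"dev"/"fif" followed by a
                     # path.  Entries are stored keyed by path as [type, mtime, md5] or
                     # [type, mtime, dest].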
6646                 for line in mylines:
6647                         mydat = line.split()
6648                         # we do this so we can remove from non-root filesystems
6649                         # (use the ROOT var to allow maintenance on other partitions)
6650                         try:
6651                                 mydat[1] = normalize_path(os.path.join(
6652                                         self.myroot, mydat[1].lstrip(os.path.sep)))
6653                                 if mydat[0]=="obj":
6654                                         #format: type, mtime, md5sum
6655                                         pkgfiles[" ".join(mydat[1:-2])]=[mydat[0], mydat[-1], mydat[-2]]
6656                                 elif mydat[0]=="dir":
6657                                         #format: type
6658                                         pkgfiles[" ".join(mydat[1:])]=[mydat[0] ]
6659                                 elif mydat[0]=="sym":
6660                                         #format: type, mtime, dest
6661                                         x=len(mydat)-1
6662                                         if (x >= 13) and (mydat[-1][-1]==')'): # Old/Broken symlink entry
6663                                                 mydat = mydat[:-10]+[mydat[-10:][stat.ST_MTIME][:-1]]
6664                                                 writemsg("FIXED SYMLINK LINE: %s\n" % mydat, 1)
6665                                                 x=len(mydat)-1
6666                                         splitter=-1
6667                                         while(x>=0):
6668                                                 if mydat[x]=="->":
6669                                                         splitter=x
6670                                                         break
6671                                                 x=x-1
6672                                         if splitter==-1:
6673                                                 return None
6674                                         pkgfiles[" ".join(mydat[1:splitter])]=[mydat[0], mydat[-1], " ".join(mydat[(splitter+1):-1])]
6675                                 elif mydat[0]=="dev":
6676                                         #format: type
6677                                         pkgfiles[" ".join(mydat[1:])]=[mydat[0] ]
6678                                 elif mydat[0]=="fif":
6679                                         #format: type
6680                                         pkgfiles[" ".join(mydat[1:])]=[mydat[0]]
6681                                 else:
6682                                         return None
6683                         except (KeyError,IndexError):
6684                                 print "portage: CONTENTS line",pos,"corrupt!"
6685                         pos += 1
6686                 self.contentscache=pkgfiles
6687                 return pkgfiles
6688
6689         def unmerge(self, pkgfiles=None, trimworld=1, cleanup=1,
6690                 ldpath_mtimes=None):
6691                 """
6692                 Calls prerm
6693                 Unmerges a given package (CPV)
6694                 Calls postrm
6695                 Calls cleanrm
6696                 Calls env_update
6697                 
6698                 @param pkgfiles: files to unmerge (generally self.getcontents() )
6699                 @type pkgfiles: Dictionary
6700                 @param trimworld: Remove CPV from world file if True, not if False
6701                 @type trimworld: Boolean
6702                 @param cleanup: cleanup to pass to doebuild (see doebuild)
6703                 @type cleanup: Boolean
6704                 @param ldpath_mtimes: mtimes to pass to env_update (see env_update)
6705                 @type ldpath_mtimes: Dictionary
6706                 @rtype: Integer
6707                 @returns:
6708                 1. os.EX_OK if everything went well.
6709                 2. return code of the failed phase (for prerm, postrm, cleanrm)
6710                 
6711                 Notes:
6712                 The caller must ensure that lockdb() and unlockdb() are called
6713                 before and after this method.
6714                 """
6715
6716                 contents = self.getcontents()
6717                 # Now, don't assume that the name of the ebuild is the same as the
6718                 # name of the dir; the package may have been moved.
6719                 myebuildpath = None
6720                 mystuff = listdir(self.dbdir, EmptyOnError=1)
6721                 for x in mystuff:
6722                         if x.endswith(".ebuild"):
6723                                 myebuildpath = os.path.join(self.dbdir, self.pkg + ".ebuild")
6724                                 if x[:-7] != self.pkg:
6725                                         # Clean up after vardbapi.move_ent() breakage in
6726                                         # portage versions before 2.1.2
6727                                         os.rename(os.path.join(self.dbdir, x), myebuildpath)
6728                                         write_atomic(os.path.join(self.dbdir, "PF"), self.pkg+"\n")
6729                                 break
6730
6731                 self.settings.load_infodir(self.dbdir)
6732                 if myebuildpath:
6733                         doebuild_environment(myebuildpath, "prerm", self.myroot,
6734                                 self.settings, 0, 0, self.vartree.dbapi)
6735                         catdir = os.path.dirname(self.settings["PORTAGE_BUILDDIR"])
6736                         portage_util.ensure_dirs(os.path.dirname(catdir),
6737                                 uid=portage_uid, gid=portage_gid, mode=070, mask=0)
6738                 builddir_lock = None
6739                 catdir_lock = None
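                     # The category directory is locked only while the build directory is
                     # created and locked; it is locked again in the finally clause so that
                     # it can be removed if it ended up empty.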
6740                 try:
6741                         if myebuildpath:
6742                                 catdir_lock = portage_locks.lockdir(catdir)
6743                                 portage_util.ensure_dirs(catdir,
6744                                         uid=portage_uid, gid=portage_gid,
6745                                         mode=070, mask=0)
6746                                 builddir_lock = portage_locks.lockdir(
6747                                         self.settings["PORTAGE_BUILDDIR"])
6748                                 try:
6749                                         portage_locks.unlockdir(catdir_lock)
6750                                 finally:
6751                                         catdir_lock = None
6752                                 # Eventually, we'd like to pass in the saved ebuild env here...
6753                                 retval = doebuild(myebuildpath, "prerm", self.myroot,
6754                                         self.settings, cleanup=cleanup, use_cache=0,
6755                                         mydbapi=self.vartree.dbapi, tree="vartree",
6756                                         vartree=self.vartree)
6757                                 # XXX: Decide how to handle failures here.
6758                                 if retval != os.EX_OK:
6759                                         writemsg("!!! FAILED prerm: %s\n" % retval, noiselevel=-1)
6760                                         return retval
6761
6762                         self._unmerge_pkgfiles(pkgfiles)
6763
6764                         if myebuildpath:
6765                                 retval = doebuild(myebuildpath, "postrm", self.myroot,
6766                                          self.settings, use_cache=0, tree="vartree",
6767                                          mydbapi=self.vartree.dbapi, vartree=self.vartree)
6768
6769                                 # process logs created during pre/postrm
6770                                 elog_process(self.mycpv, self.settings)
6771
6772                                 # XXX: Decide how to handle failures here.
6773                                 if retval != os.EX_OK:
6774                                         writemsg("!!! FAILED postrm: %s\n" % retval, noiselevel=-1)
6775                                         return retval
6776                                 doebuild(myebuildpath, "cleanrm", self.myroot, self.settings,
6777                                         tree="vartree", mydbapi=self.vartree.dbapi,
6778                                         vartree=self.vartree)
6779
6780                 finally:
6781                         if builddir_lock:
6782                                 portage_locks.unlockdir(builddir_lock)
6783                         try:
6784                                 if myebuildpath and not catdir_lock:
6785                                         # Lock catdir for removal if empty.
6786                                         catdir_lock = portage_locks.lockdir(catdir)
6787                         finally:
6788                                 if catdir_lock:
6789                                         try:
6790                                                 os.rmdir(catdir)
6791                                         except OSError, e:
6792                                                 if e.errno != errno.ENOTEMPTY:
6793                                                         raise
6794                                                 del e
6795                                         portage_locks.unlockdir(catdir_lock)
6796                 env_update(target_root=self.myroot, prev_mtimes=ldpath_mtimes,
6797                         contents=contents)
6798                 return os.EX_OK
6799
6800         def _unmerge_pkgfiles(self, pkgfiles):
6801                 """
6802                 
6803                 Unmerges the contents of a package from the liveFS
6804                 Removes the VDB entry for self
6805                 
6806                 @param pkgfiles: typically self.getcontents()
6807                 @type pkgfiles: Dictionary { filename: [ 'type', 'mtime', 'md5sum' ] }
6808                 @rtype: None
6809                 """
6810                 global dircache
6811                 dircache={}
6812
6813                 if not pkgfiles:
6814                         writemsg_stdout("No package files given... Grabbing a set.\n")
6815                         pkgfiles=self.getcontents()
6816
6817                 if pkgfiles:
6818                         mykeys=pkgfiles.keys()
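                             # Process paths in reverse-sorted order so that entries inside a
                             # directory are handled before the directory itself.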
6819                         mykeys.sort()
6820                         mykeys.reverse()
6821
6822                         #process symlinks second-to-last, directories last.
6823                         mydirs=[]
6824                         modprotect="/lib/modules/"
6825                         for objkey in mykeys:
6826                                 obj = normalize_path(objkey)
6827                                 if obj[:2]=="//":
6828                                         obj=obj[1:]
6829                                 statobj = None
6830                                 try:
6831                                         statobj = os.stat(obj)
6832                                 except OSError:
6833                                         pass
6834                                 lstatobj = None
6835                                 try:
6836                                         lstatobj = os.lstat(obj)
6837                                 except (OSError, AttributeError):
6838                                         pass
6839                                 islink = lstatobj is not None and stat.S_ISLNK(lstatobj.st_mode)
6840                                 if statobj is None:
6841                                         if not islink:
6842                                                 #we skip this if we're dealing with a symlink
6843                                                 #because os.stat() will operate on the
6844                                                 #link target rather than the link itself.
6845                                                 writemsg_stdout("--- !found "+str(pkgfiles[objkey][0])+ " %s\n" % obj)
6846                                                 continue
6847                                 # next line includes a tweak to protect modules from being unmerged,
6848                                 # but we don't protect modules from being overwritten if they are
6849                                 # upgraded. We effectively only want one half of the config protection
6850                                 # functionality for /lib/modules. For portage-ng both capabilities
6851                                 # should be able to be independently specified.
6852                                 if obj.startswith(modprotect):
6853                                         writemsg_stdout("--- cfgpro %s %s\n" % (pkgfiles[objkey][0], obj))
6854                                         continue
6855
6856                                 lmtime=str(lstatobj[stat.ST_MTIME])
6857                                 if (pkgfiles[objkey][0] not in ("dir","fif","dev")) and (lmtime != pkgfiles[objkey][1]):
6858                                         writemsg_stdout("--- !mtime %s %s\n" % (pkgfiles[objkey][0], obj))
6859                                         continue
6860
6861                                 if pkgfiles[objkey][0]=="dir":
6862                                         if statobj is None or not stat.S_ISDIR(statobj.st_mode):
6863                                                 writemsg_stdout("--- !dir   %s %s\n" % ("dir", obj))
6864                                                 continue
6865                                         mydirs.append(obj)
6866                                 elif pkgfiles[objkey][0]=="sym":
6867                                         if not islink:
6868                                                 writemsg_stdout("--- !sym   %s %s\n" % ("sym", obj))
6869                                                 continue
6870                                         try:
6871                                                 os.unlink(obj)
6872                                                 writemsg_stdout("<<<        %s %s\n" % ("sym",obj))
6873                                         except (OSError,IOError),e:
6874                                                 writemsg_stdout("!!!        %s %s\n" % ("sym",obj))
6875                                 elif pkgfiles[objkey][0]=="obj":
6876                                         if statobj is None or not stat.S_ISREG(statobj.st_mode):
6877                                                 writemsg_stdout("--- !obj   %s %s\n" % ("obj", obj))
6878                                                 continue
6879                                         mymd5 = None
6880                                         try:
6881                                                 mymd5 = portage_checksum.perform_md5(obj, calc_prelink=1)
6882                                         except portage_exception.FileNotFound, e:
6883                                                 # the file has disappeared between now and our stat call
6884                                                 writemsg_stdout("--- !obj   %s %s\n" % ("obj", obj))
6885                                                 continue
6886
6887                                         # lower() is needed because db entries used to be written in upper-case.
6888                                         # Lowering the stored value keeps backwards compatibility.
6889                                         if mymd5 != pkgfiles[objkey][2].lower():
6890                                                 writemsg_stdout("--- !md5   %s %s\n" % ("obj", obj))
6891                                                 continue
6892                                         try:
6893                                                 os.unlink(obj)
6894                                         except (OSError,IOError),e:
6895                                                 pass
6896                                         writemsg_stdout("<<<        %s %s\n" % ("obj",obj))
6897                                 elif pkgfiles[objkey][0]=="fif":
6898                                         if not stat.S_ISFIFO(lstatobj[stat.ST_MODE]):
6899                                                 writemsg_stdout("--- !fif   %s %s\n" % ("fif", obj))
6900                                                 continue
6901                                         writemsg_stdout("---        %s %s\n" % ("fif",obj))
6902                                 elif pkgfiles[objkey][0]=="dev":
6903                                         writemsg_stdout("---        %s %s\n" % ("dev",obj))
6904
6905                         mydirs.sort()
6906                         mydirs.reverse()
6907
6908                         for obj in mydirs:
6909                                 try:
6910                                         os.rmdir(obj)
6911                                         writemsg_stdout("<<<        %s %s\n" % ("dir",obj))
6912                                 except (OSError, IOError):
6913                                         writemsg_stdout("--- !empty dir %s\n" % obj)
6914
6915                 #remove self from vartree database so that our own virtual gets zapped if we're the last node
6916                 self.vartree.zap(self.mycpv)
6917
6918         def isowner(self,filename,destroot):
6919                 """ 
6920                 Check if filename is a new file or belongs to this package
6921                 (for this or a previous version)
6922                 
6923                 @param filename: path of the file as recorded in CONTENTS, interpreted relative to destroot
6924                 @type filename: String (Path)
6925                 @param destroot: root that the file is merged under (usually ${ROOT})
6926                 @type destroot: String (Path)
6927                 @rtype: Boolean
6928                 @returns:
6929                 1. True if this package owns the file.
6930                 2. False if this package does not own the file.
6931                 """
6932                 destfile = normalize_path(
6933                         os.path.join(destroot, filename.lstrip(os.path.sep)))
6934                 try:
6935                         mylstat = os.lstat(destfile)
6936                 except (OSError, IOError):
6937                         return True
6938
6939                 pkgfiles = self.getcontents()
6940                 if pkgfiles and filename in pkgfiles:
6941                         return True
6942                 if pkgfiles:
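                             # Fall back to a device/inode comparison so that a path reaching the
                             # same file through a symlinked directory is still recognized as owned.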
6943                         if self._contents_inodes is None:
6944                                 self._contents_inodes = set()
6945                                 for x in pkgfiles:
6946                                         try:
6947                                                 lstat = os.lstat(x)
6948                                                 self._contents_inodes.add((lstat.st_dev, lstat.st_ino))
6949                                         except OSError:
6950                                                 pass
6951                         if (mylstat.st_dev, mylstat.st_ino) in self._contents_inodes:
6952                                 return True
6953
6954                 return False
6955
6956         def isprotected(self, filename):
6957                 """In cases where an installed package in the same slot owns a
6958                 protected file that will be merged, bump the mtime on the installed
6959                 file in order to ensure that it isn't unmerged."""
6960                 if not self._config_protect.isprotected(filename):
6961                         return False
6962                 if self._installed_instance is None:
6963                         return True
6964                 mydata = self._installed_instance.getcontents().get(filename, None)
6965                 if mydata is None:
6966                         return True
6967
6968                 # Bump the mtime in order to ensure that the old config file doesn't
6969                 # get unmerged.  The user will have an opportunity to merge the new
6970                 # config with the old one.
6971                 try:
6972                         os.utime(filename, None)
6973                 except OSError, e:
6974                         if e.errno != errno.ENOENT:
6975                                 raise
6976                         del e
6977                         # The file has disappeared, so it's not protected.
6978                         return False
6979                 return True
6980
6981         def treewalk(self, srcroot, destroot, inforoot, myebuild, cleanup=0,
6982                 mydbapi=None, prev_mtimes=None):
6983                 """
6984                 
6985                 This function does the following:
6986                 
6987                 Performs collision protection checks
6988                 Calls doebuild(mydo=pkg_preinst)
6989                 Merges the package to the livefs
6990                 Unmerges the old version (if required)
6991                 Calls doebuild(mydo=pkg_postinst)
6992                 Calls env_update
6993                 
6994                 @param srcroot: Typically this is ${D}
6995                 @type srcroot: String (Path)
6996                 @param destroot: Path to merge to (usually ${ROOT})
6997                 @type destroot: String (Path)
6998                 @param inforoot: root of the vardb entry ?
6999                 @type inforoot: String (Path)
7000                 @param myebuild: path to the ebuild that we are processing
7001                 @type myebuild: String (Path)
7002                 @param mydbapi: dbapi which is handed to doebuild.
7003                 @type mydbapi: portdbapi instance
7004                 @param prev_mtimes: { Filename:mtime } mapping for env_update
7005                 @type prev_mtimes: Dictionary
7006                 @rtype: Integer
7007                 @returns:
7008                 1. 0 on success
7009                 2. 1 on failure
7010                 
7011                 secondhand is a list of symlinks that have been skipped due to their target
7012                 not existing; we will merge these symlinks at a later time.
7013                 """
7014                 if not os.path.isdir(srcroot):
7015                         writemsg("!!! Directory Not Found: D='%s'\n" % srcroot,
7016                         noiselevel=-1)
7017                         return 1
7018
7019                 if not os.path.exists(self.dbcatdir):
7020                         os.makedirs(self.dbcatdir)
7021
7022                 otherversions=[]
7023                 for v in self.vartree.dbapi.cp_list(self.mysplit[0]):
7024                         otherversions.append(v.split("/")[1])
7025
7026                 slot_matches = self.vartree.dbapi.match(
7027                         "%s:%s" % (self.mysplit[0], self.settings["SLOT"]))
7028                 if slot_matches:
7029                         # Used by self.isprotected().
7030                         self._installed_instance = dblink(self.cat,
7031                                 catsplit(slot_matches[0])[1], destroot, self.settings,
7032                                 vartree=self.vartree)
7033
7034                 # check for package collisions
7035                 if "collision-protect" in self.settings.features:
7036                         collision_ignore = set([normalize_path(myignore) for myignore in \
7037                                 self.settings.get("COLLISION_IGNORE", "").split()])
7038                         myfilelist = listdir(srcroot, recursive=1, filesonly=1, followSymlinks=False)
7039
7040                         # the linkcheck only works if we are in srcroot
7041                         mycwd = getcwd()
7042                         os.chdir(srcroot)
7043                         mysymlinks = filter(os.path.islink, listdir(srcroot, recursive=1, filesonly=0, followSymlinks=False))
7044                         myfilelist.extend(mysymlinks)
7045                         mysymlinked_directories = [s + os.path.sep for s in mysymlinks]
7046                         del mysymlinks
7047
7048
7049                         stopmerge=False
7050                         starttime=time.time()
7051                         i=0
7052
7053                         otherpkg=[]
7054                         mypkglist=[]
7055
7056                         if self.pkg in otherversions:
7057                                 otherversions.remove(self.pkg)  # we already checked this package
7058
7059                         myslot = self.settings["SLOT"]
7060                         for v in otherversions:
7061                                 # only allow versions with same slot to overwrite files
7062                                 if myslot == self.vartree.dbapi.aux_get("/".join((self.cat, v)), ["SLOT"])[0]:
7063                                         mypkglist.append(
7064                                                 dblink(self.cat, v, destroot, self.settings,
7065                                                         vartree=self.vartree))
7066
7067                         collisions = []
7068
7069                         print green("*")+" checking "+str(len(myfilelist))+" files for package collisions"
7070                         for f in myfilelist:
7071                                 nocheck = False
7072                                 # listdir isn't intelligent enough to exclude symlinked dirs,
7073                                 # so we have to do it ourself
7074                                 for s in mysymlinked_directories:
7075                                         if f.startswith(s):
7076                                                 nocheck = True
7077                                                 break
7078                                 if nocheck:
7079                                         continue
7080                                 i=i+1
7081                                 if i % 1000 == 0:
7082                                         print str(i)+" files checked ..."
7083                                 if f[0] != "/":
7084                                         f="/"+f
7085                                 isowned = False
7086                                 for ver in [self]+mypkglist:
7087                                         if (ver.isowner(f, destroot) or ver.isprotected(f)):
7088                                                 isowned = True
7089                                                 break
7090                                 if not isowned:
7091                                         collisions.append(f)
7092                                         print "existing file "+f+" is not owned by this package"
7093                                         stopmerge=True
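                                             # Paths listed in COLLISION_IGNORE (exact matches or directory
                                             # prefixes) are not treated as blocking collisions.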
7094                                         if collision_ignore:
7095                                                 if f in collision_ignore:
7096                                                         stopmerge = False
7097                                                 else:
7098                                                         for myignore in collision_ignore:
7099                                                                 if f.startswith(myignore + os.path.sep):
7100                                                                         stopmerge = False
7101                                                                         break
7102                         #print green("*")+" spent "+str(time.time()-starttime)+" seconds checking for file collisions"
7103                         if stopmerge:
7104                                 print red("*")+" This package is blocked because it wants to overwrite"
7105                                 print red("*")+" files belonging to other packages (see messages above)."
7106                                 print red("*")+" If you have no clue what this is all about, report it"
7107                                 print red("*")+" as a bug for this package on http://bugs.gentoo.org"
7108                                 print
7109                                 print red("package "+self.cat+"/"+self.pkg+" NOT merged")
7110                                 print
7111                                 print
7112                                 print "Searching all installed packages for file collisions..."
7113                                 print "Press Ctrl-C to Stop"
7114                                 print
7115                                 """ Note: The isowner calls result in a stat call for *every*
7116                                 single installed file, since the inode numbers are used to work
7117                                 around the problem of ambiguous paths caused by symlinked files
7118                                 and/or directories.  Though it is slow, it is as accurate as
7119                                 possible."""
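                                     # Illustrative sketch only (not the code used below): the path
                                     # ambiguity mentioned above can be resolved by comparing the
                                     # device and inode numbers of two paths, roughly:
                                     #
                                     #     def same_file(a, b):  # hypothetical helper
                                     #             try:
                                     #                     sa, sb = os.stat(a), os.stat(b)
                                     #             except OSError:
                                     #                     return False
                                     #             return (sa.st_dev, sa.st_ino) == (sb.st_dev, sb.st_ino)
                                     #
                                     # isowner() relies on this kind of stat-based comparison for every
                                     # installed file, which is why the search below is slow.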
7120                                 found_owner = False
7121                                 for cpv in self.vartree.dbapi.cpv_all():
7122                                         cat, pkg = catsplit(cpv)
7123                                         mylink = dblink(cat, pkg, destroot, self.settings,
7124                                                 vartree=self.vartree)
7125                                         mycollisions = []
7126                                         for f in collisions:
7127                                                 if mylink.isowner(f, destroot):
7128                                                         mycollisions.append(f)
7129                                         if mycollisions:
7130                                                 found_owner = True
7131                                                 print " * %s:" % cpv
7132                                                 print
7133                                                 for f in mycollisions:
7134                                                         print "     '%s'" % \
7135                                                                 os.path.join(destroot, f.lstrip(os.path.sep))
7136                                                 print
7137                                 if not found_owner:
7138                                         print "None of the installed packages claim the above file(s)."
7139                                         print
7140                                 sys.exit(1)
7141                         try:
7142                                 os.chdir(mycwd)
7143                         except OSError:
7144                                 pass
7145
7146                 if os.stat(srcroot).st_dev == os.stat(destroot).st_dev:
7147                         """ The merge process may move files out of the image directory,
7148                         which causes invalidation of the .installed flag."""
7149                         try:
7150                                 os.unlink(os.path.join(
7151                                         os.path.dirname(normalize_path(srcroot)), ".installed"))
7152                         except OSError, e:
7153                                 if e.errno != errno.ENOENT:
7154                                         raise
7155                                 del e
7156
7157                 # get old contents info for later unmerging
7158                 oldcontents = self.getcontents()
7159
7160                 self.dbdir = self.dbtmpdir
7161                 self.delete()
7162                 if not os.path.exists(self.dbtmpdir):
7163                         os.makedirs(self.dbtmpdir)
7164
7165                 writemsg_stdout(">>> Merging %s %s %s\n" % (self.mycpv,"to",destroot))
7166
7167                 # run preinst script
7168                 if myebuild is None:
7169                         myebuild = os.path.join(inforoot, self.pkg + ".ebuild")
7170                 a = doebuild(myebuild, "preinst", destroot, self.settings, cleanup=cleanup,
7171                         use_cache=0, tree=self.treetype, mydbapi=mydbapi,
7172                         vartree=self.vartree)
7173
7174                 # XXX: Decide how to handle failures here.
7175                 if a != os.EX_OK:
7176                         writemsg("!!! FAILED preinst: "+str(a)+"\n", noiselevel=-1)
7177                         return a
7178
7179                 # copy "info" files (like SLOT, CFLAGS, etc.) into the database
7180                 for x in listdir(inforoot):
7181                         self.copyfile(inforoot+"/"+x)
7182
7183                 # get current counter value (counter_tick also takes care of incrementing it)
7184                 # XXX Need to make this destroot, but it needs to be initialized first. XXX
7185                 # XXX bis: leads to some invalidentry() call through cp_all().
7186                 counter = self.vartree.dbapi.counter_tick(self.myroot, mycpv=self.mycpv)
7187                 # write local package counter for recording
7188                 lcfile = open(self.dbtmpdir+"/COUNTER","w")
7189                 lcfile.write(str(counter))
7190                 lcfile.close()
7191
7192                 # open CONTENTS file (possibly overwriting old one) for recording
7193                 outfile=open(self.dbtmpdir+"/CONTENTS","w")
7194
7195                 self.updateprotect()
7196
7197                 #if we have a file containing previously-merged config file md5sums, grab it.
7198                 conf_mem_file = os.path.join(destroot, CONFIG_MEMORY_FILE)
7199                 cfgfiledict = grabdict(conf_mem_file)
7200                 if self.settings.has_key("NOCONFMEM"):
7201                         cfgfiledict["IGNORE"]=1
7202                 else:
7203                         cfgfiledict["IGNORE"]=0
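                     # For reference, a loaded cfgfiledict maps each config-protected path to
                     # a one-element list holding the md5 of the version that was last merged,
                     # plus the "IGNORE" flag set above, e.g. (sketch with a made-up path and
                     # checksum):
                     #
                     #     {"/etc/foo/foo.conf": ["d41d8cd98f00b204e9800998ecf8427e"],
                     #      "IGNORE": 0}
                     #
                     # The "IGNORE" key is deleted again before the dict is written back to
                     # the config memory file further below.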
7204
7205                 # set umask to 0 for merging; save the old umask in prevmask so it can be restored afterwards (umask is a process-wide setting)
7206                 mymtime    = long(time.time())
7207                 prevmask   = os.umask(0)
7208                 secondhand = []
7209
7210                 # we do a first merge; this will recurse through all files in our srcroot but also build up a
7211                 # "second hand" of symlinks to merge later
7212                 if self.mergeme(srcroot,destroot,outfile,secondhand,"",cfgfiledict,mymtime):
7213                         return 1
7214
7215                 # now it's time to deal with our second hand; we'll loop until we can't merge anymore.  The rest are
7216                 # broken symlinks.  We'll merge them too.
7217                 lastlen=0
7218                 while len(secondhand) and len(secondhand)!=lastlen:
7219                         # clear the thirdhand.  Anything from our second hand that
7220                         # couldn't get merged will be added to thirdhand.
7221
7222                         thirdhand=[]
7223                         self.mergeme(srcroot,destroot,outfile,thirdhand,secondhand,cfgfiledict,mymtime)
7224
7225                         #swap hands
7226                         lastlen=len(secondhand)
7227
7228                         # our thirdhand now becomes our secondhand.  It's ok to throw
7229                         # away secondhand since thirdhand contains all the stuff that
7230                         # couldn't be merged.
7231                         secondhand = thirdhand
7232
7233                 if len(secondhand):
7234                         # force merge of remaining symlinks (broken or circular; oh well)
7235                         self.mergeme(srcroot,destroot,outfile,None,secondhand,cfgfiledict,mymtime)
7236
7237                 #restore umask
7238                 os.umask(prevmask)
7239
7240                 # flush and close the CONTENTS file
7241                 outfile.flush()
7242                 outfile.close()
7243
7244                 if os.path.exists(self.dbpkgdir):
7245                         writemsg_stdout(">>> Safely unmerging already-installed instance...\n")
7246                         self.dbdir = self.dbpkgdir
7247                         self.unmerge(oldcontents, trimworld=0, ldpath_mtimes=prev_mtimes)
7248                         self.dbdir = self.dbtmpdir
7249                         writemsg_stdout(">>> Original instance of package unmerged safely.\n")
7250
7251                 # We hold both directory locks.
7252                 self.dbdir = self.dbpkgdir
7253                 self.delete()
7254                 movefile(self.dbtmpdir, self.dbpkgdir, mysettings=self.settings)
7255                 contents = self.getcontents()
7256
7257                 #write out our collection of md5sums
7258                 if cfgfiledict.has_key("IGNORE"):
7259                         del cfgfiledict["IGNORE"]
7260
7261                 my_private_path = os.path.join(destroot, PRIVATE_PATH)
7262                 if not os.path.exists(my_private_path):
7263                         os.makedirs(my_private_path)
7264                         os.chown(my_private_path, os.getuid(), portage_gid)
7265                         os.chmod(my_private_path, 02770)
7266
7267                 writedict(cfgfiledict, conf_mem_file)
7268                 del conf_mem_file
7269
7270                 #do postinst script
7271                 a = doebuild(myebuild, "postinst", destroot, self.settings, use_cache=0,
7272                         tree=self.treetype, mydbapi=mydbapi, vartree=self.vartree)
7273
7274                 # XXX: Decide how to handle failures here.
7275                 if a != os.EX_OK:
7276                         writemsg("!!! FAILED postinst: "+str(a)+"\n", noiselevel=-1)
7277                         return a
7278
7279                 downgrade = False
7280                 for v in otherversions:
7281                         if pkgcmp(catpkgsplit(self.pkg)[1:], catpkgsplit(v)[1:]) < 0:
7282                                 downgrade = True
7283
7284                 #update environment settings, library paths. DO NOT change symlinks.
7285                 env_update(makelinks=(not downgrade),
7286                         target_root=self.settings["ROOT"], prev_mtimes=prev_mtimes,
7287                         contents=contents)
7288                 #dircache may break autoclean because it remembers the -MERGING-pkg file
7289                 global dircache
7290                 if dircache.has_key(self.dbcatdir):
7291                         del dircache[self.dbcatdir]
7292                 writemsg_stdout(">>> %s %s\n" % (self.mycpv,"merged."))
7293
7294                 # Process ebuild logfiles
7295                 elog_process(self.mycpv, self.settings)
7296                 if "noclean" not in self.settings.features:
7297                         doebuild(myebuild, "clean", destroot, self.settings,
7298                                 tree=self.treetype, mydbapi=mydbapi, vartree=self.vartree)
7299                 return os.EX_OK
7300
7301         def mergeme(self,srcroot,destroot,outfile,secondhand,stufftomerge,cfgfiledict,thismtime):
7302                 """
7303                 
7304                 This function handles actual merging of the package contents to the livefs.
7305                 It also handles config protection.
7306                 
7307                 @param srcroot: Where are we copying files from (usually ${D})
7308                 @type srcroot: String (Path)
7309                 @param destroot: Typically ${ROOT}
7310                 @type destroot: String (Path)
7311                 @param outfile: File to log operations to
7312                 @type outfile: File Object
7313                 @param secondhand: A list of items to merge in pass two (usually
7314                 symlinks that point to non-existing files that may get merged later)
7315                 @type secondhand: List
7316                 @param stufftomerge: Either a directory to merge, or a list of items.
7317                 @type stufftomerge: String or List
7318                 @param cfgfiledict: { File:mtime } mapping for config_protected files
7319                 @type cfgfiledict: Dictionary
7320                 @param thismtime: The current time (typically long(time.time()))
7321                 @type thismtime: Long
7322                 @rtype: None or Integer
7323                 @returns:
7324                 1. 1 on failure
7325                 2. None otherwise
7326                 
7327                 """
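                     # Usage sketch (these calls already appear in treewalk() above; shown
                     # here only to illustrate the two argument forms): the first pass gets
                     # an empty string so the whole image directory is walked, later passes
                     # get the leftover symlink list:
                     #
                     #     self.mergeme(srcroot, destroot, outfile, secondhand, "",
                     #             cfgfiledict, mymtime)
                     #     self.mergeme(srcroot, destroot, outfile, thirdhand, secondhand,
                     #             cfgfiledict, mymtime)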
7328                 from os.path import sep, join
7329                 srcroot = normalize_path(srcroot).rstrip(sep) + sep
7330                 destroot = normalize_path(destroot).rstrip(sep) + sep
7331                 # this is supposed to merge a list of files.  There will be 2 forms of argument passing.
7332                 if type(stufftomerge)==types.StringType:
7333                         #A directory is specified.  Figure out protection paths, listdir() it and process it.
7334                         mergelist = listdir(join(srcroot, stufftomerge))
7335                         offset=stufftomerge
7336                 else:
7337                         mergelist=stufftomerge
7338                         offset=""
7339                 for x in mergelist:
7340                         mysrc = join(srcroot, offset, x)
7341                         mydest = join(destroot, offset, x)
7342                         # myrealdest is mydest without the $ROOT prefix (makes a difference if ROOT!="/")
7343                         myrealdest = join(sep, offset, x)
7344                         # stat file once, test using S_* macros many times (faster that way)
7345                         try:
7346                                 mystat=os.lstat(mysrc)
7347                         except OSError, e:
7348                                 writemsg("\n")
7349                                 writemsg(red("!!! ERROR: There appears to be ")+bold("FILE SYSTEM CORRUPTION.")+red(" A file that is listed\n"))
7350                                 writemsg(red("!!!        as existing is not capable of being stat'd. If you are using an\n"))
7351                                 writemsg(red("!!!        experimental kernel, please boot into a stable one, force an fsck,\n"))
7352                                 writemsg(red("!!!        and ensure your filesystem is in a sane state. ")+bold("'shutdown -Fr now'\n"))
7353                                 writemsg(red("!!!        File:  ")+str(mysrc)+"\n", noiselevel=-1)
7354                                 writemsg(red("!!!        Error: ")+str(e)+"\n", noiselevel=-1)
7355                                 sys.exit(1)
7356                         except Exception, e:
7357                                 writemsg("\n")
7358                                 writemsg(red("!!! ERROR: An unknown error has occurred during the merge process.\n"))
7359                                 writemsg(red("!!!        A stat call returned the following error for the following file:\n"))
7360                                 writemsg(    "!!!        Please ensure that your filesystem is intact, otherwise report\n")
7361                                 writemsg(    "!!!        this as a portage bug at bugs.gentoo.org. Append 'emerge info'.\n")
7362                                 writemsg(    "!!!        File:  "+str(mysrc)+"\n", noiselevel=-1)
7363                                 writemsg(    "!!!        Error: "+str(e)+"\n", noiselevel=-1)
7364                                 sys.exit(1)
7365
7366
7367                         mymode=mystat[stat.ST_MODE]
7368                         # handy variables; mydest is the target object on the live filesystems;
7369                         # mysrc is the source object in the temporary install dir
7370                         try:
7371                                 mydmode = os.lstat(mydest).st_mode
7372                         except OSError, e:
7373                                 if e.errno != errno.ENOENT:
7374                                         raise
7375                                 del e
7376                                 #dest file doesn't exist
7377                                 mydmode=None
7378
7379                         if stat.S_ISLNK(mymode):
7380                                 # we are merging a symbolic link
7381                                 myabsto=abssymlink(mysrc)
7382                                 if myabsto.startswith(srcroot):
7383                                         myabsto=myabsto[len(srcroot):]
7384                                 myabsto = myabsto.lstrip(sep)
7385                                 myto=os.readlink(mysrc)
7386                                 if self.settings and self.settings["D"]:
7387                                         if myto.startswith(self.settings["D"]):
7388                                                 myto=myto[len(self.settings["D"]):]
7389                                 # myrealto contains the path of the real file to which this symlink points.
7390                                 # we can simply test for existence of this file to see if the target has been merged yet
7391                                 myrealto = normalize_path(os.path.join(destroot, myabsto))
7392                                 if mydmode!=None:
7393                                         #destination exists
7394                                         if not stat.S_ISLNK(mydmode):
7395                                                 if stat.S_ISDIR(mydmode):
7396                                                         # directory in the way: we can't merge a symlink over a directory
7397                                                         # we won't merge this, continue with next file...
7398                                                         continue
7399
7400                                                 if os.path.exists(mysrc) and stat.S_ISDIR(os.stat(mysrc)[stat.ST_MODE]):
7401                                                         # Kill file blocking installation of symlink to dir #71787
7402                                                         pass
7403                                                 elif self.isprotected(mydest):
7404                                                         # Use md5 of the target in ${D} if it exists...
7405                                                         try:
7406                                                                 newmd5 = portage_checksum.perform_md5(
7407                                                                         join(srcroot, myabsto))
7408                                                         except portage_exception.FileNotFound:
7409                                                                 # Maybe the target is merged already.
7410                                                                 try:
7411                                                                         newmd5 = portage_checksum.perform_md5(
7412                                                                                 myrealto)
7413                                                                 except portage_exception.FileNotFound:
7414                                                                         newmd5 = None
7415                                                         mydest = new_protect_filename(mydest,newmd5=newmd5)
7416
7417                                 # if secondhand is None it means we're operating in "force" mode and should not create a second hand.
7418                                 if (secondhand!=None) and (not os.path.exists(myrealto)):
7419                                         # either the target directory doesn't exist yet or the target file doesn't exist -- or
7420                                         # the target is a broken symlink.  We will add this file to our "second hand" and merge
7421                                         # it later.
7422                                         secondhand.append(mysrc[len(srcroot):])
7423                                         continue
7424                                 # unlinking no longer necessary; "movefile" will overwrite symlinks atomically and correctly
7425                                 mymtime=movefile(mysrc,mydest,newmtime=thismtime,sstat=mystat, mysettings=self.settings)
7426                                 if mymtime!=None:
7427                                         writemsg_stdout(">>> %s -> %s\n" % (mydest, myto))
7428                                         outfile.write("sym "+myrealdest+" -> "+myto+" "+str(mymtime)+"\n")
7429                                 else:
7430                                         print "!!! Failed to move file."
7431                                         print "!!!",mydest,"->",myto
7432                                         sys.exit(1)
7433                         elif stat.S_ISDIR(mymode):
7434                                 # we are merging a directory
7435                                 if mydmode!=None:
7436                                         # destination exists
7437
7438                                         if bsd_chflags:
7439                                                 # Save then clear flags on dest.
7440                                                 dflags=bsd_chflags.lgetflags(mydest)
7441                                                 if dflags != 0 and bsd_chflags.lchflags(mydest, 0) < 0:
7442                                                         writemsg("!!! Couldn't clear flags on '"+mydest+"'.\n",
7443                                                                 noiselevel=-1)
7444
7445                                         if not os.access(mydest, os.W_OK):
7446                                                 pkgstuff = pkgsplit(self.pkg)
7447                                                 writemsg("\n!!! Cannot write to '"+mydest+"'.\n", noiselevel=-1)
7448                                                 writemsg("!!! Please check permissions and directories for broken symlinks.\n")
7449                                                 writemsg("!!! You may start the merge process again by using ebuild:\n")
7450                                                 writemsg("!!! ebuild "+self.settings["PORTDIR"]+"/"+self.cat+"/"+pkgstuff[0]+"/"+self.pkg+".ebuild merge\n")
7451                                                 writemsg("!!! And finish by running this: env-update\n\n")
7452                                                 return 1
7453
7454                                         if stat.S_ISLNK(mydmode) or stat.S_ISDIR(mydmode):
7455                                                 # a symlink to an existing directory will work for us; keep it:
7456                                                 writemsg_stdout("--- %s/\n" % mydest)
7457                                                 if bsd_chflags:
7458                                                         bsd_chflags.lchflags(mydest, dflags)
7459                                         else:
7460                                                 # a non-directory and non-symlink-to-directory.  Won't work for us.  Move out of the way.
7461                                                 if movefile(mydest,mydest+".backup", mysettings=self.settings) is None:
7462                                                         sys.exit(1)
7463                                                 print "bak",mydest,mydest+".backup"
7464                                                 #now create our directory
7465                                                 if self.settings.selinux_enabled():
7466                                                         sid = selinux.get_sid(mysrc)
7467                                                         selinux.secure_mkdir(mydest,sid)
7468                                                 else:
7469                                                         os.mkdir(mydest)
7470                                                 if bsd_chflags:
7471                                                         bsd_chflags.lchflags(mydest, dflags)
7472                                                 os.chmod(mydest,mystat[0])
7473                                                 os.chown(mydest,mystat[4],mystat[5])
7474                                                 writemsg_stdout(">>> %s/\n" % mydest)
7475                                 else:
7476                                         #destination doesn't exist
7477                                         if self.settings.selinux_enabled():
7478                                                 sid = selinux.get_sid(mysrc)
7479                                                 selinux.secure_mkdir(mydest,sid)
7480                                         else:
7481                                                 os.mkdir(mydest)
7482                                         os.chmod(mydest,mystat[0])
7483                                         os.chown(mydest,mystat[4],mystat[5])
7484                                         writemsg_stdout(">>> %s/\n" % mydest)
7485                                 outfile.write("dir "+myrealdest+"\n")
7486                                 # recurse and merge this directory
7487                                 if self.mergeme(srcroot, destroot, outfile, secondhand,
7488                                         join(offset, x), cfgfiledict, thismtime):
7489                                         return 1
7490                         elif stat.S_ISREG(mymode):
7491                                 # we are merging a regular file
7492                                 mymd5=portage_checksum.perform_md5(mysrc,calc_prelink=1)
7493                                 # calculate config file protection stuff
7494                                 mydestdir=os.path.dirname(mydest)
7495                                 moveme=1
7496                                 zing="!!!"
7497                                 if mydmode!=None:
7498                                         # destination file exists
7499                                         if stat.S_ISDIR(mydmode):
7500                                                 # install of destination is blocked by an existing directory with the same name
7501                                                 moveme=0
7502                                                 writemsg_stdout("!!! %s\n" % mydest)
7503                                         elif stat.S_ISREG(mydmode) or (stat.S_ISLNK(mydmode) and os.path.exists(mydest) and stat.S_ISREG(os.stat(mydest)[stat.ST_MODE])):
7504                                                 cfgprot=0
7505                                                 # install of destination is blocked by an existing regular file,
7506                                                 # or by a symlink to an existing regular file;
7507                                                 # now, config file management may come into play.
7508                                                 # we only need to tweak mydest if cfg file management is in play.
7509                                                 if self.isprotected(mydest):
7510                                                         # we have a protection path; enable config file management.
7511                                                         destmd5=portage_checksum.perform_md5(mydest,calc_prelink=1)
7512                                                         if mymd5==destmd5:
7513                                                                 #file already in place; simply update mtimes of destination
7514                                                                 os.utime(mydest,(thismtime,thismtime))
7515                                                                 zing="---"
7516                                                                 moveme=0
7517                                                         else:
7518                                                                 if mymd5 == cfgfiledict.get(myrealdest, [None])[0]:
7519                                                                         """ An identical update has previously been
7520                                                                         merged.  Skip it unless the user has chosen
7521                                                                         --noconfmem."""
7522                                                                         zing = "-o-"
7523                                                                         moveme = cfgfiledict["IGNORE"]
7524                                                                         cfgprot = cfgfiledict["IGNORE"]
7525                                                                 else:
7526                                                                         moveme = 1
7527                                                                         cfgprot = 1
7528                                                         if moveme:
7529                                                                 # Merging a new file, so update confmem.
7530                                                                 cfgfiledict[myrealdest] = [mymd5]
7531                                                         elif destmd5 == cfgfiledict.get(myrealdest, [None])[0]:
7532                                                                 """A previously remembered update has been
7533                                                                 accepted, so it is removed from confmem."""
7534                                                                 del cfgfiledict[myrealdest]
7535                                                 if cfgprot:
7536                                                         mydest = new_protect_filename(mydest, newmd5=mymd5)
7537
7538                                 # whether config protection or not, we merge the new file the
7539                                 # same way.  Unless moveme=0 (blocking directory)
7540                                 if moveme:
7541                                         mymtime=movefile(mysrc,mydest,newmtime=thismtime,sstat=mystat, mysettings=self.settings)
7542                                         if mymtime is None:
7543                                                 sys.exit(1)
7544                                         zing=">>>"
7545                                 else:
7546                                         mymtime=thismtime
7547                                         # We need to touch the destination so that on --update the
7548                                         # old package won't yank the file with it. (non-cfgprot related)
7549                                         os.utime(mydest,(thismtime,thismtime))
7550                                         zing="---"
7551                                 if self.settings["USERLAND"] == "Darwin" and myrealdest[-2:] == ".a":
7552
7553                                         # XXX kludge, can be killed when portage stops relying on
7554                                         # md5+mtime, and uses refcounts
7555                                         # alright, we've fooled w/ mtime on the file; this pisses off static archives
7556                                         # basically internal mtime != file's mtime, so the linker (falsely) thinks
7557                                         # the archive is stale, and needs to have its TOC rebuilt.
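                                             # For reference (assuming the common "!<arch>" member format),
                                             # each archive member has a fixed 60-byte header which the
                                             # seeks below walk field by field:
                                             #
                                             #     name  16 bytes   (skipped)
                                             #     mtime 12 bytes   (overwritten with lms)
                                             #     uid    6 bytes
                                             #     gid    6 bytes   (uid/gid/mode skipped together: 20 bytes)
                                             #     mode   8 bytes
                                             #     size  10 bytes   (read to find the member data length)
                                             #     end    2 bytes   (plus one pad byte when size is odd)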
7558
7559                                         myf = open(mydest, "r+")
7560
7561                                         # ar mtime field is digits padded with spaces, 12 bytes.
7562                                         lms=str(thismtime+5).ljust(12)
7563                                         myf.seek(0)
7564                                         magic=myf.read(8)
7565                                         if magic != "!<arch>\n":
7566                                                 # not an archive (dolib.a from portage.py ends up here, for example)
7567                                                 myf.close()
7568                                         else:
7569                                                 st = os.stat(mydest)
7570                                                 while myf.tell() < st.st_size - 12:
7571                                                         # skip object name
7572                                                         myf.seek(16,1)
7573
7574                                                         # update mtime
7575                                                         myf.write(lms)
7576
7577                                                         # skip uid/gid/mperm
7578                                                         myf.seek(20,1)
7579
7580                                                         # read the archive member's size
7581                                                         x=long(myf.read(10))
7582
7583                                                         # skip the trailing newlines, and add the potential
7584                                                         # extra padding byte if it's not an even size
7585                                                         myf.seek(x + 2 + (x % 2),1)
7586
7587                                                 # and now we're at the end. yay.
7588                                                 myf.close()
7589                                                 mymd5 = portage_checksum.perform_md5(mydest, calc_prelink=1)
7590                                         os.utime(mydest,(thismtime,thismtime))
7591
7592                                 if mymtime!=None:
7593                                         zing=">>>"
7594                                         outfile.write("obj "+myrealdest+" "+mymd5+" "+str(mymtime)+"\n")
7595                                 writemsg_stdout("%s %s\n" % (zing,mydest))
7596                         else:
7597                                 # we are merging a fifo or device node
7598                                 zing="!!!"
7599                                 if mydmode is None:
7600                                         # destination doesn't exist
7601                                         if movefile(mysrc,mydest,newmtime=thismtime,sstat=mystat, mysettings=self.settings)!=None:
7602                                                 zing=">>>"
7603                                         else:
7604                                                 sys.exit(1)
7605                                 if stat.S_ISFIFO(mymode):
7606                                         outfile.write("fif %s\n" % myrealdest)
7607                                 else:
7608                                         outfile.write("dev %s\n" % myrealdest)
7609                                 writemsg_stdout(zing+" "+mydest+"\n")
7610
7611         def merge(self, mergeroot, inforoot, myroot, myebuild=None, cleanup=0,
7612                 mydbapi=None, prev_mtimes=None):
7613                 try:
7614                         self.lockdb()
7615                         return self.treewalk(mergeroot, myroot, inforoot, myebuild,
7616                                 cleanup=cleanup, mydbapi=mydbapi, prev_mtimes=prev_mtimes)
7617                 finally:
7618                         self.unlockdb()
7619
7620         def getstring(self,name):
7621                 "returns contents of a file with whitespace converted to spaces"
7622                 if not os.path.exists(self.dbdir+"/"+name):
7623                         return ""
7624                 myfile=open(self.dbdir+"/"+name,"r")
7625                 mydata=myfile.read().split()
7626                 myfile.close()
7627                 return " ".join(mydata)
7628
7629         def copyfile(self,fname):
7630                 shutil.copyfile(fname,self.dbdir+"/"+os.path.basename(fname))
7631
7632         def getfile(self,fname):
7633                 if not os.path.exists(self.dbdir+"/"+fname):
7634                         return ""
7635                 myfile=open(self.dbdir+"/"+fname,"r")
7636                 mydata=myfile.read()
7637                 myfile.close()
7638                 return mydata
7639
7640         def setfile(self,fname,data):
7641                 write_atomic(os.path.join(self.dbdir, fname), data)
7642
7643         def getelements(self,ename):
7644                 if not os.path.exists(self.dbdir+"/"+ename):
7645                         return []
7646                 myelement=open(self.dbdir+"/"+ename,"r")
7647                 mylines=myelement.readlines()
7648                 myreturn=[]
7649                 for x in mylines:
7650                         for y in x[:-1].split():
7651                                 myreturn.append(y)
7652                 myelement.close()
7653                 return myreturn
7654
7655         def setelements(self,mylist,ename):
7656                 myelement=open(self.dbdir+"/"+ename,"w")
7657                 for x in mylist:
7658                         myelement.write(x+"\n")
7659                 myelement.close()
7660
7661         def isregular(self):
7662                 "Is this a regular package (does it have a CATEGORY file?  A dblink can be virtual *and* regular)"
7663                 return os.path.exists(self.dbdir+"/CATEGORY")
7664
7665 class FetchlistDict(UserDict.DictMixin):
7666         """This provides a mapping interface to retrieve fetch lists.  It's used
7667         to allow portage_manifest.Manifest to access fetch lists via a standard
7668         mapping interface rather than use the dbapi directly."""
7669         def __init__(self, pkgdir, settings, mydbapi):
7670                 """pkgdir is a directory containing ebuilds and settings is passed into
7671                 portdbapi.getfetchlist for __getitem__ calls."""
7672                 self.pkgdir = pkgdir
7673                 self.cp = os.sep.join(pkgdir.split(os.sep)[-2:])
7674                 self.settings = settings
7675                 self.mytree = os.path.realpath(os.path.dirname(os.path.dirname(pkgdir)))
7676                 self.portdb = mydbapi
7677         def __getitem__(self, pkg_key):
7678                 """Returns the complete fetch list for a given package."""
7679                 return self.portdb.getfetchlist(pkg_key, mysettings=self.settings,
7680                         all=True, mytree=self.mytree)[1]
7681         def has_key(self, pkg_key):
7682                 """Returns true if the given package exists within pkgdir."""
7683                 return pkg_key in self.keys()
7684         def keys(self):
7685                 """Returns keys for all packages within pkgdir"""
7686                 return self.portdb.cp_list(self.cp, mytree=self.mytree)
7687
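     # A FetchlistDict instance is what gets handed to portage_manifest.Manifest
     # so that Manifest code can look up fetch lists through plain mapping access,
     # e.g. (usage sketch; variable names are hypothetical):
     #
     #     fetchlists = FetchlistDict(pkgdir, mysettings, portdb)
     #     for cpv in fetchlists.keys():
     #             uris = fetchlists[cpv]
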
7688 def pkgmerge(mytbz2, myroot, mysettings, mydbapi=None, vartree=None, prev_mtimes=None):
7689         """will merge a .tbz2 file, returning os.EX_OK on success or a nonzero
7690                 error code if the merge could not be completed.  This code assumes
7691                 the package exists."""
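             # Usage sketch (the path and keyword values here are made up): callers
             # such as emerge drive binary installs roughly like this:
             #
             #     retval = pkgmerge("/usr/portage/packages/All/foo-1.0.tbz2", "/",
             #             mysettings, mydbapi=bindb, vartree=vartree)
             #     if retval != os.EX_OK:
             #             ...the merge failed...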
7692         global db
7693         if mydbapi is None:
7694                 mydbapi = db[myroot]["bintree"].dbapi
7695         if vartree is None:
7696                 vartree = db[myroot]["vartree"]
7697         if mytbz2[-5:]!=".tbz2":
7698                 print "!!! Not a .tbz2 file"
7699                 return 1
7700
7701         tbz2_lock = None
7702         builddir_lock = None
7703         catdir_lock = None
7704         try:
7705                 """ Don't lock the tbz2 file because the filesystem could be readonly or
7706                 shared by a cluster."""
7707                 #tbz2_lock = portage_locks.lockfile(mytbz2, wantnewlockfile=1)
7708
7709                 mypkg = os.path.basename(mytbz2)[:-5]
7710                 xptbz2 = xpak.tbz2(mytbz2)
7711                 mycat = xptbz2.getfile("CATEGORY")
7712                 if not mycat:
7713                         writemsg("!!! CATEGORY info missing from info chunk, aborting...\n",
7714                                 noiselevel=-1)
7715                         return 1
7716                 mycat = mycat.strip()
7717
7718                 # These are the same directories that would be used at build time.
7719                 builddir = os.path.join(
7720                         mysettings["PORTAGE_TMPDIR"], "portage", mycat, mypkg)
7721                 catdir = os.path.dirname(builddir)
7722                 pkgloc = os.path.join(builddir, "image")
7723                 infloc = os.path.join(builddir, "build-info")
7724                 myebuild = os.path.join(
7725                         infloc, os.path.basename(mytbz2)[:-4] + "ebuild")
7726                 portage_util.ensure_dirs(os.path.dirname(catdir),
7727                         uid=portage_uid, gid=portage_gid, mode=070, mask=0)
7728                 catdir_lock = portage_locks.lockdir(catdir)
7729                 portage_util.ensure_dirs(catdir,
7730                         uid=portage_uid, gid=portage_gid, mode=070, mask=0)
7731                 builddir_lock = portage_locks.lockdir(builddir)
7732                 try:
7733                         portage_locks.unlockdir(catdir_lock)
7734                 finally:
7735                         catdir_lock = None
7736                 try:
7737                         shutil.rmtree(builddir)
7738                 except (IOError, OSError), e:
7739                         if e.errno != errno.ENOENT:
7740                                 raise
7741                         del e
7742                 for mydir in (builddir, pkgloc, infloc):
7743                         portage_util.ensure_dirs(mydir, uid=portage_uid,
7744                                 gid=portage_gid, mode=0755)
7745                 writemsg_stdout(">>> Extracting info\n")
7746                 xptbz2.unpackinfo(infloc)
7747                 mysettings.load_infodir(infloc)
7748                 # Store the md5sum in the vdb.
7749                 fp = open(os.path.join(infloc, "BINPKGMD5"), "w")
7750                 fp.write(str(portage_checksum.perform_md5(mytbz2))+"\n")
7751                 fp.close()
7752
7753                 debug = mysettings.get("PORTAGE_DEBUG", "") == "1"
7754
7755                 # Eventually we'd like to pass in the saved ebuild env here.
7756                 retval = doebuild(myebuild, "setup", myroot, mysettings, debug=debug,
7757                         tree="bintree", mydbapi=mydbapi, vartree=vartree)
7758                 if retval != os.EX_OK:
7759                         writemsg("!!! Setup failed: %s\n" % retval, noiselevel=-1)
7760                         return retval
7761
7762                 writemsg_stdout(">>> Extracting %s\n" % mypkg)
7763                 retval = portage_exec.spawn_bash(
7764                         "bzip2 -dqc -- '%s' | tar -xp -C '%s' -f -" % (mytbz2, pkgloc),
7765                         env=mysettings.environ())
7766                 if retval != os.EX_OK:
7767                         writemsg("!!! Error Extracting '%s'\n" % mytbz2, noiselevel=-1)
7768                         return retval
7769                 #portage_locks.unlockfile(tbz2_lock)
7770                 #tbz2_lock = None
7771
7772                 mylink = dblink(mycat, mypkg, myroot, mysettings, vartree=vartree,
7773                         treetype="bintree")
7774                 retval = mylink.merge(pkgloc, infloc, myroot, myebuild, cleanup=0,
7775                         mydbapi=mydbapi, prev_mtimes=prev_mtimes)
7776                 return retval
7777         finally:
7778                 if tbz2_lock:
7779                         portage_locks.unlockfile(tbz2_lock)
7780                 if builddir_lock:
7781                         try:
7782                                 shutil.rmtree(builddir)
7783                         except (IOError, OSError), e:
7784                                 if e.errno != errno.ENOENT:
7785                                         raise
7786                                 del e
7787                         portage_locks.unlockdir(builddir_lock)
7788                         try:
7789                                 if not catdir_lock:
7790                                         # Lock catdir for removal if empty.
7791                                         catdir_lock = portage_locks.lockdir(catdir)
7792                         finally:
7793                                 if catdir_lock:
7794                                         try:
7795                                                 os.rmdir(catdir)
7796                                         except OSError, e:
7797                                                 if e.errno != errno.ENOTEMPTY:
7798                                                         raise
7799                                                 del e
7800                                         portage_locks.unlockdir(catdir_lock)
7801
7802 def deprecated_profile_check():
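             # The deprecated-profile marker is a plain text file: its first line
             # names the suggested replacement profile and any remaining lines are
             # printed verbatim as upgrade instructions.  Sketch of its contents
             # (the profile name below is made up):
             #
             #     default-linux/x86/2006.1
             #     # Point /etc/make.profile at the profile named above.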
7803         if not os.access(DEPRECATED_PROFILE_FILE, os.R_OK):
7804                 return False
7805         deprecatedfile = open(DEPRECATED_PROFILE_FILE, "r")
7806         dcontent = deprecatedfile.readlines()
7807         deprecatedfile.close()
7808         newprofile = dcontent[0]
7809         writemsg(red("\n!!! Your current profile is deprecated and not supported anymore.\n"),
7810                 noiselevel=-1)
7811         writemsg(red("!!! Please upgrade to the following profile if possible:\n"),
7812                 noiselevel=-1)
7813         writemsg(8*" "+green(newprofile)+"\n", noiselevel=-1)
7814         if len(dcontent) > 1:
7815                 writemsg("To upgrade do the following steps:\n", noiselevel=-1)
7816                 for myline in dcontent[1:]:
7817                         writemsg(myline, noiselevel=-1)
7818                 writemsg("\n\n", noiselevel=-1)
7819         return True
7820
7821 # gets virtual package settings
7822 def getvirtuals(myroot):
7823         global settings
7824         writemsg("--- DEPRECATED call to getvirtuals()\n")
7825         return settings.getvirtuals(myroot)
7826
7827 def commit_mtimedb(mydict=None, filename=None):
7828         if mydict is None:
7829                 global mtimedb
7830                 if "mtimedb" not in globals() or mtimedb is None:
7831                         return
7832                 mtimedb.commit()
7833                 return
7834         if filename is None:
7835                 global mtimedbfile
7836                 filename = mtimedbfile
7837         mydict["version"] = VERSION
7838         d = {} # for full backward compat, pickle it as a plain dict object.
7839         d.update(mydict)
7840         try:
7841                 f = atomic_ofstream(filename)
7842                 cPickle.dump(d, f, -1)
7843                 f.close()
7844                 portage_util.apply_secpass_permissions(filename, uid=uid, gid=portage_gid, mode=0664)
7845         except (IOError, OSError), e:
7846                 pass
7847
7848 def portageexit():
7849         global uid,portage_gid,portdb,db
7850         if secpass and not os.environ.has_key("SANDBOX_ACTIVE"):
7851                 close_portdbapi_caches()
7852                 commit_mtimedb()
7853
7854 atexit_register(portageexit)
7855
7856 def global_updates(mysettings, trees, prev_mtimes):
7857         """
7858         Perform new global updates if they exist in $PORTDIR/profiles/updates/.
7859
7860         @param mysettings: A config instance for ROOT="/".
7861         @type mysettings: config
7862         @param trees: A dictionary containing portage trees.
7863         @type trees: dict
7864         @param prev_mtimes: A dictionary containing mtimes of files located in
7865                 $PORTDIR/profiles/updates/.
7866         @type prev_mtimes: dict
7867         @rtype: None or List
7868         @return: None if there were no updates, otherwise a list of update commands
7869                 that have been performed.
7870         """
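             # The update files themselves contain one directive per line; only the
             # "move" and "slotmove" forms are acted on below, e.g. (sketch with
             # made-up package names):
             #
             #     move net-misc/oldname net-misc/newname
             #     slotmove =dev-libs/foo-1* 0 1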
7871         # only do this if we're root and not running repoman/ebuild digest
7872         global secpass
7873         if secpass < 2 or "SANDBOX_ACTIVE" in os.environ:
7874                 return
7875         updpath = os.path.join(mysettings["PORTDIR"], "profiles", "updates")
7876
7877         try:
7878                 if mysettings["PORTAGE_CALLER"] == "fixpackages":
7879                         update_data = grab_updates(updpath)
7880                 else:
7881                         update_data = grab_updates(updpath, prev_mtimes)
7882         except portage_exception.DirectoryNotFound:
7883                 writemsg("--- 'profiles/updates' is empty or not available. Empty portage tree?\n")
7884                 return
7885         myupd = None
7886         if len(update_data) > 0:
7887                 do_upgrade_packagesmessage = 0
7888                 myupd = []
7889                 timestamps = {}
7890                 for mykey, mystat, mycontent in update_data:
7891                         writemsg_stdout("\n\n")
7892                         writemsg_stdout(green("Performing Global Updates: ")+bold(mykey)+"\n")
7893                         writemsg_stdout("(Could take a couple of minutes if you have a lot of binary packages.)\n")
7894                         writemsg_stdout("  "+bold(".")+"='update pass'  "+bold("*")+"='binary update'  "+bold("@")+"='/var/db move'\n"+"  "+bold("s")+"='/var/db SLOT move' "+bold("S")+"='binary SLOT move' "+bold("p")+"='update /etc/portage/package.*'\n")
7895                         valid_updates, errors = parse_updates(mycontent)
7896                         myupd.extend(valid_updates)
7897                         writemsg_stdout(len(valid_updates) * "." + "\n")
7898                         if len(errors) == 0:
7899                                 # Update our internal mtime since we
7900                                 # processed all of our directives.
7901                                 timestamps[mykey] = long(mystat.st_mtime)
7902                         else:
7903                                 for msg in errors:
7904                                         writemsg("%s\n" % msg, noiselevel=-1)
7905
7906                 update_config_files("/",
7907                         mysettings.get("CONFIG_PROTECT","").split(),
7908                         mysettings.get("CONFIG_PROTECT_MASK","").split(),
7909                         myupd)
7910
7911                 trees["/"]["bintree"] = binarytree("/", mysettings["PKGDIR"],
7912                         settings=mysettings)
7913                 for update_cmd in myupd:
7914                         if update_cmd[0] == "move":
7915                                 trees["/"]["vartree"].dbapi.move_ent(update_cmd)
7916                                 trees["/"]["bintree"].move_ent(update_cmd)
7917                         elif update_cmd[0] == "slotmove":
7918                                 trees["/"]["vartree"].dbapi.move_slot_ent(update_cmd)
7919                                 trees["/"]["bintree"].move_slot_ent(update_cmd)
7920
7921                 # The above global updates proceed quickly, so they
7922                 # are considered a single mtimedb transaction.
7923                 if len(timestamps) > 0:
7924                         # We do not update the mtime in the mtimedb
7925                         # until after _all_ of the above updates have
7926                         # been processed because the mtimedb will
7927                         # automatically commit when killed by ctrl C.
7928                         for mykey, mtime in timestamps.iteritems():
7929                                 prev_mtimes[mykey] = mtime
7930
7931                 # We gotta do the brute force updates for these now.
7932                 if mysettings["PORTAGE_CALLER"] == "fixpackages" or \
7933                 "fixpackages" in mysettings.features:
7934                         trees["/"]["bintree"].update_ents(myupd)
7935                 else:
7936                         do_upgrade_packagesmessage = 1
7937
7938                 # Update progress above is indicated by characters written to stdout so
7939                 # we print a couple new lines here to separate the progress output from
7940                 # what follows.
7941                 print
7942                 print
7943
7944                 if do_upgrade_packagesmessage and \
7945                         listdir(os.path.join(mysettings["PKGDIR"], "All"), EmptyOnError=1):
7946                         writemsg_stdout(" ** Skipping packages. Run 'fixpackages' or set it in FEATURES to fix the")
7947                         writemsg_stdout("\n    tbz2's in the packages directory. "+bold("Note: This can take a very long time."))
7948                         writemsg_stdout("\n")
7949         if myupd:
7950                 return myupd
7951
7952 #continue setting up other trees
7953
7954 class MtimeDB(dict):
7955         def __init__(self, filename):
7956                 dict.__init__(self)
7957                 self.filename = filename
7958                 self._load(filename)
7959
7960         def _load(self, filename):
7961                 try:
7962                         f = open(filename)
7963                         mypickle = cPickle.Unpickler(f)
7964                         mypickle.find_global = None
7965                         d = mypickle.load()
7966                         f.close()
7967                         del f
7968                 except (IOError, OSError, EOFError, cPickle.UnpicklingError):
7969                         d = {}
7970
7971                 if "old" in d:
7972                         d["updates"] = d["old"]
7973                         del d["old"]
7974                 if "cur" in d:
7975                         del d["cur"]
7976
7977                 d.setdefault("starttime", 0)
7978                 d.setdefault("version", "")
7979                 for k in ("info", "ldpath", "updates"):
7980                         d.setdefault(k, {})
7981
7982                 mtimedbkeys = set(("info", "ldpath", "resume", "resume_backup",
7983                         "starttime", "updates", "version"))
7984
7985                 for k in d.keys():
7986                         if k not in mtimedbkeys:
7987                                 writemsg("Deleting invalid mtimedb key: %s\n" % str(k))
7988                                 del d[k]
7989                 self.update(d)
7990                 self._clean_data = copy.deepcopy(d)
7991
7992         def commit(self):
7993                 if not self.filename:
7994                         return
7995                 d = {}
7996                 d.update(self)
7997                 # Only commit if the internal state has changed.
7998                 if d != self._clean_data:
7999                         commit_mtimedb(mydict=d, filename=self.filename)
8000                         self._clean_data = copy.deepcopy(d)
8001
8002 def create_trees(config_root=None, target_root=None, trees=None):
8003         if trees is None:
8004                 trees = {}
8005         else:
8006                 # clean up any existing portdbapi instances
8007                 for myroot in trees:
8008                         portdb = trees[myroot]["porttree"].dbapi
8009                         portdb.close_caches()
8010                         portdbapi.portdbapi_instances.remove(portdb)
8011                         del trees[myroot]["porttree"], myroot, portdb
8012
8013         settings = config(config_root=config_root, target_root=target_root,
8014                 config_incrementals=portage_const.INCREMENTALS)
8015         settings.lock()
8016         settings.validate()
8017
8018         myroots = [(settings["ROOT"], settings)]
8019         if settings["ROOT"] != "/":
8020                 settings = config(config_root=None, target_root=None,
8021                         config_incrementals=portage_const.INCREMENTALS)
8022                 settings.lock()
8023                 settings.validate()
8024                 myroots.append((settings["ROOT"], settings))
8025
8026         for myroot, mysettings in myroots:
8027                 trees[myroot] = portage_util.LazyItemsDict(trees.get(myroot, None))
8028                 trees[myroot].addLazySingleton("virtuals", mysettings.getvirtuals, myroot)
8029                 trees[myroot].addLazySingleton(
8030                         "vartree", vartree, myroot, categories=mysettings.categories,
8031                                 settings=mysettings)
8032                 trees[myroot].addLazySingleton("porttree",
8033                         portagetree, myroot, settings=mysettings)
8034                 trees[myroot].addLazySingleton("bintree",
8035                         binarytree, myroot, mysettings["PKGDIR"], settings=mysettings)
8036         return trees
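
# A minimal sketch of how the returned structure might be consumed (the tree
# names mirror the keys registered above; the "/" root is always present):
#
#     trees = create_trees(config_root="/", target_root="/")
#     portdb = trees["/"]["porttree"].dbapi
#     vardb = trees["/"]["vartree"].dbapi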
8037
8038 # Initialization of legacy globals.  No functions/classes below this point,
8039 # please!  When the functions and classes above become independent of the
8040 # global variables below, it will be possible to make the code below
8041 # conditional on a backward compatibility flag (backward compatibility could
8042 # be disabled via an environment variable, for example).  This will enable
8043 # new code that is aware of this flag to import portage without the
8044 # unnecessary overhead (and other issues!) of initializing the legacy globals.
8045
8046 def init_legacy_globals():
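        """Populate the module-level compatibility globals (db, settings,
        portdb, mtimedb and friends) named in the global statement below.  See
        the comment block above for why this should eventually become
        optional."""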
8047         global db, settings, root, portdb, selinux_enabled, mtimedbfile, mtimedb, \
8048         archlist, features, groups, pkglines, thirdpartymirrors, usedefaults, \
8049         profiledir, flushmtimedb
8050
8051         # Portage needs to ensure a sane umask for the files it creates.
8052         os.umask(022)
8053
8054         kwargs = {}
8055         for k, envvar in (("config_root", "PORTAGE_CONFIGROOT"), ("target_root", "ROOT")):
8056                 kwargs[k] = os.environ.get(envvar, "/")
8057
8058         db = create_trees(**kwargs)
8059
8060         settings = db["/"]["vartree"].settings
8061         portdb = db["/"]["porttree"].dbapi
8062
8063         for myroot in db:
8064                 if myroot != "/":
8065                         settings = db[myroot]["vartree"].settings
8066                         portdb = db[myroot]["porttree"].dbapi
8067                         break
8068
8069         root = settings["ROOT"]
8070
8071         mtimedbfile = os.path.join("/", CACHE_PATH.lstrip(os.path.sep), "mtimedb")
8072         mtimedb = MtimeDB(mtimedbfile)
8073
8074         # ========================================================================
8075         # COMPATIBILITY
8076         # These attributes should not be used
8077         # within Portage under any circumstances.
8078         # ========================================================================
8079         archlist    = settings.archlist()
8080         features    = settings.features
8081         groups      = settings["ACCEPT_KEYWORDS"].split()
8082         pkglines    = settings.packages
8083         selinux_enabled   = settings.selinux_enabled()
8084         thirdpartymirrors = settings.thirdpartymirrors()
8085         usedefaults       = settings.use_defs
8086         profiledir  = None
8087         if os.path.isdir(PROFILE_PATH):
8088                 profiledir = PROFILE_PATH
8089         def flushmtimedb(record):
8090                 writemsg("portage.flushmtimedb() is DEPRECATED\n")
8091         # ========================================================================
8092         # COMPATIBILITY
8093         # These attributes should not be used
8094         # within Portage under any circumstances.
8095         # ========================================================================
8096
8097 # WARNING!
8098 # The PORTAGE_LEGACY_GLOBALS environment variable is reserved for internal
8099 # use within Portage.  External use of this variable is unsupported because
8100 # it is experimental and its behavior is likely to change.
8101 if "PORTAGE_LEGACY_GLOBALS" not in os.environ:
8102         init_legacy_globals()
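
# A minimal sketch of how new code might opt out of the legacy globals,
# assuming only the presence of the variable matters (as checked above);
# create_trees() is one way to obtain the same data explicitly:
#
#     import os
#     os.environ["PORTAGE_LEGACY_GLOBALS"] = "false"
#     import portage
#     trees = portage.create_trees()
#     del os.environ["PORTAGE_LEGACY_GLOBALS"]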
8103
8104 # Clear the cache
8105 dircache = {}
8106
8107 # ============================================================================
8108 # ============================================================================
8109