Make dep_zapdeps() pull SLOT from the correct dbapi instance when it falls back to...
[portage.git] / pym / portage.py
1 # portage.py -- core Portage functionality
2 # Copyright 1998-2004 Gentoo Foundation
3 # Distributed under the terms of the GNU General Public License v2
4 # $Id$
5
6
7 VERSION="$Rev$"[6:-2] + "-svn"
8
9 # ===========================================================================
10 # START OF IMPORTS -- START OF IMPORTS -- START OF IMPORTS -- START OF IMPORT
11 # ===========================================================================
12
13 try:
14         import sys
15 except ImportError:
16         print "Failed to import sys! Something is _VERY_ wrong with python."
17         raise
18
19 try:
20         import copy, errno, os, re, shutil, time, types
21         try:
22                 import cPickle
23         except ImportError:
24                 import pickle as cPickle
25
26         import stat
27         import commands
28         from time import sleep
29         from random import shuffle
30         import UserDict
31         if getattr(__builtins__, "set", None) is None:
32                 from sets import Set as set
33         from itertools import chain, izip
34 except ImportError, e:
35         sys.stderr.write("\n\n")
36         sys.stderr.write("!!! Failed to complete python imports. These are internal modules for\n")
37         sys.stderr.write("!!! python and failure here indicates that you have a problem with python\n")
38         sys.stderr.write("!!! itself and thus portage is not able to continue processing.\n\n")
39
40         sys.stderr.write("!!! You might consider starting python with verbose flags to see what has\n")
41         sys.stderr.write("!!! gone wrong. Here is the information we got for this exception:\n")
42         sys.stderr.write("    "+str(e)+"\n\n");
43         raise
44
45 try:
46         # XXX: This should get renamed to bsd_chflags, I think.
47         import chflags
48         bsd_chflags = chflags
49 except ImportError:
50         bsd_chflags = None
51
52 try:
53         from cache.cache_errors import CacheError
54         import cvstree
55         import xpak
56         import getbinpkg
57         import portage_dep
58         from portage_dep import dep_getcpv, dep_getkey, get_operator, \
59                 isjustname, isspecific, isvalidatom, \
60                 match_from_list, match_to_list, best_match_to_list
61
62         # XXX: This needs to get cleaned up.
63         import output
64         from output import bold, colorize, green, red, yellow
65
66         import portage_const
67         from portage_const import VDB_PATH, PRIVATE_PATH, CACHE_PATH, DEPCACHE_PATH, \
68           USER_CONFIG_PATH, MODULES_FILE_PATH, CUSTOM_PROFILE_PATH, PORTAGE_BASE_PATH, \
69           PORTAGE_BIN_PATH, PORTAGE_PYM_PATH, PROFILE_PATH, LOCALE_DATA_PATH, \
70           EBUILD_SH_BINARY, SANDBOX_BINARY, BASH_BINARY, \
71           MOVE_BINARY, PRELINK_BINARY, WORLD_FILE, MAKE_CONF_FILE, MAKE_DEFAULTS_FILE, \
72           DEPRECATED_PROFILE_FILE, USER_VIRTUALS_FILE, EBUILD_SH_ENV_FILE, \
73           INVALID_ENV_FILE, CUSTOM_MIRRORS_FILE, CONFIG_MEMORY_FILE,\
74           INCREMENTALS, EAPI, MISC_SH_BINARY
75
76         from portage_data import ostype, lchown, userland, secpass, uid, wheelgid, \
77                                  portage_uid, portage_gid, userpriv_groups
78         from portage_manifest import Manifest
79
80         import portage_util
81         from portage_util import atomic_ofstream, apply_secpass_permissions, apply_recursive_permissions, \
82                 dump_traceback, getconfig, grabdict, grabdict_package, grabfile, grabfile_package, \
83                 map_dictlist_vals, new_protect_filename, normalize_path, \
84                 pickle_read, pickle_write, stack_dictlist, stack_dicts, stack_lists, \
85                 unique_array, varexpand, writedict, writemsg, writemsg_stdout, write_atomic
86         import portage_exception
87         import portage_gpg
88         import portage_locks
89         import portage_exec
90         from portage_exec import atexit_register, run_exitfuncs
91         from portage_locks import unlockfile,unlockdir,lockfile,lockdir
92         import portage_checksum
93         from portage_checksum import perform_md5,perform_checksum,prelink_capable
94         import eclass_cache
95         from portage_localization import _
96         from portage_update import dep_transform, fixdbentries, grab_updates, \
97                 parse_updates, update_config_files, update_dbentries
98
99         # Need these functions directly in portage namespace to not break every external tool in existence
100         from portage_versions import best, catpkgsplit, catsplit, pkgcmp, \
101                 pkgsplit, vercmp, ververify
102
103         # endversion and endversion_keys are for backward compatibility only.
104         from portage_versions import endversion_keys
105         from portage_versions import suffix_value as endversion
106
107 except ImportError, e:
108         sys.stderr.write("\n\n")
109         sys.stderr.write("!!! Failed to complete portage imports. There are internal modules for\n")
110         sys.stderr.write("!!! portage and failure here indicates that you have a problem with your\n")
111         sys.stderr.write("!!! installation of portage. Please try a rescue portage located in the\n")
112         sys.stderr.write("!!! portage tree under '/usr/portage/sys-apps/portage/files/' (default).\n")
113         sys.stderr.write("!!! There is a README.RESCUE file that details the steps required to perform\n")
114         sys.stderr.write("!!! a recovery of portage.\n")
115         sys.stderr.write("    "+str(e)+"\n\n")
116         raise
117
118
119 try:
120         import portage_selinux as selinux
121 except OSError, e:
122         writemsg("!!! SELinux not loaded: %s\n" % str(e), noiselevel=-1)
123         del e
124 except ImportError:
125         pass
126
127 # ===========================================================================
128 # END OF IMPORTS -- END OF IMPORTS -- END OF IMPORTS -- END OF IMPORTS -- END
129 # ===========================================================================
130
131
132 def load_mod(name):
133         modname = ".".join(name.split(".")[:-1])
134         mod = __import__(modname)
135         components = name.split('.')
136         for comp in components[1:]:
137                 mod = getattr(mod, comp)
138         return mod
139
140 def best_from_dict(key, top_dict, key_order, EmptyOnError=1, FullCopy=1, AllowEmpty=1):
141         for x in key_order:
142                 if top_dict.has_key(x) and top_dict[x].has_key(key):
143                         if FullCopy:
144                                 return copy.deepcopy(top_dict[x][key])
145                         else:
146                                 return top_dict[x][key]
147         if EmptyOnError:
148                 return ""
149         else:
150                 raise KeyError, "Key not found in list; '%s'" % key
151
152 def getcwd():
153         "this fixes situations where the current directory doesn't exist"
154         try:
155                 return os.getcwd()
156         except OSError: #dir doesn't exist
157                 os.chdir("/")
158                 return "/"
159 getcwd()
160
161 def abssymlink(symlink):
162         "This reads symlinks, resolving the relative symlinks, and returning the absolute."
163         mylink=os.readlink(symlink)
164         if mylink[0] != '/':
165                 mydir=os.path.dirname(symlink)
166                 mylink=mydir+"/"+mylink
167         return os.path.normpath(mylink)
168
169 dircache = {}
170 cacheHit=0
171 cacheMiss=0
172 cacheStale=0
173 def cacheddir(my_original_path, ignorecvs, ignorelist, EmptyOnError, followSymlinks=True):
174         global cacheHit,cacheMiss,cacheStale
175         mypath = normalize_path(my_original_path)
176         if dircache.has_key(mypath):
177                 cacheHit += 1
178                 cached_mtime, list, ftype = dircache[mypath]
179         else:
180                 cacheMiss += 1
181                 cached_mtime, list, ftype = -1, [], []
182         try:
183                 pathstat = os.stat(mypath)
184                 if stat.S_ISDIR(pathstat[stat.ST_MODE]):
185                         mtime = pathstat[stat.ST_MTIME]
186                 else:
187                         raise portage_exception.DirectoryNotFound(mypath)
188         except (IOError,OSError,portage_exception.PortageException):
189                 if EmptyOnError:
190                         return [], []
191                 return None, None
192         # Python retuns mtime in seconds, so if it was changed in the last few seconds, it could be invalid
193         if mtime != cached_mtime or time.time() - mtime < 4:
194                 if dircache.has_key(mypath):
195                         cacheStale += 1
196                 list = os.listdir(mypath)
197                 ftype = []
198                 for x in list:
199                         try:
200                                 if followSymlinks:
201                                         pathstat = os.stat(mypath+"/"+x)
202                                 else:
203                                         pathstat = os.lstat(mypath+"/"+x)
204
205                                 if stat.S_ISREG(pathstat[stat.ST_MODE]):
206                                         ftype.append(0)
207                                 elif stat.S_ISDIR(pathstat[stat.ST_MODE]):
208                                         ftype.append(1)
209                                 elif stat.S_ISLNK(pathstat[stat.ST_MODE]):
210                                         ftype.append(2)
211                                 else:
212                                         ftype.append(3)
213                         except (IOError, OSError):
214                                 ftype.append(3)
215                 dircache[mypath] = mtime, list, ftype
216
217         ret_list = []
218         ret_ftype = []
219         for x in range(0, len(list)):
220                 if(ignorecvs and (len(list[x]) > 2) and (list[x][:2]!=".#")):
221                         ret_list.append(list[x])
222                         ret_ftype.append(ftype[x])
223                 elif (list[x] not in ignorelist):
224                         ret_list.append(list[x])
225                         ret_ftype.append(ftype[x])
226
227         writemsg("cacheddirStats: H:%d/M:%d/S:%d\n" % (cacheHit, cacheMiss, cacheStale),10)
228         return ret_list, ret_ftype
229
230 def listdir(mypath, recursive=False, filesonly=False, ignorecvs=False, ignorelist=[], followSymlinks=True,
231         EmptyOnError=False, dirsonly=False):
232         """
233         Portage-specific implementation of os.listdir
234
235         @param mypath: Path whose contents you wish to list
236         @type mypath: String
237         @param recursive: Recursively scan directories contained within mypath
238         @type recursive: Boolean
239         @param filesonly; Only return files, not more directories
240         @type filesonly: Boolean
241         @param ignorecvs: Ignore CVS directories ('CVS','.svn','SCCS')
242         @type ignorecvs: Boolean
243         @param ignorelist: List of filenames/directories to exclude
244         @type ignorelist: List
245         @param followSymlinks: Follow Symlink'd files and directories
246         @type followSymlinks: Boolean
247         @param EmptyOnError: Return [] if an error occurs.
248         @type EmptyOnError: Boolean
249         @param dirsonly: Only return directories.
250         @type dirsonly: Boolean
251         @rtype: List
252         @returns: A list of files and directories (or just files or just directories) or an empty list.
253         """
254
255         list, ftype = cacheddir(mypath, ignorecvs, ignorelist, EmptyOnError, followSymlinks)
256
257         if list is None:
258                 list=[]
259         if ftype is None:
260                 ftype=[]
261
262         if not (filesonly or dirsonly or recursive):
263                 return list
264
265         if recursive:
266                 x=0
267                 while x<len(ftype):
268                         if ftype[x]==1 and not (ignorecvs and os.path.basename(list[x]) in ('CVS','.svn','SCCS')):
269                                 l,f = cacheddir(mypath+"/"+list[x], ignorecvs, ignorelist, EmptyOnError,
270                                         followSymlinks)
271
272                                 l=l[:]
273                                 for y in range(0,len(l)):
274                                         l[y]=list[x]+"/"+l[y]
275                                 list=list+l
276                                 ftype=ftype+f
277                         x+=1
278         if filesonly:
279                 rlist=[]
280                 for x in range(0,len(ftype)):
281                         if ftype[x]==0:
282                                 rlist=rlist+[list[x]]
283         elif dirsonly:
284                 rlist = []
285                 for x in range(0, len(ftype)):
286                         if ftype[x] == 1:
287                                 rlist = rlist + [list[x]]       
288         else:
289                 rlist=list
290
291         return rlist
292
293 def flatten(mytokens):
294         """this function now turns a [1,[2,3]] list into
295         a [1,2,3] list and returns it."""
296         newlist=[]
297         for x in mytokens:
298                 if type(x)==types.ListType:
299                         newlist.extend(flatten(x))
300                 else:
301                         newlist.append(x)
302         return newlist
303
304 #beautiful directed graph object
305
306 class digraph:
307         def __init__(self):
308                 """Create an empty digraph"""
309                 
310                 # { node : ( { child : priority } , { parent : priority } ) }
311                 self.nodes = {}
312                 self.order = []
313
314         def add(self, node, parent, priority=0):
315                 """Adds the specified node with the specified parent.
316                 
317                 If the dep is a soft-dep and the node already has a hard
318                 relationship to the parent, the relationship is left as hard."""
319                 
320                 if node not in self.nodes:
321                         self.nodes[node] = ({}, {})
322                         self.order.append(node)
323                 
324                 if not parent:
325                         return
326                 
327                 if parent not in self.nodes:
328                         self.nodes[parent] = ({}, {})
329                         self.order.append(parent)
330                 
331                 if parent in self.nodes[node][1]:
332                         if priority > self.nodes[node][1][parent]:
333                                 self.nodes[node][1][parent] = priority
334                 else:
335                         self.nodes[node][1][parent] = priority
336                 
337                 if node in self.nodes[parent][0]:
338                         if priority > self.nodes[parent][0][node]:
339                                 self.nodes[parent][0][node] = priority
340                 else:
341                         self.nodes[parent][0][node] = priority
342
343         def remove(self, node):
344                 """Removes the specified node from the digraph, also removing
345                 and ties to other nodes in the digraph. Raises KeyError if the
346                 node doesn't exist."""
347                 
348                 if node not in self.nodes:
349                         raise KeyError(node)
350                 
351                 for parent in self.nodes[node][1]:
352                         del self.nodes[parent][0][node]
353                 for child in self.nodes[node][0]:
354                         del self.nodes[child][1][node]
355                 
356                 del self.nodes[node]
357                 self.order.remove(node)
358
359         def contains(self, node):
360                 """Checks if the digraph contains mynode"""
361                 return node in self.nodes
362
363         def all_nodes(self):
364                 """Return a list of all nodes in the graph"""
365                 return self.order[:]
366
367         def child_nodes(self, node, ignore_priority=None):
368                 """Return all children of the specified node"""
369                 if ignore_priority is None:
370                         return self.nodes[node][0].keys()
371                 children = []
372                 for child, priority in self.nodes[node][0].iteritems():
373                         if priority > ignore_priority:
374                                 children.append(child)
375                 return children
376
377         def parent_nodes(self, node):
378                 """Return all parents of the specified node"""
379                 return self.nodes[node][1].keys()
380
381         def leaf_nodes(self, ignore_priority=None):
382                 """Return all nodes that have no children
383                 
384                 If ignore_soft_deps is True, soft deps are not counted as
385                 children in calculations."""
386                 
387                 leaf_nodes = []
388                 for node in self.order:
389                         is_leaf_node = True
390                         for child in self.nodes[node][0]:
391                                 if self.nodes[node][0][child] > ignore_priority:
392                                         is_leaf_node = False
393                                         break
394                         if is_leaf_node:
395                                 leaf_nodes.append(node)
396                 return leaf_nodes
397
398         def root_nodes(self, ignore_priority=None):
399                 """Return all nodes that have no parents.
400                 
401                 If ignore_soft_deps is True, soft deps are not counted as
402                 parents in calculations."""
403                 
404                 root_nodes = []
405                 for node in self.order:
406                         is_root_node = True
407                         for parent in self.nodes[node][1]:
408                                 if self.nodes[node][1][parent] > ignore_priority:
409                                         is_root_node = False
410                                         break
411                         if is_root_node:
412                                 root_nodes.append(node)
413                 return root_nodes
414
415         def is_empty(self):
416                 """Checks if the digraph is empty"""
417                 return len(self.nodes) == 0
418
419         def clone(self):
420                 clone = digraph()
421                 clone.nodes = copy.deepcopy(self.nodes)
422                 clone.order = self.order[:]
423                 return clone
424
425         # Backward compatibility
426         addnode = add
427         allnodes = all_nodes
428         allzeros = leaf_nodes
429         hasnode = contains
430         empty = is_empty
431         copy = clone
432
433         def delnode(self, node):
434                 try:
435                         self.remove(node)
436                 except KeyError:
437                         pass
438
439         def firstzero(self):
440                 leaf_nodes = self.leaf_nodes()
441                 if leaf_nodes:
442                         return leaf_nodes[0]
443                 return None
444
445         def hasallzeros(self, ignore_priority=None):
446                 return len(self.leaf_nodes(ignore_priority=ignore_priority)) == \
447                         len(self.order)
448
449         def debug_print(self):
450                 for node in self.nodes:
451                         print node,
452                         if self.nodes[node][0]:
453                                 print "depends on"
454                         else:
455                                 print "(no children)"
456                         for child in self.nodes[node][0]:
457                                 print "  ",child,
458                                 print "(%s)" % self.nodes[node][0][child]
459
460
461 _elog_atexit_handlers = []
462 def elog_process(cpv, mysettings):
463         mylogfiles = listdir(mysettings["T"]+"/logging/")
464         # shortcut for packages without any messages
465         if len(mylogfiles) == 0:
466                 return
467         # exploit listdir() file order so we process log entries in chronological order
468         mylogfiles.reverse()
469         mylogentries = {}
470         my_elog_classes = set(mysettings.get("PORTAGE_ELOG_CLASSES", "").split())
471         for f in mylogfiles:
472                 msgfunction, msgtype = f.split(".")
473                 if msgtype.upper() not in my_elog_classes \
474                                 and msgtype.lower() not in my_elog_classes:
475                         continue
476                 if msgfunction not in portage_const.EBUILD_PHASES:
477                         writemsg("!!! can't process invalid log file: %s\n" % f,
478                                 noiselevel=-1)
479                         continue
480                 if not msgfunction in mylogentries:
481                         mylogentries[msgfunction] = []
482                 msgcontent = open(mysettings["T"]+"/logging/"+f, "r").readlines()
483                 mylogentries[msgfunction].append((msgtype, msgcontent))
484
485         # in case the filters matched all messages
486         if len(mylogentries) == 0:
487                 return
488
489         # generate a single string with all log messages
490         fulllog = ""
491         for phase in portage_const.EBUILD_PHASES:
492                 if not phase in mylogentries:
493                         continue
494                 for msgtype,msgcontent in mylogentries[phase]:
495                         fulllog += "%s: %s\n" % (msgtype, phase)
496                         for line in msgcontent:
497                                 fulllog += line
498                         fulllog += "\n"
499
500         # pass the processing to the individual modules
501         logsystems = mysettings["PORTAGE_ELOG_SYSTEM"].split()
502         for s in logsystems:
503                 # - is nicer than _ for module names, so allow people to use it.
504                 s = s.replace("-", "_")
505                 try:
506                         # FIXME: ugly ad.hoc import code
507                         # TODO:  implement a common portage module loader
508                         logmodule = __import__("elog_modules.mod_"+s)
509                         m = getattr(logmodule, "mod_"+s)
510                         def timeout_handler(signum, frame):
511                                 raise portage_exception.PortageException(
512                                         "Timeout in elog_process for system '%s'" % s)
513                         import signal
514                         signal.signal(signal.SIGALRM, timeout_handler)
515                         # Timeout after one minute (in case something like the mail
516                         # module gets hung).
517                         signal.alarm(60)
518                         try:
519                                 m.process(mysettings, cpv, mylogentries, fulllog)
520                         finally:
521                                 signal.alarm(0)
522                         if hasattr(m, "finalize") and not m.finalize in _elog_atexit_handlers:
523                                 _elog_atexit_handlers.append(m.finalize)
524                                 atexit_register(m.finalize, mysettings)
525                 except (ImportError, AttributeError), e:
526                         writemsg("!!! Error while importing logging modules " + \
527                                 "while loading \"mod_%s\":\n" % str(s))
528                         writemsg("%s\n" % str(e), noiselevel=-1)
529                 except portage_exception.PortageException, e:
530                         writemsg("%s\n" % str(e), noiselevel=-1)
531
532         # clean logfiles to avoid repetitions
533         for f in mylogfiles:
534                 try:
535                         os.unlink(os.path.join(mysettings["T"], "logging", f))
536                 except OSError:
537                         pass
538
539 #parse /etc/env.d and generate /etc/profile.env
540
541 def env_update(makelinks=1, target_root=None, prev_mtimes=None, contents=None):
542         if target_root is None:
543                 global root
544                 target_root = root
545         if prev_mtimes is None:
546                 global mtimedb
547                 prev_mtimes = mtimedb["ldpath"]
548         envd_dir = os.path.join(target_root, "etc", "env.d")
549         portage_util.ensure_dirs(envd_dir, mode=0755)
550         fns = listdir(envd_dir, EmptyOnError=1)
551         fns.sort()
552         templist = []
553         for x in fns:
554                 if len(x) < 3:
555                         continue
556                 if not x[0].isdigit() or not x[1].isdigit():
557                         continue
558                 if x.startswith(".") or x.endswith("~") or x.endswith(".bak"):
559                         continue
560                 templist.append(x)
561         fns = templist
562         del templist
563
564         space_separated = set(["CONFIG_PROTECT", "CONFIG_PROTECT_MASK"])
565         colon_separated = set(["ADA_INCLUDE_PATH", "ADA_OBJECTS_PATH",
566                 "CLASSPATH", "INFODIR", "INFOPATH", "KDEDIRS", "LDPATH", "MANPATH",
567                   "PATH", "PKG_CONFIG_PATH", "PRELINK_PATH", "PRELINK_PATH_MASK",
568                   "PYTHONPATH", "ROOTPATH"])
569
570         config_list = []
571
572         for x in fns:
573                 file_path = os.path.join(envd_dir, x)
574                 try:
575                         myconfig = getconfig(file_path, expand=False)
576                 except portage_exception.ParseError, e:
577                         writemsg("!!! '%s'\n" % str(e), noiselevel=-1)
578                         del e
579                         continue
580                 if myconfig is None:
581                         # broken symlink or file removed by a concurrent process
582                         writemsg("!!! File Not Found: '%s'\n" % file_path, noiselevel=-1)
583                         continue
584                 config_list.append(myconfig)
585                 if "SPACE_SEPARATED" in myconfig:
586                         space_separated.update(myconfig["SPACE_SEPARATED"].split())
587                         del myconfig["SPACE_SEPARATED"]
588                 if "COLON_SEPARATED" in myconfig:
589                         colon_separated.update(myconfig["COLON_SEPARATED"].split())
590                         del myconfig["COLON_SEPARATED"]
591
592         env = {}
593         specials = {}
594         for var in space_separated:
595                 mylist = []
596                 for myconfig in config_list:
597                         if var in myconfig:
598                                 mylist.extend(filter(None, myconfig[var].split()))
599                                 del myconfig[var] # prepare for env.update(myconfig)
600                 if mylist:
601                         env[var] = " ".join(mylist)
602                 specials[var] = mylist
603
604         for var in colon_separated:
605                 mylist = []
606                 for myconfig in config_list:
607                         if var in myconfig:
608                                 mylist.extend(filter(None, myconfig[var].split(":")))
609                                 del myconfig[var] # prepare for env.update(myconfig)
610                 if mylist:
611                         env[var] = ":".join(mylist)
612                 specials[var] = mylist
613
614         for myconfig in config_list:
615                 """Cumulative variables have already been deleted from myconfig so that
616                 they won't be overwritten by this dict.update call."""
617                 env.update(myconfig)
618
619         ldsoconf_path = os.path.join(target_root, "etc", "ld.so.conf")
620         try:
621                 myld = open(ldsoconf_path)
622                 myldlines=myld.readlines()
623                 myld.close()
624                 oldld=[]
625                 for x in myldlines:
626                         #each line has at least one char (a newline)
627                         if x[0]=="#":
628                                 continue
629                         oldld.append(x[:-1])
630         except (IOError, OSError), e:
631                 if e.errno != errno.ENOENT:
632                         raise
633                 oldld = None
634
635         ld_cache_update=False
636
637         newld = specials["LDPATH"]
638         if (oldld!=newld):
639                 #ld.so.conf needs updating and ldconfig needs to be run
640                 myfd = atomic_ofstream(ldsoconf_path)
641                 myfd.write("# ld.so.conf autogenerated by env-update; make all changes to\n")
642                 myfd.write("# contents of /etc/env.d directory\n")
643                 for x in specials["LDPATH"]:
644                         myfd.write(x+"\n")
645                 myfd.close()
646                 ld_cache_update=True
647
648         # Update prelink.conf if we are prelink-enabled
649         if prelink_capable:
650                 newprelink = atomic_ofstream(
651                         os.path.join(target_root, "etc", "prelink.conf"))
652                 newprelink.write("# prelink.conf autogenerated by env-update; make all changes to\n")
653                 newprelink.write("# contents of /etc/env.d directory\n")
654
655                 for x in ["/bin","/sbin","/usr/bin","/usr/sbin","/lib","/usr/lib"]:
656                         newprelink.write("-l "+x+"\n");
657                 for x in specials["LDPATH"]+specials["PATH"]+specials["PRELINK_PATH"]:
658                         if not x:
659                                 continue
660                         if x[-1]!='/':
661                                 x=x+"/"
662                         plmasked=0
663                         for y in specials["PRELINK_PATH_MASK"]:
664                                 if not y:
665                                         continue
666                                 if y[-1]!='/':
667                                         y=y+"/"
668                                 if y==x[0:len(y)]:
669                                         plmasked=1
670                                         break
671                         if not plmasked:
672                                 newprelink.write("-h "+x+"\n")
673                 for x in specials["PRELINK_PATH_MASK"]:
674                         newprelink.write("-b "+x+"\n")
675                 newprelink.close()
676
677         # Portage stores mtimes with 1 second granularity but in >=python-2.5 finer
678         # granularity is possible.  In order to avoid the potential ambiguity of
679         # mtimes that differ by less than 1 second, sleep here if any of the
680         # directories have been modified during the current second.
681         sleep_for_mtime_granularity = False
682         current_time = long(time.time())
683         mtime_changed = False
684         lib_dirs = set()
685         for lib_dir in portage_util.unique_array(specials["LDPATH"]+['usr/lib','usr/lib64','usr/lib32','lib','lib64','lib32']):
686                 x = os.path.join(target_root, lib_dir.lstrip(os.sep))
687                 try:
688                         newldpathtime = long(os.stat(x).st_mtime)
689                         lib_dirs.add(normalize_path(x))
690                 except OSError, oe:
691                         if oe.errno == errno.ENOENT:
692                                 try:
693                                         del prev_mtimes[x]
694                                 except KeyError:
695                                         pass
696                                 # ignore this path because it doesn't exist
697                                 continue
698                         raise
699                 if newldpathtime == current_time:
700                         sleep_for_mtime_granularity = True
701                 if x in prev_mtimes:
702                         if prev_mtimes[x] == newldpathtime:
703                                 pass
704                         else:
705                                 prev_mtimes[x] = newldpathtime
706                                 mtime_changed = True
707                 else:
708                         prev_mtimes[x] = newldpathtime
709                         mtime_changed = True
710
711         if mtime_changed:
712                 ld_cache_update = True
713
714         if makelinks and \
715                 not ld_cache_update and \
716                 contents is not None:
717                 libdir_contents_changed = False
718                 for mypath, mydata in contents.iteritems():
719                         if mydata[0] not in ("obj","sym"):
720                                 continue
721                         head, tail = os.path.split(mypath)
722                         if head in lib_dirs:
723                                 libdir_contents_changed = True
724                                 break
725                 if not libdir_contents_changed:
726                         makelinks = False
727
728         # Only run ldconfig as needed
729         if (ld_cache_update or makelinks):
730                 # ldconfig has very different behaviour between FreeBSD and Linux
731                 if ostype=="Linux" or ostype.lower().endswith("gnu"):
732                         # We can't update links if we haven't cleaned other versions first, as
733                         # an older package installed ON TOP of a newer version will cause ldconfig
734                         # to overwrite the symlinks we just made. -X means no links. After 'clean'
735                         # we can safely create links.
736                         writemsg(">>> Regenerating %setc/ld.so.cache...\n" % target_root)
737                         if makelinks:
738                                 commands.getstatusoutput("cd / ; /sbin/ldconfig -r '%s'" % target_root)
739                         else:
740                                 commands.getstatusoutput("cd / ; /sbin/ldconfig -X -r '%s'" % target_root)
741                 elif ostype in ("FreeBSD","DragonFly"):
742                         writemsg(">>> Regenerating %svar/run/ld-elf.so.hints...\n" % target_root)
743                         commands.getstatusoutput(
744                                 "cd / ; /sbin/ldconfig -elf -i -f '%svar/run/ld-elf.so.hints' '%setc/ld.so.conf'" % \
745                                 (target_root, target_root))
746
747         del specials["LDPATH"]
748
749         penvnotice  = "# THIS FILE IS AUTOMATICALLY GENERATED BY env-update.\n"
750         penvnotice += "# DO NOT EDIT THIS FILE. CHANGES TO STARTUP PROFILES\n"
751         cenvnotice  = penvnotice[:]
752         penvnotice += "# GO INTO /etc/profile NOT /etc/profile.env\n\n"
753         cenvnotice += "# GO INTO /etc/csh.cshrc NOT /etc/csh.env\n\n"
754
755         #create /etc/profile.env for bash support
756         outfile = atomic_ofstream(os.path.join(target_root, "etc", "profile.env"))
757         outfile.write(penvnotice)
758
759         env_keys = [ x for x in env if x != "LDPATH" ]
760         env_keys.sort()
761         for x in env_keys:
762                 outfile.write("export %s='%s'\n" % (x, env[x]))
763         outfile.close()
764
765         #create /etc/csh.env for (t)csh support
766         outfile = atomic_ofstream(os.path.join(target_root, "etc", "csh.env"))
767         outfile.write(cenvnotice)
768         for x in env_keys:
769                 outfile.write("setenv %s '%s'\n" % (x, env[x]))
770         outfile.close()
771
772         if sleep_for_mtime_granularity:
773                 while current_time == long(time.time()):
774                         sleep(1)
775
776 def ExtractKernelVersion(base_dir):
777         """
778         Try to figure out what kernel version we are running
779         @param base_dir: Path to sources (usually /usr/src/linux)
780         @type base_dir: string
781         @rtype: tuple( version[string], error[string])
782         @returns:
783         1. tuple( version[string], error[string])
784         Either version or error is populated (but never both)
785
786         """
787         lines = []
788         pathname = os.path.join(base_dir, 'Makefile')
789         try:
790                 f = open(pathname, 'r')
791         except OSError, details:
792                 return (None, str(details))
793         except IOError, details:
794                 return (None, str(details))
795
796         try:
797                 for i in range(4):
798                         lines.append(f.readline())
799         except OSError, details:
800                 return (None, str(details))
801         except IOError, details:
802                 return (None, str(details))
803
804         lines = [l.strip() for l in lines]
805
806         version = ''
807
808         #XXX: The following code relies on the ordering of vars within the Makefile
809         for line in lines:
810                 # split on the '=' then remove annoying whitespace
811                 items = line.split("=")
812                 items = [i.strip() for i in items]
813                 if items[0] == 'VERSION' or \
814                         items[0] == 'PATCHLEVEL':
815                         version += items[1]
816                         version += "."
817                 elif items[0] == 'SUBLEVEL':
818                         version += items[1]
819                 elif items[0] == 'EXTRAVERSION' and \
820                         items[-1] != items[0]:
821                         version += items[1]
822
823         # Grab a list of files named localversion* and sort them
824         localversions = os.listdir(base_dir)
825         for x in range(len(localversions)-1,-1,-1):
826                 if localversions[x][:12] != "localversion":
827                         del localversions[x]
828         localversions.sort()
829
830         # Append the contents of each to the version string, stripping ALL whitespace
831         for lv in localversions:
832                 version += "".join( " ".join( grabfile( base_dir+ "/" + lv ) ).split() )
833
834         # Check the .config for a CONFIG_LOCALVERSION and append that too, also stripping whitespace
835         kernelconfig = getconfig(base_dir+"/.config")
836         if kernelconfig and kernelconfig.has_key("CONFIG_LOCALVERSION"):
837                 version += "".join(kernelconfig["CONFIG_LOCALVERSION"].split())
838
839         return (version,None)
840
841 def autouse(myvartree, use_cache=1, mysettings=None):
842         """
843         autuse returns a list of USE variables auto-enabled to packages being installed
844
845         @param myvartree: Instance of the vartree class (from /var/db/pkg...)
846         @type myvartree: vartree
847         @param use_cache: read values from cache
848         @type use_cache: Boolean
849         @param mysettings: Instance of config
850         @type mysettings: config
851         @rtype: string
852         @returns: A string containing a list of USE variables that are enabled via use.defaults
853         """
854         if mysettings is None:
855                 global settings
856                 mysettings = settings
857         if mysettings.profile_path is None:
858                 return ""
859         myusevars=""
860         usedefaults = mysettings.use_defs
861         for myuse in usedefaults:
862                 dep_met = True
863                 for mydep in usedefaults[myuse]:
864                         if not myvartree.dep_match(mydep,use_cache=True):
865                                 dep_met = False
866                                 break
867                 if dep_met:
868                         myusevars += " "+myuse
869         return myusevars
870
871 def check_config_instance(test):
872         if not test or (str(test.__class__) != 'portage.config'):
873                 raise TypeError, "Invalid type for config object: %s" % test.__class__
874
875 class config:
876         """
877         This class encompasses the main portage configuration.  Data is pulled from
878         ROOT/PORTDIR/profiles/, from ROOT/etc/make.profile incrementally through all 
879         parent profiles as well as from ROOT/PORTAGE_CONFIGROOT/* for user specified
880         overrides.
881         
882         Generally if you need data like USE flags, FEATURES, environment variables,
883         virtuals ...etc you look in here.
884         """
885         
886         def __init__(self, clone=None, mycpv=None, config_profile_path=None,
887                 config_incrementals=None, config_root=None, target_root=None,
888                 local_config=True):
889                 """
890                 @param clone: If provided, init will use deepcopy to copy by value the instance.
891                 @type clone: Instance of config class.
892                 @param mycpv: CPV to load up (see setcpv), this is the same as calling init with mycpv=None
893                 and then calling instance.setcpv(mycpv).
894                 @type mycpv: String
895                 @param config_profile_path: Configurable path to the profile (usually PROFILE_PATH from portage_const)
896                 @type config_profile_path: String
897                 @param config_incrementals: List of incremental variables (usually portage_const.INCREMENTALS)
898                 @type config_incrementals: List
899                 @param config_root: path to read local config from (defaults to "/", see PORTAGE_CONFIGROOT)
900                 @type config_root: String
901                 @param target_root: __init__ override of $ROOT env variable.
902                 @type target_root: String
903                 @param local_config: Enables loading of local config (/etc/portage); used most by repoman to
904                 ignore local config (keywording and unmasking)
905                 @type local_config: Boolean
906                 """
907
908                 debug = os.environ.get("PORTAGE_DEBUG") == "1"
909
910                 self.already_in_regenerate = 0
911
912                 self.locked   = 0
913                 self.mycpv    = None
914                 self.puse     = []
915                 self.modifiedkeys = []
916                 self.uvlist = []
917
918                 self.virtuals = {}
919                 self.virts_p = {}
920                 self.dirVirtuals = None
921                 self.v_count  = 0
922
923                 # Virtuals obtained from the vartree
924                 self.treeVirtuals = {}
925                 # Virtuals by user specification. Includes negatives.
926                 self.userVirtuals = {}
927                 # Virtual negatives from user specifications.
928                 self.negVirtuals  = {}
929
930                 self.user_profile_dir = None
931                 self.local_config = local_config
932
933                 if clone:
934                         self.incrementals = copy.deepcopy(clone.incrementals)
935                         self.profile_path = copy.deepcopy(clone.profile_path)
936                         self.user_profile_dir = copy.deepcopy(clone.user_profile_dir)
937                         self.local_config = copy.deepcopy(clone.local_config)
938
939                         self.module_priority = copy.deepcopy(clone.module_priority)
940                         self.modules         = copy.deepcopy(clone.modules)
941
942                         self.depcachedir = copy.deepcopy(clone.depcachedir)
943
944                         self.packages = copy.deepcopy(clone.packages)
945                         self.virtuals = copy.deepcopy(clone.virtuals)
946
947                         self.treeVirtuals = copy.deepcopy(clone.treeVirtuals)
948                         self.userVirtuals = copy.deepcopy(clone.userVirtuals)
949                         self.negVirtuals  = copy.deepcopy(clone.negVirtuals)
950
951                         self.use_defs = copy.deepcopy(clone.use_defs)
952                         self.usemask  = copy.deepcopy(clone.usemask)
953                         self.usemask_list = copy.deepcopy(clone.usemask_list)
954                         self.pusemask_list = copy.deepcopy(clone.pusemask_list)
955                         self.useforce      = copy.deepcopy(clone.useforce)
956                         self.useforce_list = copy.deepcopy(clone.useforce_list)
957                         self.puseforce_list = copy.deepcopy(clone.puseforce_list)
958                         self.puse     = copy.deepcopy(clone.puse)
959                         self.make_defaults_use = copy.deepcopy(clone.make_defaults_use)
960                         self.pkgprofileuse = copy.deepcopy(clone.pkgprofileuse)
961                         self.mycpv    = copy.deepcopy(clone.mycpv)
962
963                         self.configlist = copy.deepcopy(clone.configlist)
964                         self.lookuplist = self.configlist[:]
965                         self.lookuplist.reverse()
966                         self.configdict = {
967                                 "env.d":     self.configlist[0],
968                                 "pkginternal": self.configlist[1],
969                                 "globals":     self.configlist[2],
970                                 "defaults":    self.configlist[3],
971                                 "conf":        self.configlist[4],
972                                 "pkg":         self.configlist[5],
973                                 "auto":        self.configlist[6],
974                                 "backupenv":   self.configlist[7],
975                                 "env":         self.configlist[8] }
976                         self.profiles = copy.deepcopy(clone.profiles)
977                         self.backupenv  = self.configdict["backupenv"]
978                         self.pusedict   = copy.deepcopy(clone.pusedict)
979                         self.categories = copy.deepcopy(clone.categories)
980                         self.pkeywordsdict = copy.deepcopy(clone.pkeywordsdict)
981                         self.pmaskdict = copy.deepcopy(clone.pmaskdict)
982                         self.punmaskdict = copy.deepcopy(clone.punmaskdict)
983                         self.prevmaskdict = copy.deepcopy(clone.prevmaskdict)
984                         self.pprovideddict = copy.deepcopy(clone.pprovideddict)
985                         self.dirVirtuals = copy.deepcopy(clone.dirVirtuals)
986                         self.treeVirtuals = copy.deepcopy(clone.treeVirtuals)
987                         self.features = copy.deepcopy(clone.features)
988                 else:
989
990                         # backupenv is for calculated incremental variables.
991                         self.backupenv = os.environ.copy()
992                         if not local_config:
993                                 # Clean up pollution from portage_data so that it doesn't
994                                 # interfere with repoman.
995                                 self.backupenv.pop("USERLAND", None)
996
997                         def check_var_directory(varname, var):
998                                 if not os.path.isdir(var):
999                                         writemsg(("!!! Error: %s='%s' is not a directory. " + \
1000                                                 "Please correct this.\n") % (varname, var),
1001                                                 noiselevel=-1)
1002                                         raise portage_exception.DirectoryNotFound(var)
1003
1004                         if config_root is None:
1005                                 config_root = "/"
1006
1007                         config_root = normalize_path(os.path.abspath(
1008                                 config_root)).rstrip(os.path.sep) + os.path.sep
1009
1010                         check_var_directory("PORTAGE_CONFIGROOT", config_root)
1011
1012                         self.depcachedir = DEPCACHE_PATH
1013
1014                         if not config_profile_path:
1015                                 config_profile_path = \
1016                                         os.path.join(config_root, PROFILE_PATH.lstrip(os.path.sep))
1017                                 if os.path.isdir(config_profile_path):
1018                                         self.profile_path = config_profile_path
1019                                 else:
1020                                         self.profile_path = None
1021                         else:
1022                                 self.profile_path = config_profile_path[:]
1023
1024                         if not config_incrementals:
1025                                 writemsg("incrementals not specified to class config\n")
1026                                 self.incrementals = copy.deepcopy(portage_const.INCREMENTALS)
1027                         else:
1028                                 self.incrementals = copy.deepcopy(config_incrementals)
1029
1030                         self.module_priority    = ["user","default"]
1031                         self.modules            = {}
1032                         self.modules["user"] = getconfig(
1033                                 os.path.join(config_root, MODULES_FILE_PATH.lstrip(os.path.sep)))
1034                         if self.modules["user"] is None:
1035                                 self.modules["user"] = {}
1036                         self.modules["default"] = {
1037                                 "portdbapi.metadbmodule": "cache.metadata.database",
1038                                 "portdbapi.auxdbmodule":  "cache.flat_hash.database",
1039                         }
1040
1041                         self.usemask=[]
1042                         self.configlist=[]
1043
1044                         # back up our incremental variables:
1045                         self.configdict={}
1046                         # configlist will contain: [ env.d, globals, defaults, conf, pkg, auto, backupenv, env ]
1047                         self.configlist.append({})
1048                         self.configdict["env.d"] = self.configlist[-1]
1049
1050                         self.configlist.append({})
1051                         self.configdict["pkginternal"] = self.configlist[-1]
1052
1053                         # The symlink might not exist or might not be a symlink.
1054                         if self.profile_path is None:
1055                                 self.profiles = []
1056                         else:
1057                                 self.profiles = []
1058                                 def addProfile(currentPath):
1059                                         parentsFile = os.path.join(currentPath, "parent")
1060                                         if os.path.exists(parentsFile):
1061                                                 parents = grabfile(parentsFile)
1062                                                 if not parents:
1063                                                         raise portage_exception.ParseError(
1064                                                                 "Empty parent file: '%s'" % parents_file)
1065                                                 for parentPath in parents:
1066                                                         parentPath = normalize_path(os.path.join(
1067                                                                 currentPath, parentPath))
1068                                                         if os.path.exists(parentPath):
1069                                                                 addProfile(parentPath)
1070                                                         else:
1071                                                                 raise portage_exception.ParseError(
1072                                                                         "Parent '%s' not found: '%s'" %  \
1073                                                                         (parentPath, parentsFile))
1074                                         self.profiles.append(currentPath)
1075                                 addProfile(os.path.realpath(self.profile_path))
1076                         if local_config:
1077                                 custom_prof = os.path.join(
1078                                         config_root, CUSTOM_PROFILE_PATH.lstrip(os.path.sep))
1079                                 if os.path.exists(custom_prof):
1080                                         self.user_profile_dir = custom_prof
1081                                         self.profiles.append(custom_prof)
1082                                 del custom_prof
1083
1084                         self.packages_list = [grabfile_package(os.path.join(x, "packages")) for x in self.profiles]
1085                         self.packages      = stack_lists(self.packages_list, incremental=1)
1086                         del self.packages_list
1087                         #self.packages = grab_stacked("packages", self.profiles, grabfile, incremental_lines=1)
1088
1089                         # revmaskdict
1090                         self.prevmaskdict={}
1091                         for x in self.packages:
1092                                 mycatpkg=dep_getkey(x)
1093                                 if not self.prevmaskdict.has_key(mycatpkg):
1094                                         self.prevmaskdict[mycatpkg]=[x]
1095                                 else:
1096                                         self.prevmaskdict[mycatpkg].append(x)
1097
1098                         # get profile-masked use flags -- INCREMENTAL Child over parent
1099                         self.usemask_list = [grabfile(os.path.join(x, "use.mask")) \
1100                                 for x in self.profiles]
1101                         self.usemask  = set(stack_lists(
1102                                 self.usemask_list, incremental=True))
1103                         use_defs_lists = [grabdict(os.path.join(x, "use.defaults")) for x in self.profiles]
1104                         self.use_defs  = stack_dictlist(use_defs_lists, incremental=True)
1105                         del use_defs_lists
1106
1107                         self.pusemask_list = []
1108                         rawpusemask = [grabdict_package(
1109                                 os.path.join(x, "package.use.mask")) \
1110                                 for x in self.profiles]
1111                         for i in xrange(len(self.profiles)):
1112                                 cpdict = {}
1113                                 for k, v in rawpusemask[i].iteritems():
1114                                         cpdict.setdefault(dep_getkey(k), {})[k] = v
1115                                 self.pusemask_list.append(cpdict)
1116                         del rawpusemask
1117
1118                         self.pkgprofileuse = []
1119                         rawprofileuse = [grabdict_package(
1120                                 os.path.join(x, "package.use"), juststrings=True) \
1121                                 for x in self.profiles]
1122                         for i in xrange(len(self.profiles)):
1123                                 cpdict = {}
1124                                 for k, v in rawprofileuse[i].iteritems():
1125                                         cpdict.setdefault(dep_getkey(k), {})[k] = v
1126                                 self.pkgprofileuse.append(cpdict)
1127                         del rawprofileuse
1128
1129                         self.useforce_list = [grabfile(os.path.join(x, "use.force")) \
1130                                 for x in self.profiles]
1131                         self.useforce  = set(stack_lists(
1132                                 self.useforce_list, incremental=True))
1133
1134                         self.puseforce_list = []
1135                         rawpuseforce = [grabdict_package(
1136                                 os.path.join(x, "package.use.force")) \
1137                                 for x in self.profiles]
1138                         for i in xrange(len(self.profiles)):
1139                                 cpdict = {}
1140                                 for k, v in rawpuseforce[i].iteritems():
1141                                         cpdict.setdefault(dep_getkey(k), {})[k] = v
1142                                 self.puseforce_list.append(cpdict)
1143                         del rawpuseforce
1144
1145                         try:
1146                                 self.mygcfg   = getconfig(os.path.join(config_root, "etc", "make.globals"))
1147
1148                                 if self.mygcfg is None:
1149                                         self.mygcfg = {}
1150                         except SystemExit, e:
1151                                 raise
1152                         except Exception, e:
1153                                 if debug:
1154                                         raise
1155                                 writemsg("!!! %s\n" % (e), noiselevel=-1)
1156                                 if not isinstance(e, EnvironmentError):
1157                                         writemsg("!!! Incorrect multiline literals can cause " + \
1158                                                 "this. Do not use them.\n", noiselevel=-1)
1159                                 sys.exit(1)
1160                         self.configlist.append(self.mygcfg)
1161                         self.configdict["globals"]=self.configlist[-1]
1162
1163                         self.make_defaults_use = []
1164                         self.mygcfg = {}
1165                         if self.profiles:
1166                                 try:
1167                                         mygcfg_dlists = [getconfig(os.path.join(x, "make.defaults")) for x in self.profiles]
1168                                         for cfg in mygcfg_dlists:
1169                                                 if cfg:
1170                                                         self.make_defaults_use.append(cfg.get("USE", ""))
1171                                                 else:
1172                                                         self.make_defaults_use.append("")
1173                                         self.mygcfg   = stack_dicts(mygcfg_dlists, incrementals=portage_const.INCREMENTALS, ignore_none=1)
1174                                         #self.mygcfg = grab_stacked("make.defaults", self.profiles, getconfig)
1175                                         if self.mygcfg is None:
1176                                                 self.mygcfg = {}
1177                                 except SystemExit, e:
1178                                         raise
1179                                 except Exception, e:
1180                                         if debug:
1181                                                 raise
1182                                         writemsg("!!! %s\n" % (e), noiselevel=-1)
1183                                         if not isinstance(e, EnvironmentError):
1184                                                 writemsg("!!! 'rm -Rf /usr/portage/profiles; " + \
1185                                                         "emerge sync' may fix this. If it does\n",
1186                                                         noiselevel=-1)
1187                                                 writemsg("!!! not then please report this to " + \
1188                                                         "bugs.gentoo.org and, if possible, a dev\n",
1189                                                                 noiselevel=-1)
1190                                                 writemsg("!!! on #gentoo (irc.freenode.org)\n",
1191                                                         noiselevel=-1)
1192                                         sys.exit(1)
1193                         self.configlist.append(self.mygcfg)
1194                         self.configdict["defaults"]=self.configlist[-1]
1195
1196                         try:
1197                                 self.mygcfg = getconfig(
1198                                         os.path.join(config_root, MAKE_CONF_FILE.lstrip(os.path.sep)),
1199                                         allow_sourcing=True)
1200                                 if self.mygcfg is None:
1201                                         self.mygcfg = {}
1202                         except SystemExit, e:
1203                                 raise
1204                         except Exception, e:
1205                                 if debug:
1206                                         raise
1207                                 writemsg("!!! %s\n" % (e), noiselevel=-1)
1208                                 if not isinstance(e, EnvironmentError):
1209                                         writemsg("!!! Incorrect multiline literals can cause " + \
1210                                                 "this. Do not use them.\n", noiselevel=-1)
1211                                 sys.exit(1)
1212
1213                         # Allow ROOT setting to come from make.conf if it's not overridden
1214                         # by the constructor argument (from the calling environment).  As a
1215                         # special exception for a very common use case, config_root == "/"
1216                         # implies that ROOT in make.conf should be ignored.  That way, the
1217                         # user can chroot into $ROOT and the ROOT setting in make.conf will
1218                         # be automatically ignored (unless config_root is other than "/").
1219                         if config_root != "/" and \
1220                                 target_root is None and "ROOT" in self.mygcfg:
1221                                 target_root = self.mygcfg["ROOT"]
1222                         
1223                         self.configlist.append(self.mygcfg)
1224                         self.configdict["conf"]=self.configlist[-1]
1225
1226                         self.configlist.append({})
1227                         self.configdict["pkg"]=self.configlist[-1]
1228
1229                         #auto-use:
1230                         self.configlist.append({})
1231                         self.configdict["auto"]=self.configlist[-1]
1232
1233                         self.configlist.append(self.backupenv) # XXX Why though?
1234                         self.configdict["backupenv"]=self.configlist[-1]
1235
1236                         self.configlist.append(os.environ.copy())
1237                         self.configdict["env"]=self.configlist[-1]
1238                         if not local_config:
1239                                 # Clean up pollution from portage_data so that it doesn't
1240                                 # interfere with repoman.
1241                                 self.configdict["env"].pop("USERLAND", None)
1242
1243                         # make lookuplist for loading package.*
1244                         self.lookuplist=self.configlist[:]
1245                         self.lookuplist.reverse()
1246
1247                         # Blacklist vars that could interfere with portage internals.
1248                         for blacklisted in "CATEGORY", "PKGUSE", "PORTAGE_CONFIGROOT", \
1249                                 "ROOT":
1250                                 for cfg in self.lookuplist:
1251                                         cfg.pop(blacklisted, None)
1252                         del blacklisted, cfg
1253
1254                         if target_root is None:
1255                                 target_root = "/"
1256
1257                         target_root = normalize_path(os.path.abspath(
1258                                 target_root)).rstrip(os.path.sep) + os.path.sep
1259
1260                         check_var_directory("ROOT", target_root)
1261
1262                         env_d = getconfig(
1263                                 os.path.join(target_root, "etc", "profile.env"), expand=False)
1264                         # env_d will be None if profile.env doesn't exist.
1265                         if env_d:
1266                                 self.configdict["env.d"].update(env_d)
1267                                 # Remove duplicate values so they don't override updated
1268                                 # profile.env values later (profile.env is reloaded in each
1269                                 # call to self.regenerate).
1270                                 for cfg in (self.configdict["backupenv"],
1271                                         self.configdict["env"]):
1272                                         for k, v in env_d.iteritems():
1273                                                 try:
1274                                                         if cfg[k] == v:
1275                                                                 del cfg[k]
1276                                                 except KeyError:
1277                                                         pass
1278                                 del cfg, k, v
1279
1280                         self["PORTAGE_CONFIGROOT"] = config_root
1281                         self.backup_changes("PORTAGE_CONFIGROOT")
1282                         self["ROOT"] = target_root
1283                         self.backup_changes("ROOT")
1284
1285                         self.pusedict = {}
1286                         self.pkeywordsdict = {}
1287                         self.punmaskdict = {}
1288                         abs_user_config = os.path.join(config_root,
1289                                 USER_CONFIG_PATH.lstrip(os.path.sep))
1290
1291                         # locations for "categories" and "arch.list" files
1292                         locations = [os.path.join(self["PORTDIR"], "profiles")]
1293                         pmask_locations = [os.path.join(self["PORTDIR"], "profiles")]
1294                         pmask_locations.extend(self.profiles)
1295
1296                         """ repoman controls PORTDIR_OVERLAY via the environment, so no
1297                         special cases are needed here."""
1298                         overlay_profiles = []
1299                         for ov in self["PORTDIR_OVERLAY"].split():
1300                                 ov = normalize_path(ov)
1301                                 profiles_dir = os.path.join(ov, "profiles")
1302                                 if os.path.isdir(profiles_dir):
1303                                         overlay_profiles.append(profiles_dir)
1304                         locations += overlay_profiles
1305                         
1306                         pmask_locations.extend(overlay_profiles)
1307
1308                         if local_config:
1309                                 locations.append(abs_user_config)
1310                                 pmask_locations.append(abs_user_config)
1311                                 pusedict = grabdict_package(
1312                                         os.path.join(abs_user_config, "package.use"), recursive=1)
1313                                 for key in pusedict.keys():
1314                                         cp = dep_getkey(key)
1315                                         if not self.pusedict.has_key(cp):
1316                                                 self.pusedict[cp] = {}
1317                                         self.pusedict[cp][key] = pusedict[key]
1318
1319                                 #package.keywords
1320                                 pkgdict = grabdict_package(
1321                                         os.path.join(abs_user_config, "package.keywords"),
1322                                         recursive=1)
1323                                 for key in pkgdict.keys():
1324                                         # default to ~arch if no specific keyword is given
1325                                         if not pkgdict[key]:
1326                                                 mykeywordlist = []
1327                                                 if self.configdict["defaults"] and self.configdict["defaults"].has_key("ACCEPT_KEYWORDS"):
1328                                                         groups = self.configdict["defaults"]["ACCEPT_KEYWORDS"].split()
1329                                                 else:
1330                                                         groups = []
1331                                                 for keyword in groups:
1332                                                         if not keyword[0] in "~-":
1333                                                                 mykeywordlist.append("~"+keyword)
1334                                                 pkgdict[key] = mykeywordlist
1335                                         cp = dep_getkey(key)
1336                                         if not self.pkeywordsdict.has_key(cp):
1337                                                 self.pkeywordsdict[cp] = {}
1338                                         self.pkeywordsdict[cp][key] = pkgdict[key]
1339
1340                                 #package.unmask
1341                                 pkgunmasklines = grabfile_package(
1342                                         os.path.join(abs_user_config, "package.unmask"),
1343                                         recursive=1)
1344                                 for x in pkgunmasklines:
1345                                         mycatpkg=dep_getkey(x)
1346                                         if self.punmaskdict.has_key(mycatpkg):
1347                                                 self.punmaskdict[mycatpkg].append(x)
1348                                         else:
1349                                                 self.punmaskdict[mycatpkg]=[x]
1350
1351                         #getting categories from an external file now
1352                         categories = [grabfile(os.path.join(x, "categories")) for x in locations]
1353                         self.categories = stack_lists(categories, incremental=1)
1354                         del categories
1355
1356                         archlist = [grabfile(os.path.join(x, "arch.list")) for x in locations]
1357                         archlist = stack_lists(archlist, incremental=1)
1358                         self.configdict["conf"]["PORTAGE_ARCHLIST"] = " ".join(archlist)
1359
1360                         #package.mask
1361                         pkgmasklines = []
1362                         for x in pmask_locations:
1363                                 pkgmasklines.append(grabfile_package(
1364                                         os.path.join(x, "package.mask"), recursive=1))
1365                         pkgmasklines = stack_lists(pkgmasklines, incremental=1)
1366
1367                         self.pmaskdict = {}
1368                         for x in pkgmasklines:
1369                                 mycatpkg=dep_getkey(x)
1370                                 if self.pmaskdict.has_key(mycatpkg):
1371                                         self.pmaskdict[mycatpkg].append(x)
1372                                 else:
1373                                         self.pmaskdict[mycatpkg]=[x]
1374
1375                         pkgprovidedlines = [grabfile(os.path.join(x, "package.provided")) for x in self.profiles]
1376                         pkgprovidedlines = stack_lists(pkgprovidedlines, incremental=1)
1377                         has_invalid_data = False
1378                         for x in range(len(pkgprovidedlines)-1, -1, -1):
1379                                 myline = pkgprovidedlines[x]
1380                                 if not isvalidatom("=" + myline):
1381                                         writemsg("Invalid package name in package.provided:" + \
1382                                                 " %s\n" % myline, noiselevel=-1)
1383                                         has_invalid_data = True
1384                                         del pkgprovidedlines[x]
1385                                         continue
1386                                 cpvr = catpkgsplit(pkgprovidedlines[x])
1387                                 if not cpvr or cpvr[0] == "null":
1388                                         writemsg("Invalid package name in package.provided: "+pkgprovidedlines[x]+"\n",
1389                                                 noiselevel=-1)
1390                                         has_invalid_data = True
1391                                         del pkgprovidedlines[x]
1392                                         continue
1393                                 if cpvr[0] == "virtual":
1394                                         writemsg("Virtual package in package.provided: %s\n" % \
1395                                                 myline, noiselevel=-1)
1396                                         has_invalid_data = True
1397                                         del pkgprovidedlines[x]
1398                                         continue
1399                         if has_invalid_data:
1400                                 writemsg("See portage(5) for correct package.provided usage.\n",
1401                                         noiselevel=-1)
1402                         self.pprovideddict = {}
1403                         for x in pkgprovidedlines:
1404                                 cpv=catpkgsplit(x)
1405                                 if not x:
1406                                         continue
1407                                 mycatpkg=dep_getkey(x)
1408                                 if self.pprovideddict.has_key(mycatpkg):
1409                                         self.pprovideddict[mycatpkg].append(x)
1410                                 else:
1411                                         self.pprovideddict[mycatpkg]=[x]
1412
1413                         # reasonable defaults; this is important as without USE_ORDER,
1414                         # USE will always be "" (nothing set)!
1415                         if "USE_ORDER" not in self:
1416                                 self.backupenv["USE_ORDER"] = "env:pkg:conf:defaults:pkginternal"
1417
1418                         self["PORTAGE_GID"] = str(portage_gid)
1419                         self.backup_changes("PORTAGE_GID")
1420
1421                         if self.get("PORTAGE_DEPCACHEDIR", None):
1422                                 self.depcachedir = self["PORTAGE_DEPCACHEDIR"]
1423                         self["PORTAGE_DEPCACHEDIR"] = self.depcachedir
1424                         self.backup_changes("PORTAGE_DEPCACHEDIR")
1425
1426                         overlays = self.get("PORTDIR_OVERLAY","").split()
1427                         if overlays:
1428                                 new_ov = []
1429                                 for ov in overlays:
1430                                         ov = normalize_path(ov)
1431                                         if os.path.isdir(ov):
1432                                                 new_ov.append(ov)
1433                                         else:
1434                                                 writemsg("!!! Invalid PORTDIR_OVERLAY" + \
1435                                                         " (not a dir): '%s'\n" % ov, noiselevel=-1)
1436                                 self["PORTDIR_OVERLAY"] = " ".join(new_ov)
1437                                 self.backup_changes("PORTDIR_OVERLAY")
1438
1439                         if "CBUILD" not in self and "CHOST" in self:
1440                                 self["CBUILD"] = self["CHOST"]
1441                                 self.backup_changes("CBUILD")
1442
1443                         self["PORTAGE_BIN_PATH"] = PORTAGE_BIN_PATH
1444                         self.backup_changes("PORTAGE_BIN_PATH")
1445                         self["PORTAGE_PYM_PATH"] = PORTAGE_PYM_PATH
1446                         self.backup_changes("PORTAGE_PYM_PATH")
1447
1448                         for var in ("PORTAGE_INST_UID", "PORTAGE_INST_GID"):
1449                                 try:
1450                                         self[var] = str(int(self.get(var, "0")))
1451                                 except ValueError:
1452                                         writemsg(("!!! %s='%s' is not a valid integer.  " + \
1453                                                 "Falling back to '0'.\n") % (var, self[var]),
1454                                                 noiselevel=-1)
1455                                         self[var] = "0"
1456                                 self.backup_changes(var)
1457
1458                         self.regenerate()
1459                         self.features = portage_util.unique_array(self["FEATURES"].split())
1460
1461                         if "gpg" in self.features:
1462                                 if not os.path.exists(self["PORTAGE_GPG_DIR"]) or \
1463                                         not os.path.isdir(self["PORTAGE_GPG_DIR"]):
1464                                         writemsg(colorize("BAD", "PORTAGE_GPG_DIR is invalid." + \
1465                                                 " Removing gpg from FEATURES.\n"), noiselevel=-1)
1466                                         self.features.remove("gpg")
1467
1468                         if not portage_exec.sandbox_capable and \
1469                                 ("sandbox" in self.features or "usersandbox" in self.features):
1470                                 if self.profile_path is not None and \
1471                                         os.path.realpath(self.profile_path) == \
1472                                         os.path.realpath(PROFILE_PATH):
1473                                         """ Don't show this warning when running repoman and the
1474                                         sandbox feature came from a profile that doesn't belong to
1475                                         the user."""
1476                                         writemsg(colorize("BAD", "!!! Problem with sandbox" + \
1477                                                 " binary. Disabling...\n\n"), noiselevel=-1)
1478                                 if "sandbox" in self.features:
1479                                         self.features.remove("sandbox")
1480                                 if "usersandbox" in self.features:
1481                                         self.features.remove("usersandbox")
1482
1483                         self.features.sort()
1484                         self["FEATURES"] = " ".join(self.features)
1485                         self.backup_changes("FEATURES")
1486
1487                         self._init_dirs()
1488
1489                 if mycpv:
1490                         self.setcpv(mycpv)
1491
1492         def _init_dirs(self):
1493                 """
1494                 Create a few directories that are critical to portage operation
1495                 """
1496                 if not os.access(self["ROOT"], os.W_OK):
1497                         return
1498
1499                 dir_mode_map = {
1500                         "tmp"             :(-1,          01777, 0),
1501                         "var/tmp"         :(-1,          01777, 0),
1502                         "var/lib/portage" :(portage_gid, 02750, 02),
1503                         "var/cache/edb"   :(portage_gid,  0755, 02)
1504                 }
1505
1506                 for mypath, (gid, mode, modemask) in dir_mode_map.iteritems():
1507                         try:
1508                                 mydir = os.path.join(self["ROOT"], mypath)
1509                                 portage_util.ensure_dirs(mydir, gid=gid, mode=mode, mask=modemask)
1510                         except portage_exception.PortageException, e:
1511                                 writemsg("!!! Directory initialization failed: '%s'\n" % mydir,
1512                                         noiselevel=-1)
1513                                 writemsg("!!! %s\n" % str(e),
1514                                         noiselevel=-1)
1515
1516         def validate(self):
1517                 """Validate miscellaneous settings and display warnings if necessary.
1518                 (This code was previously in the global scope of portage.py)"""
1519
1520                 groups = self["ACCEPT_KEYWORDS"].split()
1521                 archlist = self.archlist()
1522                 if not archlist:
1523                         writemsg("--- 'profiles/arch.list' is empty or not available. Empty portage tree?\n")
1524                 else:
1525                         for group in groups:
1526                                 if group not in archlist and group[0] != '-':
1527                                         writemsg("!!! INVALID ACCEPT_KEYWORDS: %s\n" % str(group),
1528                                                 noiselevel=-1)
1529
1530                 abs_profile_path = os.path.join(self["PORTAGE_CONFIGROOT"],
1531                         PROFILE_PATH.lstrip(os.path.sep))
1532                 if not os.path.islink(abs_profile_path) and \
1533                         not os.path.exists(os.path.join(abs_profile_path, "parent")) and \
1534                         os.path.exists(os.path.join(self["PORTDIR"], "profiles")):
1535                         writemsg("\a\n\n!!! %s is not a symlink and will probably prevent most merges.\n" % abs_profile_path,
1536                                 noiselevel=-1)
1537                         writemsg("!!! It should point into a profile within %s/profiles/\n" % self["PORTDIR"])
1538                         writemsg("!!! (You can safely ignore this message when syncing. It's harmless.)\n\n\n")
1539
1540                 abs_user_virtuals = os.path.join(self["PORTAGE_CONFIGROOT"],
1541                         USER_VIRTUALS_FILE.lstrip(os.path.sep))
1542                 if os.path.exists(abs_user_virtuals):
1543                         writemsg("\n!!! /etc/portage/virtuals is deprecated in favor of\n")
1544                         writemsg("!!! /etc/portage/profile/virtuals. Please move it to\n")
1545                         writemsg("!!! this new location.\n\n")
1546
1547         def loadVirtuals(self,root):
1548                 """Not currently used by portage."""
1549                 writemsg("DEPRECATED: portage.config.loadVirtuals\n")
1550                 self.getvirtuals(root)
1551
1552         def load_best_module(self,property_string):
1553                 best_mod = best_from_dict(property_string,self.modules,self.module_priority)
1554                 try:
1555                         mod = load_mod(best_mod)
1556                 except ImportError:
1557                         dump_traceback(red("Error: Failed to import module '%s'") % best_mod, noiselevel=0)
1558                         sys.exit(1)
1559                 return mod
1560
1561         def lock(self):
1562                 self.locked = 1
1563
1564         def unlock(self):
1565                 self.locked = 0
1566
1567         def modifying(self):
1568                 if self.locked:
1569                         raise Exception, "Configuration is locked."
1570
1571         def backup_changes(self,key=None):
1572                 self.modifying()
1573                 if key and self.configdict["env"].has_key(key):
1574                         self.backupenv[key] = copy.deepcopy(self.configdict["env"][key])
1575                 else:
1576                         raise KeyError, "No such key defined in environment: %s" % key
1577
1578         def reset(self,keeping_pkg=0,use_cache=1):
1579                 """
1580                 Restore environment from self.backupenv, call self.regenerate()
1581                 @param keeping_pkg: Should we keep the set_cpv() data or delete it.
1582                 @type keeping_pkg: Boolean
1583                 @param use_cache: Should self.regenerate use the cache or not
1584                 @type use_cache: Boolean
1585                 @rype: None
1586                 """
1587                 self.modifying()
1588                 self.configdict["env"].clear()
1589                 self.configdict["env"].update(self.backupenv)
1590
1591                 self.modifiedkeys = []
1592                 if not keeping_pkg:
1593                         self.mycpv = None
1594                         self.puse = ""
1595                         self.configdict["pkg"].clear()
1596                         self.configdict["pkginternal"].clear()
1597                         self.configdict["defaults"]["USE"] = \
1598                                 " ".join(self.make_defaults_use)
1599                         self.usemask  = set(stack_lists(
1600                                 self.usemask_list, incremental=True))
1601                         self.useforce  = set(stack_lists(
1602                                 self.useforce_list, incremental=True))
1603                 self.regenerate(use_cache=use_cache)
1604
1605         def load_infodir(self,infodir):
1606                 self.modifying()
1607                 if self.configdict.has_key("pkg"):
1608                         for x in self.configdict["pkg"].keys():
1609                                 del self.configdict["pkg"][x]
1610                 else:
1611                         writemsg("No pkg setup for settings instance?\n",
1612                                 noiselevel=-1)
1613                         sys.exit(17)
1614
1615                 if os.path.exists(infodir):
1616                         if os.path.exists(infodir+"/environment"):
1617                                 self.configdict["pkg"]["PORT_ENV_FILE"] = infodir+"/environment"
1618
1619                         myre = re.compile('^[A-Z]+$')
1620                         null_byte = "\0"
1621                         for filename in listdir(infodir,filesonly=1,EmptyOnError=1):
1622                                 if filename == "FEATURES":
1623                                         # FEATURES from the build host shouldn't be interpreted as
1624                                         # FEATURES on the client system.
1625                                         continue
1626                                 if myre.match(filename):
1627                                         try:
1628                                                 file_path = os.path.join(infodir, filename)
1629                                                 mydata = open(file_path).read().strip()
1630                                                 if len(mydata) < 2048 or filename == "USE":
1631                                                         if null_byte in mydata:
1632                                                                 writemsg("!!! Null byte found in metadata " + \
1633                                                                         "file: '%s'\n" % file_path, noiselevel=-1)
1634                                                                 continue
1635                                                         if filename == "USE":
1636                                                                 binpkg_flags = "-* " + mydata
1637                                                                 self.configdict["pkg"][filename] = binpkg_flags
1638                                                                 self.configdict["env"][filename] = mydata
1639                                                         else:
1640                                                                 self.configdict["pkg"][filename] = mydata
1641                                                                 self.configdict["env"][filename] = mydata
1642                                                 # CATEGORY is important because it's used in doebuild
1643                                                 # to infer the cpv.  If it's corrupted, it leads to
1644                                                 # strange errors later on, so we'll validate it and
1645                                                 # print a warning if necessary.
1646                                                 if filename == "CATEGORY":
1647                                                         matchobj = re.match("[-a-zA-Z0-9_.+]+", mydata)
1648                                                         if not matchobj or matchobj.start() != 0 or \
1649                                                                 matchobj.end() != len(mydata):
1650                                                                 writemsg("!!! CATEGORY file is corrupt: %s\n" % \
1651                                                                         os.path.join(infodir, filename), noiselevel=-1)
1652                                         except (OSError, IOError):
1653                                                 writemsg("!!! Unable to read file: %s\n" % infodir+"/"+filename,
1654                                                         noiselevel=-1)
1655                                                 pass
1656                         return 1
1657                 return 0
1658
1659         def setcpv(self, mycpv, use_cache=1, mydb=None):
1660                 """
1661                 Load a particular CPV into the config, this lets us see the
1662                 Default USE flags for a particular ebuild as well as the USE
1663                 flags from package.use.
1664
1665                 @param mycpv: A cpv to load
1666                 @type mycpv: string
1667                 @param use_cache: Enables caching
1668                 @type use_cache: Boolean
1669                 @param mydb: a dbapi instance that supports aux_get with the IUSE key.
1670                 @type mydb: dbapi or derivative.
1671                 @rtype: None
1672                 """
1673
1674                 self.modifying()
1675                 if self.mycpv == mycpv:
1676                         return
1677                 has_changed = False
1678                 self.mycpv = mycpv
1679                 cp = dep_getkey(mycpv)
1680                 cpv_slot = self.mycpv
1681                 pkginternaluse = ""
1682                 if mydb:
1683                         slot, iuse = mydb.aux_get(self.mycpv, ["SLOT", "IUSE"])
1684                         cpv_slot = "%s:%s" % (self.mycpv, slot)
1685                         pkginternaluse = []
1686                         for x in iuse.split():
1687                                 if x.startswith("+"):
1688                                         pkginternaluse.append(x[1:])
1689                                 elif x.startswith("-"):
1690                                         pkginternaluse.append(x)
1691                         pkginternaluse = " ".join(pkginternaluse)
1692                 if pkginternaluse != self.configdict["pkginternal"].get("USE", ""):
1693                         self.configdict["pkginternal"]["USE"] = pkginternaluse
1694                         has_changed = True
1695                 defaults = []
1696                 for i in xrange(len(self.profiles)):
1697                         defaults.append(self.make_defaults_use[i])
1698                         cpdict = self.pkgprofileuse[i].get(cp, None)
1699                         if cpdict:
1700                                 best_match = best_match_to_list(cpv_slot, cpdict.keys())
1701                                 if best_match:
1702                                         defaults.append(cpdict[best_match])
1703                 defaults = " ".join(defaults)
1704                 if defaults != self.configdict["defaults"].get("USE",""):
1705                         self.configdict["defaults"]["USE"] = defaults
1706                         has_changed = True
1707                 useforce = []
1708                 for i in xrange(len(self.profiles)):
1709                         useforce.append(self.useforce_list[i])
1710                         cpdict = self.puseforce_list[i].get(cp, None)
1711                         if cpdict:
1712                                 best_match = best_match_to_list(cpv_slot, cpdict.keys())
1713                                 if best_match:
1714                                         useforce.append(cpdict[best_match])
1715                 useforce = set(stack_lists(useforce, incremental=True))
1716                 if useforce != self.useforce:
1717                         self.useforce = useforce
1718                         has_changed = True
1719                 usemask = []
1720                 for i in xrange(len(self.profiles)):
1721                         usemask.append(self.usemask_list[i])
1722                         cpdict = self.pusemask_list[i].get(cp, None)
1723                         if cpdict:
1724                                 best_match = best_match_to_list(cpv_slot, cpdict.keys())
1725                                 if best_match:
1726                                         usemask.append(cpdict[best_match])
1727                 usemask = set(stack_lists(usemask, incremental=True))
1728                 if usemask != self.usemask:
1729                         self.usemask = usemask
1730                         has_changed = True
1731                 oldpuse = self.puse
1732                 self.puse = ""
1733                 cpdict = self.pusedict.get(cp)
1734                 if cpdict:
1735                         self.pusekey = best_match_to_list(cpv_slot, cpdict.keys())
1736                         if self.pusekey:
1737                                 self.puse = " ".join(cpdict[self.pusekey])
1738                 if oldpuse != self.puse:
1739                         has_changed = True
1740                 self.configdict["pkg"]["PKGUSE"] = self.puse[:] # For saving to PUSE file
1741                 self.configdict["pkg"]["USE"]    = self.puse[:] # this gets appended to USE
1742                 # CATEGORY is essential for doebuild calls
1743                 self.configdict["pkg"]["CATEGORY"] = mycpv.split("/")[0]
1744                 if has_changed:
1745                         self.reset(keeping_pkg=1,use_cache=use_cache)
1746
1747         def setinst(self,mycpv,mydbapi):
1748                 self.modifying()
1749                 if len(self.virtuals) == 0:
1750                         self.getvirtuals()
1751                 # Grab the virtuals this package provides and add them into the tree virtuals.
1752                 provides = mydbapi.aux_get(mycpv, ["PROVIDE"])[0]
1753                 if isinstance(mydbapi, portdbapi):
1754                         myuse = self["USE"]
1755                 else:
1756                         myuse = mydbapi.aux_get(mycpv, ["USE"])[0]
1757                 virts = flatten(portage_dep.use_reduce(portage_dep.paren_reduce(provides), uselist=myuse.split()))
1758
1759                 cp = dep_getkey(mycpv)
1760                 for virt in virts:
1761                         virt = dep_getkey(virt)
1762                         if not self.treeVirtuals.has_key(virt):
1763                                 self.treeVirtuals[virt] = []
1764                         # XXX: Is this bad? -- It's a permanent modification
1765                         if cp not in self.treeVirtuals[virt]:
1766                                 self.treeVirtuals[virt].append(cp)
1767
1768                 self.virtuals = self.__getvirtuals_compile()
1769
1770
1771         def regenerate(self,useonly=0,use_cache=1):
1772                 """
1773                 Regenerate settings
1774                 This involves regenerating valid USE flags, re-expanding USE_EXPAND flags
1775                 re-stacking USE flags (-flag and -*), as well as any other INCREMENTAL
1776                 variables.  This also updates the env.d configdict; useful in case an ebuild
1777                 changes the environment.
1778
1779                 If FEATURES has already stacked, it is not stacked twice.
1780
1781                 @param useonly: Only regenerate USE flags (not any other incrementals)
1782                 @type useonly: Boolean
1783                 @param use_cache: Enable Caching (only for autouse)
1784                 @type use_cache: Boolean
1785                 @rtype: None
1786                 """
1787
1788                 self.modifying()
1789                 if self.already_in_regenerate:
1790                         # XXX: THIS REALLY NEEDS TO GET FIXED. autouse() loops.
1791                         writemsg("!!! Looping in regenerate.\n",1)
1792                         return
1793                 else:
1794                         self.already_in_regenerate = 1
1795
1796                 # We grab the latest profile.env here since it changes frequently.
1797                 self.configdict["env.d"].clear()
1798                 env_d = getconfig(
1799                         os.path.join(self["ROOT"], "etc", "profile.env"), expand=False)
1800                 if env_d:
1801                         # env_d will be None if profile.env doesn't exist.
1802                         self.configdict["env.d"].update(env_d)
1803
1804                 if useonly:
1805                         myincrementals=["USE"]
1806                 else:
1807                         myincrementals = self.incrementals
1808                 myincrementals = set(myincrementals)
1809                 # If self.features exists, it has already been stacked and may have
1810                 # been mutated, so don't stack it again or else any mutations will be
1811                 # reverted.
1812                 if "FEATURES" in myincrementals and hasattr(self, "features"):
1813                         myincrementals.remove("FEATURES")
1814
1815                 if "USE" in myincrementals:
1816                         # Process USE last because it depends on USE_EXPAND which is also
1817                         # an incremental!
1818                         myincrementals.remove("USE")
1819
1820                 for mykey in myincrementals:
1821
1822                         mydbs=self.configlist[:-1]
1823
1824                         myflags=[]
1825                         for curdb in mydbs:
1826                                 if mykey not in curdb:
1827                                         continue
1828                                 #variables are already expanded
1829                                 mysplit = curdb[mykey].split()
1830
1831                                 for x in mysplit:
1832                                         if x=="-*":
1833                                                 # "-*" is a special "minus" var that means "unset all settings".
1834                                                 # so USE="-* gnome" will have *just* gnome enabled.
1835                                                 myflags = []
1836                                                 continue
1837
1838                                         if x[0]=="+":
1839                                                 # Not legal. People assume too much. Complain.
1840                                                 writemsg(red("USE flags should not start with a '+': %s\n" % x),
1841                                                         noiselevel=-1)
1842                                                 x=x[1:]
1843                                                 if not x:
1844                                                         continue
1845
1846                                         if (x[0]=="-"):
1847                                                 if (x[1:] in myflags):
1848                                                         # Unset/Remove it.
1849                                                         del myflags[myflags.index(x[1:])]
1850                                                 continue
1851
1852                                         # We got here, so add it now.
1853                                         if x not in myflags:
1854                                                 myflags.append(x)
1855
1856                         myflags.sort()
1857                         #store setting in last element of configlist, the original environment:
1858                         if myflags or mykey in self:
1859                                 self.configlist[-1][mykey] = " ".join(myflags)
1860                         del myflags
1861
1862                 # Do the USE calculation last because it depends on USE_EXPAND.
1863                 if "auto" in self["USE_ORDER"].split(":"):
1864                         self.configdict["auto"]["USE"] = autouse(
1865                                 vartree(root=self["ROOT"], categories=self.categories,
1866                                         settings=self),
1867                                 use_cache=use_cache, mysettings=self)
1868                 else:
1869                         self.configdict["auto"]["USE"] = ""
1870
1871                 use_expand = self.get("USE_EXPAND", "").split()
1872
1873                 if not self.uvlist:
1874                         for x in self["USE_ORDER"].split(":"):
1875                                 if x in self.configdict:
1876                                         self.uvlist.append(self.configdict[x])
1877                         self.uvlist.reverse()
1878
1879                 myflags = set()
1880                 for curdb in self.uvlist:
1881                         cur_use_expand = [x for x in use_expand if x in curdb]
1882                         mysplit = curdb.get("USE", "").split()
1883                         if not mysplit and not cur_use_expand:
1884                                 continue
1885                         for x in mysplit:
1886                                 if x == "-*":
1887                                         myflags.clear()
1888                                         continue
1889
1890                                 if x[0] == "+":
1891                                         writemsg(colorize("BAD", "USE flags should not start " + \
1892                                                 "with a '+': %s\n" % x), noiselevel=-1)
1893                                         x = x[1:]
1894                                         if not x:
1895                                                 continue
1896
1897                                 if x[0] == "-":
1898                                         myflags.discard(x[1:])
1899                                         continue
1900
1901                                 myflags.add(x)
1902
1903                         for var in cur_use_expand:
1904                                 var_lower = var.lower()
1905                                 is_not_incremental = var not in myincrementals
1906                                 if is_not_incremental:
1907                                         prefix = var_lower + "_"
1908                                         for x in list(myflags):
1909                                                 if x.startswith(prefix):
1910                                                         myflags.remove(x)
1911                                 for x in curdb[var].split():
1912                                         if x[0] == "+":
1913                                                 if is_not_incremental:
1914                                                         writemsg(colorize("BAD", "Invalid '+' " + \
1915                                                                 "operator in non-incremental variable " + \
1916                                                                  "'%s': '%s'\n" % (var, x)), noiselevel=-1)
1917                                                         continue
1918                                                 else:
1919                                                         writemsg(colorize("BAD", "Invalid '+' " + \
1920                                                                 "operator in incremental variable " + \
1921                                                                  "'%s': '%s'\n" % (var, x)), noiselevel=-1)
1922                                                 x = x[1:]
1923                                         if x[0] == "-":
1924                                                 if is_not_incremental:
1925                                                         writemsg(colorize("BAD", "Invalid '-' " + \
1926                                                                 "operator in non-incremental variable " + \
1927                                                                  "'%s': '%s'\n" % (var, x)), noiselevel=-1)
1928                                                         continue
1929                                                 myflags.discard(var_lower + "_" + x[1:])
1930                                                 continue
1931                                         myflags.add(var_lower + "_" + x)
1932
1933                 myflags.update(self.useforce)
1934
1935                 # FEATURES=test should imply USE=test
1936                 if "test" in self.configlist[-1].get("FEATURES","").split():
1937                         myflags.add("test")
1938                         if self.get("EBUILD_FORCE_TEST") == "1":
1939                                 self.usemask.discard("test")
1940
1941                 usesplit = [ x for x in myflags if \
1942                         x not in self.usemask]
1943
1944                 usesplit.sort()
1945
1946                 # Use the calculated USE flags to regenerate the USE_EXPAND flags so
1947                 # that they are consistent.
1948                 for var in use_expand:
1949                         prefix = var.lower() + "_"
1950                         prefix_len = len(prefix)
1951                         expand_flags = set([ x[prefix_len:] for x in usesplit \
1952                                 if x.startswith(prefix) ])
1953                         var_split = self.get(var, "").split()
1954                         # Preserve the order of var_split because it can matter for things
1955                         # like LINGUAS.
1956                         var_split = [ x for x in var_split if x in expand_flags ]
1957                         var_split.extend(expand_flags.difference(var_split))
1958                         if var_split or var in self:
1959                                 # Don't export empty USE_EXPAND vars unless the user config
1960                                 # exports them as empty.  This is required for vars such as
1961                                 # LINGUAS, where unset and empty have different meanings.
1962                                 self[var] = " ".join(var_split)
1963
1964                 # Pre-Pend ARCH variable to USE settings so '-*' in env doesn't kill arch.
1965                 if self.configdict["defaults"].has_key("ARCH"):
1966                         if self.configdict["defaults"]["ARCH"]:
1967                                 if self.configdict["defaults"]["ARCH"] not in usesplit:
1968                                         usesplit.insert(0,self.configdict["defaults"]["ARCH"])
1969
1970                 self.configlist[-1]["USE"]= " ".join(usesplit)
1971
1972                 self.already_in_regenerate = 0
1973
1974         def get_virts_p(self, myroot):
1975                 if self.virts_p:
1976                         return self.virts_p
1977                 virts = self.getvirtuals(myroot)
1978                 if virts:
1979                         myvkeys = virts.keys()
1980                         for x in myvkeys:
1981                                 vkeysplit = x.split("/")
1982                                 if not self.virts_p.has_key(vkeysplit[1]):
1983                                         self.virts_p[vkeysplit[1]] = virts[x]
1984                 return self.virts_p
1985
1986         def getvirtuals(self, myroot=None):
1987                 """myroot is now ignored because, due to caching, it has always been
1988                 broken for all but the first call."""
1989                 myroot = self["ROOT"]
1990                 if self.virtuals:
1991                         return self.virtuals
1992
1993                 virtuals_list = []
1994                 for x in self.profiles:
1995                         virtuals_file = os.path.join(x, "virtuals")
1996                         virtuals_dict = grabdict(virtuals_file)
1997                         for k in virtuals_dict.keys():
1998                                 if not isvalidatom(k) or dep_getkey(k) != k:
1999                                         writemsg("--- Invalid virtuals atom in %s: %s\n" % \
2000                                                 (virtuals_file, k), noiselevel=-1)
2001                                         del virtuals_dict[k]
2002                                         continue
2003                                 myvalues = virtuals_dict[k]
2004                                 for x in myvalues:
2005                                         myatom = x
2006                                         if x.startswith("-"):
2007                                                 # allow incrementals
2008                                                 myatom = x[1:]
2009                                         if not isvalidatom(myatom):
2010                                                 writemsg("--- Invalid atom in %s: %s\n" % \
2011                                                         (virtuals_file, x), noiselevel=-1)
2012                                                 myvalues.remove(x)
2013                                 if not myvalues:
2014                                         del virtuals_dict[k]
2015                         if virtuals_dict:
2016                                 virtuals_list.append(virtuals_dict)
2017
2018                 self.dirVirtuals = stack_dictlist(virtuals_list, incremental=True)
2019                 del virtuals_list
2020
2021                 for virt in self.dirVirtuals:
2022                         # Preference for virtuals decreases from left to right.
2023                         self.dirVirtuals[virt].reverse()
2024
2025                 # Repoman does not use user or tree virtuals.
2026                 if self.local_config and not self.treeVirtuals:
2027                         temp_vartree = vartree(myroot, None,
2028                                 categories=self.categories, settings=self)
2029                         # Reduce the provides into a list by CP.
2030                         self.treeVirtuals = map_dictlist_vals(getCPFromCPV,temp_vartree.get_all_provides())
2031
2032                 self.virtuals = self.__getvirtuals_compile()
2033                 return self.virtuals
2034
2035         def __getvirtuals_compile(self):
2036                 """Stack installed and profile virtuals.  Preference for virtuals
2037                 decreases from left to right.
2038                 Order of preference:
2039                 1. installed and in profile
2040                 2. installed only
2041                 3. profile only
2042                 """
2043
2044                 # Virtuals by profile+tree preferences.
2045                 ptVirtuals   = {}
2046
2047                 for virt, installed_list in self.treeVirtuals.iteritems():
2048                         profile_list = self.dirVirtuals.get(virt, None)
2049                         if not profile_list:
2050                                 continue
2051                         for cp in installed_list:
2052                                 if cp in profile_list:
2053                                         ptVirtuals.setdefault(virt, [])
2054                                         ptVirtuals[virt].append(cp)
2055
2056                 virtuals = stack_dictlist([ptVirtuals, self.treeVirtuals,
2057                         self.dirVirtuals])
2058                 return virtuals
2059
2060         def __delitem__(self,mykey):
2061                 self.modifying()
2062                 for x in self.lookuplist:
2063                         if x != None:
2064                                 if mykey in x:
2065                                         del x[mykey]
2066
2067         def __getitem__(self,mykey):
2068                 match = ''
2069                 for x in self.lookuplist:
2070                         if x is None:
2071                                 writemsg("!!! lookuplist is null.\n")
2072                         elif x.has_key(mykey):
2073                                 match = x[mykey]
2074                                 break
2075                 return match
2076
2077         def has_key(self,mykey):
2078                 for x in self.lookuplist:
2079                         if x.has_key(mykey):
2080                                 return 1
2081                 return 0
2082
2083         def __contains__(self, mykey):
2084                 """Called to implement membership test operators (in and not in)."""
2085                 return bool(self.has_key(mykey))
2086
2087         def setdefault(self, k, x=None):
2088                 if k in self:
2089                         return self[k]
2090                 else:
2091                         self[k] = x
2092                         return x
2093
2094         def get(self, k, x=None):
2095                 if k in self:
2096                         return self[k]
2097                 else:
2098                         return x
2099
2100         def keys(self):
2101                 return unique_array(flatten([x.keys() for x in self.lookuplist]))
2102
2103         def __setitem__(self,mykey,myvalue):
2104                 "set a value; will be thrown away at reset() time"
2105                 if type(myvalue) != types.StringType:
2106                         raise ValueError("Invalid type being used as a value: '%s': '%s'" % (str(mykey),str(myvalue)))
2107                 self.modifying()
2108                 self.modifiedkeys += [mykey]
2109                 self.configdict["env"][mykey]=myvalue
2110
2111         def environ(self):
2112                 "return our locally-maintained environment"
2113                 mydict={}
2114                 for x in self.keys():
2115                         myvalue = self[x]
2116                         if not isinstance(myvalue, basestring):
2117                                 writemsg("!!! Non-string value in config: %s=%s\n" % \
2118                                         (x, myvalue), noiselevel=-1)
2119                                 continue
2120                         mydict[x] = myvalue
2121                 if not mydict.has_key("HOME") and mydict.has_key("BUILD_PREFIX"):
2122                         writemsg("*** HOME not set. Setting to "+mydict["BUILD_PREFIX"]+"\n")
2123                         mydict["HOME"]=mydict["BUILD_PREFIX"][:]
2124
2125                 return mydict
2126
2127         def thirdpartymirrors(self):
2128                 if getattr(self, "_thirdpartymirrors", None) is None:
2129                         profileroots = [os.path.join(self["PORTDIR"], "profiles")]
2130                         for x in self["PORTDIR_OVERLAY"].split():
2131                                 profileroots.insert(0, os.path.join(x, "profiles"))
2132                         thirdparty_lists = [grabdict(os.path.join(x, "thirdpartymirrors")) for x in profileroots]
2133                         self._thirdpartymirrors = stack_dictlist(thirdparty_lists, incremental=True)
2134                 return self._thirdpartymirrors
2135
2136         def archlist(self):
2137                 return flatten([[myarch, "~" + myarch] \
2138                         for myarch in self["PORTAGE_ARCHLIST"].split()])
2139
2140         def selinux_enabled(self):
2141                 if getattr(self, "_selinux_enabled", None) is None:
2142                         self._selinux_enabled = 0
2143                         if "selinux" in self["USE"].split():
2144                                 if "selinux" in globals():
2145                                         if selinux.is_selinux_enabled() == 1:
2146                                                 self._selinux_enabled = 1
2147                                         else:
2148                                                 self._selinux_enabled = 0
2149                                 else:
2150                                         writemsg("!!! SELinux module not found. Please verify that it was installed.\n",
2151                                                 noiselevel=-1)
2152                                         self._selinux_enabled = 0
2153                         if self._selinux_enabled == 0:
2154                                 try:    
2155                                         del sys.modules["selinux"]
2156                                 except KeyError:
2157                                         pass
2158                 return self._selinux_enabled
2159
2160 # XXX This would be to replace getstatusoutput completely.
2161 # XXX Issue: cannot block execution. Deadlock condition.
2162 def spawn(mystring, mysettings, debug=0, free=0, droppriv=0, sesandbox=0, **keywords):
2163         """
2164         Spawn a subprocess with extra portage-specific options.
2165         Optiosn include:
2166
2167         Sandbox: Sandbox means the spawned process will be limited in its ability t
2168         read and write files (normally this means it is restricted to ${IMAGE}/)
2169         SElinux Sandbox: Enables sandboxing on SElinux
2170         Reduced Privileges: Drops privilages such that the process runs as portage:portage
2171         instead of as root.
2172
2173         Notes: os.system cannot be used because it messes with signal handling.  Instead we
2174         use the portage_exec spawn* family of functions.
2175
2176         This function waits for the process to terminate.
2177
2178         @param mystring: Command to run
2179         @type mystring: String
2180         @param mysettings: Either a Dict of Key,Value pairs or an instance of portage.config
2181         @type mysettings: Dictionary or config instance
2182         @param debug: Ignored
2183         @type debug: Boolean
2184         @param free: Enable sandboxing for this process
2185         @type free: Boolean
2186         @param droppriv: Drop to portage:portage when running this command
2187         @type droppriv: Boolean
2188         @param sesandbox: Enable SELinux Sandboxing (toggles a context switch)
2189         @type sesandbox: Boolean
2190         @param keywords: Extra options encoded as a dict, to be passed to spawn
2191         @type keywords: Dictionary
2192         @rtype: Integer
2193         @returns:
2194         1. The return code of the spawned process.
2195         """
2196
2197         if type(mysettings) == types.DictType:
2198                 env=mysettings
2199                 keywords["opt_name"]="[ %s ]" % "portage"
2200         else:
2201                 check_config_instance(mysettings)
2202                 env=mysettings.environ()
2203                 keywords["opt_name"]="[%s]" % mysettings["PF"]
2204
2205         # The default policy for the sesandbox domain only allows entry (via exec)
2206         # from shells and from binaries that belong to portage (the number of entry
2207         # points is minimized).  The "tee" binary is not among the allowed entry
2208         # points, so it is spawned outside of the sesandbox domain and reads from a
2209         # pipe between two domains.
2210         logfile = keywords.get("logfile")
2211         mypids = []
2212         pw = None
2213         if logfile:
2214                 del keywords["logfile"]
2215                 fd_pipes = keywords.get("fd_pipes")
2216                 if fd_pipes is None:
2217                         fd_pipes = {0:0, 1:1, 2:2}
2218                 elif 1 not in fd_pipes or 2 not in fd_pipes:
2219                         raise ValueError(fd_pipes)
2220                 pr, pw = os.pipe()
2221                 mypids.extend(portage_exec.spawn(('tee', '-i', '-a', logfile),
2222                          returnpid=True, fd_pipes={0:pr, 1:fd_pipes[1], 2:fd_pipes[2]}))
2223                 os.close(pr)
2224                 fd_pipes[1] = pw
2225                 fd_pipes[2] = pw
2226                 keywords["fd_pipes"] = fd_pipes
2227
2228         features = mysettings.features
2229         # XXX: Negative RESTRICT word
2230         droppriv=(droppriv and ("userpriv" in features) and not \
2231                 (("nouserpriv" in mysettings["RESTRICT"].split()) or \
2232                  ("userpriv" in mysettings["RESTRICT"].split())))
2233
2234         if droppriv and not uid and portage_gid and portage_uid:
2235                 keywords.update({"uid":portage_uid,"gid":portage_gid,"groups":userpriv_groups,"umask":002})
2236
2237         if not free:
2238                 free=((droppriv and "usersandbox" not in features) or \
2239                         (not droppriv and "sandbox" not in features and "usersandbox" not in features))
2240
2241         if free:
2242                 keywords["opt_name"] += " bash"
2243                 spawn_func = portage_exec.spawn_bash
2244         else:
2245                 keywords["opt_name"] += " sandbox"
2246                 spawn_func = portage_exec.spawn_sandbox
2247
2248         if sesandbox:
2249                 con = selinux.getcontext()
2250                 con = con.replace(mysettings["PORTAGE_T"], mysettings["PORTAGE_SANDBOX_T"])
2251                 selinux.setexec(con)
2252
2253         returnpid = keywords.get("returnpid")
2254         keywords["returnpid"] = True
2255         try:
2256                 mypids.extend(spawn_func(mystring, env=env, **keywords))
2257         finally:
2258                 if pw:
2259                         os.close(pw)
2260                 if sesandbox:
2261                         selinux.setexec(None)
2262
2263         if returnpid:
2264                 return mypids
2265
2266         while mypids:
2267                 pid = mypids.pop(0)
2268                 retval = os.waitpid(pid, 0)[1]
2269                 portage_exec.spawned_pids.remove(pid)
2270                 if retval != os.EX_OK:
2271                         for pid in mypids:
2272                                 if os.waitpid(pid, os.WNOHANG) == (0,0):
2273                                         import signal
2274                                         os.kill(pid, signal.SIGTERM)
2275                                         os.waitpid(pid, 0)
2276                                 portage_exec.spawned_pids.remove(pid)
2277                         if retval & 0xff:
2278                                 return (retval & 0xff) << 8
2279                         return retval >> 8
2280         return os.EX_OK
2281
2282 def fetch(myuris, mysettings, listonly=0, fetchonly=0, locks_in_subdir=".locks",use_locks=1, try_mirrors=1):
2283         "fetch files.  Will use digest file if available."
2284
2285         features = mysettings.features
2286         # 'nomirror' is bad/negative logic. You Restrict mirroring, not no-mirroring.
2287         if ("mirror" in mysettings["RESTRICT"].split()) or \
2288            ("nomirror" in mysettings["RESTRICT"].split()):
2289                 if ("mirror" in features) and ("lmirror" not in features):
2290                         # lmirror should allow you to bypass mirror restrictions.
2291                         # XXX: This is not a good thing, and is temporary at best.
2292                         print ">>> \"mirror\" mode desired and \"mirror\" restriction found; skipping fetch."
2293                         return 1
2294
2295         thirdpartymirrors = mysettings.thirdpartymirrors()
2296
2297         check_config_instance(mysettings)
2298
2299         custommirrors = grabdict(os.path.join(mysettings["PORTAGE_CONFIGROOT"],
2300                 CUSTOM_MIRRORS_FILE.lstrip(os.path.sep)), recursive=1)
2301
2302         mymirrors=[]
2303
2304         if listonly or ("distlocks" not in features):
2305                 use_locks = 0
2306
2307         fetch_to_ro = 0
2308         if "skiprocheck" in features:
2309                 fetch_to_ro = 1
2310
2311         if not os.access(mysettings["DISTDIR"],os.W_OK) and fetch_to_ro:
2312                 if use_locks:
2313                         writemsg(red("!!! For fetching to a read-only filesystem, " + \
2314                                 "locking should be turned off.\n"), noiselevel=-1)
2315                         writemsg("!!! This can be done by adding -distlocks to " + \
2316                                 "FEATURES in /etc/make.conf\n", noiselevel=-1)
2317 #                       use_locks = 0
2318
2319         # local mirrors are always added
2320         if custommirrors.has_key("local"):
2321                 mymirrors += custommirrors["local"]
2322
2323         if ("nomirror" in mysettings["RESTRICT"].split()) or \
2324            ("mirror"   in mysettings["RESTRICT"].split()):
2325                 # We don't add any mirrors.
2326                 pass
2327         else:
2328                 if try_mirrors:
2329                         mymirrors += [x.rstrip("/") for x in mysettings["GENTOO_MIRRORS"].split() if x]
2330
2331         pkgdir = mysettings.get("O")
2332         if pkgdir:
2333                 mydigests = Manifest(
2334                         pkgdir, mysettings["DISTDIR"]).getTypeDigests("DIST")
2335         else:
2336                 # no digests because fetch was not called for a specific package
2337                 mydigests = {}
2338
2339         fsmirrors = []
2340         for x in range(len(mymirrors)-1,-1,-1):
2341                 if mymirrors[x] and mymirrors[x][0]=='/':
2342                         fsmirrors += [mymirrors[x]]
2343                         del mymirrors[x]
2344
2345         restrict_fetch = "fetch" in mysettings["RESTRICT"].split()
2346         custom_local_mirrors = custommirrors.get("local", [])
2347         if restrict_fetch:
2348                 # With fetch restriction, a normal uri may only be fetched from
2349                 # custom local mirrors (if available).  A mirror:// uri may also
2350                 # be fetched from specific mirrors (effectively overriding fetch
2351                 # restriction, but only for specific mirrors).
2352                 locations = custom_local_mirrors
2353         else:
2354                 locations = mymirrors
2355
2356         filedict={}
2357         primaryuri_indexes={}
2358         for myuri in myuris:
2359                 myfile=os.path.basename(myuri)
2360                 if not filedict.has_key(myfile):
2361                         filedict[myfile]=[]
2362                         for y in range(0,len(locations)):
2363                                 filedict[myfile].append(locations[y]+"/distfiles/"+myfile)
2364                 if myuri[:9]=="mirror://":
2365                         eidx = myuri.find("/", 9)
2366                         if eidx != -1:
2367                                 mirrorname = myuri[9:eidx]
2368
2369                                 # Try user-defined mirrors first
2370                                 if custommirrors.has_key(mirrorname):
2371                                         for cmirr in custommirrors[mirrorname]:
2372                                                 filedict[myfile].append(cmirr+"/"+myuri[eidx+1:])
2373                                                 # remove the mirrors we tried from the list of official mirrors
2374                                                 if cmirr.strip() in thirdpartymirrors[mirrorname]:
2375                                                         thirdpartymirrors[mirrorname].remove(cmirr)
2376                                 # now try the official mirrors
2377                                 if thirdpartymirrors.has_key(mirrorname):
2378                                         shuffle(thirdpartymirrors[mirrorname])
2379
2380                                         for locmirr in thirdpartymirrors[mirrorname]:
2381                                                 filedict[myfile].append(locmirr+"/"+myuri[eidx+1:])
2382
2383                                 if not filedict[myfile]:
2384                                         writemsg("No known mirror by the name: %s\n" % (mirrorname))
2385                         else:
2386                                 writemsg("Invalid mirror definition in SRC_URI:\n", noiselevel=-1)
2387                                 writemsg("  %s\n" % (myuri), noiselevel=-1)
2388                 else:
2389                         if restrict_fetch:
2390                                 # Only fetch from specific mirrors is allowed.
2391                                 continue
2392                         if "primaryuri" in mysettings["RESTRICT"].split():
2393                                 # Use the source site first.
2394                                 if primaryuri_indexes.has_key(myfile):
2395                                         primaryuri_indexes[myfile] += 1
2396                                 else:
2397                                         primaryuri_indexes[myfile] = 0
2398                                 filedict[myfile].insert(primaryuri_indexes[myfile], myuri)
2399                         else:
2400                                 filedict[myfile].append(myuri)
2401
2402         can_fetch=True
2403
2404         if listonly:
2405                 can_fetch = False
2406
2407         for var_name in ("FETCHCOMMAND", "RESUMECOMMAND"):
2408                 if not mysettings.get(var_name, None):
2409                         can_fetch = False
2410
2411         if can_fetch:
2412                 dirmode  = 02070
2413                 filemode =   060
2414                 modemask =    02
2415                 distdir_dirs = [""]
2416                 if "distlocks" in features:
2417                         distdir_dirs.append(".locks")
2418                 try:
2419                         
2420                         for x in distdir_dirs:
2421                                 mydir = os.path.join(mysettings["DISTDIR"], x)
2422                                 if portage_util.ensure_dirs(mydir, gid=portage_gid, mode=dirmode, mask=modemask):
2423                                         writemsg("Adjusting permissions recursively: '%s'\n" % mydir,
2424                                                 noiselevel=-1)
2425                                         def onerror(e):
2426                                                 raise # bail out on the first error that occurs during recursion
2427                                         if not apply_recursive_permissions(mydir,
2428                                                 gid=portage_gid, dirmode=dirmode, dirmask=modemask,
2429                                                 filemode=filemode, filemask=modemask, onerror=onerror):
2430                                                 raise portage_exception.OperationNotPermitted(
2431                                                         "Failed to apply recursive permissions for the portage group.")
2432                 except portage_exception.PortageException, e:
2433                         if not os.path.isdir(mysettings["DISTDIR"]):
2434                                 writemsg("!!! %s\n" % str(e), noiselevel=-1)
2435                                 writemsg("!!! Directory Not Found: DISTDIR='%s'\n" % mysettings["DISTDIR"], noiselevel=-1)
2436                                 writemsg("!!! Fetching will fail!\n", noiselevel=-1)
2437
2438         if can_fetch and \
2439                 not fetch_to_ro and \
2440                 not os.access(mysettings["DISTDIR"], os.W_OK):
2441                 writemsg("!!! No write access to '%s'\n" % mysettings["DISTDIR"],
2442                         noiselevel=-1)
2443                 can_fetch = False
2444
2445         if can_fetch and use_locks and locks_in_subdir:
2446                         distlocks_subdir = os.path.join(mysettings["DISTDIR"], locks_in_subdir)
2447                         if not os.access(distlocks_subdir, os.W_OK):
2448                                 writemsg("!!! No write access to write to %s.  Aborting.\n" % distlocks_subdir,
2449                                         noiselevel=-1)
2450                                 return 0
2451                         del distlocks_subdir
2452         for myfile in filedict.keys():
2453                 """
2454                 fetched  status
2455                 0        nonexistent
2456                 1        partially downloaded
2457                 2        completely downloaded
2458                 """
2459                 myfile_path = os.path.join(mysettings["DISTDIR"], myfile)
2460                 fetched=0
2461                 file_lock = None
2462                 if listonly:
2463                         writemsg_stdout("\n", noiselevel=-1)
2464                 else:
2465                         if use_locks and can_fetch:
2466                                 waiting_msg = None
2467                                 if "parallel-fetch" in features:
2468                                         waiting_msg = ("Downloading '%s'... " + \
2469                                                 "see /var/log/emerge-fetch.log for details.") % myfile
2470                                 if locks_in_subdir:
2471                                         file_lock = portage_locks.lockfile(
2472                                                 os.path.join(mysettings["DISTDIR"],
2473                                                 locks_in_subdir, myfile), wantnewlockfile=1,
2474                                                 waiting_msg=waiting_msg)
2475                                 else:
2476                                         file_lock = portage_locks.lockfile(
2477                                                 myfile_path, wantnewlockfile=1,
2478                                                 waiting_msg=waiting_msg)
2479                 try:
2480                         if not listonly:
2481                                 if fsmirrors and not os.path.exists(myfile_path):
2482                                         for mydir in fsmirrors:
2483                                                 mirror_file = os.path.join(mydir, myfile)
2484                                                 try:
2485                                                         shutil.copyfile(mirror_file, myfile_path)
2486                                                         writemsg(_("Local mirror has file:" + \
2487                                                                 " %(file)s\n" % {"file":myfile}))
2488                                                         break
2489                                                 except (IOError, OSError), e:
2490                                                         if e.errno != errno.ENOENT:
2491                                                                 raise
2492                                                         del e
2493
2494                                 try:
2495                                         mystat = os.stat(myfile_path)
2496                                 except OSError, e:
2497                                         if e.errno != errno.ENOENT:
2498                                                 raise
2499                                         del e
2500                                 else:
2501                                         try:
2502                                                 apply_secpass_permissions(
2503                                                         myfile_path, gid=portage_gid, mode=0664, mask=02,
2504                                                         stat_cached=mystat)
2505                                         except portage_exception.PortageException, e:
2506                                                 if not os.access(myfile_path, os.R_OK):
2507                                                         writemsg("!!! Failed to adjust permissions:" + \
2508                                                                 " %s\n" % str(e), noiselevel=-1)
2509                                         if myfile not in mydigests:
2510                                                 # We don't have a digest, but the file exists.  We must
2511                                                 # assume that it is fully downloaded.
2512                                                 continue
2513                                         else:
2514                                                 if mystat.st_size < mydigests[myfile]["size"] and \
2515                                                         not restrict_fetch:
2516                                                         fetched = 1 # Try to resume this download.
2517                                                 else:
2518                                                         verified_ok, reason = portage_checksum.verify_all(
2519                                                                 myfile_path, mydigests[myfile])
2520                                                         if not verified_ok:
2521                                                                 writemsg("!!! Previously fetched" + \
2522                                                                         " file: '%s'\n" % myfile, noiselevel=-1)
2523                                                                 writemsg("!!! Reason: %s\n" % reason[0],
2524                                                                         noiselevel=-1)
2525                                                                 writemsg(("!!! Got:      %s\n" + \
2526                                                                         "!!! Expected: %s\n") % \
2527                                                                         (reason[1], reason[2]), noiselevel=-1)
2528                                                                 if reason[0] == "Insufficient data for checksum verification":
2529                                                                         return 0
2530                                                                 if can_fetch and not restrict_fetch:
2531                                                                         writemsg("Refetching...\n\n",
2532                                                                                 noiselevel=-1)
2533                                                                         os.unlink(myfile_path)
2534                                                         else:
2535                                                                 eout = output.EOutput()
2536                                                                 eout.quiet = \
2537                                                                         mysettings.get("PORTAGE_QUIET", None) == "1"
2538                                                                 for digest_name in mydigests[myfile]:
2539                                                                         eout.ebegin(
2540                                                                                 "%s %s ;-)" % (myfile, digest_name))
2541                                                                         eout.eend(0)
2542                                                                 continue # fetch any remaining files
2543
2544                         for loc in filedict[myfile]:
2545                                 if listonly:
2546                                         writemsg_stdout(loc+" ", noiselevel=-1)
2547                                         continue
2548                                 # allow different fetchcommands per protocol
2549                                 protocol = loc[0:loc.find("://")]
2550                                 if mysettings.has_key("FETCHCOMMAND_"+protocol.upper()):
2551                                         fetchcommand=mysettings["FETCHCOMMAND_"+protocol.upper()]
2552                                 else:
2553                                         fetchcommand=mysettings["FETCHCOMMAND"]
2554                                 if mysettings.has_key("RESUMECOMMAND_"+protocol.upper()):
2555                                         resumecommand=mysettings["RESUMECOMMAND_"+protocol.upper()]
2556                                 else:
2557                                         resumecommand=mysettings["RESUMECOMMAND"]
2558
2559                                 if not can_fetch:
2560                                         if fetched != 2:
2561                                                 if fetched == 0:
2562                                                         writemsg("!!! File %s isn't fetched but unable to get it.\n" % myfile,
2563                                                                 noiselevel=-1)
2564                                                 else:
2565                                                         writemsg("!!! File %s isn't fully fetched, but unable to complete it\n" % myfile,
2566                                                                 noiselevel=-1)
2567                                                 for var_name in ("FETCHCOMMAND", "RESUMECOMMAND"):
2568                                                         if not mysettings.get(var_name, None):
2569                                                                 writemsg(("!!! %s is unset.  It should " + \
2570                                                                 "have been defined in /etc/make.globals.\n") \
2571                                                                  % var_name, noiselevel=-1)
2572                                                 return 0
2573                                         else:
2574                                                 continue
2575
2576                                 if fetched != 2:
2577                                         #we either need to resume or start the download
2578                                         #you can't use "continue" when you're inside a "try" block
2579                                         if fetched==1:
2580                                                 #resume mode:
2581                                                 writemsg(">>> Resuming download...\n")
2582                                                 locfetch=resumecommand
2583                                         else:
2584                                                 #normal mode:
2585                                                 locfetch=fetchcommand
2586                                         writemsg_stdout(">>> Downloading '%s'\n" % \
2587                                                 re.sub(r'//(.+):.+@(.+)/',r'//\1:*password*@\2/', loc))
2588                                         variables = {
2589                                                 "DISTDIR": mysettings["DISTDIR"],
2590                                                 "URI":     loc,
2591                                                 "FILE":    myfile
2592                                         }
2593                                         import shlex, StringIO
2594                                         lexer = shlex.shlex(StringIO.StringIO(locfetch), posix=True)
2595                                         lexer.whitespace_split = True
2596                                         myfetch = [varexpand(x, mydict=variables) for x in lexer]
2597
2598                                         spawn_keywords = {}
2599                                         if "userfetch" in mysettings.features and \
2600                                                 os.getuid() == 0 and portage_gid and portage_uid:
2601                                                 spawn_keywords.update({
2602                                                         "uid"    : portage_uid,
2603                                                         "gid"    : portage_gid,
2604                                                         "groups" : userpriv_groups,
2605                                                         "umask"  : 002})
2606
2607                                         try:
2608
2609                                                 if mysettings.selinux_enabled():
2610                                                         con = selinux.getcontext()
2611                                                         con = con.replace(mysettings["PORTAGE_T"], mysettings["PORTAGE_FETCH_T"])
2612                                                         selinux.setexec(con)
2613                                                         # bash is an allowed entrypoint, while most binaries are not
2614                                                         myfetch = ["bash", "-c", "exec \"$@\"", myfetch[0]] + myfetch
2615
2616                                                 myret = portage_exec.spawn(myfetch,
2617                                                         env=mysettings.environ(), **spawn_keywords)
2618
2619                                                 if mysettings.selinux_enabled():
2620                                                         selinux.setexec(None)
2621
2622                                         finally:
2623                                                 try:
2624                                                         apply_secpass_permissions(myfile_path,
2625                                                                 gid=portage_gid, mode=0664, mask=02)
2626                                                 except portage_exception.FileNotFound, e:
2627                                                         pass
2628                                                 except portage_exception.PortageException, e:
2629                                                         if not os.access(myfile_path, os.R_OK):
2630                                                                 writemsg("!!! Failed to adjust permissions:" + \
2631                                                                         " %s\n" % str(e), noiselevel=-1)
2632
2633                                         if mydigests!=None and mydigests.has_key(myfile):
2634                                                 try:
2635                                                         mystat = os.stat(myfile_path)
2636                                                 except OSError, e:
2637                                                         if e.errno != errno.ENOENT:
2638                                                                 raise
2639                                                         del e
2640                                                         fetched = 0
2641                                                 else:
2642                                                         # no exception?  file exists. let digestcheck() report
2643                                                         # an appropriately for size or checksum errors
2644                                                         if (mystat[stat.ST_SIZE]<mydigests[myfile]["size"]):
2645                                                                 # Fetch failed... Try the next one... Kill 404 files though.
2646                                                                 if (mystat[stat.ST_SIZE]<100000) and (len(myfile)>4) and not ((myfile[-5:]==".html") or (myfile[-4:]==".htm")):
2647                                                                         html404=re.compile("<title>.*(not found|404).*</title>",re.I|re.M)
2648                                                                         if html404.search(open(mysettings["DISTDIR"]+"/"+myfile).read()):
2649                                                                                 try:
2650                                                                                         os.unlink(mysettings["DISTDIR"]+"/"+myfile)
2651                                                                                         writemsg(">>> Deleting invalid distfile. (Improper 404 redirect from server.)\n")
2652                                                                                         fetched = 0
2653                                                                                         continue
2654                                                                                 except (IOError, OSError):
2655                                                                                         pass
2656                                                                 fetched = 1
2657                                                                 continue
2658                                                         if not fetchonly:
2659                                                                 fetched=2
2660                                                                 break
2661                                                         else:
2662                                                                 # File is the correct size--check the checksums for the fetched
2663                                                                 # file NOW, for those users who don't have a stable/continuous
2664                                                                 # net connection. This way we have a chance to try to download
2665                                                                 # from another mirror...
2666                                                                 verified_ok,reason = portage_checksum.verify_all(mysettings["DISTDIR"]+"/"+myfile, mydigests[myfile])
2667                                                                 if not verified_ok:
2668                                                                         print reason
2669                                                                         writemsg("!!! Fetched file: "+str(myfile)+" VERIFY FAILED!\n",
2670                                                                                 noiselevel=-1)
2671                                                                         writemsg("!!! Reason: "+reason[0]+"\n",
2672                                                                                 noiselevel=-1)
2673                                                                         writemsg("!!! Got:      %s\n!!! Expected: %s\n" % \
2674                                                                                 (reason[1], reason[2]), noiselevel=-1)
2675                                                                         if reason[0] == "Insufficient data for checksum verification":
2676                                                                                 return 0
2677                                                                         writemsg("Removing corrupt distfile...\n", noiselevel=-1)
2678                                                                         os.unlink(mysettings["DISTDIR"]+"/"+myfile)
2679                                                                         fetched=0
2680                                                                 else:
2681                                                                         eout = output.EOutput()
2682                                                                         eout.quiet = mysettings.get("PORTAGE_QUIET", None) == "1"
2683                                                                         for x_key in mydigests[myfile].keys():
2684                                                                                 eout.ebegin("%s %s ;-)" % (myfile, x_key))
2685                                                                                 eout.eend(0)
2686                                                                         fetched=2
2687                                                                         break
2688                                         else:
2689                                                 if not myret:
2690                                                         fetched=2
2691                                                         break
2692                                                 elif mydigests!=None:
2693                                                         writemsg("No digest file available and download failed.\n\n",
2694                                                                 noiselevel=-1)
2695                 finally:
2696                         if use_locks and file_lock:
2697                                 portage_locks.unlockfile(file_lock)
2698
2699                 if listonly:
2700                         writemsg_stdout("\n", noiselevel=-1)
2701                 if fetched != 2:
2702                         if restrict_fetch:
2703                                 print "\n!!!", mysettings["CATEGORY"] + "/" + \
2704                                         mysettings["PF"], "has fetch restriction turned on."
2705                                 print "!!! This probably means that this " + \
2706                                         "ebuild's files must be downloaded"
2707                                 print "!!! manually.  See the comments in" + \
2708                                         " the ebuild for more information.\n"
2709                                 spawn(EBUILD_SH_BINARY + " nofetch", mysettings)
2710                         elif listonly:
2711                                 continue
2712                         elif not filedict[myfile]:
2713                                 writemsg("Warning: No mirrors available for file" + \
2714                                         " '%s'\n" % (myfile), noiselevel=-1)
2715                         else:
2716                                 writemsg("!!! Couldn't download '%s'. Aborting.\n" % myfile,
2717                                         noiselevel=-1)
2718                         return 0
2719         return 1
2720
2721 def digestgen(myarchives, mysettings, overwrite=1, manifestonly=0, myportdb=None):
2722         """
2723         Generates a digest file if missing.  Assumes all files are available.
2724         DEPRECATED: this now only is a compability wrapper for 
2725                     portage_manifest.Manifest()
2726         NOTE: manifestonly and overwrite are useless with manifest2 and
2727               are therefore ignored."""
2728         if myportdb is None:
2729                 writemsg("Warning: myportdb not specified to digestgen\n")
2730                 global portdb
2731                 myportdb = portdb
2732         global _doebuild_manifest_exempt_depend
2733         try:
2734                 _doebuild_manifest_exempt_depend += 1
2735                 distfiles_map = {}
2736                 fetchlist_dict = FetchlistDict(mysettings["O"], mysettings, myportdb)
2737                 for cpv in fetchlist_dict:
2738                         try:
2739                                 for myfile in fetchlist_dict[cpv]:
2740                                         distfiles_map.setdefault(myfile, []).append(cpv)
2741                         except portage_exception.InvalidDependString, e:
2742                                 writemsg("!!! %s\n" % str(e), noiselevel=-1)
2743                                 writemsg("!!! Invalid SRC_URI for '%s'.\n" % cpv, noiselevel=-1)
2744                                 del e
2745                                 return 0
2746                 mytree = os.path.dirname(os.path.dirname(mysettings["O"]))
2747                 manifest1_compat = not os.path.exists(
2748                         os.path.join(mytree, "manifest1_obsolete"))
2749                 mf = Manifest(mysettings["O"], mysettings["DISTDIR"],
2750                         fetchlist_dict=fetchlist_dict, manifest1_compat=manifest1_compat)
2751                 # Don't require all hashes since that can trigger excessive
2752                 # fetches when sufficient digests already exist.  To ease transition
2753                 # while Manifest 1 is being removed, only require hashes that will
2754                 # exist before and after the transition.
2755                 required_hash_types = set()
2756                 required_hash_types.add("size")
2757                 required_hash_types.add(portage_const.MANIFEST2_REQUIRED_HASH)
2758                 dist_hashes = mf.fhashdict.get("DIST", {})
2759                 missing_hashes = set()
2760                 for myfile in distfiles_map:
2761                         myhashes = dist_hashes.get(myfile)
2762                         if not myhashes:
2763                                 missing_hashes.add(myfile)
2764                                 continue
2765                         if required_hash_types.difference(myhashes):
2766                                 missing_hashes.add(myfile)
2767                 if missing_hashes:
2768                         missing_files = []
2769                         for myfile in missing_hashes:
2770                                 try:
2771                                         os.stat(os.path.join(mysettings["DISTDIR"], myfile))
2772                                 except OSError, e:
2773                                         if e.errno != errno.ENOENT:
2774                                                 raise
2775                                         del e
2776                                         missing_files.append(myfile)
2777                         if missing_files:
2778                                 mytree = os.path.realpath(os.path.dirname(
2779                                         os.path.dirname(mysettings["O"])))
2780                                 fetch_settings = config(clone=mysettings)
2781                                 debug = mysettings.get("PORTAGE_DEBUG") == "1"
2782                                 for myfile in missing_files:
2783                                         success = False
2784                                         for cpv in distfiles_map[myfile]:
2785                                                 myebuild = os.path.join(mysettings["O"],
2786                                                         catsplit(cpv)[1] + ".ebuild")
2787                                                 # for RESTRICT=fetch, mirror, etc...
2788                                                 doebuild_environment(myebuild, "fetch",
2789                                                         mysettings["ROOT"], fetch_settings,
2790                                                         debug, 1, myportdb)
2791                                                 alluris, aalist = myportdb.getfetchlist(
2792                                                         cpv, mytree=mytree, all=True,
2793                                                         mysettings=fetch_settings)
2794                                                 myuris = [uri for uri in alluris \
2795                                                         if os.path.basename(uri) == myfile]
2796                                                 fetch_settings["A"] = myfile # for use by pkg_nofetch()
2797                                                 if fetch(myuris, fetch_settings):
2798                                                         success = True
2799                                                         break
2800                                         if not success:
2801                                                 writemsg(("!!! File %s doesn't exist, can't update " + \
2802                                                         "Manifest\n") % myfile, noiselevel=-1)
2803                                                 return 0
2804                 writemsg_stdout(">>> Creating Manifest for %s\n" % mysettings["O"])
2805                 try:
2806                         mf.create(requiredDistfiles=myarchives,
2807                                 assumeDistHashesSometimes=True,
2808                                 assumeDistHashesAlways=(
2809                                 "assume-digests" in mysettings.features))
2810                 except portage_exception.FileNotFound, e:
2811                         writemsg(("!!! File %s doesn't exist, can't update " + \
2812                                 "Manifest\n") % e, noiselevel=-1)
2813                         return 0
2814                 mf.write(sign=False)
2815                 if "assume-digests" not in mysettings.features:
2816                         distlist = mf.fhashdict.get("DIST", {}).keys()
2817                         distlist.sort()
2818                         auto_assumed = []
2819                         for filename in distlist:
2820                                 if not os.path.exists(
2821                                         os.path.join(mysettings["DISTDIR"], filename)):
2822                                         auto_assumed.append(filename)
2823                         if auto_assumed:
2824                                 mytree = os.path.realpath(
2825                                         os.path.dirname(os.path.dirname(mysettings["O"])))
2826                                 cp = os.path.sep.join(mysettings["O"].split(os.path.sep)[-2:])
2827                                 pkgs = myportdb.cp_list(cp, mytree=mytree)
2828                                 pkgs.sort()
2829                                 writemsg_stdout("  digest.assumed" + output.colorize("WARN",
2830                                         str(len(auto_assumed)).rjust(18)) + "\n")
2831                                 for pkg_key in pkgs:
2832                                         fetchlist = myportdb.getfetchlist(pkg_key,
2833                                                 mysettings=mysettings, all=True, mytree=mytree)[1]
2834                                         pv = pkg_key.split("/")[1]
2835                                         for filename in auto_assumed:
2836                                                 if filename in fetchlist:
2837                                                         writemsg_stdout(
2838                                                                 "   digest-%s::%s\n" % (pv, filename))
2839                 return 1
2840         finally:
2841                 _doebuild_manifest_exempt_depend -= 1
2842
2843 def digestParseFile(myfilename, mysettings=None):
2844         """(filename) -- Parses a given file for entries matching:
2845         <checksumkey> <checksum_hex_string> <filename> <filesize>
2846         Ignores lines that don't start with a valid checksum identifier
2847         and returns a dict with the filenames as keys and {checksumkey:checksum}
2848         as the values.
2849         DEPRECATED: this function is now only a compability wrapper for
2850                     portage_manifest.Manifest()."""
2851
2852         mysplit = myfilename.split(os.sep)
2853         if mysplit[-2] == "files" and mysplit[-1].startswith("digest-"):
2854                 pkgdir = os.sep + os.sep.join(mysplit[:-2]).strip(os.sep)
2855         elif mysplit[-1] == "Manifest":
2856                 pkgdir = os.sep + os.sep.join(mysplit[:-1]).strip(os.sep)
2857
2858         if mysettings is None:
2859                 global settings
2860                 mysettings = config(clone=settings)
2861
2862         return Manifest(pkgdir, mysettings["DISTDIR"]).getDigests()
2863
2864 def digestcheck(myfiles, mysettings, strict=0, justmanifest=0):
2865         """Verifies checksums.  Assumes all files have been downloaded.
2866         DEPRECATED: this is now only a compability wrapper for 
2867                     portage_manifest.Manifest()."""
2868         if not strict:
2869                 return 1
2870         pkgdir = mysettings["O"]
2871         manifest_path = os.path.join(pkgdir, "Manifest")
2872         if not os.path.exists(manifest_path):
2873                 writemsg("!!! Manifest file not found: '%s'\n" % manifest_path,
2874                         noiselevel=-1)
2875                 if strict:
2876                         return 0
2877         mf = Manifest(pkgdir, mysettings["DISTDIR"])
2878         eout = output.EOutput()
2879         eout.quiet = mysettings.get("PORTAGE_QUIET", None) == "1"
2880         try:
2881                 eout.ebegin("checking ebuild checksums ;-)")
2882                 mf.checkTypeHashes("EBUILD")
2883                 eout.eend(0)
2884                 eout.ebegin("checking auxfile checksums ;-)")
2885                 mf.checkTypeHashes("AUX")
2886                 eout.eend(0)
2887                 eout.ebegin("checking miscfile checksums ;-)")
2888                 mf.checkTypeHashes("MISC", ignoreMissingFiles=True)
2889                 eout.eend(0)
2890                 for f in myfiles:
2891                         eout.ebegin("checking %s ;-)" % f)
2892                         mf.checkFileHashes(mf.findFile(f), f)
2893                         eout.eend(0)
2894         except KeyError, e:
2895                 eout.eend(1)
2896                 writemsg("\n!!! Missing digest for %s\n" % str(e), noiselevel=-1)
2897                 return 0
2898         except portage_exception.FileNotFound, e:
2899                 eout.eend(1)
2900                 writemsg("\n!!! A file listed in the Manifest could not be found: %s\n" % str(e),
2901                         noiselevel=-1)
2902                 return 0
2903         except portage_exception.DigestException, e:
2904                 eout.eend(1)
2905                 writemsg("\n!!! Digest verification failed:\n", noiselevel=-1)
2906                 writemsg("!!! %s\n" % e.value[0], noiselevel=-1)
2907                 writemsg("!!! Reason: %s\n" % e.value[1], noiselevel=-1)
2908                 writemsg("!!! Got: %s\n" % e.value[2], noiselevel=-1)
2909                 writemsg("!!! Expected: %s\n" % e.value[3], noiselevel=-1)
2910                 return 0
2911         # Make sure that all of the ebuilds are actually listed in the Manifest.
2912         for f in os.listdir(pkgdir):
2913                 if f.endswith(".ebuild") and not mf.hasFile("EBUILD", f):
2914                         writemsg("!!! A file is not listed in the Manifest: '%s'\n" % \
2915                                 os.path.join(pkgdir, f), noiselevel=-1)
2916                         return 0
2917         """ epatch will just grab all the patches out of a directory, so we have to
2918         make sure there aren't any foreign files that it might grab."""
2919         filesdir = os.path.join(pkgdir, "files")
2920         for parent, dirs, files in os.walk(filesdir):
2921                 for d in dirs:
2922                         if d.startswith(".") or d == "CVS":
2923                                 dirs.remove(d)
2924                 for f in files:
2925                         if f.startswith("."):
2926                                 continue
2927                         f = os.path.join(parent, f)[len(filesdir) + 1:]
2928                         file_type = mf.findFile(f)
2929                         if file_type != "AUX" and not f.startswith("digest-"):
2930                                 writemsg("!!! A file is not listed in the Manifest: '%s'\n" % \
2931                                         os.path.join(filesdir, f), noiselevel=-1)
2932                                 return 0
2933         return 1
2934
2935 # parse actionmap to spawn ebuild with the appropriate args
2936 def spawnebuild(mydo,actionmap,mysettings,debug,alwaysdep=0,logfile=None):
2937         if alwaysdep or "noauto" not in mysettings.features:
2938                 # process dependency first
2939                 if "dep" in actionmap[mydo].keys():
2940                         retval=spawnebuild(actionmap[mydo]["dep"],actionmap,mysettings,debug,alwaysdep=alwaysdep,logfile=logfile)
2941                         if retval:
2942                                 return retval
2943         kwargs = actionmap[mydo]["args"]
2944         mysettings["EBUILD_PHASE"] = mydo
2945         phase_retval = spawn(actionmap[mydo]["cmd"] % mydo, mysettings, debug=debug, logfile=logfile, **kwargs)
2946         mysettings["EBUILD_PHASE"] = ""
2947
2948         if not kwargs["droppriv"] and secpass >= 2:
2949                 """ Privileged phases may have left files that need to be made
2950                 writable to a less privileged user."""
2951                 apply_recursive_permissions(mysettings["T"],
2952                         uid=portage_uid, gid=portage_gid, dirmode=070, dirmask=0,
2953                         filemode=060, filemask=0)
2954
2955         if phase_retval == os.EX_OK:
2956                 if mydo == "install":
2957                         # User and group bits that match the "portage" user or group are
2958                         # automatically mapped to PORTAGE_INST_UID and PORTAGE_INST_GID if
2959                         # necessary.  The chown system call may clear S_ISUID and S_ISGID
2960                         # bits, so those bits are restored if necessary.
2961                         inst_uid = int(mysettings["PORTAGE_INST_UID"])
2962                         inst_gid = int(mysettings["PORTAGE_INST_GID"])
2963                         for parent, dirs, files in os.walk(mysettings["D"]):
2964                                 for fname in chain(dirs, files):
2965                                         fpath = os.path.join(parent, fname)
2966                                         mystat = os.lstat(fpath)
2967                                         if mystat.st_uid != portage_uid and \
2968                                                 mystat.st_gid != portage_gid:
2969                                                 continue
2970                                         myuid = -1
2971                                         mygid = -1
2972                                         if mystat.st_uid == portage_uid:
2973                                                 myuid = inst_uid
2974                                         if mystat.st_gid == portage_gid:
2975                                                 mygid = inst_gid
2976                                         apply_secpass_permissions(fpath, uid=myuid, gid=mygid,
2977                                                 mode=mystat.st_mode, stat_cached=mystat,
2978                                                 follow_links=False)
2979                         mycommand = " ".join([MISC_SH_BINARY, "install_qa_check", "install_symlink_html_docs"])
2980                         qa_retval = spawn(mycommand, mysettings, debug=debug, logfile=logfile, **kwargs)
2981                         if qa_retval:
2982                                 writemsg("!!! install_qa_check failed; exiting.\n",
2983                                         noiselevel=-1)
2984                         return qa_retval
2985         return phase_retval
2986
2987
2988 def eapi_is_supported(eapi):
2989         return str(eapi).strip() == str(portage_const.EAPI).strip()
2990
2991 def doebuild_environment(myebuild, mydo, myroot, mysettings, debug, use_cache, mydbapi):
2992
2993         ebuild_path = os.path.abspath(myebuild)
2994         pkg_dir     = os.path.dirname(ebuild_path)
2995
2996         if mysettings.configdict["pkg"].has_key("CATEGORY"):
2997                 cat = mysettings.configdict["pkg"]["CATEGORY"]
2998         else:
2999                 cat = os.path.basename(normalize_path(os.path.join(pkg_dir, "..")))
3000         mypv = os.path.basename(ebuild_path)[:-7]       
3001         mycpv = cat+"/"+mypv
3002         mysplit=pkgsplit(mypv,silent=0)
3003         if mysplit is None:
3004                 raise portage_exception.IncorrectParameter(
3005                         "Invalid ebuild path: '%s'" % myebuild)
3006
3007         if mydo != "depend":
3008                 """For performance reasons, setcpv only triggers reset when it
3009                 detects a package-specific change in config.  For the ebuild
3010                 environment, a reset call is forced in order to ensure that the
3011                 latest env.d variables are used."""
3012                 mysettings.reset(use_cache=use_cache)
3013                 mysettings.setcpv(mycpv, use_cache=use_cache, mydb=mydbapi)
3014
3015         mysettings["EBUILD_PHASE"] = mydo
3016
3017         mysettings["PORTAGE_MASTER_PID"] = str(os.getpid())
3018
3019         # We are disabling user-specific bashrc files.
3020         mysettings["BASH_ENV"] = INVALID_ENV_FILE
3021
3022         if debug: # Otherwise it overrides emerge's settings.
3023                 # We have no other way to set debug... debug can't be passed in
3024                 # due to how it's coded... Don't overwrite this so we can use it.
3025                 mysettings["PORTAGE_DEBUG"] = "1"
3026
3027         mysettings["ROOT"]     = myroot
3028         mysettings["STARTDIR"] = getcwd()
3029
3030         mysettings["EBUILD"]   = ebuild_path
3031         mysettings["O"]        = pkg_dir
3032         mysettings.configdict["pkg"]["CATEGORY"] = cat
3033         mysettings["FILESDIR"] = pkg_dir+"/files"
3034         mysettings["PF"]       = mypv
3035
3036         mysettings["ECLASSDIR"]   = mysettings["PORTDIR"]+"/eclass"
3037         mysettings["SANDBOX_LOG"] = mycpv.replace("/", "_-_")
3038
3039         mysettings["PROFILE_PATHS"] = "\n".join(mysettings.profiles)+"\n"+CUSTOM_PROFILE_PATH
3040         mysettings["P"]  = mysplit[0]+"-"+mysplit[1]
3041         mysettings["PN"] = mysplit[0]
3042         mysettings["PV"] = mysplit[1]
3043         mysettings["PR"] = mysplit[2]
3044
3045         if portage_util.noiselimit < 0:
3046                 mysettings["PORTAGE_QUIET"] = "1"
3047
3048         if mydo != "depend":
3049                 eapi, mysettings["INHERITED"], mysettings["SLOT"], mysettings["RESTRICT"]  = \
3050                         mydbapi.aux_get(mycpv, ["EAPI", "INHERITED", "SLOT", "RESTRICT"])
3051                 if not eapi_is_supported(eapi):
3052                         # can't do anything with this.
3053                         raise portage_exception.UnsupportedAPIException(mycpv, eapi)
3054                 mysettings["PORTAGE_RESTRICT"] = " ".join(flatten(
3055                         portage_dep.use_reduce(portage_dep.paren_reduce(
3056                         mysettings["RESTRICT"]), uselist=mysettings["USE"].split())))
3057
3058         if mysplit[2] == "r0":
3059                 mysettings["PVR"]=mysplit[1]
3060         else:
3061                 mysettings["PVR"]=mysplit[1]+"-"+mysplit[2]
3062
3063         if mysettings.has_key("PATH"):
3064                 mysplit=mysettings["PATH"].split(":")
3065         else:
3066                 mysplit=[]
3067         if PORTAGE_BIN_PATH not in mysplit:
3068                 mysettings["PATH"]=PORTAGE_BIN_PATH+":"+mysettings["PATH"]
3069
3070         # Sandbox needs cannonical paths.
3071         mysettings["PORTAGE_TMPDIR"] = os.path.realpath(
3072                 mysettings["PORTAGE_TMPDIR"])
3073         mysettings["BUILD_PREFIX"] = mysettings["PORTAGE_TMPDIR"]+"/portage"
3074         mysettings["PKG_TMPDIR"]   = mysettings["PORTAGE_TMPDIR"]+"/binpkgs"
3075         
3076         # Package {pre,post}inst and {pre,post}rm may overlap, so they must have separate
3077         # locations in order to prevent interference.
3078         if mydo in ("unmerge", "prerm", "postrm", "cleanrm"):
3079                 mysettings["PORTAGE_BUILDDIR"] = os.path.join(
3080                         mysettings["PKG_TMPDIR"],
3081                         mysettings["CATEGORY"], mysettings["PF"])
3082         else:
3083                 mysettings["PORTAGE_BUILDDIR"] = os.path.join(
3084                         mysettings["BUILD_PREFIX"],
3085                         mysettings["CATEGORY"], mysettings["PF"])
3086
3087         mysettings["HOME"] = os.path.join(mysettings["PORTAGE_BUILDDIR"], "homedir")
3088         mysettings["WORKDIR"] = os.path.join(mysettings["PORTAGE_BUILDDIR"], "work")
3089         mysettings["D"] = os.path.join(mysettings["PORTAGE_BUILDDIR"], "image") + os.sep
3090         mysettings["T"] = os.path.join(mysettings["PORTAGE_BUILDDIR"], "temp")
3091
3092         mysettings["PORTAGE_BASHRC"] = os.path.join(
3093                 mysettings["PORTAGE_CONFIGROOT"], EBUILD_SH_ENV_FILE.lstrip(os.path.sep))
3094
3095         #set up KV variable -- DEP SPEEDUP :: Don't waste time. Keep var persistent.
3096         if (mydo!="depend") or not mysettings.has_key("KV"):
3097                 mykv,err1=ExtractKernelVersion(os.path.join(myroot, "usr/src/linux"))
3098                 if mykv:
3099                         # Regular source tree
3100                         mysettings["KV"]=mykv
3101                 else:
3102                         mysettings["KV"]=""
3103
3104         # Allow color.map to control colors associated with einfo, ewarn, etc...
3105         mycolors = []
3106         for c in ("GOOD", "WARN", "BAD", "HILITE", "BRACKET"):
3107                 mycolors.append("%s=$'%s'" % (c, output.codes[c]))
3108         mysettings["PORTAGE_COLORMAP"] = "\n".join(mycolors)
3109
3110 def prepare_build_dirs(myroot, mysettings, cleanup):
3111
3112         clean_dirs = [mysettings["HOME"]]
3113
3114         # We enable cleanup when we want to make sure old cruft (such as the old
3115         # environment) doesn't interfere with the current phase.
3116         if cleanup:
3117                 clean_dirs.append(mysettings["T"])
3118
3119         for clean_dir in clean_dirs:
3120                 try:
3121                         shutil.rmtree(clean_dir)
3122                 except OSError, oe:
3123                         if errno.ENOENT == oe.errno:
3124                                 pass
3125                         elif errno.EPERM == oe.errno:
3126                                 writemsg("%s\n" % oe, noiselevel=-1)
3127                                 writemsg("Operation Not Permitted: rmtree('%s')\n" % \
3128                                         clean_dir, noiselevel=-1)
3129                                 return 1
3130                         else:
3131                                 raise
3132
3133         def makedirs(dir_path):
3134                 try:
3135                         os.makedirs(dir_path)
3136                 except OSError, oe:
3137                         if errno.EEXIST == oe.errno:
3138                                 pass
3139                         elif errno.EPERM == oe.errno:
3140                                 writemsg("%s\n" % oe, noiselevel=-1)
3141                                 writemsg("Operation Not Permitted: makedirs('%s')\n" % \
3142                                         dir_path, noiselevel=-1)
3143                                 return False
3144                         else:
3145                                 raise
3146                 return True
3147
3148         mysettings["PKG_LOGDIR"] = os.path.join(mysettings["T"], "logging")
3149
3150         mydirs = [os.path.dirname(mysettings["PORTAGE_BUILDDIR"])]
3151         mydirs.append(os.path.dirname(mydirs[-1]))
3152
3153         try:
3154                 for mydir in mydirs:
3155                         portage_util.ensure_dirs(mydir)
3156                         portage_util.apply_secpass_permissions(mydir,
3157                                 gid=portage_gid, uid=portage_uid, mode=070, mask=0)
3158                 for dir_key in ("PORTAGE_BUILDDIR", "HOME", "PKG_LOGDIR", "T"):
3159                         """These directories don't necessarily need to be group writable.
3160                         However, the setup phase is commonly run as a privileged user prior
3161                         to the other phases being run by an unprivileged user.  Currently,
3162                         we use the portage group to ensure that the unprivleged user still
3163                         has write access to these directories in any case."""
3164                         portage_util.ensure_dirs(mysettings[dir_key], mode=0775)
3165                         portage_util.apply_secpass_permissions(mysettings[dir_key],
3166                                 uid=portage_uid, gid=portage_gid)
3167         except portage_exception.PermissionDenied, e:
3168                 writemsg("Permission Denied: %s\n" % str(e), noiselevel=-1)
3169                 return 1
3170         except portage_exception.OperationNotPermitted, e:
3171                 writemsg("Operation Not Permitted: %s\n" % str(e), noiselevel=-1)
3172                 return 1
3173         except portage_exception.FileNotFound, e:
3174                 writemsg("File Not Found: '%s'\n" % str(e), noiselevel=-1)
3175                 return 1
3176
3177         features_dirs = {
3178                 "ccache":{
3179                         "basedir_var":"CCACHE_DIR",
3180                         "default_dir":os.path.join(mysettings["PORTAGE_TMPDIR"], "ccache"),
3181                         "always_recurse":False},
3182                 "confcache":{
3183                         "basedir_var":"CONFCACHE_DIR",
3184                         "default_dir":os.path.join(mysettings["PORTAGE_TMPDIR"], "confcache"),
3185                         "always_recurse":False},
3186                 "distcc":{
3187                         "basedir_var":"DISTCC_DIR",
3188                         "default_dir":os.path.join(mysettings["BUILD_PREFIX"], ".distcc"),
3189                         "subdirs":("lock", "state"),
3190                         "always_recurse":True}
3191         }
3192         dirmode  = 02070
3193         filemode =   060
3194         modemask =    02
3195         for myfeature, kwargs in features_dirs.iteritems():
3196                 if myfeature in mysettings.features:
3197                         basedir = mysettings[kwargs["basedir_var"]]
3198                         if basedir == "":
3199                                 basedir = kwargs["default_dir"]
3200                                 mysettings[kwargs["basedir_var"]] = basedir
3201                         try:
3202                                 mydirs = [mysettings[kwargs["basedir_var"]]]
3203                                 if "subdirs" in kwargs:
3204                                         for subdir in kwargs["subdirs"]:
3205                                                 mydirs.append(os.path.join(basedir, subdir))
3206                                 for mydir in mydirs:
3207                                         modified = portage_util.ensure_dirs(mydir)
3208                                         # Generally, we only want to apply permissions for
3209                                         # initial creation.  Otherwise, we don't know exactly what
3210                                         # permissions the user wants, so should leave them as-is.
3211                                         if modified or kwargs["always_recurse"]:
3212                                                 if modified:
3213                                                         writemsg("Adjusting permissions recursively: '%s'\n" % mydir,
3214                                                                 noiselevel=-1)
3215                                                 def onerror(e):
3216                                                         raise   # The feature is disabled if a single error
3217                                                                         # occurs during permissions adjustment.
3218                                                 if not apply_recursive_permissions(mydir,
3219                                                 gid=portage_gid, dirmode=dirmode, dirmask=modemask,
3220                                                 filemode=filemode, filemask=modemask, onerror=onerror):
3221                                                         raise portage_exception.OperationNotPermitted(
3222                                                                 "Failed to apply recursive permissions for the portage group.")
3223                         except portage_exception.PortageException, e:
3224                                 mysettings.features.remove(myfeature)
3225                                 mysettings["FEATURES"] = " ".join(mysettings.features)
3226                                 writemsg("!!! %s\n" % str(e), noiselevel=-1)
3227                                 writemsg("!!! Failed resetting perms on %s='%s'\n" % \
3228                                         (kwargs["basedir_var"], basedir), noiselevel=-1)
3229                                 writemsg("!!! Disabled FEATURES='%s'\n" % myfeature,
3230                                         noiselevel=-1)
3231                                 time.sleep(5)
3232
3233         workdir_mode = 0700
3234         try:
3235                 mode = mysettings["PORTAGE_WORKDIR_MODE"]
3236                 if mode.isdigit():
3237                         parsed_mode = int(mode, 8)
3238                 elif mode == "":
3239                         raise KeyError()
3240                 else:
3241                         raise ValueError()
3242                 if parsed_mode & 07777 != parsed_mode:
3243                         raise ValueError("Invalid file mode: %s" % mode)
3244                 else:
3245                         workdir_mode = parsed_mode
3246         except KeyError, e:
3247                 writemsg("!!! PORTAGE_WORKDIR_MODE is unset, using %s.\n" % oct(workdir_mode))
3248         except ValueError, e:
3249                 if len(str(e)) > 0:
3250                         writemsg("%s\n" % e)
3251                 writemsg("!!! Unable to parse PORTAGE_WORKDIR_MODE='%s', using %s.\n" % \
3252                 (mysettings["PORTAGE_WORKDIR_MODE"], oct(workdir_mode)))
3253         mysettings["PORTAGE_WORKDIR_MODE"] = oct(workdir_mode)
3254         try:
3255                 apply_secpass_permissions(mysettings["WORKDIR"],
3256                 uid=portage_uid, gid=portage_gid, mode=workdir_mode)
3257         except portage_exception.FileNotFound:
3258                 pass # ebuild.sh will create it
3259
3260         if mysettings.get("PORT_LOGDIR", "") == "":
3261                 while "PORT_LOGDIR" in mysettings:
3262                         del mysettings["PORT_LOGDIR"]
3263         if "PORT_LOGDIR" in mysettings:
3264                 try:
3265                         portage_util.ensure_dirs(mysettings["PORT_LOGDIR"],
3266                                 uid=portage_uid, gid=portage_gid, mode=02770)
3267                 except portage_exception.PortageException, e:
3268                         writemsg("!!! %s\n" % str(e), noiselevel=-1)
3269                         writemsg("!!! Permission issues with PORT_LOGDIR='%s'\n" % \
3270                                 mysettings["PORT_LOGDIR"], noiselevel=-1)
3271                         writemsg("!!! Disabling logging.\n", noiselevel=-1)
3272                         while "PORT_LOGDIR" in mysettings:
3273                                 del mysettings["PORT_LOGDIR"]
3274         if "PORT_LOGDIR" in mysettings:
3275                 logid_path = os.path.join(mysettings["PORTAGE_BUILDDIR"], ".logid")
3276                 if not os.path.exists(logid_path):
3277                         f = open(logid_path, "w")
3278                         f.close()
3279                         del f
3280                 logid_time = time.strftime("%Y%m%d-%H%M%S",
3281                         time.gmtime(os.stat(logid_path).st_mtime))
3282                 mysettings["PORTAGE_LOG_FILE"] = os.path.join(
3283                         mysettings["PORT_LOGDIR"], "%s:%s:%s.log" % \
3284                         (mysettings["CATEGORY"], mysettings["PF"], logid_time))
3285                 del logid_path, logid_time
3286         else:
3287                 # When sesandbox is enabled, only log if PORT_LOGDIR is explicitly
3288                 # enabled since it is possible that local SELinux security policies
3289                 # do not allow ouput to be piped out of the sesandbox domain.
3290                 if not (mysettings.selinux_enabled() and \
3291                         "sesandbox" in mysettings.features):
3292                         mysettings["PORTAGE_LOG_FILE"] = os.path.join(
3293                                 mysettings["T"], "build.log")
3294
3295 _doebuild_manifest_exempt_depend = 0
3296 _doebuild_manifest_checked = None
3297
3298 def doebuild(myebuild, mydo, myroot, mysettings, debug=0, listonly=0,
3299         fetchonly=0, cleanup=0, dbkey=None, use_cache=1, fetchall=0, tree=None,
3300         mydbapi=None, vartree=None, prev_mtimes=None):
3301         
3302         """
3303         Wrapper function that invokes specific ebuild phases through the spawning
3304         of ebuild.sh
3305         
3306         @param myebuild: name of the ebuild to invoke the phase on (CPV)
3307         @type myebuild: String
3308         @param mydo: Phase to run
3309         @type mydo: String
3310         @param myroot: $ROOT (usually '/', see man make.conf)
3311         @type myroot: String
3312         @param mysettings: Portage Configuration
3313         @type mysettings: instance of portage.config
3314         @param debug: Turns on various debug information (eg, debug for spawn)
3315         @type debug: Boolean
3316         @param listonly: Used to wrap fetch(); passed such that fetch only lists files required.
3317         @type listonly: Boolean
3318         @param fetchonly: Used to wrap fetch(); passed such that files are only fetched (no other actions)
3319         @type fetchonly: Boolean
3320         @param cleanup: Passed to prepare_build_dirs (TODO: what does it do?)
3321         @type cleanup: Boolean
3322         @param dbkey: A dict (usually keys and values from the depend phase, such as KEYWORDS, USE, etc..)
3323         @type dbkey: Dict or String
3324         @param use_cache: Enables the cache
3325         @type use_cache: Boolean
3326         @param fetchall: Used to wrap fetch(), fetches all URI's (even ones invalid due to USE conditionals)
3327         @type fetchall: Boolean
3328         @param tree: Which tree to use ('vartree','porttree','bintree', etc..), defaults to 'porttree'
3329         @type tree: String
3330         @param mydbapi: a dbapi instance to pass to various functions; this should be a portdbapi instance.
3331         @type mydbapi: portdbapi instance
3332         @param vartree: A instance of vartree; used for aux_get calls, defaults to db[myroot]['vartree']
3333         @type vartree: vartree instance
3334         @param prev_mtimes: A dict of { filename:mtime } keys used by merge() to do config_protection
3335         @type prev_mtimes: dictionary
3336         @rtype: Boolean
3337         @returns:
3338         1. 0 for success
3339         2. 1 for error
3340         
3341         Most errors have an accompanying error message.
3342         
3343         listonly and fetchonly are only really necessary for operations involving 'fetch'
3344         prev_mtimes are only necessary for merge operations.
3345         Other variables may not be strictly required, many have defaults that are set inside of doebuild.
3346         
3347         """
3348         
3349         if not tree:
3350                 writemsg("Warning: tree not specified to doebuild\n")
3351                 tree = "porttree"
3352         global db
3353         
3354         # chunked out deps for each phase, so that ebuild binary can use it 
3355         # to collapse targets down.
3356         actionmap_deps={
3357         "depend": [],
3358         "setup":  [],
3359         "unpack": ["setup"],
3360         "compile":["unpack"],
3361         "test":   ["compile"],
3362         "install":["test"],
3363         "rpm":    ["install"],
3364         "package":["install"],
3365         }
3366         
3367         if mydbapi is None:
3368                 mydbapi = db[myroot][tree].dbapi
3369
3370         if vartree is None and mydo in ("merge", "qmerge", "unmerge"):
3371                 vartree = db[myroot]["vartree"]
3372
3373         features = mysettings.features
3374
3375         validcommands = ["help","clean","prerm","postrm","cleanrm","preinst","postinst",
3376                         "config","setup","depend","fetch","digest",
3377                         "unpack","compile","test","install","rpm","qmerge","merge",
3378                         "package","unmerge", "manifest"]
3379
3380         if mydo not in validcommands:
3381                 validcommands.sort()
3382                 writemsg("!!! doebuild: '%s' is not one of the following valid commands:" % mydo,
3383                         noiselevel=-1)
3384                 for vcount in range(len(validcommands)):
3385                         if vcount%6 == 0:
3386                                 writemsg("\n!!! ", noiselevel=-1)
3387                         writemsg(validcommands[vcount].ljust(11), noiselevel=-1)
3388                 writemsg("\n", noiselevel=-1)
3389                 return 1
3390
3391         if not os.path.exists(myebuild):
3392                 writemsg("!!! doebuild: %s not found for %s\n" % (myebuild, mydo),
3393                         noiselevel=-1)
3394                 return 1
3395
3396         global _doebuild_manifest_exempt_depend
3397
3398         if "strict" in features and \
3399                 "digest" not in features and \
3400                 tree == "porttree" and \
3401                 mydo not in ("digest", "manifest", "help") and \
3402                 not _doebuild_manifest_exempt_depend:
3403                 # Always verify the ebuild checksums before executing it.
3404                 pkgdir = os.path.dirname(myebuild)
3405                 manifest_path = os.path.join(pkgdir, "Manifest")
3406                 global _doebuild_manifest_checked
3407                 # Avoid checking the same Manifest several times in a row during a
3408                 # regen with an empty cache.
3409                 if _doebuild_manifest_checked != manifest_path:
3410                         if not os.path.exists(manifest_path):
3411                                 writemsg("!!! Manifest file not found: '%s'\n" % manifest_path,
3412                                         noiselevel=-1)
3413                                 return 1
3414                         mf = Manifest(pkgdir, mysettings["DISTDIR"])
3415                         try:
3416                                 mf.checkTypeHashes("EBUILD")
3417                         except portage_exception.FileNotFound, e:
3418                                 writemsg("!!! A file listed in the Manifest " + \
3419                                         "could not be found: %s\n" % str(e), noiselevel=-1)
3420                                 return 1
3421                         except portage_exception.DigestException, e:
3422                                 writemsg("!!! Digest verification failed:\n", noiselevel=-1)
3423                                 writemsg("!!! %s\n" % e.value[0], noiselevel=-1)
3424                                 writemsg("!!! Reason: %s\n" % e.value[1], noiselevel=-1)
3425                                 writemsg("!!! Got: %s\n" % e.value[2], noiselevel=-1)
3426                                 writemsg("!!! Expected: %s\n" % e.value[3], noiselevel=-1)
3427                                 return 1
3428                         # Make sure that all of the ebuilds are actually listed in the
3429                         # Manifest.
3430                         for f in os.listdir(pkgdir):
3431                                 if f.endswith(".ebuild") and not mf.hasFile("EBUILD", f):
3432                                         writemsg("!!! A file is not listed in the " + \
3433                                         "Manifest: '%s'\n" % os.path.join(pkgdir, f),
3434                                         noiselevel=-1)
3435                                         return 1
3436                         _doebuild_manifest_checked = manifest_path
3437
3438         logfile=None
3439         builddir_lock = None
3440         try:
3441                 if mydo in ("digest", "manifest", "help"):
3442                         # Temporarily exempt the depend phase from manifest checks, in case
3443                         # aux_get calls trigger cache generation.
3444                         _doebuild_manifest_exempt_depend += 1
3445
3446                 doebuild_environment(myebuild, mydo, myroot, mysettings, debug,
3447                         use_cache, mydbapi)
3448
3449                 # get possible slot information from the deps file
3450                 if mydo == "depend":
3451                         writemsg("!!! DEBUG: dbkey: %s\n" % str(dbkey), 2)
3452                         if isinstance(dbkey, dict):
3453                                 mysettings["dbkey"] = ""
3454                                 pr, pw = os.pipe()
3455                                 fd_pipes = {0:0, 1:1, 2:2, 9:pw}
3456                                 mypids = spawn(EBUILD_SH_BINARY + " depend", mysettings,
3457                                         fd_pipes=fd_pipes, returnpid=True)
3458                                 os.close(pw) # belongs exclusively to the child process now
3459                                 maxbytes = 1024
3460                                 mybytes = []
3461                                 while True:
3462                                         mybytes.append(os.read(pr, maxbytes))
3463                                         if not mybytes[-1]:
3464                                                 break
3465                                 os.close(pr)
3466                                 mybytes = "".join(mybytes)
3467                                 global auxdbkeys
3468                                 for k, v in izip(auxdbkeys, mybytes.splitlines()):
3469                                         dbkey[k] = v
3470                                 retval = os.waitpid(mypids[0], 0)[1]
3471                                 portage_exec.spawned_pids.remove(mypids[0])
3472                                 # If it got a signal, return the signal that was sent, but
3473                                 # shift in order to distinguish it from a return value. (just
3474                                 # like portage_exec.spawn() would do).
3475                                 if retval & 0xff:
3476                                         return (retval & 0xff) << 8
3477                                 # Otherwise, return its exit code.
3478                                 return retval >> 8
3479                         elif dbkey:
3480                                 mysettings["dbkey"] = dbkey
3481                         else:
3482                                 mysettings["dbkey"] = \
3483                                         os.path.join(mysettings.depcachedir, "aux_db_key_temp")
3484
3485                         return spawn(EBUILD_SH_BINARY + " depend", mysettings)
3486
3487                 # Validate dependency metadata here to ensure that ebuilds with invalid
3488                 # data are never installed (even via the ebuild command).
3489                 invalid_dep_exempt_phases = \
3490                         set(["clean", "cleanrm", "help", "prerm", "postrm"])
3491                 mycpv = mysettings["CATEGORY"] + "/" + mysettings["PF"]
3492                 dep_keys = ["DEPEND", "RDEPEND", "PDEPEND"]
3493                 misc_keys = ["LICENSE", "PROVIDE", "RESTRICT", "SRC_URI"]
3494                 all_keys = dep_keys + misc_keys
3495                 metadata = dict(izip(all_keys, mydbapi.aux_get(mycpv, all_keys)))
3496                 class FakeTree(object):
3497                         def __init__(self, mydb):
3498                                 self.dbapi = mydb
3499                 dep_check_trees = {myroot:{}}
3500                 dep_check_trees[myroot]["porttree"] = \
3501                         FakeTree(fakedbapi(settings=mysettings))
3502                 for dep_type in dep_keys:
3503                         mycheck = dep_check(metadata[dep_type], None, mysettings,
3504                                 myuse="all", myroot=myroot, trees=dep_check_trees)
3505                         if not mycheck[0]:
3506                                 writemsg("%s: %s\n%s\n" % (
3507                                         dep_type, metadata[dep_type], mycheck[1]), noiselevel=-1)
3508                                 if mydo not in invalid_dep_exempt_phases:
3509                                         return 1
3510                         del dep_type, mycheck
3511                 for k in misc_keys:
3512                         try:
3513                                 portage_dep.use_reduce(
3514                                         portage_dep.paren_reduce(metadata[k]), matchall=True)
3515                         except portage_exception.InvalidDependString, e:
3516                                 writemsg("%s: %s\n%s\n" % (
3517                                         k, metadata[k], str(e)), noiselevel=-1)
3518                                 del e
3519                                 if mydo not in invalid_dep_exempt_phases:
3520                                         return 1
3521                         del k
3522                 del mycpv, dep_keys, metadata, misc_keys, FakeTree, dep_check_trees
3523
3524                 if "PORTAGE_TMPDIR" not in mysettings or \
3525                         not os.path.isdir(mysettings["PORTAGE_TMPDIR"]):
3526                         writemsg("The directory specified in your " + \
3527                                 "PORTAGE_TMPDIR variable, '%s',\n" % \
3528                                 mysettings.get("PORTAGE_TMPDIR", ""), noiselevel=-1)
3529                         writemsg("does not exist.  Please create this directory or " + \
3530                                 "correct your PORTAGE_TMPDIR setting.\n", noiselevel=-1)
3531                         return 1
3532
3533                 # Build directory creation isn't required for any of these.
3534                 if mydo not in ("digest", "fetch", "help", "manifest"):
3535                         mystatus = prepare_build_dirs(myroot, mysettings, cleanup)
3536                         if mystatus:
3537                                 return mystatus
3538                         # PORTAGE_LOG_FILE is set above by the prepare_build_dirs() call.
3539                         logfile = mysettings.get("PORTAGE_LOG_FILE", None)
3540                 if mydo == "unmerge":
3541                         return unmerge(mysettings["CATEGORY"],
3542                                 mysettings["PF"], myroot, mysettings, vartree=vartree)
3543
3544                 # if any of these are being called, handle them -- running them out of
3545                 # the sandbox -- and stop now.
3546                 if mydo in ["clean","cleanrm"]:
3547                         return spawn(EBUILD_SH_BINARY + " clean", mysettings,
3548                                 debug=debug, free=1, logfile=None)
3549                 elif mydo == "help":
3550                         return spawn(EBUILD_SH_BINARY + " " + mydo, mysettings,
3551                                 debug=debug, free=1, logfile=logfile)
3552                 elif mydo == "setup":
3553                         infodir = os.path.join(
3554                                 mysettings["PORTAGE_BUILDDIR"], "build-info")
3555                         if os.path.isdir(infodir):
3556                                 """Load USE flags for setup phase of a binary package.
3557                                 Ideally, the environment.bz2 would be used instead."""
3558                                 mysettings.load_infodir(infodir)
3559                         retval = spawn(EBUILD_SH_BINARY + " " + mydo, mysettings,
3560                                 debug=debug, free=1, logfile=logfile)
3561                         if secpass >= 2:
3562                                 """ Privileged phases may have left files that need to be made
3563                                 writable to a less privileged user."""
3564                                 apply_recursive_permissions(mysettings["T"],
3565                                         uid=portage_uid, gid=portage_gid, dirmode=070, dirmask=0,
3566                                         filemode=060, filemask=0)
3567                         return retval
3568                 elif mydo == "preinst":
3569                         mysettings["IMAGE"] = mysettings["D"]
3570                         phase_retval = spawn(" ".join((EBUILD_SH_BINARY, mydo)),
3571                                 mysettings, debug=debug, free=1, logfile=logfile)
3572                         if phase_retval == os.EX_OK:
3573                                 # Post phase logic and tasks that have been factored out of
3574                                 # ebuild.sh.
3575                                 myargs = [MISC_SH_BINARY, "preinst_bsdflags", "preinst_mask",
3576                                         "preinst_sfperms", "preinst_selinux_labels",
3577                                         "preinst_suid_scan"]
3578                                 mysettings["EBUILD_PHASE"] = ""
3579                                 phase_retval = spawn(" ".join(myargs),
3580                                         mysettings, debug=debug, free=1, logfile=logfile)
3581                                 if phase_retval != os.EX_OK:
3582                                         writemsg("!!! post preinst failed; exiting.\n",
3583                                                 noiselevel=-1)
3584                         del mysettings["IMAGE"]
3585                         return phase_retval
3586                 elif mydo == "postinst":
3587                         mysettings.load_infodir(mysettings["O"])
3588                         phase_retval = spawn(" ".join((EBUILD_SH_BINARY, mydo)),
3589                                 mysettings, debug=debug, free=1, logfile=logfile)
3590                         if phase_retval == os.EX_OK:
3591                                 # Post phase logic and tasks that have been factored out of
3592                                 # ebuild.sh.
3593                                 myargs = [MISC_SH_BINARY, "postinst_bsdflags"]
3594                                 mysettings["EBUILD_PHASE"] = ""
3595                                 phase_retval = spawn(" ".join(myargs),
3596                                         mysettings, debug=debug, free=1, logfile=logfile)
3597                                 if phase_retval != os.EX_OK:
3598                                         writemsg("!!! post postinst failed; exiting.\n",
3599                                                 noiselevel=-1)
3600                         return phase_retval
3601                 elif mydo in ["prerm","postrm","config"]:
3602                         mysettings.load_infodir(mysettings["O"])
3603                         return spawn(EBUILD_SH_BINARY + " " + mydo,
3604                                 mysettings, debug=debug, free=1, logfile=logfile)
3605
3606                 mycpv = "/".join((mysettings["CATEGORY"], mysettings["PF"]))
3607
3608                 # Make sure we get the correct tree in case there are overlays.
3609                 mytree = os.path.realpath(
3610                         os.path.dirname(os.path.dirname(mysettings["O"])))
3611                 try:
3612                         newuris, alist = mydbapi.getfetchlist(
3613                                 mycpv, mytree=mytree, mysettings=mysettings)
3614                         alluris, aalist = mydbapi.getfetchlist(
3615                                 mycpv, mytree=mytree, all=True, mysettings=mysettings)
3616                 except portage_exception.InvalidDependString, e:
3617                         writemsg("!!! %s\n" % str(e), noiselevel=-1)
3618                         writemsg("!!! Invalid SRC_URI for '%s'.\n" % mycpv, noiselevel=-1)
3619                         del e
3620                         return 1
3621                 mysettings["A"] = " ".join(alist)
3622                 mysettings["AA"] = " ".join(aalist)
3623                 if ("mirror" in features) or fetchall:
3624                         fetchme = alluris[:]
3625                         checkme = aalist[:]
3626                 elif mydo == "digest":
3627                         fetchme = alluris[:]
3628                         checkme = aalist[:]
3629                         # Skip files that we already have digests for.
3630                         mf = Manifest(mysettings["O"], mysettings["DISTDIR"])
3631                         mydigests = mf.getTypeDigests("DIST")
3632                         required_hash_types = set()
3633                         required_hash_types.add("size")
3634                         required_hash_types.add(portage_const.MANIFEST2_REQUIRED_HASH)
3635                         for filename, hashes in mydigests.iteritems():
3636                                 if not required_hash_types.difference(hashes):
3637                                         checkme = [i for i in checkme if i != filename]
3638                                         fetchme = [i for i in fetchme \
3639                                                 if os.path.basename(i) != filename]
3640                                 del filename, hashes
3641                 else:
3642                         fetchme = newuris[:]
3643                         checkme = alist[:]
3644
3645                 # Only try and fetch the files if we are going to need them ...
3646                 # otherwise, if user has FEATURES=noauto and they run `ebuild clean
3647                 # unpack compile install`, we will try and fetch 4 times :/
3648                 need_distfiles = (mydo in ("fetch", "unpack") or \
3649                         mydo not in ("digest", "manifest") and "noauto" not in features)
3650                 if need_distfiles and not fetch(
3651                         fetchme, mysettings, listonly=listonly, fetchonly=fetchonly):
3652                         return 1
3653
3654                 if mydo == "fetch" and listonly:
3655                         return 0
3656
3657                 try:
3658                         if mydo == "manifest":
3659                                 return not digestgen(aalist, mysettings, overwrite=1,
3660                                         manifestonly=1, myportdb=mydbapi)
3661                         elif mydo == "digest":
3662                                 return not digestgen(aalist, mysettings, overwrite=1,
3663                                         myportdb=mydbapi)
3664                         elif "digest" in mysettings.features:
3665                                 digestgen(aalist, mysettings, overwrite=0, myportdb=mydbapi)
3666                 except portage_exception.PermissionDenied, e:
3667                         writemsg("!!! %s\n" % str(e), noiselevel=-1)
3668                         if mydo in ("digest", "manifest"):
3669                                 return 1
3670
3671                 # See above comment about fetching only when needed
3672                 if not digestcheck(checkme, mysettings, ("strict" in features),
3673                         (mydo not in ["digest","fetch","unpack"] and \
3674                         mysettings.get("PORTAGE_CALLER", None) == "ebuild" and \
3675                         "noauto" in features)):
3676                         return 1
3677
3678                 if mydo == "fetch":
3679                         return 0
3680
3681                 # remove PORTAGE_ACTUAL_DISTDIR once cvs/svn is supported via SRC_URI
3682                 if (mydo != "setup" and "noauto" not in features) or mydo == "unpack":
3683                         orig_distdir = mysettings["DISTDIR"]
3684                         mysettings["PORTAGE_ACTUAL_DISTDIR"] = orig_distdir
3685                         edpath = mysettings["DISTDIR"] = \
3686                                 os.path.join(mysettings["PORTAGE_BUILDDIR"], "distdir")
3687                         if os.path.exists(edpath):
3688                                 try:
3689                                         if os.path.isdir(edpath) and not os.path.islink(edpath):
3690                                                 shutil.rmtree(edpath)
3691                                         else:
3692                                                 os.unlink(edpath)
3693                                 except OSError:
3694                                         print "!!! Failed reseting ebuild distdir path, " + edpath
3695                                         raise
3696                         os.mkdir(edpath)
3697                         apply_secpass_permissions(edpath, uid=portage_uid, mode=0755)
3698                         try:
3699                                 for file in alist:
3700                                         os.symlink(os.path.join(orig_distdir, file),
3701                                                 os.path.join(edpath, file))
3702                         except OSError:
3703                                 print "!!! Failed symlinking in '%s' to ebuild distdir" % file
3704                                 raise
3705
3706                 #initial dep checks complete; time to process main commands
3707
3708                 nosandbox = (("userpriv" in features) and \
3709                         ("usersandbox" not in features) and \
3710                         ("userpriv" not in mysettings["RESTRICT"]) and \
3711                         ("nouserpriv" not in mysettings["RESTRICT"]))
3712                 if nosandbox and ("userpriv" not in features or \
3713                         "userpriv" in mysettings["RESTRICT"] or \
3714                         "nouserpriv" in mysettings["RESTRICT"]):
3715                         nosandbox = ("sandbox" not in features and \
3716                                 "usersandbox" not in features)
3717
3718                 sesandbox = mysettings.selinux_enabled() and \
3719                         "sesandbox" in mysettings.features
3720                 ebuild_sh = EBUILD_SH_BINARY + " %s"
3721                 misc_sh = MISC_SH_BINARY + " dyn_%s"
3722
3723                 # args are for the to spawn function
3724                 actionmap = {
3725 "depend": {"cmd":ebuild_sh, "args":{"droppriv":1, "free":0,         "sesandbox":0}},
3726 "setup":  {"cmd":ebuild_sh, "args":{"droppriv":0, "free":1,         "sesandbox":0}},
3727 "unpack": {"cmd":ebuild_sh, "args":{"droppriv":1, "free":0,         "sesandbox":sesandbox}},
3728 "compile":{"cmd":ebuild_sh, "args":{"droppriv":1, "free":nosandbox, "sesandbox":sesandbox}},
3729 "test":   {"cmd":ebuild_sh, "args":{"droppriv":1, "free":nosandbox, "sesandbox":sesandbox}},
3730 "install":{"cmd":ebuild_sh, "args":{"droppriv":0, "free":0,         "sesandbox":sesandbox}},
3731 "rpm":    {"cmd":misc_sh,   "args":{"droppriv":0, "free":0,         "sesandbox":0}},
3732 "package":{"cmd":misc_sh,   "args":{"droppriv":0, "free":0,         "sesandbox":0}},
3733                 }
3734
3735                 # merge the deps in so we have again a 'full' actionmap
3736                 # be glad when this can die.
3737                 for x in actionmap.keys():
3738                         if len(actionmap_deps.get(x, [])):
3739                                 actionmap[x]["dep"] = ' '.join(actionmap_deps[x])
3740
3741                 if mydo in actionmap.keys():
3742                         if mydo=="package":
3743                                 portage_util.ensure_dirs(
3744                                         os.path.join(mysettings["PKGDIR"], mysettings["CATEGORY"]))
3745                                 portage_util.ensure_dirs(
3746                                         os.path.join(mysettings["PKGDIR"], "All"))
3747                         retval = spawnebuild(mydo,
3748                                 actionmap, mysettings, debug, logfile=logfile)
3749                 elif mydo=="qmerge":
3750                         # check to ensure install was run.  this *only* pops up when users
3751                         # forget it and are using ebuild
3752                         if not os.path.exists(
3753                                 os.path.join(mysettings["PORTAGE_BUILDDIR"], ".installed")):
3754                                 writemsg("!!! mydo=qmerge, but install phase hasn't been ran\n",
3755                                         noiselevel=-1)
3756                                 return 1
3757                         # qmerge is a special phase that implies noclean.
3758                         if "noclean" not in mysettings.features:
3759                                 mysettings.features.append("noclean")
3760                         #qmerge is specifically not supposed to do a runtime dep check
3761                         retval = merge(
3762                                 mysettings["CATEGORY"], mysettings["PF"], mysettings["D"],
3763                                 os.path.join(mysettings["PORTAGE_BUILDDIR"], "build-info"),
3764                                 myroot, mysettings, myebuild=mysettings["EBUILD"], mytree=tree,
3765                                 mydbapi=mydbapi, vartree=vartree, prev_mtimes=prev_mtimes)
3766                 elif mydo=="merge":
3767                         retval = spawnebuild("install", actionmap, mysettings, debug,
3768                                 alwaysdep=1, logfile=logfile)
3769                         if retval == os.EX_OK:
3770                                 retval = merge(mysettings["CATEGORY"], mysettings["PF"],
3771                                         mysettings["D"], os.path.join(mysettings["PORTAGE_BUILDDIR"],
3772                                         "build-info"), myroot, mysettings,
3773                                         myebuild=mysettings["EBUILD"], mytree=tree, mydbapi=mydbapi,
3774                                         vartree=vartree, prev_mtimes=prev_mtimes)
3775                 else:
3776                         print "!!! Unknown mydo:",mydo
3777                         return 1
3778
3779                 if retval != os.EX_OK and tree == "porttree":
3780                         for i in xrange(len(mydbapi.porttrees)-1):
3781                                 t = mydbapi.porttrees[i+1]
3782                                 if myebuild.startswith(t):
3783                                         # Display the non-cannonical path, in case it's different, to
3784                                         # prevent confusion.
3785                                         overlays = mysettings["PORTDIR_OVERLAY"].split()
3786                                         try:
3787                                                 writemsg("!!! This ebuild is from an overlay: '%s'\n" % \
3788                                                         overlays[i], noiselevel=-1)
3789                                         except IndexError:
3790                                                 pass
3791                                         break
3792                 return retval
3793
3794         finally:
3795                 if builddir_lock:
3796                         portage_locks.unlockdir(builddir_lock)
3797
3798                 # Make sure that DISTDIR is restored to it's normal value before we return!
3799                 if "PORTAGE_ACTUAL_DISTDIR" in mysettings:
3800                         mysettings["DISTDIR"] = mysettings["PORTAGE_ACTUAL_DISTDIR"]
3801                         del mysettings["PORTAGE_ACTUAL_DISTDIR"]
3802
3803                 if logfile:
3804                         try:
3805                                 if os.stat(logfile).st_size == 0:
3806                                         os.unlink(logfile)
3807                         except OSError:
3808                                 pass
3809
3810                 if mydo in ("digest", "manifest", "help"):
3811                         # If necessary, depend phase has been triggered by aux_get calls
3812                         # and the exemption is no longer needed.
3813                         _doebuild_manifest_exempt_depend -= 1
3814
3815 expandcache={}
3816
3817 def movefile(src,dest,newmtime=None,sstat=None,mysettings=None):
3818         """moves a file from src to dest, preserving all permissions and attributes; mtime will
3819         be preserved even when moving across filesystems.  Returns true on success and false on
3820         failure.  Move is atomic."""
3821         #print "movefile("+str(src)+","+str(dest)+","+str(newmtime)+","+str(sstat)+")"
3822         global lchown
3823         if mysettings is None:
3824                 global settings
3825                 mysettings = settings
3826         selinux_enabled = mysettings.selinux_enabled()
3827         try:
3828                 if not sstat:
3829                         sstat=os.lstat(src)
3830
3831         except SystemExit, e:
3832                 raise
3833         except Exception, e:
3834                 print "!!! Stating source file failed... movefile()"
3835                 print "!!!",e
3836                 return None
3837
3838         destexists=1
3839         try:
3840                 dstat=os.lstat(dest)
3841         except (OSError, IOError):
3842                 dstat=os.lstat(os.path.dirname(dest))
3843                 destexists=0
3844
3845         if bsd_chflags:
3846                 # Check that we can actually unset schg etc flags...
3847                 # Clear the flags on source and destination; we'll reinstate them after merging
3848                 if destexists and dstat.st_flags != 0:
3849                         if bsd_chflags.lchflags(dest, 0) < 0:
3850                                 writemsg("!!! Couldn't clear flags on file being merged: \n ",
3851                                         noiselevel=-1)
3852                 # We might have an immutable flag on the parent dir; save and clear.
3853                 pflags=bsd_chflags.lgetflags(os.path.dirname(dest))
3854                 if pflags != 0:
3855                         bsd_chflags.lchflags(os.path.dirname(dest), 0)
3856
3857                 if (destexists and bsd_chflags.lhasproblems(dest) > 0) or \
3858                         bsd_chflags.lhasproblems(os.path.dirname(dest)) > 0:
3859                         # This is bad: we can't merge the file with these flags set.
3860                         writemsg("!!! Can't merge file "+dest+" because of flags set\n",
3861                                 noiselevel=-1)
3862                         return None
3863
3864         if destexists:
3865                 if stat.S_ISLNK(dstat[stat.ST_MODE]):
3866                         try:
3867                                 os.unlink(dest)
3868                                 destexists=0
3869                         except SystemExit, e:
3870                                 raise
3871                         except Exception, e:
3872                                 pass
3873
3874         if stat.S_ISLNK(sstat[stat.ST_MODE]):
3875                 try:
3876                         target=os.readlink(src)
3877                         if mysettings and mysettings["D"]:
3878                                 if target.find(mysettings["D"])==0:
3879                                         target=target[len(mysettings["D"]):]
3880                         if destexists and not stat.S_ISDIR(dstat[stat.ST_MODE]):
3881                                 os.unlink(dest)
3882                         if selinux_enabled:
3883                                 sid = selinux.get_lsid(src)
3884                                 selinux.secure_symlink(target,dest,sid)
3885                         else:
3886                                 os.symlink(target,dest)
3887                         lchown(dest,sstat[stat.ST_UID],sstat[stat.ST_GID])
3888                         return os.lstat(dest)[stat.ST_MTIME]
3889                 except SystemExit, e:
3890                         raise
3891                 except Exception, e:
3892                         print "!!! failed to properly create symlink:"
3893                         print "!!!",dest,"->",target
3894                         print "!!!",e
3895                         return None
3896
3897         renamefailed=1
3898         if sstat[stat.ST_DEV]==dstat[stat.ST_DEV] or selinux_enabled:
3899                 try:
3900                         if selinux_enabled:
3901                                 ret=selinux.secure_rename(src,dest)
3902                         else:
3903                                 ret=os.rename(src,dest)
3904                         renamefailed=0
3905                 except SystemExit, e:
3906                         raise
3907                 except Exception, e:
3908                         if e[0]!=errno.EXDEV:
3909                                 # Some random error.
3910                                 print "!!! Failed to move",src,"to",dest
3911                                 print "!!!",e
3912                                 return None
3913                         # Invalid cross-device-link 'bind' mounted or actually Cross-Device
3914         if renamefailed:
3915                 didcopy=0
3916                 if stat.S_ISREG(sstat[stat.ST_MODE]):
3917                         try: # For safety copy then move it over.
3918                                 if selinux_enabled:
3919                                         selinux.secure_copy(src,dest+"#new")
3920                                         selinux.secure_rename(dest+"#new",dest)
3921                                 else:
3922                                         shutil.copyfile(src,dest+"#new")
3923                                         os.rename(dest+"#new",dest)
3924                                 didcopy=1
3925                         except SystemExit, e:
3926                                 raise
3927                         except Exception, e:
3928                                 print '!!! copy',src,'->',dest,'failed.'
3929                                 print "!!!",e
3930                                 return None
3931                 else:
3932                         #we don't yet handle special, so we need to fall back to /bin/mv
3933                         if selinux_enabled:
3934                                 a=commands.getstatusoutput(MOVE_BINARY+" -c -f "+"'"+src+"' '"+dest+"'")
3935                         else:
3936                                 a=commands.getstatusoutput(MOVE_BINARY+" -f "+"'"+src+"' '"+dest+"'")
3937                                 if a[0]!=0:
3938                                         print "!!! Failed to move special file:"
3939                                         print "!!! '"+src+"' to '"+dest+"'"
3940                                         print "!!!",a
3941                                         return None # failure
3942                 try:
3943                         if didcopy:
3944                                 if stat.S_ISLNK(sstat[stat.ST_MODE]):
3945                                         lchown(dest,sstat[stat.ST_UID],sstat[stat.ST_GID])
3946                                 else:
3947                                         os.chown(dest,sstat[stat.ST_UID],sstat[stat.ST_GID])
3948                                 os.chmod(dest, stat.S_IMODE(sstat[stat.ST_MODE])) # Sticky is reset on chown
3949                                 os.unlink(src)
3950                 except SystemExit, e:
3951                         raise
3952                 except Exception, e:
3953                         print "!!! Failed to chown/chmod/unlink in movefile()"
3954                         print "!!!",dest
3955                         print "!!!",e
3956                         return None
3957
3958         if newmtime:
3959                 os.utime(dest,(newmtime,newmtime))
3960         else:
3961                 os.utime(dest, (sstat[stat.ST_ATIME], sstat[stat.ST_MTIME]))
3962                 newmtime=sstat[stat.ST_MTIME]
3963
3964         if bsd_chflags:
3965                 # Restore the flags we saved before moving
3966                 if pflags and bsd_chflags.lchflags(os.path.dirname(dest), pflags) < 0:
3967                         writemsg("!!! Couldn't restore flags (%s) on '%s'\n" % \
3968                                 (str(pflags), os.path.dirname(dest)), noiselevel=-1)
3969                         return None
3970
3971         return newmtime
3972
3973 def merge(mycat, mypkg, pkgloc, infloc, myroot, mysettings, myebuild=None,
3974         mytree=None, mydbapi=None, vartree=None, prev_mtimes=None):
3975         if not os.access(myroot, os.W_OK):
3976                 writemsg("Permission denied: access('%s', W_OK)\n" % myroot,
3977                         noiselevel=-1)
3978                 return errno.EACCES
3979         mylink = dblink(mycat, mypkg, myroot, mysettings, treetype=mytree,
3980                 vartree=vartree)
3981         return mylink.merge(pkgloc, infloc, myroot, myebuild,
3982                 mydbapi=mydbapi, prev_mtimes=prev_mtimes)
3983
3984 def unmerge(cat, pkg, myroot, mysettings, mytrimworld=1, vartree=None, ldpath_mtimes=None):
3985         mylink = dblink(
3986                 cat, pkg, myroot, mysettings, treetype="vartree", vartree=vartree)
3987         try:
3988                 mylink.lockdb()
3989                 if mylink.exists():
3990                         retval = mylink.unmerge(trimworld=mytrimworld, cleanup=1,
3991                                 ldpath_mtimes=ldpath_mtimes)
3992                         if retval == os.EX_OK:
3993                                 mylink.delete()
3994                         return retval
3995                 return os.EX_OK
3996         finally:
3997                 mylink.unlockdb()
3998
3999 def getCPFromCPV(mycpv):
4000         """Calls pkgsplit on a cpv and returns only the cp."""
4001         return pkgsplit(mycpv)[0]
4002
4003 def dep_virtual(mysplit, mysettings):
4004         "Does virtual dependency conversion"
4005         newsplit=[]
4006         myvirtuals = mysettings.getvirtuals()
4007         for x in mysplit:
4008                 if type(x)==types.ListType:
4009                         newsplit.append(dep_virtual(x, mysettings))
4010                 else:
4011                         mykey=dep_getkey(x)
4012                         mychoices = myvirtuals.get(mykey, None)
4013                         if mychoices:
4014                                 if len(mychoices) == 1:
4015                                         a = x.replace(mykey, mychoices[0])
4016                                 else:
4017                                         if x[0]=="!":
4018                                                 # blocker needs "and" not "or(||)".
4019                                                 a=[]
4020                                         else:
4021                                                 a=['||']
4022                                         for y in mychoices:
4023                                                 a.append(x.replace(mykey, y))
4024                                 newsplit.append(a)
4025                         else:
4026                                 newsplit.append(x)
4027         return newsplit
4028
4029 def _expand_new_virtuals(mysplit, edebug, mydbapi, mysettings, myroot="/",
4030         trees=None, **kwargs):
4031         """Recursively expand new-style virtuals so as to collapse one or more
4032         levels of indirection.  In dep_zapdeps, new-style virtuals will be assigned
4033         zero cost regardless of whether or not they are currently installed. Virtual
4034         blockers are supported but only when the virtual expands to a single
4035         atom because it wouldn't necessarily make sense to block all the components
4036         of a compound virtual.  When more than one new-style virtual is matched,
4037         the matches are sorted from highest to lowest versions and the atom is
4038         expanded to || ( highest match ... lowest match )."""
4039         newsplit = []
4040         # According to GLEP 37, RDEPEND is the only dependency type that is valid
4041         # for new-style virtuals.  Repoman should enforce this.
4042         dep_keys = ["RDEPEND", "DEPEND", "PDEPEND"]
4043         def compare_pkgs(a, b):
4044                 return pkgcmp(b[1], a[1])
4045         portdb = trees[myroot]["porttree"].dbapi
4046         if kwargs["use_binaries"]:
4047                 portdb = trees[myroot]["bintree"].dbapi
4048         myvirtuals = mysettings.getvirtuals()
4049         for x in mysplit:
4050                 if x == "||":
4051                         newsplit.append(x)
4052                         continue
4053                 elif isinstance(x, list):
4054                         newsplit.append(_expand_new_virtuals(x, edebug, mydbapi,
4055                                 mysettings, myroot=myroot, trees=trees, **kwargs))
4056                         continue
4057                 if portage_dep._dep_check_strict and \
4058                         not isvalidatom(x, allow_blockers=True):
4059                         raise portage_exception.ParseError(
4060                                 "invalid atom: '%s'" % x)
4061                 mykey = dep_getkey(x)
4062                 if not mykey.startswith("virtual/"):
4063                         newsplit.append(x)
4064                         continue
4065                 mychoices = myvirtuals.get(mykey, [])
4066                 isblocker = x.startswith("!")
4067                 match_atom = x
4068                 if isblocker:
4069                         match_atom = x[1:]
4070                 pkgs = {}
4071                 for cpv in portdb.match(match_atom):
4072                         # only use new-style matches
4073                         if cpv.startswith("virtual/"):
4074                                 pkgs[cpv] = (cpv, catpkgsplit(cpv)[1:], portdb)
4075                 if kwargs["use_binaries"] and "vartree" in trees[myroot]:
4076                         vardb = trees[myroot]["vartree"].dbapi
4077                         for cpv in vardb.match(match_atom):
4078                                 # only use new-style matches
4079                                 if cpv.startswith("virtual/"):
4080                                         if cpv in pkgs:
4081                                                 continue
4082                                         pkgs[cpv] = (cpv, catpkgsplit(cpv)[1:], vardb)
4083                 if not (pkgs or mychoices):
4084                         # This one couldn't be expanded as a new-style virtual.  Old-style
4085                         # virtuals have already been expanded by dep_virtual, so this one
4086                         # is unavailable and dep_zapdeps will identify it as such.  The
4087                         # atom is not eliminated here since it may still represent a
4088                         # dependency that needs to be satisfied.
4089                         newsplit.append(x)
4090                         continue
4091                 if not pkgs and len(mychoices) == 1:
4092                         newsplit.append(x.replace(mykey, mychoices[0]))
4093                         continue
4094                 pkgs = pkgs.values()
4095                 pkgs.sort(compare_pkgs) # Prefer higher versions.
4096                 if isblocker:
4097                         a = []
4098                 else:
4099                         a = ['||']
4100                 for y in pkgs:
4101                         depstring = " ".join(y[2].aux_get(y[0], dep_keys))
4102                         if edebug:
4103                                 print "Virtual Parent:   ", y[0]
4104                                 print "Virtual Depstring:", depstring
4105                         mycheck = dep_check(depstring, mydbapi, mysettings, myroot=myroot,
4106                                 trees=trees, **kwargs)
4107                         if not mycheck[0]:
4108                                 raise portage_exception.ParseError(
4109                                         "%s: %s '%s'" % (y[0], mycheck[1], depstring))
4110                         if isblocker:
4111                                 virtual_atoms = [atom for atom in mycheck[1] \
4112                                         if not atom.startswith("!")]
4113                                 if len(virtual_atoms) == 1:
4114                                         # It wouldn't make sense to block all the components of a
4115                                         # compound virtual, so only a single atom block is allowed.
4116                                         a.append("!" + virtual_atoms[0])
4117                         else:
4118                                 mycheck[1].append("="+y[0]) # pull in the new-style virtual
4119                                 a.append(mycheck[1])
4120                 # Plain old-style virtuals.  New-style virtuals are preferred.
4121                 for y in mychoices:
4122                         a.append(x.replace(mykey, y))
4123                 if isblocker and not a:
4124                         # Probably a compound virtual.  Pass the atom through unprocessed.
4125                         newsplit.append(x)
4126                         continue
4127                 newsplit.append(a)
4128         return newsplit
4129
4130 def dep_eval(deplist):
4131         if not deplist:
4132                 return 1
4133         if deplist[0]=="||":
4134                 #or list; we just need one "1"
4135                 for x in deplist[1:]:
4136                         if type(x)==types.ListType:
4137                                 if dep_eval(x)==1:
4138                                         return 1
4139                         elif x==1:
4140                                         return 1
4141                 #XXX: unless there's no available atoms in the list
4142                 #in which case we need to assume that everything is
4143                 #okay as some ebuilds are relying on an old bug.
4144                 if len(deplist) == 1:
4145                         return 1
4146                 return 0
4147         else:
4148                 for x in deplist:
4149                         if type(x)==types.ListType:
4150                                 if dep_eval(x)==0:
4151                                         return 0
4152                         elif x==0 or x==2:
4153                                 return 0
4154                 return 1
4155
4156 def dep_zapdeps(unreduced, reduced, myroot, use_binaries=0, trees=None):
4157         """Takes an unreduced and reduced deplist and removes satisfied dependencies.
4158         Returned deplist contains steps that must be taken to satisfy dependencies."""
4159         if trees is None:
4160                 global db
4161                 trees = db
4162         writemsg("ZapDeps -- %s\n" % (use_binaries), 2)
4163         if not reduced or unreduced == ["||"] or dep_eval(reduced):
4164                 return []
4165
4166         if unreduced[0] != "||":
4167                 unresolved = []
4168                 for dep, satisfied in izip(unreduced, reduced):
4169                         if isinstance(dep, list):
4170                                 unresolved += dep_zapdeps(dep, satisfied, myroot,
4171                                         use_binaries=use_binaries, trees=trees)
4172                         elif not satisfied:
4173                                 unresolved.append(dep)
4174                 return unresolved
4175
4176         # We're at a ( || atom ... ) type level and need to make a choice
4177         deps = unreduced[1:]
4178         satisfieds = reduced[1:]
4179
4180         # Our preference order is for an the first item that:
4181         # a) contains all unmasked packages with the same key as installed packages
4182         # b) contains all unmasked packages
4183         # c) contains masked installed packages
4184         # d) is the first item
4185
4186         preferred = []
4187         preferred_any_slot = []
4188         possible_upgrades = []
4189         other = []
4190
4191         # Alias the trees we'll be checking availability against
4192         vardb = None
4193         if "vartree" in trees[myroot]:
4194                 vardb = trees[myroot]["vartree"].dbapi
4195         if use_binaries:
4196                 mydbapi = trees[myroot]["bintree"].dbapi
4197         else:
4198                 mydbapi = trees[myroot]["porttree"].dbapi
4199
4200         # Sort the deps into preferred (installed) and other
4201         # with values of [[required_atom], availablility]
4202         for dep, satisfied in izip(deps, satisfieds):
4203                 if isinstance(dep, list):
4204                         atoms = dep_zapdeps(dep, satisfied, myroot,
4205                                 use_binaries=use_binaries, trees=trees)
4206                 else:
4207                         atoms = [dep]
4208
4209                 if not vardb:
4210                         # called by repoman
4211                         other.append((atoms, None, False))
4212                         continue
4213
4214                 all_available = True
4215                 versions = {}
4216                 for atom in atoms:
4217                         avail_pkg = best(mydbapi.match(atom))
4218                         if avail_pkg:
4219                                 avail_slot = "%s:%s" % (dep_getkey(atom),
4220                                         mydbapi.aux_get(avail_pkg, ["SLOT"])[0])
4221                         elif not avail_pkg and use_binaries:
4222                                 # With --usepkgonly, count installed packages as "available".
4223                                 # Note that --usepkgonly currently has no package.mask support.
4224                                 # See bug #149816.
4225                                 avail_pkg = best(vardb.match(atom))
4226                                 if avail_pkg:
4227                                         avail_slot = "%s:%s" % (dep_getkey(atom),
4228                                                 vardb.aux_get(avail_pkg, ["SLOT"])[0])
4229                         if not avail_pkg:
4230                                 all_available = False
4231                                 break
4232
4233                         versions[avail_slot] = avail_pkg
4234
4235                 this_choice = (atoms, versions, all_available)
4236                 if all_available:
4237                         # The "all installed" criterion is not version or slot specific.
4238                         # If any version of a package is installed then we assume that it
4239                         # is preferred over other possible packages choices.
4240                         all_installed = True
4241                         for atom in set([dep_getkey(atom) for atom in atoms]):
4242                                 # New-style virtuals have zero cost to install.
4243                                 if not vardb.match(atom) and not atom.startswith("virtual/"):
4244                                         all_installed = False
4245                                         break
4246                         all_installed_slots = False
4247                         if all_installed:
4248                                 all_installed_slots = True
4249                                 for slot_atom in versions:
4250                                         # New-style virtuals have zero cost to install.
4251                                         if not vardb.match(slot_atom) and \
4252                                                 not slot_atom.startswith("virtual/"):
4253                                                 all_installed_slots = False
4254                                                 break
4255                         if all_installed:
4256                                 if all_installed_slots:
4257                                         preferred.append(this_choice)
4258                                 else:
4259                                         preferred_any_slot.append(this_choice)
4260                         else:
4261                                 possible_upgrades.append(this_choice)
4262                 else:
4263                         other.append(this_choice)
4264
4265         # Compare the "all_installed" choices against the "all_available" choices
4266         # for possible missed upgrades.  The main purpose of this code is to find
4267         # upgrades of new-style virtuals since _expand_new_virtuals() expands them
4268         # into || ( highest version ... lowest version ).  We want to prefer the
4269         # highest all_available version of the new-style virtual when there is a
4270         # lower all_installed version.
4271         preferred.extend(preferred_any_slot)
4272         preferred.extend(possible_upgrades)
4273         possible_upgrades = preferred[1:]
4274         for possible_upgrade in possible_upgrades:
4275                 atoms, versions, all_available = possible_upgrade
4276                 myslots = set(versions)
4277                 for other_choice in preferred:
4278                         if possible_upgrade is other_choice:
4279                                 # possible_upgrade will not be promoted, so move on
4280                                 break
4281                         o_atoms, o_versions, o_all_available = other_choice
4282                         intersecting_slots = myslots.intersection(o_versions)
4283                         if not intersecting_slots:
4284                                 continue
4285                         has_upgrade = False
4286                         has_downgrade = False
4287                         for myslot in intersecting_slots:
4288                                 myversion = versions[myslot]
4289                                 o_version = o_versions[myslot]
4290                                 if myversion != o_version:
4291                                         if myversion == best([myversion, o_version]):
4292                                                 has_upgrade = True
4293                                         else:
4294                                                 has_downgrade = True
4295                                                 break
4296                         if has_upgrade and not has_downgrade:
4297                                 preferred.remove(possible_upgrade)
4298                                 o_index = preferred.index(other_choice)
4299                                 preferred.insert(o_index, possible_upgrade)
4300                                 break
4301
4302         # preferred now contains a) and c) from the order above with
4303         # the masked flag differentiating the two. other contains b)
4304         # and d) so adding other to preferred will give us a suitable
4305         # list to iterate over.
4306         preferred.extend(other)
4307
4308         for allow_masked in (False, True):
4309                 for atoms, versions, all_available in preferred:
4310                         if all_available or allow_masked:
4311                                 return atoms
4312
4313         assert(False) # This point should not be reachable
4314
4315
4316 def dep_expand(mydep, mydb=None, use_cache=1, settings=None):
4317         if not len(mydep):
4318                 return mydep
4319         if mydep[0]=="*":
4320                 mydep=mydep[1:]
4321         orig_dep = mydep
4322         mydep = dep_getcpv(orig_dep)
4323         myindex = orig_dep.index(mydep)
4324         prefix = orig_dep[:myindex]
4325         postfix = orig_dep[myindex+len(mydep):]
4326         return prefix + cpv_expand(
4327                 mydep, mydb=mydb, use_cache=use_cache, settings=settings) + postfix
4328
4329 def dep_check(depstring, mydbapi, mysettings, use="yes", mode=None, myuse=None,
4330         use_cache=1, use_binaries=0, myroot="/", trees=None):
4331         """Takes a depend string and parses the condition."""
4332         edebug = mysettings.get("PORTAGE_DEBUG", None) == "1"
4333         #check_config_instance(mysettings)
4334         if trees is None:
4335                 trees = globals()["db"]
4336         if use=="yes":
4337                 if myuse is None:
4338                         #default behavior
4339                         myusesplit = mysettings["USE"].split()
4340                 else:
4341                         myusesplit = myuse
4342                         # We've been given useflags to use.
4343                         #print "USE FLAGS PASSED IN."
4344                         #print myuse
4345                         #if "bindist" in myusesplit:
4346                         #       print "BINDIST is set!"
4347                         #else:
4348                         #       print "BINDIST NOT set."
4349         else:
4350                 #we are being run by autouse(), don't consult USE vars yet.
4351                 # WE ALSO CANNOT USE SETTINGS
4352                 myusesplit=[]
4353
4354         #convert parenthesis to sublists
4355         mysplit = portage_dep.paren_reduce(depstring)
4356
4357         mymasks = set()
4358         useforce = set()
4359         useforce.add(mysettings["ARCH"])
4360         if use == "all":
4361                 # This masking/forcing is only for repoman.  In other cases, relevant
4362                 # masking/forcing should have already been applied via
4363                 # config.regenerate().  Also, binary or installed packages may have
4364                 # been built with flags that are now masked, and it would be
4365                 # inconsistent to mask them now.  Additionally, myuse may consist of
4366                 # flags from a parent package that is being merged to a $ROOT that is
4367                 # different from the one that mysettings represents.
4368                 mymasks.update(mysettings.usemask)
4369                 mymasks.update(mysettings.archlist())
4370                 mymasks.discard(mysettings["ARCH"])
4371                 useforce.update(mysettings.useforce)
4372                 useforce.difference_update(mymasks)
4373         try:
4374                 mysplit = portage_dep.use_reduce(mysplit, uselist=myusesplit,
4375                         masklist=mymasks, matchall=(use=="all"), excludeall=useforce)
4376         except portage_exception.InvalidDependString, e:
4377                 return [0, str(e)]
4378
4379         # Do the || conversions
4380         mysplit=portage_dep.dep_opconvert(mysplit)
4381
4382         if mysplit == []:
4383                 #dependencies were reduced to nothing
4384                 return [1,[]]
4385
4386         # Recursively expand new-style virtuals so as to
4387         # collapse one or more levels of indirection.
4388         try:
4389                 mysplit = _expand_new_virtuals(mysplit, edebug, mydbapi, mysettings,
4390                         use=use, mode=mode, myuse=myuse, use_cache=use_cache,
4391                         use_binaries=use_binaries, myroot=myroot, trees=trees)
4392         except portage_exception.ParseError, e:
4393                 return [0, str(e)]
4394
4395         mysplit2=mysplit[:]
4396         mysplit2=dep_wordreduce(mysplit2,mysettings,mydbapi,mode,use_cache=use_cache)
4397         if mysplit2 is None:
4398                 return [0,"Invalid token"]
4399
4400         writemsg("\n\n\n", 1)
4401         writemsg("mysplit:  %s\n" % (mysplit), 1)
4402         writemsg("mysplit2: %s\n" % (mysplit2), 1)
4403
4404         myzaps = dep_zapdeps(mysplit, mysplit2, myroot,
4405                 use_binaries=use_binaries, trees=trees)
4406         mylist = flatten(myzaps)
4407         writemsg("myzaps:   %s\n" % (myzaps), 1)
4408         writemsg("mylist:   %s\n" % (mylist), 1)
4409         #remove duplicates
4410         mydict={}
4411         for x in mylist:
4412                 mydict[x]=1
4413         writemsg("mydict:   %s\n" % (mydict), 1)
4414         return [1,mydict.keys()]
4415
4416 def dep_wordreduce(mydeplist,mysettings,mydbapi,mode,use_cache=1):
4417         "Reduces the deplist to ones and zeros"
4418         deplist=mydeplist[:]
4419         for mypos in xrange(len(deplist)):
4420                 if type(deplist[mypos])==types.ListType:
4421                         #recurse
4422                         deplist[mypos]=dep_wordreduce(deplist[mypos],mysettings,mydbapi,mode,use_cache=use_cache)
4423                 elif deplist[mypos]=="||":
4424                         pass
4425                 else:
4426                         mykey = dep_getkey(deplist[mypos])
4427                         if mysettings and mysettings.pprovideddict.has_key(mykey) and \
4428                                 match_from_list(deplist[mypos], mysettings.pprovideddict[mykey]):
4429                                 deplist[mypos]=True
4430                         elif mydbapi is None:
4431                                 # Assume nothing is satisfied.  This forces dep_zapdeps to
4432                                 # return all of deps the deps that have been selected
4433                                 # (excluding those satisfied by package.provided).
4434                                 deplist[mypos] = False
4435                         else:
4436                                 if mode:
4437                                         mydep=mydbapi.xmatch(mode,deplist[mypos])
4438                                 else:
4439                                         mydep=mydbapi.match(deplist[mypos],use_cache=use_cache)
4440                                 if mydep!=None:
4441                                         tmp=(len(mydep)>=1)
4442                                         if deplist[mypos][0]=="!":
4443                                                 tmp=False
4444                                         deplist[mypos]=tmp
4445                                 else:
4446                                         #encountered invalid string
4447                                         return None
4448         return deplist
4449
4450 def cpv_getkey(mycpv):
4451         myslash=mycpv.split("/")
4452         mysplit=pkgsplit(myslash[-1])
4453         mylen=len(myslash)
4454         if mylen==2:
4455                 return myslash[0]+"/"+mysplit[0]
4456         elif mylen==1:
4457                 return mysplit[0]
4458         else:
4459                 return mysplit
4460
4461 def key_expand(mykey, mydb=None, use_cache=1, settings=None):
4462         mysplit=mykey.split("/")
4463         if settings is None:
4464                 settings = globals()["settings"]
4465         virts = settings.getvirtuals("/")
4466         virts_p = settings.get_virts_p("/")
4467         if len(mysplit)==1:
4468                 if mydb and type(mydb)==types.InstanceType:
4469                         for x in settings.categories:
4470                                 if mydb.cp_list(x+"/"+mykey,use_cache=use_cache):
4471                                         return x+"/"+mykey
4472                         if virts_p.has_key(mykey):
4473                                 return(virts_p[mykey][0])
4474                 return "null/"+mykey
4475         elif mydb:
4476                 if type(mydb)==types.InstanceType:
4477                         if (not mydb.cp_list(mykey,use_cache=use_cache)) and virts and virts.has_key(mykey):
4478                                 return virts[mykey][0]
4479                 return mykey
4480
4481 def cpv_expand(mycpv, mydb=None, use_cache=1, settings=None):
4482         """Given a string (packagename or virtual) expand it into a valid
4483         cat/package string. Virtuals use the mydb to determine which provided
4484         virtual is a valid choice and defaults to the first element when there
4485         are no installed/available candidates."""
4486         myslash=mycpv.split("/")
4487         mysplit=pkgsplit(myslash[-1])
4488         if settings is None:
4489                 settings = globals()["settings"]
4490         virts = settings.getvirtuals("/")
4491         virts_p = settings.get_virts_p("/")
4492         if len(myslash)>2:
4493                 # this is illegal case.
4494                 mysplit=[]
4495                 mykey=mycpv
4496         elif len(myslash)==2:
4497                 if mysplit:
4498                         mykey=myslash[0]+"/"+mysplit[0]
4499                 else:
4500                         mykey=mycpv
4501                 if mydb and virts and mykey in virts:
4502                         writemsg("mydb.__class__: %s\n" % (mydb.__class__), 1)
4503                         if hasattr(mydb, "cp_list"):
4504                                 if not mydb.cp_list(mykey, use_cache=use_cache):
4505                                         writemsg("virts[%s]: %s\n" % (str(mykey),virts[mykey]), 1)
4506                                         mykey_orig = mykey[:]
4507                                         for vkey in virts[mykey]:
4508                                                 if mydb.cp_list(vkey,use_cache=use_cache):
4509                                                         mykey = vkey
4510                                                         writemsg("virts chosen: %s\n" % (mykey), 1)
4511                                                         break
4512                                         if mykey == mykey_orig:
4513                                                 mykey=virts[mykey][0]
4514                                                 writemsg("virts defaulted: %s\n" % (mykey), 1)
4515                         #we only perform virtual expansion if we are passed a dbapi
4516         else:
4517                 #specific cpv, no category, ie. "foo-1.0"
4518                 if mysplit:
4519                         myp=mysplit[0]
4520                 else:
4521                         # "foo" ?
4522                         myp=mycpv
4523                 mykey=None
4524                 matches=[]
4525                 if mydb:
4526                         for x in settings.categories:
4527                                 if mydb.cp_list(x+"/"+myp,use_cache=use_cache):
4528                                         matches.append(x+"/"+myp)
4529                 if len(matches) > 1:
4530                         virtual_name_collision = False
4531                         if len(matches) == 2:
4532                                 for x in matches:
4533                                         if not x.startswith("virtual/"):
4534                                                 # Assume that the non-virtual is desired.  This helps
4535                                                 # avoid the ValueError for invalid deps that come from
4536                                                 # installed packages (during reverse blocker detection,
4537                                                 # for example).
4538                                                 mykey = x
4539                                         else:
4540                                                 virtual_name_collision = True
4541                         if not virtual_name_collision:
4542                                 raise ValueError, matches
4543                 elif matches:
4544                         mykey=matches[0]
4545
4546                 if not mykey and type(mydb)!=types.ListType:
4547                         if virts_p.has_key(myp):
4548                                 mykey=virts_p[myp][0]
4549                         #again, we only perform virtual expansion if we have a dbapi (not a list)
4550                 if not mykey:
4551                         mykey="null/"+myp
4552         if mysplit:
4553                 if mysplit[2]=="r0":
4554                         return mykey+"-"+mysplit[1]
4555                 else:
4556                         return mykey+"-"+mysplit[1]+"-"+mysplit[2]
4557         else:
4558                 return mykey
4559
4560 def getmaskingreason(mycpv, settings=None, portdb=None):
4561         from portage_util import grablines
4562         if settings is None:
4563                 settings = globals()["settings"]
4564         if portdb is None:
4565                 portdb = globals()["portdb"]
4566         mysplit = catpkgsplit(mycpv)
4567         if not mysplit:
4568                 raise ValueError("invalid CPV: %s" % mycpv)
4569         if not portdb.cpv_exists(mycpv):
4570                 raise KeyError("CPV %s does not exist" % mycpv)
4571         mycp=mysplit[0]+"/"+mysplit[1]
4572
4573         # XXX- This is a temporary duplicate of code from the config constructor.
4574         locations = [os.path.join(settings["PORTDIR"], "profiles")]
4575         locations.extend(settings.profiles)
4576         for ov in settings["PORTDIR_OVERLAY"].split():
4577                 profdir = os.path.join(normalize_path(ov), "profiles")
4578                 if os.path.isdir(profdir):
4579                         locations.append(profdir)
4580         locations.append(os.path.join(settings["PORTAGE_CONFIGROOT"],
4581                 USER_CONFIG_PATH.lstrip(os.path.sep)))
4582         locations.reverse()
4583         pmasklists = [grablines(os.path.join(x, "package.mask"), recursive=1) for x in locations]
4584         pmasklines = []
4585         while pmasklists: # stack_lists doesn't preserve order so it can't be used
4586                 pmasklines.extend(pmasklists.pop(0))
4587         del pmasklists
4588
4589         if settings.pmaskdict.has_key(mycp):
4590                 for x in settings.pmaskdict[mycp]:
4591                         if mycpv in portdb.xmatch("match-all", x):
4592                                 comment = ""
4593                                 l = "\n"
4594                                 comment_valid = -1
4595                                 for i in xrange(len(pmasklines)):
4596                                         l = pmasklines[i].strip()
4597                                         if l == "":
4598                                                 comment = ""
4599                                                 comment_valid = -1
4600                                         elif l[0] == "#":
4601                                                 comment += (l+"\n")
4602                                                 comment_valid = i + 1
4603                                         elif l == x:
4604                                                 if comment_valid != i:
4605                                                         comment = ""
4606                                                 return comment
4607                                         elif comment_valid != -1:
4608                                                 # Apparently this comment applies to muliple masks, so
4609                                                 # it remains valid until a blank line is encountered.
4610                                                 comment_valid += 1
4611         return None
4612
4613 def getmaskingstatus(mycpv, settings=None, portdb=None):
4614         if settings is None:
4615                 settings = globals()["settings"]
4616         if portdb is None:
4617                 portdb = globals()["portdb"]
4618         mysplit = catpkgsplit(mycpv)
4619         if not mysplit:
4620                 raise ValueError("invalid CPV: %s" % mycpv)
4621         if not portdb.cpv_exists(mycpv):
4622                 raise KeyError("CPV %s does not exist" % mycpv)
4623         mycp=mysplit[0]+"/"+mysplit[1]
4624
4625         rValue = []
4626
4627         # profile checking
4628         revmaskdict=settings.prevmaskdict
4629         if revmaskdict.has_key(mycp):
4630                 for x in revmaskdict[mycp]:
4631                         if x[0]=="*":
4632                                 myatom = x[1:]
4633                         else:
4634                                 myatom = x
4635                         if not match_to_list(mycpv, [myatom]):
4636                                 rValue.append("profile")
4637                                 break
4638
4639         # package.mask checking
4640         maskdict=settings.pmaskdict
4641         unmaskdict=settings.punmaskdict
4642         if maskdict.has_key(mycp):
4643                 for x in maskdict[mycp]:
4644                         if mycpv in portdb.xmatch("match-all", x):
4645                                 unmask=0
4646                                 if unmaskdict.has_key(mycp):
4647                                         for z in unmaskdict[mycp]:
4648                                                 if mycpv in portdb.xmatch("match-all",z):
4649                                                         unmask=1
4650                                                         break
4651                                 if unmask==0:
4652                                         rValue.append("package.mask")
4653
4654         # keywords checking
4655         try:
4656                 mygroups, eapi = portdb.aux_get(mycpv, ["KEYWORDS", "EAPI"])
4657         except KeyError:
4658                 # The "depend" phase apparently failed for some reason.  An associated
4659                 # error message will have already been printed to stderr.
4660                 return ["corruption"]
4661         if not eapi_is_supported(eapi):
4662                 return ["required EAPI %s, supported EAPI %s" % (eapi, portage_const.EAPI)]
4663         mygroups = mygroups.split()
4664         pgroups = settings["ACCEPT_KEYWORDS"].split()
4665         myarch = settings["ARCH"]
4666         if pgroups and myarch not in pgroups:
4667                 """For operating systems other than Linux, ARCH is not necessarily a
4668                 valid keyword."""
4669                 myarch = pgroups[0].lstrip("~")
4670         pkgdict = settings.pkeywordsdict
4671
4672         cp = dep_getkey(mycpv)
4673         if pkgdict.has_key(cp):
4674                 matches = match_to_list(mycpv, pkgdict[cp].keys())
4675                 for match in matches:
4676                         pgroups.extend(pkgdict[cp][match])
4677                 if matches:
4678                         inc_pgroups = []
4679                         for x in pgroups:
4680                                 if x != "-*" and x.startswith("-"):
4681                                         try:
4682                                                 inc_pgroups.remove(x[1:])
4683                                         except ValueError:
4684                                                 pass
4685                                 if x not in inc_pgroups:
4686                                         inc_pgroups.append(x)
4687                         pgroups = inc_pgroups
4688                         del inc_pgroups
4689
4690         kmask = "missing"
4691
4692         for keyword in pgroups:
4693                 if keyword in mygroups:
4694                         kmask=None
4695
4696         if kmask:
4697                 fallback = None
4698                 for gp in mygroups:
4699                         if gp=="*":
4700                                 kmask=None
4701                                 break
4702                         elif gp=="-"+myarch:
4703                                 kmask="-"+myarch
4704                                 break
4705                         elif gp=="~"+myarch:
4706                                 kmask="~"+myarch
4707                                 break
4708
4709         if kmask:
4710                 rValue.append(kmask+" keyword")
4711         return rValue
4712
4713 class portagetree:
4714         def __init__(self, root="/", virtual=None, clone=None, settings=None):
4715
4716                 if clone:
4717                         writemsg("portagetree.__init__(): deprecated " + \
4718                                 "use of clone parameter\n", noiselevel=-1)
4719                         self.root=clone.root
4720                         self.portroot=clone.portroot
4721                         self.pkglines=clone.pkglines
4722                 else:
4723                         self.root=root
4724                         if settings is None:
4725                                 settings = globals()["settings"]
4726                         self.settings = settings
4727                         self.portroot=settings["PORTDIR"]
4728                         self.virtual=virtual
4729                         self.dbapi = portdbapi(
4730                                 settings["PORTDIR"], mysettings=settings)
4731
4732         def dep_bestmatch(self,mydep):
4733                 "compatibility method"
4734                 mymatch=self.dbapi.xmatch("bestmatch-visible",mydep)
4735                 if mymatch is None:
4736                         return ""
4737                 return mymatch
4738
4739         def dep_match(self,mydep):
4740                 "compatibility method"
4741                 mymatch=self.dbapi.xmatch("match-visible",mydep)
4742                 if mymatch is None:
4743                         return []
4744                 return mymatch
4745
4746         def exists_specific(self,cpv):
4747                 return self.dbapi.cpv_exists(cpv)
4748
4749         def getallnodes(self):
4750                 """new behavior: these are all *unmasked* nodes.  There may or may not be available
4751                 masked package for nodes in this nodes list."""
4752                 return self.dbapi.cp_all()
4753
4754         def getname(self,pkgname):
4755                 "returns file location for this particular package (DEPRECATED)"
4756                 if not pkgname:
4757                         return ""
4758                 mysplit=pkgname.split("/")
4759                 psplit=pkgsplit(mysplit[1])
4760                 return self.portroot+"/"+mysplit[0]+"/"+psplit[0]+"/"+mysplit[1]+".ebuild"
4761
4762         def resolve_specific(self,myspec):
4763                 cps=catpkgsplit(myspec)
4764                 if not cps:
4765                         return None
4766                 mykey = key_expand(cps[0]+"/"+cps[1], mydb=self.dbapi,
4767                         settings=self.settings)
4768                 mykey=mykey+"-"+cps[2]
4769                 if cps[3]!="r0":
4770                         mykey=mykey+"-"+cps[3]
4771                 return mykey
4772
4773         def depcheck(self,mycheck,use="yes",myusesplit=None):
4774                 return dep_check(mycheck,self.dbapi,use=use,myuse=myusesplit)
4775
4776         def getslot(self,mycatpkg):
4777                 "Get a slot for a catpkg; assume it exists."
4778                 myslot = ""
4779                 try:
4780                         myslot=self.dbapi.aux_get(mycatpkg,["SLOT"])[0]
4781                 except SystemExit, e:
4782                         raise
4783                 except Exception, e:
4784                         pass
4785                 return myslot
4786
4787
4788 class dbapi:
4789         def __init__(self):
4790                 pass
4791
4792         def close_caches(self):
4793                 pass
4794
4795         def cp_list(self,cp,use_cache=1):
4796                 return
4797
4798         def cpv_all(self):
4799                 cpv_list = []
4800                 for cp in self.cp_all():
4801                         cpv_list.extend(self.cp_list(cp))
4802                 return cpv_list
4803
4804         def aux_get(self,mycpv,mylist):
4805                 "stub code for returning auxiliary db information, such as SLOT, DEPEND, etc."
4806                 'input: "sys-apps/foo-1.0",["SLOT","DEPEND","HOMEPAGE"]'
4807                 'return: ["0",">=sys-libs/bar-1.0","http://www.foo.com"] or [] if mycpv not found'
4808                 raise NotImplementedError
4809
4810         def match(self,origdep,use_cache=1):
4811                 mydep = dep_expand(origdep, mydb=self, settings=self.settings)
4812                 mykey=dep_getkey(mydep)
4813                 mylist = match_from_list(mydep,self.cp_list(mykey,use_cache=use_cache))
4814                 myslot = portage_dep.dep_getslot(mydep)
4815                 if myslot is not None:
4816                         mylist = [cpv for cpv in mylist \
4817                                 if self.aux_get(cpv, ["SLOT"])[0] == myslot]
4818                 return mylist
4819
4820         def match2(self,mydep,mykey,mylist):
4821                 writemsg("DEPRECATED: dbapi.match2\n")
4822                 match_from_list(mydep,mylist)
4823
4824         def invalidentry(self, mypath):
4825                 if re.search("portage_lockfile$",mypath):
4826                         if not os.environ.has_key("PORTAGE_MASTER_PID"):
4827                                 writemsg("Lockfile removed: %s\n" % mypath, 1)
4828                                 portage_locks.unlockfile((mypath,None,None))
4829                         else:
4830                                 # Nothing we can do about it. We're probably sandboxed.
4831                                 pass
4832                 elif re.search(".*/-MERGING-(.*)",mypath):
4833                         if os.path.exists(mypath):
4834                                 writemsg(red("INCOMPLETE MERGE:")+" "+mypath+"\n", noiselevel=-1)
4835                 else:
4836                         writemsg("!!! Invalid db entry: %s\n" % mypath, noiselevel=-1)
4837
4838
4839
4840 class fakedbapi(dbapi):
4841         "This is a dbapi to use for the emptytree function.  It's empty, but things can be added to it."
4842         def __init__(self, settings=None):
4843                 self.cpvdict={}
4844                 self.cpdict={}
4845                 if settings is None:
4846                         settings = globals()["settings"]
4847                 self.settings = settings
4848                 self._match_cache = {}
4849
4850         def _clear_cache(self):
4851                 if self._match_cache:
4852                         self._match_cache = {}
4853
4854         def match(self, origdep, use_cache=1):
4855                 result = self._match_cache.get(origdep, None)
4856                 if result is not None:
4857                         return result[:]
4858                 result = dbapi.match(self, origdep, use_cache=use_cache)
4859                 self._match_cache[origdep] = result
4860                 return result[:]
4861
4862         def cpv_exists(self,mycpv):
4863                 return self.cpvdict.has_key(mycpv)
4864
4865         def cp_list(self,mycp,use_cache=1):
4866                 if not self.cpdict.has_key(mycp):
4867                         return []
4868                 else:
4869                         return self.cpdict[mycp]
4870
4871         def cp_all(self):
4872                 returnme=[]
4873                 for x in self.cpdict.keys():
4874                         returnme.extend(self.cpdict[x])
4875                 return returnme
4876
4877         def cpv_all(self):
4878                 return self.cpvdict.keys()
4879
4880         def cpv_inject(self, mycpv, metadata=None):
4881                 """Adds a cpv from the list of available packages."""
4882                 self._clear_cache()
4883                 mycp=cpv_getkey(mycpv)
4884                 self.cpvdict[mycpv] = metadata
4885                 myslot = None
4886                 if metadata:
4887                         myslot = metadata.get("SLOT", None)
4888                 if myslot and mycp in self.cpdict:
4889                         # If necessary, remove another package in the same SLOT.
4890                         for cpv in self.cpdict[mycp]:
4891                                 if mycpv != cpv:
4892                                         other_metadata = self.cpvdict[cpv]
4893                                         if other_metadata:
4894                                                 if myslot == other_metadata.get("SLOT", None):
4895                                                         self.cpv_remove(cpv)
4896                                                         break
4897                 if mycp not in self.cpdict:
4898                         self.cpdict[mycp] = []
4899                 if not mycpv in self.cpdict[mycp]:
4900                         self.cpdict[mycp].append(mycpv)
4901
4902         def cpv_remove(self,mycpv):
4903                 """Removes a cpv from the list of available packages."""
4904                 self._clear_cache()
4905                 mycp=cpv_getkey(mycpv)
4906                 if self.cpvdict.has_key(mycpv):
4907                         del     self.cpvdict[mycpv]
4908                 if not self.cpdict.has_key(mycp):
4909                         return
4910                 while mycpv in self.cpdict[mycp]:
4911                         del self.cpdict[mycp][self.cpdict[mycp].index(mycpv)]
4912                 if not len(self.cpdict[mycp]):
4913                         del self.cpdict[mycp]
4914
4915         def aux_get(self, mycpv, wants):
4916                 if not self.cpv_exists(mycpv):
4917                         raise KeyError(mycpv)
4918                 metadata = self.cpvdict[mycpv]
4919                 if not metadata:
4920                         return ["" for x in wants]
4921                 return [metadata.get(x, "") for x in wants]
4922
4923         def aux_update(self, cpv, values):
4924                 self._clear_cache()
4925                 self.cpvdict[cpv].update(values)
4926
4927 class bindbapi(fakedbapi):
4928         def __init__(self, mybintree=None, settings=None):
4929                 self.bintree = mybintree
4930                 self.cpvdict={}
4931                 self.cpdict={}
4932                 if settings is None:
4933                         settings = globals()["settings"]
4934                 self.settings = settings
4935                 self._match_cache = {}
4936                 # Selectively cache metadata in order to optimize dep matching.
4937                 self._aux_cache_keys = set(["SLOT"])
4938                 self._aux_cache = {}
4939
4940         def match(self, *pargs, **kwargs):
4941                 if self.bintree and not self.bintree.populated:
4942                         self.bintree.populate()
4943                 return fakedbapi.match(self, *pargs, **kwargs)
4944
4945         def aux_get(self,mycpv,wants):
4946                 if self.bintree and not self.bintree.populated:
4947                         self.bintree.populate()
4948                 cache_me = False
4949                 if not set(wants).difference(self._aux_cache_keys):
4950                         aux_cache = self._aux_cache.get(mycpv)
4951                         if aux_cache is not None:
4952                                 return [aux_cache[x] for x in wants]
4953                         cache_me = True
4954                 mysplit = mycpv.split("/")
4955                 mylist  = []
4956                 tbz2name = mysplit[1]+".tbz2"
4957                 if self.bintree and not self.bintree.isremote(mycpv):
4958                         tbz2 = xpak.tbz2(self.bintree.getname(mycpv))
4959                         getitem = tbz2.getfile
4960                 else:
4961                         getitem = self.bintree.remotepkgs[tbz2name].get
4962                 mydata = {}
4963                 mykeys = wants
4964                 if cache_me:
4965                         mykeys = self._aux_cache_keys.union(wants)
4966                 for x in mykeys:
4967                         myval = getitem(x)
4968                         # myval is None if the key doesn't exist
4969                         # or the tbz2 is corrupt.
4970                         if myval:
4971                                 mydata[x] = " ".join(myval.split())
4972                 if "EAPI" in mykeys:
4973                         if not mydata.setdefault("EAPI", "0"):
4974                                 mydata["EAPI"] = "0"
4975                 if cache_me:
4976                         aux_cache = {}
4977                         for x in self._aux_cache_keys:
4978                                 aux_cache[x] = mydata.get(x, "")
4979                         self._aux_cache[mycpv] = aux_cache
4980                 return [mydata.get(x, "") for x in wants]
4981
4982         def aux_update(self, cpv, values):
4983                 if not self.bintree.populated:
4984                         self.bintree.populate()
4985                 tbz2path = self.bintree.getname(cpv)
4986                 if not os.path.exists(tbz2path):
4987                         raise KeyError(cpv)
4988                 mytbz2 = xpak.tbz2(tbz2path)
4989                 mydata = mytbz2.get_data()
4990                 mydata.update(values)
4991                 mytbz2.recompose_mem(xpak.xpak_mem(mydata))
4992
4993         def cp_list(self, *pargs, **kwargs):
4994                 if not self.bintree.populated:
4995                         self.bintree.populate()
4996                 return fakedbapi.cp_list(self, *pargs, **kwargs)
4997
4998         def cpv_all(self):
4999                 if not self.bintree.populated:
5000                         self.bintree.populate()
5001                 return fakedbapi.cpv_all(self)
5002
5003 class vardbapi(dbapi):
5004         def __init__(self, root, categories=None, settings=None, vartree=None):
5005                 self.root       = root[:]
5006                 #cache for category directory mtimes
5007                 self.mtdircache = {}
5008                 #cache for dependency checks
5009                 self.matchcache = {}
5010                 #cache for cp_list results
5011                 self.cpcache    = {}
5012                 self.blockers   = None
5013                 if settings is None:
5014                         settings = globals()["settings"]
5015                 self.settings = settings
5016                 if categories is None:
5017                         categories = settings.categories
5018                 self.categories = categories[:]
5019                 if vartree is None:
5020                         vartree = globals()["db"][root]["vartree"]
5021                 self.vartree = vartree
5022                 self._aux_cache_keys = set(["SLOT", "COUNTER", "PROVIDE", "USE",
5023                         "IUSE", "DEPEND", "RDEPEND", "PDEPEND"])
5024                 self._aux_cache = None
5025                 self._aux_cache_version = "1"
5026                 self._aux_cache_filename = os.path.join(self.root,
5027                         CACHE_PATH.lstrip(os.path.sep), "vdb_metadata.pickle")
5028
5029         def cpv_exists(self,mykey):
5030                 "Tells us whether an actual ebuild exists on disk (no masking)"
5031                 return os.path.exists(self.root+VDB_PATH+"/"+mykey)
5032
5033         def cpv_counter(self,mycpv):
5034                 "This method will grab the COUNTER. Returns a counter value."
5035                 try:
5036                         return long(self.aux_get(mycpv, ["COUNTER"])[0])
5037                 except (KeyError, ValueError):
5038                         pass
5039                 cdir=self.root+VDB_PATH+"/"+mycpv
5040                 cpath=self.root+VDB_PATH+"/"+mycpv+"/COUNTER"
5041
5042                 # We write our new counter value to a new file that gets moved into
5043                 # place to avoid filesystem corruption on XFS (unexpected reboot.)
5044                 corrupted=0
5045                 if os.path.exists(cpath):
5046                         cfile=open(cpath, "r")
5047                         try:
5048                                 counter=long(cfile.readline())
5049                         except ValueError:
5050                                 print "portage: COUNTER for",mycpv,"was corrupted; resetting to value of 0"
5051                                 counter=long(0)
5052                                 corrupted=1
5053                         cfile.close()
5054                 elif os.path.exists(cdir):
5055                         mys = pkgsplit(mycpv)
5056                         myl = self.match(mys[0],use_cache=0)
5057                         print mys,myl
5058                         if len(myl) == 1:
5059                                 try:
5060                                         # Only one package... Counter doesn't matter.
5061                                         write_atomic(cpath, "1")
5062                                         counter = 1
5063                                 except SystemExit, e:
5064                                         raise
5065                                 except Exception, e:
5066                                         writemsg("!!! COUNTER file is missing for "+str(mycpv)+" in /var/db.\n",
5067                                                 noiselevel=-1)
5068                                         writemsg("!!! Please run %s/fix-db.py or\n" % PORTAGE_BIN_PATH,
5069                                                 noiselevel=-1)
5070                                         writemsg("!!! unmerge this exact version.\n", noiselevel=-1)
5071                                         writemsg("!!! %s\n" % e, noiselevel=-1)
5072                                         sys.exit(1)
5073                         else:
5074                                 writemsg("!!! COUNTER file is missing for "+str(mycpv)+" in /var/db.\n",
5075                                         noiselevel=-1)
5076                                 writemsg("!!! Please run %s/fix-db.py or\n" % PORTAGE_BIN_PATH,
5077                                         noiselevel=-1)
5078                                 writemsg("!!! remerge the package.\n", noiselevel=-1)
5079                                 sys.exit(1)
5080                 else:
5081                         counter=long(0)
5082                 if corrupted:
5083                         # update new global counter file
5084                         write_atomic(cpath, str(counter))
5085                 return counter
5086
5087         def cpv_inject(self,mycpv):
5088                 "injects a real package into our on-disk database; assumes mycpv is valid and doesn't already exist"
5089                 os.makedirs(self.root+VDB_PATH+"/"+mycpv)
5090                 counter = self.counter_tick(self.root, mycpv=mycpv)
5091                 # write local package counter so that emerge clean does the right thing
5092                 write_atomic(os.path.join(self.root, VDB_PATH, mycpv, "COUNTER"), str(counter))
5093
5094         def isInjected(self,mycpv):
5095                 if self.cpv_exists(mycpv):
5096                         if os.path.exists(self.root+VDB_PATH+"/"+mycpv+"/INJECTED"):
5097                                 return True
5098                         if not os.path.exists(self.root+VDB_PATH+"/"+mycpv+"/CONTENTS"):
5099                                 return True
5100                 return False
5101
5102         def move_ent(self,mylist):
5103                 origcp=mylist[1]
5104                 newcp=mylist[2]
5105
5106                 # sanity check
5107                 for cp in [origcp,newcp]:
5108                         if not (isvalidatom(cp) and isjustname(cp)):
5109                                 raise portage_exception.InvalidPackageName(cp)
5110                 origmatches=self.match(origcp,use_cache=0)
5111                 if not origmatches:
5112                         return
5113                 for mycpv in origmatches:
5114                         mycpsplit=catpkgsplit(mycpv)
5115                         mynewcpv=newcp+"-"+mycpsplit[2]
5116                         mynewcat=newcp.split("/")[0]
5117                         if mycpsplit[3]!="r0":
5118                                 mynewcpv += "-"+mycpsplit[3]
5119                         mycpsplit_new = catpkgsplit(mynewcpv)
5120                         origpath=self.root+VDB_PATH+"/"+mycpv
5121                         if not os.path.exists(origpath):
5122                                 continue
5123                         writemsg_stdout("@")
5124                         if not os.path.exists(self.root+VDB_PATH+"/"+mynewcat):
5125                                 #create the directory
5126                                 os.makedirs(self.root+VDB_PATH+"/"+mynewcat)
5127                         newpath=self.root+VDB_PATH+"/"+mynewcpv
5128                         if os.path.exists(newpath):
5129                                 #dest already exists; keep this puppy where it is.
5130                                 continue
5131                         os.rename(origpath, newpath)
5132
5133                         # We need to rename the ebuild now.
5134                         old_pf = catsplit(mycpv)[1]
5135                         new_pf = catsplit(mynewcpv)[1]
5136                         if new_pf != old_pf:
5137                                 try:
5138                                         os.rename(os.path.join(newpath, old_pf + ".ebuild"),
5139                                                 os.path.join(newpath, new_pf + ".ebuild"))
5140                                 except OSError, e:
5141                                         if e.errno != errno.ENOENT:
5142                                                 raise
5143                                         del e
5144                                 write_atomic(os.path.join(newpath, "PF"), new_pf+"\n")
5145
5146                         write_atomic(os.path.join(newpath, "CATEGORY"), mynewcat+"\n")
5147                         fixdbentries([mylist], newpath)
5148
5149         def update_ents(self, update_iter):
5150                 """Run fixdbentries on all installed packages (time consuming).  Like
5151                 fixpackages, this should be run from a helper script and display
5152                 a progress indicator."""
5153                 dbdir = os.path.join(self.root, VDB_PATH)
5154                 for catdir in listdir(dbdir):
5155                         catdir = dbdir+"/"+catdir
5156                         if os.path.isdir(catdir):
5157                                 for pkgdir in listdir(catdir):
5158                                         pkgdir = catdir+"/"+pkgdir
5159                                         if os.path.isdir(pkgdir):
5160                                                 fixdbentries(update_iter, pkgdir)
5161
5162         def move_slot_ent(self,mylist):
5163                 pkg=mylist[1]
5164                 origslot=mylist[2]
5165                 newslot=mylist[3]
5166
5167                 if not isvalidatom(pkg):
5168                         raise portage_exception.InvalidAtom(pkg)
5169
5170                 origmatches=self.match(pkg,use_cache=0)
5171                 
5172                 if not origmatches:
5173                         return
5174                 for mycpv in origmatches:
5175                         origpath=self.root+VDB_PATH+"/"+mycpv
5176                         if not os.path.exists(origpath):
5177                                 continue
5178
5179                         slot=grabfile(origpath+"/SLOT");
5180                         if (not slot):
5181                                 continue
5182
5183                         if (slot[0]!=origslot):
5184                                 continue
5185
5186                         writemsg_stdout("s")
5187                         write_atomic(os.path.join(origpath, "SLOT"), newslot+"\n")
5188
5189         def cp_list(self,mycp,use_cache=1):
5190                 mysplit=mycp.split("/")
5191                 if mysplit[0] == '*':
5192                         mysplit[0] = mysplit[0][1:]
5193                 try:
5194                         mystat=os.stat(self.root+VDB_PATH+"/"+mysplit[0])[stat.ST_MTIME]
5195                 except OSError:
5196                         mystat=0
5197                 if use_cache and self.cpcache.has_key(mycp):
5198                         cpc=self.cpcache[mycp]
5199                         if cpc[0]==mystat:
5200                                 return cpc[1]
5201                 list=listdir(self.root+VDB_PATH+"/"+mysplit[0],EmptyOnError=1)
5202
5203                 if (list is None):
5204                         return []
5205                 returnme=[]
5206                 for x in list:
5207                         if x.startswith("."):
5208                                 continue
5209                         if x[0] == '-':
5210                                 #writemsg(red("INCOMPLETE MERGE:")+str(x[len("-MERGING-"):])+"\n")
5211                                 continue
5212                         ps=pkgsplit(x)
5213                         if not ps:
5214                                 self.invalidentry(self.root+VDB_PATH+"/"+mysplit[0]+"/"+x)
5215                                 continue
5216                         if len(mysplit) > 1:
5217                                 if ps[0]==mysplit[1]:
5218                                         returnme.append(mysplit[0]+"/"+x)
5219                 if use_cache:
5220                         self.cpcache[mycp]=[mystat,returnme]
5221                 elif self.cpcache.has_key(mycp):
5222                         del self.cpcache[mycp]
5223                 return returnme
5224
5225         def cpv_all(self,use_cache=1):
5226                 returnme=[]
5227                 basepath = self.root+VDB_PATH+"/"
5228
5229                 for x in self.categories:
5230                         for y in listdir(basepath+x,EmptyOnError=1):
5231                                 if y.startswith("."):
5232                                         continue
5233                                 subpath = x+"/"+y
5234                                 # -MERGING- should never be a cpv, nor should files.
5235                                 if os.path.isdir(basepath+subpath) and (pkgsplit(y) is not None):
5236                                         returnme += [subpath]
5237                 return returnme
5238
5239         def cp_all(self,use_cache=1):
5240                 mylist = self.cpv_all(use_cache=use_cache)
5241                 d={}
5242                 for y in mylist:
5243                         if y[0] == '*':
5244                                 y = y[1:]
5245                         mysplit=catpkgsplit(y)
5246                         if not mysplit:
5247                                 self.invalidentry(self.root+VDB_PATH+"/"+y)
5248                                 continue
5249                         d[mysplit[0]+"/"+mysplit[1]] = None
5250                 return d.keys()
5251
5252         def checkblockers(self,origdep):
5253                 pass
5254
5255         def match(self,origdep,use_cache=1):
5256                 "caching match function"
5257                 mydep = dep_expand(
5258                         origdep, mydb=self, use_cache=use_cache, settings=self.settings)
5259                 mykey=dep_getkey(mydep)
5260                 mycat=mykey.split("/")[0]
5261                 if not use_cache:
5262                         if self.matchcache.has_key(mycat):
5263                                 del self.mtdircache[mycat]
5264                                 del self.matchcache[mycat]
5265                         mymatch = match_from_list(mydep,
5266                                 self.cp_list(mykey, use_cache=use_cache))
5267                         myslot = portage_dep.dep_getslot(mydep)
5268                         if myslot is not None:
5269                                 mymatch = [cpv for cpv in mymatch \
5270                                         if self.aux_get(cpv, ["SLOT"])[0] == myslot]
5271                         return mymatch
5272                 try:
5273                         curmtime=os.stat(self.root+VDB_PATH+"/"+mycat)[stat.ST_MTIME]
5274                 except (IOError, OSError):
5275                         curmtime=0
5276
5277                 if not self.matchcache.has_key(mycat) or not self.mtdircache[mycat]==curmtime:
5278                         # clear cache entry
5279                         self.mtdircache[mycat]=curmtime
5280                         self.matchcache[mycat]={}
5281                 if not self.matchcache[mycat].has_key(mydep):
5282                         mymatch=match_from_list(mydep,self.cp_list(mykey,use_cache=use_cache))
5283                         myslot = portage_dep.dep_getslot(mydep)
5284                         if myslot is not None:
5285                                 mymatch = [cpv for cpv in mymatch \
5286                                         if self.aux_get(cpv, ["SLOT"])[0] == myslot]
5287                         self.matchcache[mycat][mydep]=mymatch
5288                 return self.matchcache[mycat][mydep][:]
5289
5290         def findname(self, mycpv):
5291                 return self.root+VDB_PATH+"/"+str(mycpv)+"/"+mycpv.split("/")[1]+".ebuild"
5292
5293         def flush_cache(self):
5294                 """If the current user has permission and the internal aux_get cache has
5295                 been updated, save it to disk and mark it unmodified.  This is called
5296                 by emerge after it has loaded the full vdb for use in dependency
5297                 calculations.  Currently, the cache is only written if the user has
5298                 superuser privileges (since that's required to obtain a lock), but all
5299                 users have read access and benefit from faster metadata lookups (as
5300                 long as at least part of the cache is still valid)."""
5301                 if self._aux_cache is not None and \
5302                         self._aux_cache["modified"] and \
5303                         secpass >= 2:
5304                         valid_nodes = set(self.cpv_all())
5305                         for cpv in self._aux_cache["packages"].keys():
5306                                 if cpv not in valid_nodes:
5307                                         del self._aux_cache["packages"][cpv]
5308                         del self._aux_cache["modified"]
5309                         try:
5310                                 f = atomic_ofstream(self._aux_cache_filename)
5311                                 cPickle.dump(self._aux_cache, f, -1)
5312                                 f.close()
5313                                 portage_util.apply_secpass_permissions(
5314                                         self._aux_cache_filename, gid=portage_gid, mode=0644)
5315                         except (IOError, OSError), e:
5316                                 pass
5317                         self._aux_cache["modified"] = False
5318
5319         def aux_get(self, mycpv, wants):
5320                 """This automatically caches selected keys that are frequently needed
5321                 by emerge for dependency calculations.  The cached metadata is
5322                 considered valid if the mtime of the package directory has not changed
5323                 since the data was cached.  The cache is stored in a pickled dict
5324                 object with the following format:
5325
5326                 {version:"1", "packages":{cpv1:(mtime,{k1,v1, k2,v2, ...}), cpv2...}}
5327
5328                 If an error occurs while loading the cache pickle or the version is
5329                 unrecognized, the cache will simple be recreated from scratch (it is
5330                 completely disposable).
5331                 """
5332                 if not self._aux_cache_keys.intersection(wants):
5333                         return self._aux_get(mycpv, wants)
5334                 if self._aux_cache is None:
5335                         try:
5336                                 f = open(self._aux_cache_filename)
5337                                 mypickle = cPickle.Unpickler(f)
5338                                 mypickle.find_global = None
5339                                 self._aux_cache = mypickle.load()
5340                                 f.close()
5341                                 del f
5342                         except (IOError, OSError, EOFError, cPickle.UnpicklingError):
5343                                 pass
5344                         if not self._aux_cache or \
5345                                 not isinstance(self._aux_cache, dict) or \
5346                                 self._aux_cache.get("version") != self._aux_cache_version or \
5347                                 not self._aux_cache.get("packages"):
5348                                 self._aux_cache = {"version":self._aux_cache_version}
5349                                 self._aux_cache["packages"] = {}
5350                         self._aux_cache["modified"] = False
5351                 mydir = os.path.join(self.root, VDB_PATH, mycpv)
5352                 mydir_stat = None
5353                 try:
5354                         mydir_stat = os.stat(mydir)
5355                 except OSError, e:
5356                         if e.errno != errno.ENOENT:
5357                                 raise
5358                         raise KeyError(mycpv)
5359                 mydir_mtime = long(mydir_stat.st_mtime)
5360                 pkg_data = self._aux_cache["packages"].get(mycpv)
5361                 mydata = {}
5362                 cache_valid = False
5363                 if pkg_data:
5364                         cache_mtime, metadata = pkg_data
5365                         cache_valid = cache_mtime == mydir_mtime
5366                 if cache_valid:
5367                         cache_incomplete = self._aux_cache_keys.difference(metadata)
5368                         if cache_incomplete:
5369                                 # Allow self._aux_cache_keys to change without a cache version
5370                                 # bump and efficiently recycle partial cache whenever possible.
5371                                 cache_valid = False
5372                                 pull_me = cache_incomplete.union(wants)
5373                         else:
5374                                 pull_me = set(wants).difference(self._aux_cache_keys)
5375                         mydata.update(metadata)
5376                 else:
5377                         pull_me = self._aux_cache_keys.union(wants)
5378                 if pull_me:
5379                         # pull any needed data and cache it
5380                         aux_keys = list(pull_me)
5381                         for k, v in izip(aux_keys, self._aux_get(mycpv, aux_keys)):
5382                                 mydata[k] = v
5383                         if not cache_valid:
5384                                 cache_data = {}
5385                                 for aux_key in self._aux_cache_keys:
5386                                         cache_data[aux_key] = mydata[aux_key]
5387                                 self._aux_cache["packages"][mycpv] = (mydir_mtime, cache_data)
5388                                 self._aux_cache["modified"] = True
5389                 return [mydata[x] for x in wants]
5390
5391         def _aux_get(self, mycpv, wants):
5392                 mydir = os.path.join(self.root, VDB_PATH, mycpv)
5393                 try:
5394                         if not stat.S_ISDIR(os.stat(mydir).st_mode):
5395                                 raise KeyError(mycpv)
5396                 except OSError, e:
5397                         if e.errno == errno.ENOENT:
5398                                 raise KeyError(mycpv)
5399                         del e
5400                         raise
5401                 results = []
5402                 for x in wants:
5403                         try:
5404                                 myf = open(os.path.join(mydir, x), "r")
5405                                 try:
5406                                         myd = myf.read()
5407                                 finally:
5408                                         myf.close()
5409                                 myd = " ".join(myd.split())
5410                         except IOError:
5411                                 myd = ""
5412                         if x == "EAPI" and not myd:
5413                                 results.append("0")
5414                         else:
5415                                 results.append(myd)
5416                 return results
5417
5418         def aux_update(self, cpv, values):
5419                 cat, pkg = cpv.split("/")
5420                 mylink = dblink(cat, pkg, self.root, self.settings,
5421                 treetype="vartree", vartree=self.vartree)
5422                 if not mylink.exists():
5423                         raise KeyError(cpv)
5424                 for k, v in values.iteritems():
5425                         mylink.setfile(k, v)
5426
5427         def counter_tick(self,myroot,mycpv=None):
5428                 return self.counter_tick_core(myroot,incrementing=1,mycpv=mycpv)
5429
5430         def get_counter_tick_core(self,myroot,mycpv=None):
5431                 return self.counter_tick_core(myroot,incrementing=0,mycpv=mycpv)+1
5432
5433         def counter_tick_core(self,myroot,incrementing=1,mycpv=None):
5434                 "This method will grab the next COUNTER value and record it back to the global file.  Returns new counter value."
5435                 cpath=myroot+"var/cache/edb/counter"
5436                 changed=0
5437                 min_counter = 0
5438                 if mycpv:
5439                         mysplit = pkgsplit(mycpv)
5440                         for x in self.match(mysplit[0],use_cache=0):
5441                                 if x==mycpv:
5442                                         continue
5443                                 try:
5444                                         old_counter = long(self.aux_get(x,["COUNTER"])[0])
5445                                         writemsg("COUNTER '%d' '%s'\n" % (old_counter, x),1)
5446                                 except (ValueError, KeyError): # valueError from long(), KeyError from aux_get
5447                                         old_counter = 0
5448                                         writemsg("!!! BAD COUNTER in '%s'\n" % (x), noiselevel=-1)
5449                                 if old_counter > min_counter:
5450                                         min_counter = old_counter
5451
5452                 # We write our new counter value to a new file that gets moved into
5453                 # place to avoid filesystem corruption.
5454                 find_counter = ("find '%s' -type f -name COUNTER | " + \
5455                         "while read f; do echo $(<\"${f}\"); done | " + \
5456                         "sort -n | tail -n1") % os.path.join(self.root, VDB_PATH)
5457                 if os.path.exists(cpath):
5458                         cfile=open(cpath, "r")
5459                         try:
5460                                 counter=long(cfile.readline())
5461                         except (ValueError,OverflowError):
5462                                 try:
5463                                         counter = long(commands.getoutput(find_counter).strip())
5464                                         writemsg("!!! COUNTER was corrupted; resetting to value of %d\n" % counter,
5465                                                 noiselevel=-1)
5466                                         changed=1
5467                                 except (ValueError,OverflowError):
5468                                         writemsg("!!! COUNTER data is corrupt in pkg db. The values need to be\n",
5469                                                 noiselevel=-1)
5470                                         writemsg("!!! corrected/normalized so that portage can operate properly.\n",
5471                                                 noiselevel=-1)
5472                                         writemsg("!!! A simple solution is not yet available so try #gentoo on IRC.\n")
5473                                         sys.exit(2)
5474                         cfile.close()
5475                 else:
5476                         try:
5477                                 counter = long(commands.getoutput(find_counter).strip())
5478                                 writemsg("!!! Global counter missing. Regenerated from counter files to: %s\n" % counter,
5479                                         noiselevel=-1)
5480                         except ValueError: # Value Error for long(), probably others for commands.getoutput
5481                                 writemsg("!!! Initializing global counter.\n", noiselevel=-1)
5482                                 counter=long(0)
5483                         changed=1
5484
5485                 if counter < min_counter:
5486                         counter = min_counter+1000
5487                         changed = 1
5488
5489                 if incrementing or changed:
5490
5491                         #increment counter
5492                         counter += 1
5493                         # update new global counter file
5494                         write_atomic(cpath, str(counter))
5495                 return counter
5496
5497 class vartree(object):
5498         "this tree will scan a var/db/pkg database located at root (passed to init)"
5499         def __init__(self, root="/", virtual=None, clone=None, categories=None,
5500                 settings=None):
5501                 if clone:
5502                         writemsg("vartree.__init__(): deprecated " + \
5503                                 "use of clone parameter\n", noiselevel=-1)
5504                         self.root       = clone.root[:]
5505                         self.dbapi      = copy.deepcopy(clone.dbapi)
5506                         self.populated  = 1
5507                         self.settings   = config(clone=clone.settings)
5508                 else:
5509                         self.root       = root[:]
5510                         if settings is None:
5511                                 settings = globals()["settings"]
5512                         self.settings = settings # for key_expand calls
5513                         if categories is None:
5514                                 categories = settings.categories
5515                         self.dbapi = vardbapi(self.root, categories=categories,
5516                                 settings=settings, vartree=self)
5517                         self.populated  = 1
5518
5519         def zap(self,mycpv):
5520                 return
5521
5522         def inject(self,mycpv):
5523                 return
5524
5525         def get_provide(self,mycpv):
5526                 myprovides=[]
5527                 mylines = None
5528                 try:
5529                         mylines, myuse = self.dbapi.aux_get(mycpv, ["PROVIDE","USE"])
5530                         if mylines:
5531                                 myuse = myuse.split()
5532                                 mylines = flatten(portage_dep.use_reduce(portage_dep.paren_reduce(mylines), uselist=myuse))
5533                                 for myprovide in mylines:
5534                                         mys = catpkgsplit(myprovide)
5535                                         if not mys:
5536                                                 mys = myprovide.split("/")
5537                                         myprovides += [mys[0] + "/" + mys[1]]
5538                         return myprovides
5539                 except SystemExit, e:
5540                         raise
5541                 except Exception, e:
5542                         mydir = os.path.join(self.root, VDB_PATH, mycpv)
5543                         writemsg("\nParse Error reading PROVIDE and USE in '%s'\n" % mydir,
5544                                 noiselevel=-1)
5545                         if mylines:
5546                                 writemsg("Possibly Invalid: '%s'\n" % str(mylines),
5547                                         noiselevel=-1)
5548                         writemsg("Exception: %s\n\n" % str(e), noiselevel=-1)
5549                         return []
5550
5551         def get_all_provides(self):
5552                 myprovides = {}
5553                 for node in self.getallcpv():
5554                         for mykey in self.get_provide(node):
5555                                 if myprovides.has_key(mykey):
5556                                         myprovides[mykey] += [node]
5557                                 else:
5558                                         myprovides[mykey]  = [node]
5559                 return myprovides
5560
5561         def dep_bestmatch(self,mydep,use_cache=1):
5562                 "compatibility method -- all matches, not just visible ones"
5563                 #mymatch=best(match(dep_expand(mydep,self.dbapi),self.dbapi))
5564                 mymatch = best(self.dbapi.match(
5565                         dep_expand(mydep, mydb=self.dbapi, settings=self.settings),
5566                         use_cache=use_cache))
5567                 if mymatch is None:
5568                         return ""
5569                 else:
5570                         return mymatch
5571
5572         def dep_match(self,mydep,use_cache=1):
5573                 "compatibility method -- we want to see all matches, not just visible ones"
5574                 #mymatch=match(mydep,self.dbapi)
5575                 mymatch=self.dbapi.match(mydep,use_cache=use_cache)
5576                 if mymatch is None:
5577                         return []
5578                 else:
5579                         return mymatch
5580
5581         def exists_specific(self,cpv):
5582                 return self.dbapi.cpv_exists(cpv)
5583
5584         def getallcpv(self):
5585                 """temporary function, probably to be renamed --- Gets a list of all
5586                 category/package-versions installed on the system."""
5587                 return self.dbapi.cpv_all()
5588
5589         def getallnodes(self):
5590                 """new behavior: these are all *unmasked* nodes.  There may or may not be available
5591                 masked package for nodes in this nodes list."""
5592                 return self.dbapi.cp_all()
5593
5594         def exists_specific_cat(self,cpv,use_cache=1):
5595                 cpv = key_expand(cpv, mydb=self.dbapi, use_cache=use_cache,
5596                         settings=self.settings)
5597                 a=catpkgsplit(cpv)
5598                 if not a:
5599                         return 0
5600                 mylist=listdir(self.root+VDB_PATH+"/"+a[0],EmptyOnError=1)
5601                 for x in mylist:
5602                         b=pkgsplit(x)
5603                         if not b:
5604                                 self.dbapi.invalidentry(self.root+VDB_PATH+"/"+a[0]+"/"+x)
5605                                 continue
5606                         if a[1]==b[0]:
5607                                 return 1
5608                 return 0
5609
5610         def getebuildpath(self,fullpackage):
5611                 cat,package=fullpackage.split("/")
5612                 return self.root+VDB_PATH+"/"+fullpackage+"/"+package+".ebuild"
5613
5614         def getnode(self,mykey,use_cache=1):
5615                 mykey = key_expand(mykey, mydb=self.dbapi, use_cache=use_cache,
5616                         settings=self.settings)
5617                 if not mykey:
5618                         return []
5619                 mysplit=mykey.split("/")
5620                 mydirlist=listdir(self.root+VDB_PATH+"/"+mysplit[0],EmptyOnError=1)
5621                 returnme=[]
5622                 for x in mydirlist:
5623                         mypsplit=pkgsplit(x)
5624                         if not mypsplit:
5625                                 self.dbapi.invalidentry(self.root+VDB_PATH+"/"+mysplit[0]+"/"+x)
5626                                 continue
5627                         if mypsplit[0]==mysplit[1]:
5628                                 appendme=[mysplit[0]+"/"+x,[mysplit[0],mypsplit[0],mypsplit[1],mypsplit[2]]]
5629                                 returnme.append(appendme)
5630                 return returnme
5631
5632
5633         def getslot(self,mycatpkg):
5634                 "Get a slot for a catpkg; assume it exists."
5635                 try:
5636                         return self.dbapi.aux_get(mycatpkg, ["SLOT"])[0]
5637                 except KeyError:
5638                         return ""
5639
5640         def hasnode(self,mykey,use_cache):
5641                 """Does the particular node (cat/pkg key) exist?"""
5642                 mykey = key_expand(mykey, mydb=self.dbapi, use_cache=use_cache,
5643                         settings=self.settings)
5644                 mysplit=mykey.split("/")
5645                 mydirlist=listdir(self.root+VDB_PATH+"/"+mysplit[0],EmptyOnError=1)
5646                 for x in mydirlist:
5647                         mypsplit=pkgsplit(x)
5648                         if not mypsplit:
5649                                 self.dbapi.invalidentry(self.root+VDB_PATH+"/"+mysplit[0]+"/"+x)
5650                                 continue
5651                         if mypsplit[0]==mysplit[1]:
5652                                 return 1
5653                 return 0
5654
5655         def populate(self):
5656                 self.populated=1
5657
5658 auxdbkeys=[
5659   'DEPEND',    'RDEPEND',   'SLOT',      'SRC_URI',
5660         'RESTRICT',  'HOMEPAGE',  'LICENSE',   'DESCRIPTION',
5661         'KEYWORDS',  'INHERITED', 'IUSE',      'CDEPEND',
5662         'PDEPEND',   'PROVIDE', 'EAPI',
5663         'UNUSED_01', 'UNUSED_02', 'UNUSED_03', 'UNUSED_04',
5664         'UNUSED_05', 'UNUSED_06', 'UNUSED_07',
5665         ]
5666 auxdbkeylen=len(auxdbkeys)
5667
5668 def close_portdbapi_caches():
5669         for i in portdbapi.portdbapi_instances:
5670                 i.close_caches()
5671
5672
5673 class portdbapi(dbapi):
5674         """this tree will scan a portage directory located at root (passed to init)"""
5675         portdbapi_instances = []
5676
5677         def __init__(self,porttree_root,mysettings=None):
5678                 portdbapi.portdbapi_instances.append(self)
5679
5680                 if mysettings:
5681                         self.mysettings = mysettings
5682                 else:
5683                         global settings
5684                         self.mysettings = config(clone=settings)
5685                 self._categories = set(self.mysettings.categories)
5686                 # This is strictly for use in aux_get() doebuild calls when metadata
5687                 # is generated by the depend phase.  It's safest to use a clone for
5688                 # this purpose because doebuild makes many changes to the config
5689                 # instance that is passed in.
5690                 self.doebuild_settings = config(clone=self.mysettings)
5691
5692                 self.manifestVerifyLevel  = None
5693                 self.manifestVerifier     = None
5694                 self.manifestCache        = {}    # {location: [stat, md5]}
5695                 self.manifestMissingCache = []
5696
5697                 if "gpg" in self.mysettings.features:
5698                         self.manifestVerifyLevel   = portage_gpg.EXISTS
5699                         if "strict" in self.mysettings.features:
5700                                 self.manifestVerifyLevel = portage_gpg.MARGINAL
5701                                 self.manifestVerifier = portage_gpg.FileChecker(self.mysettings["PORTAGE_GPG_DIR"], "gentoo.gpg", minimumTrust=self.manifestVerifyLevel)
5702                         elif "severe" in self.mysettings.features:
5703                                 self.manifestVerifyLevel = portage_gpg.TRUSTED
5704                                 self.manifestVerifier = portage_gpg.FileChecker(self.mysettings["PORTAGE_GPG_DIR"], "gentoo.gpg", requireSignedRing=True, minimumTrust=self.manifestVerifyLevel)
5705                         else:
5706                                 self.manifestVerifier = portage_gpg.FileChecker(self.mysettings["PORTAGE_GPG_DIR"], "gentoo.gpg", minimumTrust=self.manifestVerifyLevel)
5707
5708                 #self.root=settings["PORTDIR"]
5709                 self.porttree_root = os.path.realpath(porttree_root)
5710
5711                 self.depcachedir = self.mysettings.depcachedir[:]
5712
5713                 self.tmpfs = self.mysettings["PORTAGE_TMPFS"]
5714                 if self.tmpfs and not os.path.exists(self.tmpfs):
5715                         self.tmpfs = None
5716                 if self.tmpfs and not os.access(self.tmpfs, os.W_OK):
5717                         self.tmpfs = None
5718                 if self.tmpfs and not os.access(self.tmpfs, os.R_OK):
5719                         self.tmpfs = None
5720
5721                 self.eclassdb = eclass_cache.cache(self.porttree_root,
5722                         overlays=self.mysettings["PORTDIR_OVERLAY"].split())
5723
5724                 self.metadbmodule = self.mysettings.load_best_module("portdbapi.metadbmodule")
5725
5726                 #if the portdbapi is "frozen", then we assume that we can cache everything (that no updates to it are happening)
5727                 self.xcache={}
5728                 self.frozen=0
5729
5730                 self.porttrees = [self.porttree_root] + \
5731                         [os.path.realpath(t) for t in self.mysettings["PORTDIR_OVERLAY"].split()]
5732                 self.auxdbmodule  = self.mysettings.load_best_module("portdbapi.auxdbmodule")
5733                 self.auxdb        = {}
5734                 self._init_cache_dirs()
5735                 # XXX: REMOVE THIS ONCE UNUSED_0 IS YANKED FROM auxdbkeys
5736                 # ~harring
5737                 filtered_auxdbkeys = filter(lambda x: not x.startswith("UNUSED_0"), auxdbkeys)
5738                 if secpass < 1:
5739                         from cache import metadata_overlay, volatile
5740                         for x in self.porttrees:
5741                                 db_ro = self.auxdbmodule(self.depcachedir, x,
5742                                         filtered_auxdbkeys, gid=portage_gid, readonly=True)
5743                                 self.auxdb[x] = metadata_overlay.database(
5744                                         self.depcachedir, x, filtered_auxdbkeys,
5745                                         gid=portage_gid, db_rw=volatile.database,
5746                                         db_ro=db_ro)
5747                 else:
5748                         for x in self.porttrees:
5749                                 # location, label, auxdbkeys
5750                                 self.auxdb[x] = self.auxdbmodule(
5751                                         self.depcachedir, x, filtered_auxdbkeys, gid=portage_gid)
5752                 # Selectively cache metadata in order to optimize dep matching.
5753                 self._aux_cache_keys = set(["EAPI", "KEYWORDS", "SLOT"])
5754                 self._aux_cache = {}
5755
5756         def _init_cache_dirs(self):
5757                 """Create /var/cache/edb/dep and adjust permissions for the portage
5758                 group."""
5759
5760                 dirmode  = 02070
5761                 filemode =   060
5762                 modemask =    02
5763
5764                 try:
5765                         for mydir in (self.depcachedir,):
5766                                 if portage_util.ensure_dirs(mydir, gid=portage_gid, mode=dirmode, mask=modemask):
5767                                         writemsg("Adjusting permissions recursively: '%s'\n" % mydir,
5768                                                 noiselevel=-1)
5769                                         def onerror(e):
5770                                                 raise # bail out on the first error that occurs during recursion
5771                                         if not apply_recursive_permissions(mydir,
5772                                                 gid=portage_gid, dirmode=dirmode, dirmask=modemask,
5773                                                 filemode=filemode, filemask=modemask, onerror=onerror):
5774                                                 raise portage_exception.OperationNotPermitted(
5775                                                         "Failed to apply recursive permissions for the portage group.")
5776                 except portage_exception.PortageException, e:
5777                         pass
5778
5779         def close_caches(self):
5780                 for x in self.auxdb.keys():
5781                         self.auxdb[x].sync()
5782                 self.auxdb.clear()
5783
5784         def flush_cache(self):
5785                 for x in self.auxdb.values():
5786                         x.sync()
5787
5788         def finddigest(self,mycpv):
5789                 try:
5790                         mydig   = self.findname2(mycpv)[0]
5791                         if not mydig:
5792                                 return ""
5793                         mydigs  = mydig.split("/")[:-1]
5794                         mydig   = "/".join(mydigs)
5795                         mysplit = mycpv.split("/")
5796                 except OSError:
5797                         return ""
5798                 return mydig+"/files/digest-"+mysplit[-1]
5799
5800         def findname(self,mycpv):
5801                 return self.findname2(mycpv)[0]
5802
5803         def findname2(self, mycpv, mytree=None):
5804                 """ 
5805                 Returns the location of the CPV, and what overlay it was in.
5806                 Searches overlays first, then PORTDIR; this allows us to return the first
5807                 matching file.  As opposed to starting in portdir and then doing overlays
5808                 second, we would have to exhaustively search the overlays until we found
5809                 the file we wanted.
5810                 """
5811                 if not mycpv:
5812                         return "",0
5813                 mysplit=mycpv.split("/")
5814                 psplit=pkgsplit(mysplit[1])
5815
5816                 if mytree:
5817                         mytrees = [mytree]
5818                 else:
5819                         mytrees = self.porttrees[:]
5820                         mytrees.reverse()
5821                 if psplit:
5822                         for x in mytrees:
5823                                 file=x+"/"+mysplit[0]+"/"+psplit[0]+"/"+mysplit[1]+".ebuild"
5824                                 if os.access(file, os.R_OK):
5825                                         return[file, x]
5826                 return None, 0
5827
5828         def aux_get(self, mycpv, mylist, mytree=None):
5829                 "stub code for returning auxilliary db information, such as SLOT, DEPEND, etc."
5830                 'input: "sys-apps/foo-1.0",["SLOT","DEPEND","HOMEPAGE"]'
5831                 'return: ["0",">=sys-libs/bar-1.0","http://www.foo.com"] or raise KeyError if error'
5832                 cache_me = False
5833                 if not mytree and not set(mylist).difference(self._aux_cache_keys):
5834                         aux_cache = self._aux_cache.get(mycpv)
5835                         if aux_cache is not None:
5836                                 return [aux_cache[x] for x in mylist]
5837                         cache_me = True
5838                 global auxdbkeys,auxdbkeylen
5839                 cat,pkg = mycpv.split("/", 1)
5840
5841                 myebuild, mylocation = self.findname2(mycpv, mytree)
5842
5843                 if not myebuild:
5844                         writemsg("!!! aux_get(): ebuild path for '%(cpv)s' not specified:\n" % {"cpv":mycpv},
5845                                 noiselevel=1)
5846                         writemsg("!!!            %s\n" % myebuild, noiselevel=1)
5847                         raise KeyError(mycpv)
5848
5849                 myManifestPath = "/".join(myebuild.split("/")[:-1])+"/Manifest"
5850                 if "gpg" in self.mysettings.features:
5851                         try:
5852                                 mys = portage_gpg.fileStats(myManifestPath)
5853                                 if (myManifestPath in self.manifestCache) and \
5854                                    (self.manifestCache[myManifestPath] == mys):
5855                                         pass
5856                                 elif self.manifestVerifier:
5857                                         if not self.manifestVerifier.verify(myManifestPath):
5858                                                 # Verification failed the desired level.
5859                                                 raise portage_exception.UntrustedSignature, "Untrusted Manifest: %(manifest)s" % {"manifest":myManifestPath}
5860
5861                                 if ("severe" in self.mysettings.features) and \
5862                                    (mys != portage_gpg.fileStats(myManifestPath)):
5863                                         raise portage_exception.SecurityViolation, "Manifest changed: %(manifest)s" % {"manifest":myManifestPath}
5864
5865                         except portage_exception.InvalidSignature, e:
5866                                 if ("strict" in self.mysettings.features) or \
5867                                    ("severe" in self.mysettings.features):
5868                                         raise
5869                                 writemsg("!!! INVALID MANIFEST SIGNATURE DETECTED: %(manifest)s\n" % {"manifest":myManifestPath})
5870                         except portage_exception.MissingSignature, e:
5871                                 if ("severe" in self.mysettings.features):
5872                                         raise
5873                                 if ("strict" in self.mysettings.features):
5874                                         if myManifestPath not in self.manifestMissingCache:
5875                                                 writemsg("!!! WARNING: Missing signature in: %(manifest)s\n" % {"manifest":myManifestPath})
5876                                                 self.manifestMissingCache.insert(0,myManifestPath)
5877                         except (OSError,portage_exception.FileNotFound), e:
5878                                 if ("strict" in self.mysettings.features) or \
5879                                    ("severe" in self.mysettings.features):
5880                                         raise portage_exception.SecurityViolation, "Error in verification of signatures: %(errormsg)s" % {"errormsg":str(e)}
5881                                 writemsg("!!! Manifest is missing or inaccessable: %(manifest)s\n" % {"manifest":myManifestPath},
5882                                         noiselevel=-1)
5883
5884
5885                 if os.access(myebuild, os.R_OK):
5886                         emtime=os.stat(myebuild)[stat.ST_MTIME]
5887                 else:
5888                         writemsg("!!! aux_get(): ebuild for '%(cpv)s' does not exist at:\n" % {"cpv":mycpv},
5889                                 noiselevel=-1)
5890                         writemsg("!!!            %s\n" % myebuild,
5891                                 noiselevel=-1)
5892                         raise KeyError
5893
5894                 try:
5895                         mydata = self.auxdb[mylocation][mycpv]
5896                         if emtime != long(mydata.get("_mtime_", 0)):
5897                                 doregen = True
5898                         elif len(mydata.get("_eclasses_", [])) > 0:
5899                                 doregen = not self.eclassdb.is_eclass_data_valid(mydata["_eclasses_"])
5900                         else:
5901                                 doregen = False
5902                                 
5903                 except KeyError:
5904                         doregen = True
5905                 except CacheError:
5906                         doregen = True
5907                         try:                            del self.auxdb[mylocation][mycpv]
5908                         except KeyError:        pass
5909
5910                 writemsg("auxdb is valid: "+str(not doregen)+" "+str(pkg)+"\n", 2)
5911
5912                 if doregen:
5913                         writemsg("doregen: %s %s\n" % (doregen,mycpv), 2)
5914                         writemsg("Generating cache entry(0) for: "+str(myebuild)+"\n",1)
5915
5916                         self.doebuild_settings.reset()
5917                         mydata = {}
5918                         myret = doebuild(myebuild, "depend",
5919                                 self.doebuild_settings["ROOT"], self.doebuild_settings,
5920                                 dbkey=mydata, tree="porttree", mydbapi=self)
5921                         if myret != os.EX_OK:
5922                                 raise KeyError(mycpv)
5923
5924                         if "EAPI" not in mydata or not mydata["EAPI"].strip():
5925                                 mydata["EAPI"] = "0"
5926
5927                         if not eapi_is_supported(mydata["EAPI"]):
5928                                 # if newer version, wipe everything and negate eapi
5929                                 eapi = mydata["EAPI"]
5930                                 mydata = {}
5931                                 map(lambda x:mydata.setdefault(x, ""), auxdbkeys)
5932                                 mydata["EAPI"] = "-"+eapi
5933
5934                         if mydata.get("INHERITED", False):
5935                                 mydata["_eclasses_"] = self.eclassdb.get_eclass_data(mydata["INHERITED"].split())
5936                         else:
5937                                 mydata["_eclasses_"] = {}
5938                         
5939                         del mydata["INHERITED"]
5940
5941                         mydata["_mtime_"] = emtime
5942
5943                         self.auxdb[mylocation][mycpv] = mydata
5944
5945                 if not mydata.setdefault("EAPI", "0"):
5946                         mydata["EAPI"] = "0"
5947
5948                 #finally, we look at our internal cache entry and return the requested data.
5949                 returnme = []
5950                 for x in mylist:
5951                         if x == "INHERITED":
5952                                 returnme.append(' '.join(mydata.get("_eclasses_", {}).keys()))
5953                         else:
5954                                 returnme.append(mydata.get(x,""))
5955
5956                 if cache_me:
5957                         aux_cache = {}
5958                         for x in self._aux_cache_keys:
5959                                 aux_cache[x] = mydata.get(x, "")
5960                         self._aux_cache[mycpv] = aux_cache
5961
5962                 return returnme
5963
5964         def getfetchlist(self, mypkg, useflags=None, mysettings=None, all=0, mytree=None):
5965                 if mysettings is None:
5966                         mysettings = self.mysettings
5967                 try:
5968                         myuris = self.aux_get(mypkg, ["SRC_URI"], mytree=mytree)[0]
5969                 except KeyError:
5970                         print red("getfetchlist():")+" aux_get() error reading "+mypkg+"; aborting."
5971                         sys.exit(1)
5972
5973                 if useflags is None:
5974                         useflags = mysettings["USE"].split()
5975
5976                 myurilist = portage_dep.paren_reduce(myuris)
5977                 myurilist = portage_dep.use_reduce(myurilist,uselist=useflags,matchall=all)
5978                 newuris = flatten(myurilist)
5979
5980                 myfiles = []
5981                 for x in newuris:
5982                         mya = os.path.basename(x)
5983                         if not mya in myfiles:
5984                                 myfiles.append(mya)
5985                 return [newuris, myfiles]
5986
5987         def getfetchsizes(self,mypkg,useflags=None,debug=0):
5988                 # returns a filename:size dictionnary of remaining downloads
5989                 myebuild = self.findname(mypkg)
5990                 pkgdir = os.path.dirname(myebuild)
5991                 mf = Manifest(pkgdir, self.mysettings["DISTDIR"])
5992                 checksums = mf.getDigests()
5993                 if not checksums:
5994                         if debug: print "[empty/missing/bad digest]: "+mypkg
5995                         return None
5996                 filesdict={}
5997                 if useflags is None:
5998                         myuris, myfiles = self.getfetchlist(mypkg,all=1)
5999                 else:
6000                         myuris, myfiles = self.getfetchlist(mypkg,useflags=useflags)
6001                 #XXX: maybe this should be improved: take partial downloads
6002                 # into account? check checksums?
6003                 for myfile in myfiles:
6004                         if myfile not in checksums:
6005                                 if debug:
6006                                         writemsg("[bad digest]: missing %s for %s\n" % (myfile, mypkg))
6007                                 continue
6008                         file_path = os.path.join(self.mysettings["DISTDIR"], myfile)
6009                         mystat = None
6010                         try:
6011                                 mystat = os.stat(file_path)
6012                         except OSError, e:
6013                                 pass
6014                         if mystat is None:
6015                                 existing_size = 0
6016                         else:
6017                                 existing_size = mystat.st_size
6018                         remaining_size = int(checksums[myfile]["size"]) - existing_size
6019                         if remaining_size > 0:
6020                                 # Assume the download is resumable.
6021                                 filesdict[myfile] = remaining_size
6022                         elif remaining_size < 0:
6023                                 # The existing file is too large and therefore corrupt.
6024                                 filesdict[myfile] = int(checksums[myfile]["size"])
6025                 return filesdict
6026
6027         def fetch_check(self, mypkg, useflags=None, mysettings=None, all=False):
6028                 if not useflags:
6029                         if mysettings:
6030                                 useflags = mysettings["USE"].split()
6031                 myuri, myfiles = self.getfetchlist(mypkg, useflags=useflags, mysettings=mysettings, all=all)
6032                 myebuild = self.findname(mypkg)
6033                 pkgdir = os.path.dirname(myebuild)
6034                 mf = Manifest(pkgdir, self.mysettings["DISTDIR"])
6035                 mysums = mf.getDigests()
6036
6037                 failures = {}
6038                 for x in myfiles:
6039                         if not mysums or x not in mysums:
6040                                 ok     = False
6041                                 reason = "digest missing"
6042                         else:
6043                                 try:
6044                                         ok, reason = portage_checksum.verify_all(
6045                                                 os.path.join(self.mysettings["DISTDIR"], x), mysums[x])
6046                                 except portage_exception.FileNotFound, e:
6047                                         ok = False
6048                                         reason = "File Not Found: '%s'" % str(e)
6049                         if not ok:
6050                                 failures[x] = reason
6051                 if failures:
6052                         return False
6053                 return True
6054
6055         def cpv_exists(self,mykey):
6056                 "Tells us whether an actual ebuild exists on disk (no masking)"
6057                 cps2=mykey.split("/")
6058                 cps=catpkgsplit(mykey,silent=0)
6059                 if not cps:
6060                         #invalid cat/pkg-v
6061                         return 0
6062                 if self.findname(cps[0]+"/"+cps2[1]):
6063                         return 1
6064                 else:
6065                         return 0
6066
6067         def cp_all(self):
6068                 "returns a list of all keys in our tree"
6069                 d={}
6070                 for x in self.mysettings.categories:
6071                         for oroot in self.porttrees:
6072                                 for y in listdir(oroot+"/"+x,EmptyOnError=1,ignorecvs=1,dirsonly=1):
6073                                         d[x+"/"+y] = None
6074                 l = d.keys()
6075                 l.sort()
6076                 return l
6077
6078         def p_list(self,mycp):
6079                 d={}
6080                 for oroot in self.porttrees:
6081                         for x in listdir(oroot+"/"+mycp,EmptyOnError=1,ignorecvs=1):
6082                                 if x[-7:]==".ebuild":
6083                                         d[x[:-7]] = None
6084                 return d.keys()
6085
6086         def cp_list(self, mycp, use_cache=1, mytree=None):
6087                 mysplit=mycp.split("/")
6088                 invalid_category = mysplit[0] not in self._categories
6089                 d={}
6090                 if mytree:
6091                         mytrees = [mytree]
6092                 else:
6093                         mytrees = self.porttrees
6094                 for oroot in mytrees:
6095                         for x in listdir(oroot+"/"+mycp,EmptyOnError=1,ignorecvs=1):
6096                                 if x.endswith(".ebuild"):
6097                                         pf = x[:-7]
6098                                         ps = pkgsplit(pf)
6099                                         if not ps:
6100                                                 writemsg("\nInvalid ebuild name: %s\n" % \
6101                                                         os.path.join(oroot, mycp, x), noiselevel=-1)
6102                                                 continue
6103                                         d[mysplit[0]+"/"+pf] = None
6104                 if invalid_category and d:
6105                         writemsg(("\n!!! '%s' has a category that is not listed in " + \
6106                                 "/etc/portage/categories\n") % mycp, noiselevel=-1)
6107                         return []
6108                 return d.keys()
6109
6110         def freeze(self):
6111                 for x in ["list-visible","bestmatch-visible","match-visible","match-all"]:
6112                         self.xcache[x]={}
6113                 self.frozen=1
6114
6115         def melt(self):
6116                 self.xcache={}
6117                 self.frozen=0
6118
6119         def xmatch(self,level,origdep,mydep=None,mykey=None,mylist=None):
6120                 "caching match function; very trick stuff"
6121                 #if no updates are being made to the tree, we can consult our xcache...
6122                 if self.frozen:
6123                         try:
6124                                 return self.xcache[level][origdep][:]
6125                         except KeyError:
6126                                 pass
6127
6128                 if not mydep:
6129                         #this stuff only runs on first call of xmatch()
6130                         #create mydep, mykey from origdep
6131                         mydep = dep_expand(origdep, mydb=self, settings=self.mysettings)
6132                         mykey=dep_getkey(mydep)
6133
6134                 if level=="list-visible":
6135                         #a list of all visible packages, not called directly (just by xmatch())
6136                         #myval=self.visible(self.cp_list(mykey))
6137                         myval=self.gvisible(self.visible(self.cp_list(mykey)))
6138                 elif level=="bestmatch-visible":
6139                         #dep match -- best match of all visible packages
6140                         myval=best(self.xmatch("match-visible",None,mydep=mydep,mykey=mykey))
6141                         #get all visible matches (from xmatch()), then choose the best one
6142                 elif level=="bestmatch-list":
6143                         #dep match -- find best match but restrict search to sublist
6144                         myval=best(match_from_list(mydep,mylist))
6145                         #no point is calling xmatch again since we're not caching list deps
6146                 elif level=="match-list":
6147                         #dep match -- find all matches but restrict search to sublist (used in 2nd half of visible())
6148                         myval=match_from_list(mydep,mylist)
6149                 elif level=="match-visible":
6150                         #dep match -- find all visible matches
6151                         myval = match_from_list(mydep,
6152                                 self.xmatch("list-visible", mykey, mydep=mykey, mykey=mykey))
6153                         #get all visible packages, then get the matching ones
6154                 elif level=="match-all":
6155                         #match *all* visible *and* masked packages
6156                         myval=match_from_list(mydep,self.cp_list(mykey))
6157                 else:
6158                         print "ERROR: xmatch doesn't handle",level,"query!"
6159                         raise KeyError
6160                 myslot = portage_dep.dep_getslot(mydep)
6161                 if myslot is not None:
6162                         slotmatches = []
6163                         for cpv in myval:
6164                                 try:
6165                                         if self.aux_get(cpv, ["SLOT"])[0] == myslot:
6166                                                 slotmatches.append(cpv)
6167                                 except KeyError:
6168                                         pass # ebuild masked by corruption
6169                         myval = slotmatches
6170                 if self.frozen and (level not in ["match-list","bestmatch-list"]):
6171                         self.xcache[level][mydep]=myval
6172                         if origdep and origdep != mydep:
6173                                 self.xcache[level][origdep] = myval
6174                 return myval[:]
6175
6176         def match(self,mydep,use_cache=1):
6177                 return self.xmatch("match-visible",mydep)
6178
6179         def visible(self,mylist):
6180                 """two functions in one.  Accepts a list of cpv values and uses the package.mask *and*
6181                 packages file to remove invisible entries, returning remaining items.  This function assumes
6182                 that all entries in mylist have the same category and package name."""
6183                 if (mylist is None) or (len(mylist)==0):
6184                         return []
6185                 newlist=mylist[:]
6186                 #first, we mask out packages in the package.mask file
6187                 mykey=newlist[0]
6188                 cpv=catpkgsplit(mykey)
6189                 if not cpv:
6190                         #invalid cat/pkg-v
6191                         print "visible(): invalid cat/pkg-v:",mykey
6192                         return []
6193                 mycp=cpv[0]+"/"+cpv[1]
6194                 maskdict=self.mysettings.pmaskdict
6195                 unmaskdict=self.mysettings.punmaskdict
6196                 if maskdict.has_key(mycp):
6197                         for x in maskdict[mycp]:
6198                                 mymatches=self.xmatch("match-all",x)
6199                                 if mymatches is None:
6200                                         #error in package.mask file; print warning and continue:
6201                                         print "visible(): package.mask entry \""+x+"\" is invalid, ignoring..."
6202                                         continue
6203                                 for y in mymatches:
6204                                         unmask=0
6205                                         if unmaskdict.has_key(mycp):
6206                                                 for z in unmaskdict[mycp]:
6207                                                         mymatches_unmask=self.xmatch("match-all",z)
6208                                                         if y in mymatches_unmask:
6209                                                                 unmask=1
6210                                                                 break
6211                                         if unmask==0:
6212                                                 try:
6213                                                         newlist.remove(y)
6214                                                 except ValueError:
6215                                                         pass
6216
6217                 revmaskdict=self.mysettings.prevmaskdict
6218                 if revmaskdict.has_key(mycp):
6219                         for x in revmaskdict[mycp]:
6220                                 #important: only match against the still-unmasked entries...
6221                                 #notice how we pass "newlist" to the xmatch() call below....
6222                                 #Without this, ~ deps in the packages files are broken.
6223                                 mymatches=self.xmatch("match-list",x,mylist=newlist)
6224                                 if mymatches is None:
6225                                         #error in packages file; print warning and continue:
6226                                         print "emerge: visible(): profile packages entry \""+x+"\" is invalid, ignoring..."
6227                                         continue
6228                                 pos=0
6229                                 while pos<len(newlist):
6230                                         if newlist[pos] not in mymatches:
6231                                                 del newlist[pos]
6232                                         else:
6233                                                 pos += 1
6234                 return newlist
6235
6236         def gvisible(self,mylist):
6237                 "strip out group-masked (not in current group) entries"
6238
6239                 if mylist is None:
6240                         return []
6241                 newlist=[]
6242
6243                 accept_keywords = self.mysettings["ACCEPT_KEYWORDS"].split()
6244                 pkgdict = self.mysettings.pkeywordsdict
6245                 for mycpv in mylist:
6246                         try:
6247                                 keys, eapi = self.aux_get(mycpv, ["KEYWORDS", "EAPI"])
6248                         except KeyError:
6249                                 continue
6250                         except portage_exception.PortageException, e:
6251                                 writemsg("!!! Error: aux_get('%s', ['KEYWORDS', 'EAPI'])\n" % \
6252                                         mycpv, noiselevel=-1)
6253                                 writemsg("!!! %s\n" % str(e), noiselevel=-1)
6254                                 del e
6255                                 continue
6256                         mygroups=keys.split()
6257                         # Repoman may modify this attribute as necessary.
6258                         pgroups = accept_keywords[:]
6259                         match=0
6260                         cp = dep_getkey(mycpv)
6261                         if pkgdict.has_key(cp):
6262                                 matches = match_to_list(mycpv, pkgdict[cp].keys())
6263                                 for atom in matches:
6264                                         pgroups.extend(pkgdict[cp][atom])
6265                                 if matches:
6266                                         inc_pgroups = []
6267                                         for x in pgroups:
6268                                                 # The -* special case should be removed once the tree 
6269                                                 # is clean of KEYWORDS=-* crap
6270                                                 if x != "-*" and x.startswith("-"):
6271                                                         try:
6272                                                                 inc_pgroups.remove(x[1:])
6273                                                         except ValueError:
6274                                                                 pass
6275                                                 if x not in inc_pgroups:
6276                                                         inc_pgroups.append(x)
6277                                         pgroups = inc_pgroups
6278                                         del inc_pgroups
6279                         hasstable = False
6280                         hastesting = False
6281                         for gp in mygroups:
6282                                 if gp=="*":
6283                                         writemsg("--- WARNING: Package '%s' uses '*' keyword.\n" % mycpv,
6284                                                 noiselevel=-1)
6285                                         match=1
6286                                         break
6287                                 elif gp in pgroups:
6288                                         match=1
6289                                         break
6290                                 elif gp[0] == "~":
6291                                         hastesting = True
6292                                 elif gp[0] != "-":
6293                                         hasstable = True
6294                         if not match and ((hastesting and "~*" in pgroups) or (hasstable and "*" in pgroups) or "**" in pgroups):
6295                                 match=1
6296                         if match and eapi_is_supported(eapi):
6297                                 newlist.append(mycpv)
6298                 return newlist
6299
6300 class binarytree(object):
6301         "this tree scans for a list of all packages available in PKGDIR"
6302         def __init__(self, root, pkgdir, virtual=None, settings=None, clone=None):
6303                 if clone:
6304                         writemsg("binarytree.__init__(): deprecated " + \
6305                                 "use of clone parameter\n", noiselevel=-1)
6306                         # XXX This isn't cloning. It's an instance of the same thing.
6307                         self.root=clone.root
6308                         self.pkgdir=clone.pkgdir
6309                         self.dbapi=clone.dbapi
6310                         self.populated=clone.populated
6311                         self.tree=clone.tree
6312                         self.remotepkgs=clone.remotepkgs
6313                         self.invalids=clone.invalids
6314                         self.settings = clone.settings
6315                 else:
6316                         self.root=root
6317                         #self.pkgdir=settings["PKGDIR"]
6318                         self.pkgdir = normalize_path(pkgdir)
6319                         self.dbapi = bindbapi(self, settings=settings)
6320                         self.populated=0
6321                         self.tree={}
6322                         self.remotepkgs={}
6323                         self.invalids=[]
6324                         self.settings = settings
6325                         self._pkg_paths = {}
6326
6327         def move_ent(self,mylist):
6328                 if not self.populated:
6329                         self.populate()
6330                 origcp=mylist[1]
6331                 newcp=mylist[2]
6332                 # sanity check
6333                 for cp in [origcp,newcp]:
6334                         if not (isvalidatom(cp) and isjustname(cp)):
6335                                 raise portage_exception.InvalidPackageName(cp)
6336                 origcat = origcp.split("/")[0]
6337                 mynewcat=newcp.split("/")[0]
6338                 origmatches=self.dbapi.cp_list(origcp)
6339                 if not origmatches:
6340                         return
6341                 for mycpv in origmatches:
6342
6343                         mycpsplit=catpkgsplit(mycpv)
6344                         mynewcpv=newcp+"-"+mycpsplit[2]
6345                         if mycpsplit[3]!="r0":
6346                                 mynewcpv += "-"+mycpsplit[3]
6347                         myoldpkg=mycpv.split("/")[1]
6348                         mynewpkg=mynewcpv.split("/")[1]
6349
6350                         if (mynewpkg != myoldpkg) and os.path.exists(self.getname(mynewcpv)):
6351                                 writemsg("!!! Cannot update binary: Destination exists.\n",
6352                                         noiselevel=-1)
6353                                 writemsg("!!! "+mycpv+" -> "+mynewcpv+"\n", noiselevel=-1)
6354                                 continue
6355
6356                         tbz2path=self.getname(mycpv)
6357                         if os.path.exists(tbz2path) and not os.access(tbz2path,os.W_OK):
6358                                 writemsg("!!! Cannot update readonly binary: "+mycpv+"\n",
6359                                         noiselevel=-1)
6360                                 continue
6361
6362                         #print ">>> Updating data in:",mycpv
6363                         writemsg_stdout("%")
6364                         mytbz2 = xpak.tbz2(tbz2path)
6365                         mydata = mytbz2.get_data()
6366                         updated_items = update_dbentries([mylist], mydata)
6367                         mydata.update(updated_items)
6368                         mydata["CATEGORY"] = mynewcat+"\n"
6369                         if mynewpkg != myoldpkg:
6370                                 mydata[mynewpkg+".ebuild"] = mydata[myoldpkg+".ebuild"]
6371                                 del mydata[myoldpkg+".ebuild"]
6372                                 mydata["PF"] = mynewpkg + "\n"
6373                         mytbz2.recompose_mem(xpak.xpak_mem(mydata))
6374
6375                         self.dbapi.cpv_remove(mycpv)
6376                         del self._pkg_paths[mycpv]
6377                         new_path = self.getname(mynewcpv)
6378                         self._pkg_paths[mynewcpv] = os.path.join(
6379                                 *new_path.split(os.path.sep)[-2:])
6380                         if new_path != mytbz2:
6381                                 try:
6382                                         os.makedirs(os.path.dirname(new_path))
6383                                 except OSError, e:
6384                                         if e.errno != errno.EEXIST:
6385                                                 raise
6386                                         del e
6387                                 os.rename(tbz2path, new_path)
6388                                 self._remove_symlink(mycpv)
6389                                 if new_path.split(os.path.sep)[-2] == "All":
6390                                         self._create_symlink(mynewcpv)
6391                         self.dbapi.cpv_inject(mynewcpv)
6392
6393                 return 1
6394
6395         def _remove_symlink(self, cpv):
6396                 """Remove a ${PKGDIR}/${CATEGORY}/${PF}.tbz2 symlink and also remove
6397                 the ${PKGDIR}/${CATEGORY} directory if empty.  The file will not be
6398                 removed if os.path.islink() returns False."""
6399                 mycat, mypkg = catsplit(cpv)
6400                 mylink = os.path.join(self.pkgdir, mycat, mypkg + ".tbz2")
6401                 if os.path.islink(mylink):
6402                         """Only remove it if it's really a link so that this method never
6403                         removes a real package that was placed here to avoid a collision."""
6404                         os.unlink(mylink)
6405                 try:
6406                         os.rmdir(os.path.join(self.pkgdir, mycat))
6407                 except OSError, e:
6408                         if e.errno not in (errno.ENOENT,
6409                                 errno.ENOTEMPTY, errno.EEXIST):
6410                                 raise
6411                         del e
6412
6413         def _create_symlink(self, cpv):
6414                 """Create a ${PKGDIR}/${CATEGORY}/${PF}.tbz2 symlink (and
6415                 ${PKGDIR}/${CATEGORY} directory, if necessary).  Any file that may
6416                 exist in the location of the symlink will first be removed."""
6417                 mycat, mypkg = catsplit(cpv)
6418                 full_path = os.path.join(self.pkgdir, mycat, mypkg + ".tbz2")
6419                 try:
6420                         os.makedirs(os.path.dirname(full_path))
6421                 except OSError, e:
6422                         if e.errno != errno.EEXIST:
6423                                 raise
6424                         del e
6425                 try:
6426                         os.unlink(full_path)
6427                 except OSError, e:
6428                         if e.errno != errno.ENOENT:
6429                                 raise
6430                         del e
6431                 os.symlink(os.path.join("..", "All", mypkg + ".tbz2"), full_path)
6432
6433         def move_slot_ent(self, mylist):
6434                 if not self.populated:
6435                         self.populate()
6436                 pkg=mylist[1]
6437                 origslot=mylist[2]
6438                 newslot=mylist[3]
6439                 
6440                 if not isvalidatom(pkg):
6441                         raise portage_exception.InvalidAtom(pkg)
6442                 
6443                 origmatches=self.dbapi.match(pkg)
6444                 if not origmatches:
6445                         return
6446                 for mycpv in origmatches:
6447                         mycpsplit=catpkgsplit(mycpv)
6448                         myoldpkg=mycpv.split("/")[1]
6449                         tbz2path=self.getname(mycpv)
6450                         if os.path.exists(tbz2path) and not os.access(tbz2path,os.W_OK):
6451                                 writemsg("!!! Cannot update readonly binary: "+mycpv+"\n",
6452                                         noiselevel=-1)
6453                                 continue
6454
6455                         #print ">>> Updating data in:",mycpv
6456                         mytbz2 = xpak.tbz2(tbz2path)
6457                         mydata = mytbz2.get_data()
6458
6459                         slot = mydata["SLOT"]
6460                         if (not slot):
6461                                 continue
6462
6463                         if (slot[0]!=origslot):
6464                                 continue
6465
6466                         writemsg_stdout("S")
6467                         mydata["SLOT"] = newslot+"\n"
6468                         mytbz2.recompose_mem(xpak.xpak_mem(mydata))
6469                 return 1
6470
6471         def update_ents(self, update_iter):
6472                 if len(update_iter) == 0:
6473                         return
6474                 if not self.populated:
6475                         self.populate()
6476
6477                 for mycpv in self.dbapi.cp_all():
6478                         tbz2path=self.getname(mycpv)
6479                         if os.path.exists(tbz2path) and not os.access(tbz2path,os.W_OK):
6480                                 writemsg("!!! Cannot update readonly binary: "+mycpv+"\n",
6481                                         noiselevel=-1)
6482                                 continue
6483                         #print ">>> Updating binary data:",mycpv
6484                         writemsg_stdout("*")
6485                         mytbz2 = xpak.tbz2(tbz2path)
6486                         mydata = mytbz2.get_data()
6487                         updated_items = update_dbentries(update_iter, mydata)
6488                         if len(updated_items) > 0:
6489                                 mydata.update(updated_items)
6490                                 mytbz2.recompose_mem(xpak.xpak_mem(mydata))
6491                 return 1
6492
6493         def prevent_collision(self, cpv):
6494                 """Make sure that the file location ${PKGDIR}/All/${PF}.tbz2 is safe to
6495                 use for a given cpv.  If a collision will occur with an existing
6496                 package from another category, the existing package will be bumped to
6497                 ${PKGDIR}/${CATEGORY}/${PF}.tbz2 so that both can coexist."""
6498                 if not self.populated:
6499                         # Try to avoid the population routine when possible, so that
6500                         # FEATURES=buildpkg doesn't always force population.
6501                         mycat, mypkg = catsplit(cpv)
6502                         myfile = mypkg + ".tbz2"
6503                         full_path = os.path.join(self.pkgdir, "All", myfile)
6504                         if not os.path.exists(full_path):
6505                                 return
6506                         tbz2_cat = xpak.tbz2(full_path).getfile("CATEGORY")
6507                         if tbz2_cat and tbz2_cat.strip() == mycat:
6508                                 return
6509                 full_path = self.getname(cpv)
6510                 if "All" == full_path.split(os.path.sep)[-2]:
6511                         return
6512                 """Move a colliding package if it exists.  Code below this point only
6513                 executes in rare cases."""
6514                 mycat, mypkg = catsplit(cpv)
6515                 myfile = mypkg + ".tbz2"
6516                 mypath = os.path.join("All", myfile)
6517                 dest_path = os.path.join(self.pkgdir, mypath)
6518                 if os.path.exists(dest_path):
6519                         # For invalid packages, other_cat could be None.
6520                         other_cat = xpak.tbz2(dest_path).getfile("CATEGORY")
6521                         if other_cat:
6522                                 other_cat = other_cat.strip()
6523                                 self._move_from_all(other_cat + "/" + mypkg)
6524                 """The file may or may not exist. Move it if necessary and update
6525                 internal state for future calls to getname()."""
6526                 self._move_to_all(cpv)
6527
6528         def _move_to_all(self, cpv):
6529                 """If the file exists, move it.  Whether or not it exists, update state
6530                 for future getname() calls."""
6531                 mycat , mypkg = catsplit(cpv)
6532                 myfile = mypkg + ".tbz2"
6533                 src_path = os.path.join(self.pkgdir, mycat, myfile)
6534                 try:
6535                         mystat = os.lstat(src_path)
6536                 except OSError, e:
6537                         mystat = None
6538                 if mystat and stat.S_ISREG(mystat.st_mode):
6539                         try:
6540                                 os.makedirs(os.path.join(self.pkgdir, "All"))
6541                         except OSError, e:
6542                                 if e.errno != errno.EEXIST:
6543                                         raise
6544                                 del e
6545                         os.rename(src_path, os.path.join(self.pkgdir, "All", myfile))
6546                         self._create_symlink(cpv)
6547                 self._pkg_paths[cpv] = os.path.join("All", myfile)
6548
6549         def _move_from_all(self, cpv):
6550                 """Move a package from ${PKGDIR}/All/${PF}.tbz2 to
6551                 ${PKGDIR}/${CATEGORY}/${PF}.tbz2 and update state from getname calls."""
6552                 self._remove_symlink(cpv)
6553                 mycat , mypkg = catsplit(cpv)
6554                 myfile = mypkg + ".tbz2"
6555                 mypath = os.path.join(mycat, myfile)
6556                 dest_path = os.path.join(self.pkgdir, mypath)
6557                 try:
6558                         os.makedirs(os.path.dirname(dest_path))
6559                 except OSError, e:
6560                         if e.errno != errno.EEXIST:
6561                                 raise
6562                         del e
6563                 os.rename(os.path.join(self.pkgdir, "All", myfile), dest_path)
6564                 self._pkg_paths[cpv] = mypath
6565
6566         def populate(self, getbinpkgs=0,getbinpkgsonly=0):
6567                 "populates the binarytree"
6568                 if (not os.path.isdir(self.pkgdir) and not getbinpkgs):
6569                         return 0
6570
6571                 categories = set(self.settings.categories)
6572
6573                 if not getbinpkgsonly:
6574                         pkg_paths = {}
6575                         dirs = listdir(self.pkgdir, dirsonly=True, EmptyOnError=True)
6576                         if "All" in dirs:
6577                                 dirs.remove("All")
6578                         dirs.sort()
6579                         dirs.insert(0, "All")
6580                         for mydir in dirs:
6581                                 for myfile in listdir(os.path.join(self.pkgdir, mydir)):
6582                                         if not myfile.endswith(".tbz2"):
6583                                                 continue
6584                                         mypath = os.path.join(mydir, myfile)
6585                                         full_path = os.path.join(self.pkgdir, mypath)
6586                                         if os.path.islink(full_path):
6587                                                 continue
6588                                         mytbz2 = xpak.tbz2(full_path)
6589                                         # For invalid packages, mycat could be None.
6590                                         mycat = mytbz2.getfile("CATEGORY")
6591                                         mypf = mytbz2.getfile("PF")
6592                                         mypkg = myfile[:-5]
6593                                         if not mycat or not mypf:
6594                                                 #old-style or corrupt package
6595                                                 writemsg("!!! Invalid binary package: '%s'\n" % full_path,
6596                                                         noiselevel=-1)
6597                                                 writemsg("!!! This binary package is not " + \
6598                                                         "recoverable and should be deleted.\n",
6599                                                         noiselevel=-1)
6600                                                 self.invalids.append(mypkg)
6601                                                 continue
6602                                         mycat = mycat.strip()
6603                                         if mycat != mydir and mydir != "All":
6604                                                 continue
6605                                         if mypkg != mypf.strip():
6606                                                 continue
6607                                         mycpv = mycat + "/" + mypkg
6608                                         if mycpv in pkg_paths:
6609                                                 # All is first, so it's preferred.
6610                                                 continue
6611                                         if mycat not in categories:
6612                                                 writemsg(("!!! Binary package has an " + \
6613                                                         "unrecognized category: '%s'\n") % full_path,
6614                                                         noiselevel=-1)
6615                                                 writemsg(("!!! '%s' has a category that is not" + \
6616                                                         " listed in /etc/portage/categories\n") % mycpv,
6617                                                         noiselevel=-1)
6618                                                 continue
6619                                         pkg_paths[mycpv] = mypath
6620                                         self.dbapi.cpv_inject(mycpv)
6621                         self._pkg_paths = pkg_paths
6622
6623                 if getbinpkgs and not self.settings["PORTAGE_BINHOST"]:
6624                         writemsg(red("!!! PORTAGE_BINHOST unset, but use is requested.\n"),
6625                                 noiselevel=-1)
6626
6627                 if getbinpkgs and \
6628                         self.settings["PORTAGE_BINHOST"] and not self.remotepkgs:
6629                         try:
6630                                 chunk_size = long(self.settings["PORTAGE_BINHOST_CHUNKSIZE"])
6631                                 if chunk_size < 8:
6632                                         chunk_size = 8
6633                         except (ValueError, KeyError):
6634                                 chunk_size = 3000
6635
6636                         writemsg(green("Fetching binary packages info...\n"))
6637                         self.remotepkgs = getbinpkg.dir_get_metadata(
6638                                 self.settings["PORTAGE_BINHOST"], chunk_size=chunk_size)
6639                         writemsg(green("  -- DONE!\n\n"))
6640
6641                         for mypkg in self.remotepkgs.keys():
6642                                 if not self.remotepkgs[mypkg].has_key("CATEGORY"):
6643                                         #old-style or corrupt package
6644                                         writemsg("!!! Invalid remote binary package: "+mypkg+"\n",
6645                                                 noiselevel=-1)
6646                                         del self.remotepkgs[mypkg]
6647                                         continue
6648                                 mycat=self.remotepkgs[mypkg]["CATEGORY"].strip()
6649                                 fullpkg=mycat+"/"+mypkg[:-5]
6650                                 if mycat not in categories:
6651                                         writemsg(("!!! Remote binary package has an " + \
6652                                                 "unrecognized category: '%s'\n") % fullpkg,
6653                                                 noiselevel=-1)
6654                                         writemsg(("!!! '%s' has a category that is not" + \
6655                                                 " listed in /etc/portage/categories\n") % fullpkg,
6656                                                 noiselevel=-1)
6657                                         continue
6658                                 mykey=dep_getkey(fullpkg)
6659                                 try:
6660                                         # invalid tbz2's can hurt things.
6661                                         #print "cpv_inject("+str(fullpkg)+")"
6662                                         self.dbapi.cpv_inject(fullpkg)
6663                                         #print "  -- Injected"
6664                                 except SystemExit, e:
6665                                         raise
6666                                 except:
6667                                         writemsg("!!! Failed to inject remote binary package:"+str(fullpkg)+"\n",
6668                                                 noiselevel=-1)
6669                                         del self.remotepkgs[mypkg]
6670                                         continue
6671                 self.populated=1
6672
6673         def inject(self,cpv):
6674                 return self.dbapi.cpv_inject(cpv)
6675
6676         def exists_specific(self,cpv):
6677                 if not self.populated:
6678                         self.populate()
6679                 return self.dbapi.match(
6680                         dep_expand("="+cpv, mydb=self.dbapi, settings=self.settings))
6681
6682         def dep_bestmatch(self,mydep):
6683                 "compatibility method -- all matches, not just visible ones"
6684                 if not self.populated:
6685                         self.populate()
6686                 writemsg("\n\n", 1)
6687                 writemsg("mydep: %s\n" % mydep, 1)
6688                 mydep = dep_expand(mydep, mydb=self.dbapi, settings=self.settings)
6689                 writemsg("mydep: %s\n" % mydep, 1)
6690                 mykey=dep_getkey(mydep)
6691                 writemsg("mykey: %s\n" % mykey, 1)
6692                 mymatch=best(match_from_list(mydep,self.dbapi.cp_list(mykey)))
6693                 writemsg("mymatch: %s\n" % mymatch, 1)
6694                 if mymatch is None:
6695                         return ""
6696                 return mymatch
6697
6698         def getname(self,pkgname):
6699                 """Returns a file location for this package.  The default location is
6700                 ${PKGDIR}/All/${PF}.tbz2, but will be ${PKGDIR}/${CATEGORY}/${PF}.tbz2
6701                 in the rare event of a collision.  The prevent_collision() method can
6702                 be called to ensure that ${PKGDIR}/All/${PF}.tbz2 is available for a
6703                 specific cpv."""
6704                 if not self.populated:
6705                         self.populate()
6706                 mycpv = pkgname
6707                 mypath = self._pkg_paths.get(mycpv, None)
6708                 if mypath:
6709                         return os.path.join(self.pkgdir, mypath)
6710                 mycat, mypkg = catsplit(mycpv)
6711                 mypath = os.path.join("All", mypkg + ".tbz2")
6712                 if mypath in self._pkg_paths.values():
6713                         mypath = os.path.join(mycat, mypkg + ".tbz2")
6714                 self._pkg_paths[mycpv] = mypath # cache for future lookups
6715                 return os.path.join(self.pkgdir, mypath)
6716
6717         def isremote(self,pkgname):
6718                 "Returns true if the package is kept remotely."
6719                 mysplit=pkgname.split("/")
6720                 remote = (not os.path.exists(self.getname(pkgname))) and self.remotepkgs.has_key(mysplit[1]+".tbz2")
6721                 return remote
6722
6723         def get_use(self,pkgname):
6724                 mysplit=pkgname.split("/")
6725                 if self.isremote(pkgname):
6726                         return self.remotepkgs[mysplit[1]+".tbz2"]["USE"][:].split()
6727                 tbz2=xpak.tbz2(self.getname(pkgname))
6728                 return tbz2.getfile("USE").split()
6729
6730         def gettbz2(self,pkgname):
6731                 "fetches the package from a remote site, if necessary."
6732                 print "Fetching '"+str(pkgname)+"'"
6733                 mysplit  = pkgname.split("/")
6734                 tbz2name = mysplit[1]+".tbz2"
6735                 if not self.isremote(pkgname):
6736                         if (tbz2name not in self.invalids):
6737                                 return
6738                         else:
6739                                 writemsg("Resuming download of this tbz2, but it is possible that it is corrupt.\n",
6740                                         noiselevel=-1)
6741                 mydest = self.pkgdir+"/All/"
6742                 try:
6743                         os.makedirs(mydest, 0775)
6744                 except (OSError, IOError):
6745                         pass
6746                 return getbinpkg.file_get(
6747                         self.settings["PORTAGE_BINHOST"] + "/" + tbz2name,
6748                         mydest, fcmd=self.settings["RESUMECOMMAND"])
6749
6750         def getslot(self,mycatpkg):
6751                 "Get a slot for a catpkg; assume it exists."
6752                 myslot = ""
6753                 try:
6754                         myslot=self.dbapi.aux_get(mycatpkg,["SLOT"])[0]
6755                 except SystemExit, e:
6756                         raise
6757                 except Exception, e:
6758                         pass
6759                 return myslot
6760
6761 class dblink:
6762         """
6763         This class provides an interface to the installed package database
6764         At present this is implemented as a text backend in /var/db/pkg.
6765         """
6766         def __init__(self, cat, pkg, myroot, mysettings, treetype=None,
6767                 vartree=None):
6768                 """
6769                 Creates a DBlink object for a given CPV.
6770                 The given CPV may not be present in the database already.
6771                 
6772                 @param cat: Category
6773                 @type cat: String
6774                 @param pkg: Package (PV)
6775                 @type pkg: String
6776                 @param myroot: Typically ${ROOT}
6777                 @type myroot: String (Path)
6778                 @param mysettings: Typically portage.config
6779                 @type mysettings: An instance of portage.config
6780                 @param treetype: one of ['porttree','bintree','vartree']
6781                 @type treetype: String
6782                 @param vartree: an instance of vartree corresponding to myroot.
6783                 @type vartree: vartree
6784                 """
6785                 
6786                 self.cat     = cat
6787                 self.pkg     = pkg
6788                 self.mycpv   = self.cat+"/"+self.pkg
6789                 self.mysplit = pkgsplit(self.mycpv)
6790                 self.treetype = treetype
6791                 if vartree is None:
6792                         global db
6793                         vartree = db[myroot]["vartree"]
6794                 self.vartree = vartree
6795
6796                 self.dbroot   = normalize_path(os.path.join(myroot, VDB_PATH))
6797                 self.dbcatdir = self.dbroot+"/"+cat
6798                 self.dbpkgdir = self.dbcatdir+"/"+pkg
6799                 self.dbtmpdir = self.dbcatdir+"/-MERGING-"+pkg
6800                 self.dbdir    = self.dbpkgdir
6801
6802                 self._lock_vdb = None
6803
6804                 self.settings = mysettings
6805                 if self.settings==1:
6806                         raise ValueError
6807
6808                 self.myroot=myroot
6809                 protect_obj = portage_util.ConfigProtect(myroot,
6810                         mysettings.get("CONFIG_PROTECT","").split(),
6811                         mysettings.get("CONFIG_PROTECT_MASK","").split())
6812                 self.updateprotect = protect_obj.updateprotect
6813                 self._config_protect = protect_obj
6814                 self._installed_instance = None
6815                 self.contentscache=[]
6816                 self._contents_inodes = None
6817
6818         def lockdb(self):
6819                 if self._lock_vdb:
6820                         raise AssertionError("Lock already held.")
6821                 # At least the parent needs to exist for the lock file.
6822                 portage_util.ensure_dirs(self.dbroot)
6823                 self._lock_vdb = portage_locks.lockdir(self.dbroot)
6824
6825         def unlockdb(self):
6826                 if self._lock_vdb:
6827                         portage_locks.unlockdir(self._lock_vdb)
6828                         self._lock_vdb = None
6829
6830         def getpath(self):
6831                 "return path to location of db information (for >>> informational display)"
6832                 return self.dbdir
6833
6834         def exists(self):
6835                 "does the db entry exist?  boolean."
6836                 return os.path.exists(self.dbdir)
6837
6838         def create(self):
6839                 "create the skeleton db directory structure.  No contents, virtuals, provides or anything.  Also will create /var/db/pkg if necessary."
6840                 """
6841                 This function should never get called (there is no reason to use it).
6842                 """
6843                 # XXXXX Delete this eventually
6844                 raise Exception, "This is bad. Don't use it."
6845                 if not os.path.exists(self.dbdir):
6846                         os.makedirs(self.dbdir)
6847
6848         def delete(self):
6849                 """
6850                 Remove this entry from the database
6851                 """
6852                 if not os.path.exists(self.dbdir):
6853                         return
6854                 try:
6855                         for x in listdir(self.dbdir):
6856                                 os.unlink(self.dbdir+"/"+x)
6857                         os.rmdir(self.dbdir)
6858                 except OSError, e:
6859                         print "!!! Unable to remove db entry for this package."
6860                         print "!!! It is possible that a directory is in this one. Portage will still"
6861                         print "!!! register this package as installed as long as this directory exists."
6862                         print "!!! You may delete this directory with 'rm -Rf "+self.dbdir+"'"
6863                         print "!!! "+str(e)
6864                         print
6865                         sys.exit(1)
6866
6867         def clearcontents(self):
6868                 """
6869                 For a given db entry (self), erase the CONTENTS values.
6870                 """
6871                 if os.path.exists(self.dbdir+"/CONTENTS"):
6872                         os.unlink(self.dbdir+"/CONTENTS")
6873
6874         def getcontents(self):
6875                 """
6876                 Get the installed files of a given package (aka what that package installed)
6877                 """
6878                 if not os.path.exists(self.dbdir+"/CONTENTS"):
6879                         return None
6880                 if self.contentscache != []:
6881                         return self.contentscache
6882                 pkgfiles={}
6883                 myc=open(self.dbdir+"/CONTENTS","r")
6884                 mylines=myc.readlines()
6885                 myc.close()
6886                 null_byte = "\0"
6887                 contents_file = os.path.join(self.dbdir, "CONTENTS")
6888                 pos = 0
6889                 for line in mylines:
6890                         pos += 1
6891                         if null_byte in line:
6892                                 # Null bytes are a common indication of corruption.
6893                                 writemsg("!!! Null byte found in contents " + \
6894                                         "file, line %d: '%s'\n" % (pos, contents_file),
6895                                         noiselevel=-1)
6896                                 continue
6897                         mydat = line.split()
6898                         # we do this so we can remove from non-root filesystems
6899                         # (use the ROOT var to allow maintenance on other partitions)
6900                         try:
6901                                 mydat[1] = normalize_path(os.path.join(
6902                                         self.myroot, mydat[1].lstrip(os.path.sep)))
6903                                 if mydat[0]=="obj":
6904                                         #format: type, mtime, md5sum
6905                                         pkgfiles[" ".join(mydat[1:-2])]=[mydat[0], mydat[-1], mydat[-2]]
6906                                 elif mydat[0]=="dir":
6907                                         #format: type
6908                                         pkgfiles[" ".join(mydat[1:])]=[mydat[0] ]
6909                                 elif mydat[0]=="sym":
6910                                         #format: type, mtime, dest
6911                                         x=len(mydat)-1
6912                                         if (x >= 13) and (mydat[-1][-1]==')'): # Old/Broken symlink entry
6913                                                 mydat = mydat[:-10]+[mydat[-10:][stat.ST_MTIME][:-1]]
6914                                                 writemsg("FIXED SYMLINK LINE: %s\n" % mydat, 1)
6915                                                 x=len(mydat)-1
6916                                         splitter=-1
6917                                         while(x>=0):
6918                                                 if mydat[x]=="->":
6919                                                         splitter=x
6920                                                         break
6921                                                 x=x-1
6922                                         if splitter==-1:
6923                                                 return None
6924                                         pkgfiles[" ".join(mydat[1:splitter])]=[mydat[0], mydat[-1], " ".join(mydat[(splitter+1):-1])]
6925                                 elif mydat[0]=="dev":
6926                                         #format: type
6927                                         pkgfiles[" ".join(mydat[1:])]=[mydat[0] ]
6928                                 elif mydat[0]=="fif":
6929                                         #format: type
6930                                         pkgfiles[" ".join(mydat[1:])]=[mydat[0]]
6931                                 else:
6932                                         return None
6933                         except (KeyError,IndexError):
6934                                 print "portage: CONTENTS line",pos,"corrupt!"
6935                 self.contentscache=pkgfiles
6936                 return pkgfiles
6937
6938         def unmerge(self, pkgfiles=None, trimworld=1, cleanup=1,
6939                 ldpath_mtimes=None):
6940                 """
6941                 Calls prerm
6942                 Unmerges a given package (CPV)
6943                 calls postrm
6944                 calls cleanrm
6945                 calls env_update
6946                 
6947                 @param pkgfiles: files to unmerge (generally self.getcontents() )
6948                 @type pkgfiles: Dictionary
6949                 @param trimworld: Remove CPV from world file if True, not if False
6950                 @type trimworld: Boolean
6951                 @param cleanup: cleanup to pass to doebuild (see doebuild)
6952                 @type cleanup: Boolean
6953                 @param ldpath_mtimes: mtimes to pass to env_update (see env_update)
6954                 @type ldpath_mtimes: Dictionary
6955                 @rtype: Integer
6956                 @returns:
6957                 1. os.EX_OK if everything went well.
6958                 2. return code of the failed phase (for prerm, postrm, cleanrm)
6959                 
6960                 Notes:
6961                 The caller must ensure that lockdb() and unlockdb() are called
6962                 before and after this method.
6963                 """
6964
6965                 contents = self.getcontents()
6966                 # Now, don't assume that the name of the ebuild is the same as the
6967                 # name of the dir; the package may have been moved.
6968                 myebuildpath = None
6969                 mystuff = listdir(self.dbdir, EmptyOnError=1)
6970                 for x in mystuff:
6971                         if x.endswith(".ebuild"):
6972                                 myebuildpath = os.path.join(self.dbdir, self.pkg + ".ebuild")
6973                                 if x[:-7] != self.pkg:
6974                                         # Clean up after vardbapi.move_ent() breakage in
6975                                         # portage versions before 2.1.2
6976                                         os.rename(os.path.join(self.dbdir, x), myebuildpath)
6977                                         write_atomic(os.path.join(self.dbdir, "PF"), self.pkg+"\n")
6978                                 break
6979
6980                 self.settings.load_infodir(self.dbdir)
6981                 if myebuildpath:
6982                         try:
6983                                 doebuild_environment(myebuildpath, "prerm", self.myroot,
6984                                         self.settings, 0, 0, self.vartree.dbapi)
6985                         except portage_exception.UnsupportedAPIException, e:
6986                                 # Sometimes this happens due to corruption of the EAPI file.
6987                                 writemsg("!!! FAILED prerm: %s\n" % \
6988                                         os.path.join(self.dbdir, "EAPI"), noiselevel=-1)
6989                                 writemsg("%s\n" % str(e), noiselevel=-1)
6990                                 return 1
6991                         catdir = os.path.dirname(self.settings["PORTAGE_BUILDDIR"])
6992                         portage_util.ensure_dirs(os.path.dirname(catdir),
6993                                 uid=portage_uid, gid=portage_gid, mode=070, mask=0)
6994                 builddir_lock = None
6995                 catdir_lock = None
6996                 try:
6997                         if myebuildpath:
6998                                 catdir_lock = portage_locks.lockdir(catdir)
6999                                 portage_util.ensure_dirs(catdir,
7000                                         uid=portage_uid, gid=portage_gid,
7001                                         mode=070, mask=0)
7002                                 builddir_lock = portage_locks.lockdir(
7003                                         self.settings["PORTAGE_BUILDDIR"])
7004                                 try:
7005                                         portage_locks.unlockdir(catdir_lock)
7006                                 finally:
7007                                         catdir_lock = None
7008                                 # Eventually, we'd like to pass in the saved ebuild env here...
7009                                 retval = doebuild(myebuildpath, "prerm", self.myroot,
7010                                         self.settings, cleanup=cleanup, use_cache=0,
7011                                         mydbapi=self.vartree.dbapi, tree="vartree",
7012                                         vartree=self.vartree)
7013                                 # XXX: Decide how to handle failures here.
7014                                 if retval != os.EX_OK:
7015                                         writemsg("!!! FAILED prerm: %s\n" % retval, noiselevel=-1)
7016                                         return retval
7017
7018                         self._unmerge_pkgfiles(pkgfiles)
7019
7020                         if myebuildpath:
7021                                 retval = doebuild(myebuildpath, "postrm", self.myroot,
7022                                          self.settings, use_cache=0, tree="vartree",
7023                                          mydbapi=self.vartree.dbapi, vartree=self.vartree)
7024
7025                                 # process logs created during pre/postrm
7026                                 elog_process(self.mycpv, self.settings)
7027
7028                                 # XXX: Decide how to handle failures here.
7029                                 if retval != os.EX_OK:
7030                                         writemsg("!!! FAILED postrm: %s\n" % retval, noiselevel=-1)
7031                                         return retval
7032                                 doebuild(myebuildpath, "cleanrm", self.myroot, self.settings,
7033                                         tree="vartree", mydbapi=self.vartree.dbapi,
7034                                         vartree=self.vartree)
7035
7036                 finally:
7037                         if builddir_lock:
7038                                 portage_locks.unlockdir(builddir_lock)
7039                         try:
7040                                 if myebuildpath and not catdir_lock:
7041                                         # Lock catdir for removal if empty.
7042                                         catdir_lock = portage_locks.lockdir(catdir)
7043                         finally:
7044                                 if catdir_lock:
7045                                         try:
7046                                                 os.rmdir(catdir)
7047                                         except OSError, e:
7048                                                 if e.errno not in (errno.ENOENT,
7049                                                         errno.ENOTEMPTY, errno.EEXIST):
7050                                                         raise
7051                                                 del e
7052                                         portage_locks.unlockdir(catdir_lock)
7053                 env_update(target_root=self.myroot, prev_mtimes=ldpath_mtimes,
7054                         contents=contents)
7055                 return os.EX_OK
7056
7057         def _unmerge_pkgfiles(self, pkgfiles):
7058                 """
7059                 
7060                 Unmerges the contents of a package from the liveFS
7061                 Removes the VDB entry for self
7062                 
7063                 @param pkgfiles: typically self.getcontents()
7064                 @type pkgfiles: Dictionary { filename: [ 'type', '?', 'md5sum' ] }
7065                 @rtype: None
7066                 """
7067                 global dircache
7068                 dircache={}
7069
7070                 if not pkgfiles:
7071                         writemsg_stdout("No package files given... Grabbing a set.\n")
7072                         pkgfiles=self.getcontents()
7073
7074                 if pkgfiles:
7075                         mykeys=pkgfiles.keys()
7076                         mykeys.sort()
7077                         mykeys.reverse()
7078
7079                         #process symlinks second-to-last, directories last.
7080                         mydirs=[]
7081                         modprotect="/lib/modules/"
7082                         for objkey in mykeys:
7083                                 obj = normalize_path(objkey)
7084                                 if obj[:2]=="//":
7085                                         obj=obj[1:]
7086                                 statobj = None
7087                                 try:
7088                                         statobj = os.stat(obj)
7089                                 except OSError:
7090                                         pass
7091                                 lstatobj = None
7092                                 try:
7093                                         lstatobj = os.lstat(obj)
7094                                 except (OSError, AttributeError):
7095                                         pass
7096                                 islink = lstatobj is not None and stat.S_ISLNK(lstatobj.st_mode)
7097                                 if statobj is None:
7098                                         if not islink:
7099                                                 #we skip this if we're dealing with a symlink
7100                                                 #because os.stat() will operate on the
7101                                                 #link target rather than the link itself.
7102                                                 writemsg_stdout("--- !found "+str(pkgfiles[objkey][0])+ " %s\n" % obj)
7103                                                 continue
7104                                 # next line includes a tweak to protect modules from being unmerged,
7105                                 # but we don't protect modules from being overwritten if they are
7106                                 # upgraded. We effectively only want one half of the config protection
7107                                 # functionality for /lib/modules. For portage-ng both capabilities
7108                                 # should be able to be independently specified.
7109                                 if obj.startswith(modprotect):
7110                                         writemsg_stdout("--- cfgpro %s %s\n" % (pkgfiles[objkey][0], obj))
7111                                         continue
7112
7113                                 lmtime=str(lstatobj[stat.ST_MTIME])
7114                                 if (pkgfiles[objkey][0] not in ("dir","fif","dev")) and (lmtime != pkgfiles[objkey][1]):
7115                                         writemsg_stdout("--- !mtime %s %s\n" % (pkgfiles[objkey][0], obj))
7116                                         continue
7117
7118                                 if pkgfiles[objkey][0]=="dir":
7119                                         if statobj is None or not stat.S_ISDIR(statobj.st_mode):
7120                                                 writemsg_stdout("--- !dir   %s %s\n" % ("dir", obj))
7121                                                 continue
7122                                         mydirs.append(obj)
7123                                 elif pkgfiles[objkey][0]=="sym":
7124                                         if not islink:
7125                                                 writemsg_stdout("--- !sym   %s %s\n" % ("sym", obj))
7126                                                 continue
7127                                         try:
7128                                                 os.unlink(obj)
7129                                                 writemsg_stdout("<<<        %s %s\n" % ("sym",obj))
7130                                         except (OSError,IOError),e:
7131                                                 writemsg_stdout("!!!        %s %s\n" % ("sym",obj))
7132                                 elif pkgfiles[objkey][0]=="obj":
7133                                         if statobj is None or not stat.S_ISREG(statobj.st_mode):
7134                                                 writemsg_stdout("--- !obj   %s %s\n" % ("obj", obj))
7135                                                 continue
7136                                         mymd5 = None
7137                                         try:
7138                                                 mymd5 = portage_checksum.perform_md5(obj, calc_prelink=1)
7139                                         except portage_exception.FileNotFound, e:
7140                                                 # the file has disappeared between now and our stat call
7141                                                 writemsg_stdout("--- !obj   %s %s\n" % ("obj", obj))
7142                                                 continue
7143
7144                                         # string.lower is needed because db entries used to be in upper-case.  The
7145                                         # string.lower allows for backwards compatibility.
7146                                         if mymd5 != pkgfiles[objkey][2].lower():
7147                                                 writemsg_stdout("--- !md5   %s %s\n" % ("obj", obj))
7148                                                 continue
7149                                         try:
7150                                                 if statobj.st_mode & (stat.S_ISUID | stat.S_ISGID):
7151                                                         # Always blind chmod 0 before unlinking to avoid race conditions.
7152                                                         os.chmod(obj, 0000)
7153                                                         if statobj.st_nlink > 1:
7154                                                                 writemsg("setXid: "+str(statobj.st_nlink-1)+ \
7155                                                                         " hardlinks to '%s'\n" % obj)
7156                                                 os.unlink(obj)
7157                                         except (OSError,IOError),e:
7158                                                 pass
7159                                         writemsg_stdout("<<<        %s %s\n" % ("obj",obj))
7160                                 elif pkgfiles[objkey][0]=="fif":
7161                                         if not stat.S_ISFIFO(lstatobj[stat.ST_MODE]):
7162                                                 writemsg_stdout("--- !fif   %s %s\n" % ("fif", obj))
7163                                                 continue
7164                                         writemsg_stdout("---        %s %s\n" % ("fif",obj))
7165                                 elif pkgfiles[objkey][0]=="dev":
7166                                         writemsg_stdout("---        %s %s\n" % ("dev",obj))
7167
7168                         mydirs.sort()
7169                         mydirs.reverse()
7170
7171                         for obj in mydirs:
7172                                 try:
7173                                         os.rmdir(obj)
7174                                         writemsg_stdout("<<<        %s %s\n" % ("dir",obj))
7175                                 except (OSError, IOError):
7176                                         writemsg_stdout("--- !empty dir %s\n" % obj)
7177
7178                 #remove self from vartree database so that our own virtual gets zapped if we're the last node
7179                 self.vartree.zap(self.mycpv)
7180
7181         def isowner(self,filename,destroot):
7182                 """ 
7183                 Check if filename is a new file or belongs to this package
7184                 (for this or a previous version)
7185                 
7186                 @param filename:
7187                 @type filename:
7188                 @param destroot:
7189                 @type destroot:
7190                 @rtype: Boolean
7191                 @returns:
7192                 1. True if this package owns the file.
7193                 2. False if this package does not own the file.
7194                 """
7195                 destfile = normalize_path(
7196                         os.path.join(destroot, filename.lstrip(os.path.sep)))
7197                 try:
7198                         mylstat = os.lstat(destfile)
7199                 except (OSError, IOError):
7200                         return True
7201
7202                 pkgfiles = self.getcontents()
7203                 if pkgfiles and filename in pkgfiles:
7204                         return True
7205                 if pkgfiles:
7206                         if self._contents_inodes is None:
7207                                 self._contents_inodes = set()
7208                                 for x in pkgfiles:
7209                                         try:
7210                                                 lstat = os.lstat(x)
7211                                                 self._contents_inodes.add((lstat.st_dev, lstat.st_ino))
7212                                         except OSError:
7213                                                 pass
7214                         if (mylstat.st_dev, mylstat.st_ino) in self._contents_inodes:
7215                                  return True
7216
7217                 return False
7218
7219         def isprotected(self, filename):
7220                 """In cases where an installed package in the same slot owns a
7221                 protected file that will be merged, bump the mtime on the installed
7222                 file in order to ensure that it isn't unmerged."""
7223                 if not self._config_protect.isprotected(filename):
7224                         return False
7225                 if self._installed_instance is None:
7226                         return True
7227                 mydata = self._installed_instance.getcontents().get(filename, None)
7228                 if mydata is None:
7229                         return True
7230
7231                 # Bump the mtime in order to ensure that the old config file doesn't
7232                 # get unmerged.  The user will have an opportunity to merge the new
7233                 # config with the old one.
7234                 try:
7235                         os.utime(filename, None)
7236                 except OSError, e:
7237                         if e.errno != errno.ENOENT:
7238                                 raise
7239                         del e
7240                         # The file has disappeared, so it's not protected.
7241                         return False
7242                 return True
7243
7244         def treewalk(self, srcroot, destroot, inforoot, myebuild, cleanup=0,
7245                 mydbapi=None, prev_mtimes=None):
7246                 """
7247                 
7248                 This function does the following:
7249                 
7250                 Collision Protection.
7251                 calls doebuild(mydo=pkg_preinst)
7252                 Merges the package to the livefs
7253                 unmerges old version (if required)
7254                 calls doebuild(mydo=pkg_postinst)
7255                 calls env_update
7256                 
7257                 @param srcroot: Typically this is ${D}
7258                 @type srcroot: String (Path)
7259                 @param destroot: Path to merge to (usually ${ROOT})
7260                 @type destroot: String (Path)
7261                 @param inforoot: root of the vardb entry ?
7262                 @type inforoot: String (Path)
7263                 @param myebuild: path to the ebuild that we are processing
7264                 @type myebuild: String (Path)
7265                 @param mydbapi: dbapi which is handed to doebuild.
7266                 @type mydbapi: portdbapi instance
7267                 @param prev_mtimes: { Filename:mtime } mapping for env_update
7268                 @type prev_mtimes: Dictionary
7269                 @rtype: Boolean
7270                 @returns:
7271                 1. 0 on success
7272                 2. 1 on failure
7273                 
7274                 secondhand is a list of symlinks that have been skipped due to their target
7275                 not existing; we will merge these symlinks at a later time.
7276                 """
7277                 if not os.path.isdir(srcroot):
7278                         writemsg("!!! Directory Not Found: D='%s'\n" % srcroot,
7279                         noiselevel=-1)
7280                         return 1
7281
7282                 if not os.path.exists(self.dbcatdir):
7283                         os.makedirs(self.dbcatdir)
7284
7285                 otherversions=[]
7286                 for v in self.vartree.dbapi.cp_list(self.mysplit[0]):
7287                         otherversions.append(v.split("/")[1])
7288
7289                 slot_matches = self.vartree.dbapi.match(
7290                         "%s:%s" % (self.mysplit[0], self.settings["SLOT"]))
7291                 if slot_matches:
7292                         # Used by self.isprotected().
7293                         self._installed_instance = dblink(self.cat,
7294                                 catsplit(slot_matches[0])[1], destroot, self.settings,
7295                                 vartree=self.vartree)
7296
7297                 # check for package collisions
7298                 if "collision-protect" in self.settings.features:
7299                         collision_ignore = set([normalize_path(myignore) for myignore in \
7300                                 self.settings.get("COLLISION_IGNORE", "").split()])
7301                         myfilelist = listdir(srcroot, recursive=1, filesonly=1, followSymlinks=False)
7302
7303                         # the linkcheck only works if we are in srcroot
7304                         mycwd = getcwd()
7305                         os.chdir(srcroot)
7306                         mysymlinks = filter(os.path.islink, listdir(srcroot, recursive=1, filesonly=0, followSymlinks=False))
7307                         myfilelist.extend(mysymlinks)
7308                         mysymlinked_directories = [s + os.path.sep for s in mysymlinks]
7309                         del mysymlinks
7310
7311
7312                         stopmerge=False
7313                         starttime=time.time()
7314                         i=0
7315
7316                         otherpkg=[]
7317                         mypkglist=[]
7318
7319                         if self.pkg in otherversions:
7320                                 otherversions.remove(self.pkg)  # we already checked this package
7321
7322                         myslot = self.settings["SLOT"]
7323                         for v in otherversions:
7324                                 # only allow versions with same slot to overwrite files
7325                                 if myslot == self.vartree.dbapi.aux_get("/".join((self.cat, v)), ["SLOT"])[0]:
7326                                         mypkglist.append(
7327                                                 dblink(self.cat, v, destroot, self.settings,
7328                                                         vartree=self.vartree))
7329
7330                         collisions = []
7331
7332                         print green("*")+" checking "+str(len(myfilelist))+" files for package collisions"
7333                         for f in myfilelist:
7334                                 nocheck = False
7335                                 # listdir isn't intelligent enough to exclude symlinked dirs,
7336                                 # so we have to do it ourself
7337                                 for s in mysymlinked_directories:
7338                                         if f.startswith(s):
7339                                                 nocheck = True
7340                                                 break
7341                                 if nocheck:
7342                                         continue
7343                                 i=i+1
7344                                 if i % 1000 == 0:
7345                                         print str(i)+" files checked ..."
7346                                 if f[0] != "/":
7347                                         f="/"+f
7348                                 isowned = False
7349                                 for ver in [self]+mypkglist:
7350                                         if (ver.isowner(f, destroot) or ver.isprotected(f)):
7351                                                 isowned = True
7352                                                 break
7353                                 if not isowned:
7354                                         collisions.append(f)
7355                                         print "existing file "+f+" is not owned by this package"
7356                                         stopmerge=True
7357                                         if collision_ignore:
7358                                                 if f in collision_ignore:
7359                                                         stopmerge = False
7360                                                 else:
7361                                                         for myignore in collision_ignore:
7362                                                                 if f.startswith(myignore + os.path.sep):
7363                                                                         stopmerge = False
7364                                                                         break
7365                         #print green("*")+" spent "+str(time.time()-starttime)+" seconds checking for file collisions"
7366                         if stopmerge:
7367                                 print red("*")+" This package is blocked because it wants to overwrite"
7368                                 print red("*")+" files belonging to other packages (see messages above)."
7369                                 print red("*")+" If you have no clue what this is all about report it "
7370                                 print red("*")+" as a bug for this package on http://bugs.gentoo.org"
7371                                 print
7372                                 print red("package "+self.cat+"/"+self.pkg+" NOT merged")
7373                                 print
7374                                 print
7375                                 print "Searching all installed packages for file collisions..."
7376                                 print "Press Ctrl-C to Stop"
7377                                 print
7378                                 """ Note: The isowner calls result in a stat call for *every*
7379                                 single installed file, since the inode numbers are used to work
7380                                 around the problem of ambiguous paths caused by symlinked files
7381                                 and/or directories.  Though it is slow, it is as accurate as
7382                                 possible."""
7383                                 found_owner = False
7384                                 for cpv in self.vartree.dbapi.cpv_all():
7385                                         cat, pkg = catsplit(cpv)
7386                                         mylink = dblink(cat, pkg, destroot, self.settings,
7387                                                 vartree=self.vartree)
7388                                         mycollisions = []
7389                                         for f in collisions:
7390                                                 if mylink.isowner(f, destroot):
7391                                                         mycollisions.append(f)
7392                                         if mycollisions:
7393                                                 found_owner = True
7394                                                 print " * %s:" % cpv
7395                                                 print
7396                                                 for f in mycollisions:
7397                                                         print "     '%s'" % \
7398                                                                 os.path.join(destroot, f.lstrip(os.path.sep))
7399                                                 print
7400                                 if not found_owner:
7401                                         print "None of the installed packages claim the above file(s)."
7402                                         print
7403                                 sys.exit(1)
7404                         try:
7405                                 os.chdir(mycwd)
7406                         except OSError:
7407                                 pass
7408
7409                 if os.stat(srcroot).st_dev == os.stat(destroot).st_dev:
7410                         """ The merge process may move files out of the image directory,
7411                         which causes invalidation of the .installed flag."""
7412                         try:
7413                                 os.unlink(os.path.join(
7414                                         os.path.dirname(normalize_path(srcroot)), ".installed"))
7415                         except OSError, e:
7416                                 if e.errno != errno.ENOENT:
7417                                         raise
7418                                 del e
7419
7420                 # get old contents info for later unmerging
7421                 oldcontents = self.getcontents()
7422
7423                 self.dbdir = self.dbtmpdir
7424                 self.delete()
7425                 if not os.path.exists(self.dbtmpdir):
7426                         os.makedirs(self.dbtmpdir)
7427
7428                 writemsg_stdout(">>> Merging %s %s %s\n" % (self.mycpv,"to",destroot))
7429
7430                 # run preinst script
7431                 if myebuild is None:
7432                         myebuild = os.path.join(inforoot, self.pkg + ".ebuild")
7433                 a = doebuild(myebuild, "preinst", destroot, self.settings, cleanup=cleanup,
7434                         use_cache=0, tree=self.treetype, mydbapi=mydbapi,
7435                         vartree=self.vartree)
7436
7437                 # XXX: Decide how to handle failures here.
7438                 if a != os.EX_OK:
7439                         writemsg("!!! FAILED preinst: "+str(a)+"\n", noiselevel=-1)
7440                         return a
7441
7442                 # copy "info" files (like SLOT, CFLAGS, etc.) into the database
7443                 for x in listdir(inforoot):
7444                         self.copyfile(inforoot+"/"+x)
7445
7446                 # get current counter value (counter_tick also takes care of incrementing it)
7447                 # XXX Need to make this destroot, but it needs to be initialized first. XXX
7448                 # XXX bis: leads to some invalidentry() call through cp_all().
7449                 counter = self.vartree.dbapi.counter_tick(self.myroot, mycpv=self.mycpv)
7450                 # write local package counter for recording
7451                 lcfile = open(self.dbtmpdir+"/COUNTER","w")
7452                 lcfile.write(str(counter))
7453                 lcfile.close()
7454
7455                 # open CONTENTS file (possibly overwriting old one) for recording
7456                 outfile=open(self.dbtmpdir+"/CONTENTS","w")
7457
7458                 self.updateprotect()
7459
7460                 #if we have a file containing previously-merged config file md5sums, grab it.
7461                 conf_mem_file = os.path.join(destroot, CONFIG_MEMORY_FILE)
7462                 cfgfiledict = grabdict(conf_mem_file)
7463                 if self.settings.has_key("NOCONFMEM"):
7464                         cfgfiledict["IGNORE"]=1
7465                 else:
7466                         cfgfiledict["IGNORE"]=0
7467
7468                 # Timestamp for files being merged.  Use time() - 1 in order to prevent
7469                 # a collision with timestamps that are bumped by the utime() call
7470                 # inside isprotected().  This ensures that the new and old config have
7471                 # different timestamps (for the benefit of programs like rsync that
7472                 # that need distiguishable timestamps to detect file changes).
7473                 mymtime = long(time.time() - 1)
7474
7475                 # set umask to 0 for merging; back up umask, save old one in prevmask (since this is a global change)
7476                 prevmask   = os.umask(0)
7477                 secondhand = []
7478
7479                 # we do a first merge; this will recurse through all files in our srcroot but also build up a
7480                 # "second hand" of symlinks to merge later
7481                 if self.mergeme(srcroot,destroot,outfile,secondhand,"",cfgfiledict,mymtime):
7482                         return 1
7483
7484                 # now, it's time for dealing our second hand; we'll loop until we can't merge anymore.  The rest are
7485                 # broken symlinks.  We'll merge them too.
7486                 lastlen=0
7487                 while len(secondhand) and len(secondhand)!=lastlen:
7488                         # clear the thirdhand.  Anything from our second hand that
7489                         # couldn't get merged will be added to thirdhand.
7490
7491                         thirdhand=[]
7492                         self.mergeme(srcroot,destroot,outfile,thirdhand,secondhand,cfgfiledict,mymtime)
7493
7494                         #swap hands
7495                         lastlen=len(secondhand)
7496
7497                         # our thirdhand now becomes our secondhand.  It's ok to throw
7498                         # away secondhand since thirdhand contains all the stuff that
7499                         # couldn't be merged.
7500                         secondhand = thirdhand
7501
7502                 if len(secondhand):
7503                         # force merge of remaining symlinks (broken or circular; oh well)
7504                         self.mergeme(srcroot,destroot,outfile,None,secondhand,cfgfiledict,mymtime)
7505
7506                 #restore umask
7507                 os.umask(prevmask)
7508
7509                 #if we opened it, close it
7510                 outfile.flush()
7511                 outfile.close()
7512
7513                 if os.path.exists(self.dbpkgdir):
7514                         writemsg_stdout(">>> Safely unmerging already-installed instance...\n")
7515                         self.dbdir = self.dbpkgdir
7516                         self.unmerge(oldcontents, trimworld=0, ldpath_mtimes=prev_mtimes)
7517                         self.dbdir = self.dbtmpdir
7518                         writemsg_stdout(">>> Original instance of package unmerged safely.\n")
7519
7520                 # We hold both directory locks.
7521                 self.dbdir = self.dbpkgdir
7522                 self.delete()
7523                 movefile(self.dbtmpdir, self.dbpkgdir, mysettings=self.settings)
7524                 contents = self.getcontents()
7525
7526                 #write out our collection of md5sums
7527                 if cfgfiledict.has_key("IGNORE"):
7528                         del cfgfiledict["IGNORE"]
7529
7530                 my_private_path = os.path.join(destroot, PRIVATE_PATH)
7531                 if not os.path.exists(my_private_path):
7532                         os.makedirs(my_private_path)
7533                         os.chown(my_private_path, os.getuid(), portage_gid)
7534                         os.chmod(my_private_path, 02770)
7535
7536                 writedict(cfgfiledict, conf_mem_file)
7537                 del conf_mem_file
7538
7539                 #do postinst script
7540                 a = doebuild(myebuild, "postinst", destroot, self.settings, use_cache=0,
7541                         tree=self.treetype, mydbapi=mydbapi, vartree=self.vartree)
7542
7543                 # XXX: Decide how to handle failures here.
7544                 if a != os.EX_OK:
7545                         writemsg("!!! FAILED postinst: "+str(a)+"\n", noiselevel=-1)
7546                         return a
7547
7548                 downgrade = False
7549                 for v in otherversions:
7550                         if pkgcmp(catpkgsplit(self.pkg)[1:], catpkgsplit(v)[1:]) < 0:
7551                                 downgrade = True
7552
7553                 #update environment settings, library paths. DO NOT change symlinks.
7554                 env_update(makelinks=(not downgrade),
7555                         target_root=self.settings["ROOT"], prev_mtimes=prev_mtimes,
7556                         contents=contents)
7557                 #dircache may break autoclean because it remembers the -MERGING-pkg file
7558                 global dircache
7559                 if dircache.has_key(self.dbcatdir):
7560                         del dircache[self.dbcatdir]
7561                 writemsg_stdout(">>> %s %s\n" % (self.mycpv,"merged."))
7562
7563                 # Process ebuild logfiles
7564                 elog_process(self.mycpv, self.settings)
7565                 if "noclean" not in self.settings.features:
7566                         doebuild(myebuild, "clean", destroot, self.settings,
7567                                 tree=self.treetype, mydbapi=mydbapi, vartree=self.vartree)
7568                 return os.EX_OK
7569
7570         def mergeme(self,srcroot,destroot,outfile,secondhand,stufftomerge,cfgfiledict,thismtime):
7571                 """
7572                 
7573                 This function handles actual merging of the package contents to the livefs.
7574                 It also handles config protection.
7575                 
7576                 @param srcroot: Where are we copying files from (usually ${D})
7577                 @type srcroot: String (Path)
7578                 @param destroot: Typically ${ROOT}
7579                 @type destroot: String (Path)
7580                 @param outfile: File to log operations to
7581                 @type outfile: File Object
7582                 @param secondhand: A set of items to merge in pass two (usually
7583                 or symlinks that point to non-existing files that may get merged later)
7584                 @type secondhand: List
7585                 @param stufftomerge: Either a diretory to merge, or a list of items.
7586                 @type stufftomerge: String or List
7587                 @param cfgfiledict: { File:mtime } mapping for config_protected files
7588                 @type cfgfiledict: Dictionary
7589                 @param thismtime: The current time (typically long(time.time())
7590                 @type thismtime: Long
7591                 @rtype: None or Boolean
7592                 @returns:
7593                 1. True on failure
7594                 2. None otherwise
7595                 
7596                 """
7597                 from os.path import sep, join
7598                 srcroot = normalize_path(srcroot).rstrip(sep) + sep
7599                 destroot = normalize_path(destroot).rstrip(sep) + sep
7600                 # this is supposed to merge a list of files.  There will be 2 forms of argument passing.
7601                 if type(stufftomerge)==types.StringType:
7602                         #A directory is specified.  Figure out protection paths, listdir() it and process it.
7603                         mergelist = listdir(join(srcroot, stufftomerge))
7604                         offset=stufftomerge
7605                 else:
7606                         mergelist=stufftomerge
7607                         offset=""
7608                 for x in mergelist:
7609                         mysrc = join(srcroot, offset, x)
7610                         mydest = join(destroot, offset, x)
7611                         # myrealdest is mydest without the $ROOT prefix (makes a difference if ROOT!="/")
7612                         myrealdest = join(sep, offset, x)
7613                         # stat file once, test using S_* macros many times (faster that way)
7614                         try:
7615                                 mystat=os.lstat(mysrc)
7616                         except OSError, e:
7617                                 writemsg("\n")
7618                                 writemsg(red("!!! ERROR: There appears to be ")+bold("FILE SYSTEM CORRUPTION.")+red(" A file that is listed\n"))
7619                                 writemsg(red("!!!        as existing is not capable of being stat'd. If you are using an\n"))
7620                                 writemsg(red("!!!        experimental kernel, please boot into a stable one, force an fsck,\n"))
7621                                 writemsg(red("!!!        and ensure your filesystem is in a sane state. ")+bold("'shutdown -Fr now'\n"))
7622                                 writemsg(red("!!!        File:  ")+str(mysrc)+"\n", noiselevel=-1)
7623                                 writemsg(red("!!!        Error: ")+str(e)+"\n", noiselevel=-1)
7624                                 sys.exit(1)
7625                         except Exception, e:
7626                                 writemsg("\n")
7627                                 writemsg(red("!!! ERROR: An unknown error has occurred during the merge process.\n"))
7628                                 writemsg(red("!!!        A stat call returned the following error for the following file:"))
7629                                 writemsg(    "!!!        Please ensure that your filesystem is intact, otherwise report\n")
7630                                 writemsg(    "!!!        this as a portage bug at bugs.gentoo.org. Append 'emerge info'.\n")
7631                                 writemsg(    "!!!        File:  "+str(mysrc)+"\n", noiselevel=-1)
7632                                 writemsg(    "!!!        Error: "+str(e)+"\n", noiselevel=-1)
7633                                 sys.exit(1)
7634
7635
7636                         mymode=mystat[stat.ST_MODE]
7637                         # handy variables; mydest is the target object on the live filesystems;
7638                         # mysrc is the source object in the temporary install dir
7639                         try:
7640                                 mydmode = os.lstat(mydest).st_mode
7641                         except OSError, e:
7642                                 if e.errno != errno.ENOENT:
7643                                         raise
7644                                 del e
7645                                 #dest file doesn't exist
7646                                 mydmode=None
7647
7648                         if stat.S_ISLNK(mymode):
7649                                 # we are merging a symbolic link
7650                                 myabsto=abssymlink(mysrc)
7651                                 if myabsto.startswith(srcroot):
7652                                         myabsto=myabsto[len(srcroot):]
7653                                 myabsto = myabsto.lstrip(sep)
7654                                 myto=os.readlink(mysrc)
7655                                 if self.settings and self.settings["D"]:
7656                                         if myto.startswith(self.settings["D"]):
7657                                                 myto=myto[len(self.settings["D"]):]
7658                                 # myrealto contains the path of the real file to which this symlink points.
7659                                 # we can simply test for existence of this file to see if the target has been merged yet
7660                                 myrealto = normalize_path(os.path.join(destroot, myabsto))
7661                                 if mydmode!=None:
7662                                         #destination exists
7663                                         if not stat.S_ISLNK(mydmode):
7664                                                 if stat.S_ISDIR(mydmode):
7665                                                         # directory in the way: we can't merge a symlink over a directory
7666                                                         # we won't merge this, continue with next file...
7667                                                         continue
7668
7669                                                 if os.path.exists(mysrc) and stat.S_ISDIR(os.stat(mysrc)[stat.ST_MODE]):
7670                                                         # Kill file blocking installation of symlink to dir #71787
7671                                                         pass
7672                                                 elif self.isprotected(mydest):
7673                                                         # Use md5 of the target in ${D} if it exists...
7674                                                         try:
7675                                                                 newmd5 = portage_checksum.perform_md5(
7676                                                                         join(srcroot, myabsto))
7677                                                         except portage_exception.FileNotFound:
7678                                                                 # Maybe the target is merged already.
7679                                                                 try:
7680                                                                         newmd5 = portage_checksum.perform_md5(
7681                                                                                 myrealto)
7682                                                                 except portage_exception.FileNotFound:
7683                                                                         newmd5 = None
7684                                                         mydest = new_protect_filename(mydest,newmd5=newmd5)
7685
7686                                 # if secondhand is None it means we're operating in "force" mode and should not create a second hand.
7687                                 if (secondhand!=None) and (not os.path.exists(myrealto)):
7688                                         # either the target directory doesn't exist yet or the target file doesn't exist -- or
7689                                         # the target is a broken symlink.  We will add this file to our "second hand" and merge
7690                                         # it later.
7691                                         secondhand.append(mysrc[len(srcroot):])
7692                                         continue
7693                                 # unlinking no longer necessary; "movefile" will overwrite symlinks atomically and correctly
7694                                 mymtime=movefile(mysrc,mydest,newmtime=thismtime,sstat=mystat, mysettings=self.settings)
7695                                 if mymtime!=None:
7696                                         writemsg_stdout(">>> %s -> %s\n" % (mydest, myto))
7697                                         outfile.write("sym "+myrealdest+" -> "+myto+" "+str(mymtime)+"\n")
7698                                 else:
7699                                         print "!!! Failed to move file."
7700                                         print "!!!",mydest,"->",myto
7701                                         sys.exit(1)
7702                         elif stat.S_ISDIR(mymode):
7703                                 # we are merging a directory
7704                                 if mydmode!=None:
7705                                         # destination exists
7706
7707                                         if bsd_chflags:
7708                                                 # Save then clear flags on dest.
7709                                                 dflags=bsd_chflags.lgetflags(mydest)
7710                                                 if dflags != 0 and bsd_chflags.lchflags(mydest, 0) < 0:
7711                                                         writemsg("!!! Couldn't clear flags on '"+mydest+"'.\n",
7712                                                                 noiselevel=-1)
7713
7714                                         if not os.access(mydest, os.W_OK):
7715                                                 pkgstuff = pkgsplit(self.pkg)
7716                                                 writemsg("\n!!! Cannot write to '"+mydest+"'.\n", noiselevel=-1)
7717                                                 writemsg("!!! Please check permissions and directories for broken symlinks.\n")
7718                                                 writemsg("!!! You may start the merge process again by using ebuild:\n")
7719                                                 writemsg("!!! ebuild "+self.settings["PORTDIR"]+"/"+self.cat+"/"+pkgstuff[0]+"/"+self.pkg+".ebuild merge\n")
7720                                                 writemsg("!!! And finish by running this: env-update\n\n")
7721                                                 return 1
7722
7723                                         if stat.S_ISLNK(mydmode) or stat.S_ISDIR(mydmode):
7724                                                 # a symlink to an existing directory will work for us; keep it:
7725                                                 writemsg_stdout("--- %s/\n" % mydest)
7726                                                 if bsd_chflags:
7727                                                         bsd_chflags.lchflags(mydest, dflags)
7728                                         else:
7729                                                 # a non-directory and non-symlink-to-directory.  Won't work for us.  Move out of the way.
7730                                                 if movefile(mydest,mydest+".backup", mysettings=self.settings) is None:
7731                                                         sys.exit(1)
7732                                                 print "bak",mydest,mydest+".backup"
7733                                                 #now create our directory
7734                                                 if self.settings.selinux_enabled():
7735                                                         sid = selinux.get_sid(mysrc)
7736                                                         selinux.secure_mkdir(mydest,sid)
7737                                                 else:
7738                                                         os.mkdir(mydest)
7739                                                 if bsd_chflags:
7740                                                         bsd_chflags.lchflags(mydest, dflags)
7741                                                 os.chmod(mydest,mystat[0])
7742                                                 os.chown(mydest,mystat[4],mystat[5])
7743                                                 writemsg_stdout(">>> %s/\n" % mydest)
7744                                 else:
7745                                         #destination doesn't exist
7746                                         if self.settings.selinux_enabled():
7747                                                 sid = selinux.get_sid(mysrc)
7748                                                 selinux.secure_mkdir(mydest,sid)
7749                                         else:
7750                                                 os.mkdir(mydest)
7751                                         os.chmod(mydest,mystat[0])
7752                                         os.chown(mydest,mystat[4],mystat[5])
7753                                         writemsg_stdout(">>> %s/\n" % mydest)
7754                                 outfile.write("dir "+myrealdest+"\n")
7755                                 # recurse and merge this directory
7756                                 if self.mergeme(srcroot, destroot, outfile, secondhand,
7757                                         join(offset, x), cfgfiledict, thismtime):
7758                                         return 1
7759                         elif stat.S_ISREG(mymode):
7760                                 # we are merging a regular file
7761                                 mymd5=portage_checksum.perform_md5(mysrc,calc_prelink=1)
7762                                 # calculate config file protection stuff
7763                                 mydestdir=os.path.dirname(mydest)
7764                                 moveme=1
7765                                 zing="!!!"
7766                                 if mydmode!=None:
7767                                         # destination file exists
7768                                         if stat.S_ISDIR(mydmode):
7769                                                 # install of destination is blocked by an existing directory with the same name
7770                                                 moveme=0
7771                                                 writemsg_stdout("!!! %s\n" % mydest)
7772                                         elif stat.S_ISREG(mydmode) or (stat.S_ISLNK(mydmode) and os.path.exists(mydest) and stat.S_ISREG(os.stat(mydest)[stat.ST_MODE])):
7773                                                 cfgprot=0
7774                                                 # install of destination is blocked by an existing regular file,
7775                                                 # or by a symlink to an existing regular file;
7776                                                 # now, config file management may come into play.
7777                                                 # we only need to tweak mydest if cfg file management is in play.
7778                                                 if self.isprotected(mydest):
7779                                                         # we have a protection path; enable config file management.
7780                                                         destmd5=portage_checksum.perform_md5(mydest,calc_prelink=1)
7781                                                         if mymd5==destmd5:
7782                                                                 #file already in place; simply update mtimes of destination
7783                                                                 os.utime(mydest,(thismtime,thismtime))
7784                                                                 zing="---"
7785                                                                 moveme=0
7786                                                         else:
7787                                                                 if mymd5 == cfgfiledict.get(myrealdest, [None])[0]:
7788                                                                         """ An identical update has previously been
7789                                                                         merged.  Skip it unless the user has chosen
7790                                                                         --noconfmem."""
7791                                                                         zing = "-o-"
7792                                                                         moveme = cfgfiledict["IGNORE"]
7793                                                                         cfgprot = cfgfiledict["IGNORE"]
7794                                                                 else:
7795                                                                         moveme = 1
7796                                                                         cfgprot = 1
7797                                                         if moveme:
7798                                                                 # Merging a new file, so update confmem.
7799                                                                 cfgfiledict[myrealdest] = [mymd5]
7800                                                         elif destmd5 == cfgfiledict.get(myrealdest, [None])[0]:
7801                                                                 """A previously remembered update has been
7802                                                                 accepted, so it is removed from confmem."""
7803                                                                 del cfgfiledict[myrealdest]
7804                                                 if cfgprot:
7805                                                         mydest = new_protect_filename(mydest, newmd5=mymd5)
7806
7807                                 # whether config protection or not, we merge the new file the
7808                                 # same way.  Unless moveme=0 (blocking directory)
7809                                 if moveme:
7810                                         mymtime=movefile(mysrc,mydest,newmtime=thismtime,sstat=mystat, mysettings=self.settings)
7811                                         if mymtime is None:
7812                                                 sys.exit(1)
7813                                         zing=">>>"
7814                                 else:
7815                                         mymtime = long(time.time())
7816                                         # We need to touch the destination so that on --update the
7817                                         # old package won't yank the file with it. (non-cfgprot related)
7818                                         os.utime(mydest, (mymtime, mymtime))
7819                                         zing="---"
7820                                 if self.settings["USERLAND"] == "Darwin" and myrealdest[-2:] == ".a":
7821
7822                                         # XXX kludge, can be killed when portage stops relying on
7823                                         # md5+mtime, and uses refcounts
7824                                         # alright, we've fooled w/ mtime on the file; this pisses off static archives
7825                                         # basically internal mtime != file's mtime, so the linker (falsely) thinks
7826                                         # the archive is stale, and needs to have it's toc rebuilt.
7827
7828                                         myf = open(mydest, "r+")
7829
7830                                         # ar mtime field is digits padded with spaces, 12 bytes.
7831                                         lms=str(thismtime+5).ljust(12)
7832                                         myf.seek(0)
7833                                         magic=myf.read(8)
7834                                         if magic != "!<arch>\n":
7835                                                 # not an archive (dolib.a from portage.py makes it here fex)
7836                                                 myf.close()
7837                                         else:
7838                                                 st = os.stat(mydest)
7839                                                 while myf.tell() < st.st_size - 12:
7840                                                         # skip object name
7841                                                         myf.seek(16,1)
7842
7843                                                         # update mtime
7844                                                         myf.write(lms)
7845
7846                                                         # skip uid/gid/mperm
7847                                                         myf.seek(20,1)
7848
7849                                                         # read the archive member's size
7850                                                         x=long(myf.read(10))
7851
7852                                                         # skip the trailing newlines, and add the potential
7853                                                         # extra padding byte if it's not an even size
7854                                                         myf.seek(x + 2 + (x % 2),1)
7855
7856                                                 # and now we're at the end. yay.
7857                                                 myf.close()
7858                                                 mymd5 = portage_checksum.perform_md5(mydest, calc_prelink=1)
7859                                         os.utime(mydest,(thismtime,thismtime))
7860
7861                                 if mymtime!=None:
7862                                         zing=">>>"
7863                                         outfile.write("obj "+myrealdest+" "+mymd5+" "+str(mymtime)+"\n")
7864                                 writemsg_stdout("%s %s\n" % (zing,mydest))
7865                         else:
7866                                 # we are merging a fifo or device node
7867                                 zing="!!!"
7868                                 if mydmode is None:
7869                                         # destination doesn't exist
7870                                         if movefile(mysrc,mydest,newmtime=thismtime,sstat=mystat, mysettings=self.settings)!=None:
7871                                                 zing=">>>"
7872                                         else:
7873                                                 sys.exit(1)
7874                                 if stat.S_ISFIFO(mymode):
7875                                         outfile.write("fif %s\n" % myrealdest)
7876                                 else:
7877                                         outfile.write("dev %s\n" % myrealdest)
7878                                 writemsg_stdout(zing+" "+mydest+"\n")
7879
7880         def merge(self, mergeroot, inforoot, myroot, myebuild=None, cleanup=0,
7881                 mydbapi=None, prev_mtimes=None):
7882                 try:
7883                         self.lockdb()
7884                         return self.treewalk(mergeroot, myroot, inforoot, myebuild,
7885                                 cleanup=cleanup, mydbapi=mydbapi, prev_mtimes=prev_mtimes)
7886                 finally:
7887                         self.unlockdb()
7888
7889         def getstring(self,name):
7890                 "returns contents of a file with whitespace converted to spaces"
7891                 if not os.path.exists(self.dbdir+"/"+name):
7892                         return ""
7893                 myfile=open(self.dbdir+"/"+name,"r")
7894                 mydata=myfile.read().split()
7895                 myfile.close()
7896                 return " ".join(mydata)
7897
7898         def copyfile(self,fname):
7899                 shutil.copyfile(fname,self.dbdir+"/"+os.path.basename(fname))
7900
7901         def getfile(self,fname):
7902                 if not os.path.exists(self.dbdir+"/"+fname):
7903                         return ""
7904                 myfile=open(self.dbdir+"/"+fname,"r")
7905                 mydata=myfile.read()
7906                 myfile.close()
7907                 return mydata
7908
7909         def setfile(self,fname,data):
7910                 write_atomic(os.path.join(self.dbdir, fname), data)
7911
7912         def getelements(self,ename):
7913                 if not os.path.exists(self.dbdir+"/"+ename):
7914                         return []
7915                 myelement=open(self.dbdir+"/"+ename,"r")
7916                 mylines=myelement.readlines()
7917                 myreturn=[]
7918                 for x in mylines:
7919                         for y in x[:-1].split():
7920                                 myreturn.append(y)
7921                 myelement.close()
7922                 return myreturn
7923
7924         def setelements(self,mylist,ename):
7925                 myelement=open(self.dbdir+"/"+ename,"w")
7926                 for x in mylist:
7927                         myelement.write(x+"\n")
7928                 myelement.close()
7929
7930         def isregular(self):
7931                 "Is this a regular package (does it have a CATEGORY file?  A dblink can be virtual *and* regular)"
7932                 return os.path.exists(self.dbdir+"/CATEGORY")
7933
7934 class FetchlistDict(UserDict.DictMixin):
7935         """This provide a mapping interface to retrieve fetch lists.  It's used
7936         to allow portage_manifest.Manifest to access fetch lists via a standard
7937         mapping interface rather than use the dbapi directly."""
7938         def __init__(self, pkgdir, settings, mydbapi):
7939                 """pkgdir is a directory containing ebuilds and settings is passed into
7940                 portdbapi.getfetchlist for __getitem__ calls."""
7941                 self.pkgdir = pkgdir
7942                 self.cp = os.sep.join(pkgdir.split(os.sep)[-2:])
7943                 self.settings = settings
7944                 self.mytree = os.path.realpath(os.path.dirname(os.path.dirname(pkgdir)))
7945                 self.portdb = mydbapi
7946         def __getitem__(self, pkg_key):
7947                 """Returns the complete fetch list for a given package."""
7948                 return self.portdb.getfetchlist(pkg_key, mysettings=self.settings,
7949                         all=True, mytree=self.mytree)[1]
7950         def has_key(self, pkg_key):
7951                 """Returns true if the given package exists within pkgdir."""
7952                 return pkg_key in self.keys()
7953         def keys(self):
7954                 """Returns keys for all packages within pkgdir"""
7955                 return self.portdb.cp_list(self.cp, mytree=self.mytree)
7956
7957 def pkgmerge(mytbz2, myroot, mysettings, mydbapi=None, vartree=None, prev_mtimes=None):
7958         """will merge a .tbz2 file, returning a list of runtime dependencies
7959                 that must be satisfied, or None if there was a merge error.     This
7960                 code assumes the package exists."""
7961         global db
7962         if mydbapi is None:
7963                 mydbapi = db[myroot]["bintree"].dbapi
7964         if vartree is None:
7965                 vartree = db[myroot]["vartree"]
7966         if mytbz2[-5:]!=".tbz2":
7967                 print "!!! Not a .tbz2 file"
7968                 return 1
7969
7970         tbz2_lock = None
7971         builddir_lock = None
7972         catdir_lock = None
7973         try:
7974                 """ Don't lock the tbz2 file because the filesytem could be readonly or
7975                 shared by a cluster."""
7976                 #tbz2_lock = portage_locks.lockfile(mytbz2, wantnewlockfile=1)
7977
7978                 mypkg = os.path.basename(mytbz2)[:-5]
7979                 xptbz2 = xpak.tbz2(mytbz2)
7980                 mycat = xptbz2.getfile("CATEGORY")
7981                 if not mycat:
7982                         writemsg("!!! CATEGORY info missing from info chunk, aborting...\n",
7983                                 noiselevel=-1)
7984                         return 1
7985                 mycat = mycat.strip()
7986
7987                 # These are the same directories that would be used at build time.
7988                 builddir = os.path.join(
7989                         mysettings["PORTAGE_TMPDIR"], "portage", mycat, mypkg)
7990                 catdir = os.path.dirname(builddir)
7991                 pkgloc = os.path.join(builddir, "image")
7992                 infloc = os.path.join(builddir, "build-info")
7993                 myebuild = os.path.join(
7994                         infloc, os.path.basename(mytbz2)[:-4] + "ebuild")
7995                 portage_util.ensure_dirs(os.path.dirname(catdir),
7996                         uid=portage_uid, gid=portage_gid, mode=070, mask=0)
7997                 catdir_lock = portage_locks.lockdir(catdir)
7998                 portage_util.ensure_dirs(catdir,
7999                         uid=portage_uid, gid=portage_gid, mode=070, mask=0)
8000                 builddir_lock = portage_locks.lockdir(builddir)
8001                 try:
8002                         portage_locks.unlockdir(catdir_lock)
8003                 finally:
8004                         catdir_lock = None
8005                 try:
8006                         shutil.rmtree(builddir)
8007                 except (IOError, OSError), e:
8008                         if e.errno != errno.ENOENT:
8009                                 raise
8010                         del e
8011                 for mydir in (builddir, pkgloc, infloc):
8012                         portage_util.ensure_dirs(mydir, uid=portage_uid,
8013                                 gid=portage_gid, mode=0755)
8014                 writemsg_stdout(">>> Extracting info\n")
8015                 xptbz2.unpackinfo(infloc)
8016                 mysettings.load_infodir(infloc)
8017                 # Store the md5sum in the vdb.
8018                 fp = open(os.path.join(infloc, "BINPKGMD5"), "w")
8019                 fp.write(str(portage_checksum.perform_md5(mytbz2))+"\n")
8020                 fp.close()
8021
8022                 debug = mysettings.get("PORTAGE_DEBUG", "") == "1"
8023
8024                 # Eventually we'd like to pass in the saved ebuild env here.
8025                 retval = doebuild(myebuild, "setup", myroot, mysettings, debug=debug,
8026                         tree="bintree", mydbapi=mydbapi, vartree=vartree)
8027                 if retval != os.EX_OK:
8028                         writemsg("!!! Setup failed: %s\n" % retval, noiselevel=-1)
8029                         return retval
8030
8031                 writemsg_stdout(">>> Extracting %s\n" % mypkg)
8032                 retval = portage_exec.spawn_bash(
8033                         "bzip2 -dqc -- '%s' | tar -xp -C '%s' -f -" % (mytbz2, pkgloc),
8034                         env=mysettings.environ())
8035                 if retval != os.EX_OK:
8036                         writemsg("!!! Error Extracting '%s'\n" % mytbz2, noiselevel=-1)
8037                         return retval
8038                 #portage_locks.unlockfile(tbz2_lock)
8039                 #tbz2_lock = None
8040
8041                 mylink = dblink(mycat, mypkg, myroot, mysettings, vartree=vartree,
8042                         treetype="bintree")
8043                 retval = mylink.merge(pkgloc, infloc, myroot, myebuild, cleanup=0,
8044                         mydbapi=mydbapi, prev_mtimes=prev_mtimes)
8045                 return retval
8046         finally:
8047                 if tbz2_lock:
8048                         portage_locks.unlockfile(tbz2_lock)
8049                 if builddir_lock:
8050                         try:
8051                                 shutil.rmtree(builddir)
8052                         except (IOError, OSError), e:
8053                                 if e.errno != errno.ENOENT:
8054                                         raise
8055                                 del e
8056                         portage_locks.unlockdir(builddir_lock)
8057                         try:
8058                                 if not catdir_lock:
8059                                         # Lock catdir for removal if empty.
8060                                         catdir_lock = portage_locks.lockdir(catdir)
8061                         finally:
8062                                 if catdir_lock:
8063                                         try:
8064                                                 os.rmdir(catdir)
8065                                         except OSError, e:
8066                                                 if e.errno not in (errno.ENOENT,
8067                                                         errno.ENOTEMPTY, errno.EEXIST):
8068                                                         raise
8069                                                 del e
8070                                         portage_locks.unlockdir(catdir_lock)
8071
8072 def deprecated_profile_check():
8073         if not os.access(DEPRECATED_PROFILE_FILE, os.R_OK):
8074                 return False
8075         deprecatedfile = open(DEPRECATED_PROFILE_FILE, "r")
8076         dcontent = deprecatedfile.readlines()
8077         deprecatedfile.close()
8078         newprofile = dcontent[0]
8079         writemsg(red("\n!!! Your current profile is deprecated and not supported anymore.\n"),
8080                 noiselevel=-1)
8081         writemsg(red("!!! Please upgrade to the following profile if possible:\n"),
8082                 noiselevel=-1)
8083         writemsg(8*" "+green(newprofile)+"\n", noiselevel=-1)
8084         if len(dcontent) > 1:
8085                 writemsg("To upgrade do the following steps:\n", noiselevel=-1)
8086                 for myline in dcontent[1:]:
8087                         writemsg(myline, noiselevel=-1)
8088                 writemsg("\n\n", noiselevel=-1)
8089         return True
8090
8091 # gets virtual package settings
8092 def getvirtuals(myroot):
8093         global settings
8094         writemsg("--- DEPRECATED call to getvirtual\n")
8095         return settings.getvirtuals(myroot)
8096
8097 def commit_mtimedb(mydict=None, filename=None):
8098         if mydict is None:
8099                 global mtimedb
8100                 if "mtimedb" not in globals() or mtimedb is None:
8101                         return
8102                 mtimedb.commit()
8103                 return
8104         if filename is None:
8105                 global mtimedbfile
8106                 filename = mtimedbfile
8107         mydict["version"] = VERSION
8108         d = {} # for full backward compat, pickle it as a plain dict object.
8109         d.update(mydict)
8110         try:
8111                 f = atomic_ofstream(filename)
8112                 cPickle.dump(d, f, -1)
8113                 f.close()
8114                 portage_util.apply_secpass_permissions(filename, uid=uid, gid=portage_gid, mode=0664)
8115         except (IOError, OSError), e:
8116                 pass
8117
8118 def portageexit():
8119         global uid,portage_gid,portdb,db
8120         if secpass and not os.environ.has_key("SANDBOX_ACTIVE"):
8121                 close_portdbapi_caches()
8122                 commit_mtimedb()
8123
8124 atexit_register(portageexit)
8125
8126 def global_updates(mysettings, trees, prev_mtimes):
8127         """
8128         Perform new global updates if they exist in $PORTDIR/profiles/updates/.
8129
8130         @param mysettings: A config instance for ROOT="/".
8131         @type mysettings: config
8132         @param trees: A dictionary containing portage trees.
8133         @type trees: dict
8134         @param prev_mtimes: A dictionary containing mtimes of files located in
8135                 $PORTDIR/profiles/updates/.
8136         @type prev_mtimes: dict
8137         @rtype: None or List
8138         @return: None if no were no updates, otherwise a list of update commands
8139                 that have been performed.
8140         """
8141         # only do this if we're root and not running repoman/ebuild digest
8142         global secpass
8143         if secpass < 2 or "SANDBOX_ACTIVE" in os.environ:
8144                 return
8145         updpath = os.path.join(mysettings["PORTDIR"], "profiles", "updates")
8146
8147         try:
8148                 if mysettings["PORTAGE_CALLER"] == "fixpackages":
8149                         update_data = grab_updates(updpath)
8150                 else:
8151                         update_data = grab_updates(updpath, prev_mtimes)
8152         except portage_exception.DirectoryNotFound:
8153                 writemsg("--- 'profiles/updates' is empty or not available. Empty portage tree?\n")
8154                 return
8155         myupd = None
8156         if len(update_data) > 0:
8157                 do_upgrade_packagesmessage = 0
8158                 myupd = []
8159                 timestamps = {}
8160                 for mykey, mystat, mycontent in update_data:
8161                         writemsg_stdout("\n\n")
8162                         writemsg_stdout(green("Performing Global Updates: ")+bold(mykey)+"\n")
8163                         writemsg_stdout("(Could take a couple of minutes if you have a lot of binary packages.)\n")
8164                         writemsg_stdout("  "+bold(".")+"='update pass'  "+bold("*")+"='binary update'  "+bold("@")+"='/var/db move'\n"+"  "+bold("s")+"='/var/db SLOT move' "+bold("S")+"='binary SLOT move' "+bold("p")+"='update /etc/portage/package.*'\n")
8165                         valid_updates, errors = parse_updates(mycontent)
8166                         myupd.extend(valid_updates)
8167                         writemsg_stdout(len(valid_updates) * "." + "\n")
8168                         if len(errors) == 0:
8169                                 # Update our internal mtime since we
8170                                 # processed all of our directives.
8171                                 timestamps[mykey] = long(mystat.st_mtime)
8172                         else:
8173                                 for msg in errors:
8174                                         writemsg("%s\n" % msg, noiselevel=-1)
8175
8176                 update_config_files("/",
8177                         mysettings.get("CONFIG_PROTECT","").split(),
8178                         mysettings.get("CONFIG_PROTECT_MASK","").split(),
8179                         myupd)
8180
8181                 trees["/"]["bintree"] = binarytree("/", mysettings["PKGDIR"],
8182                         settings=mysettings)
8183                 for update_cmd in myupd:
8184                         if update_cmd[0] == "move":
8185                                 trees["/"]["vartree"].dbapi.move_ent(update_cmd)
8186                                 trees["/"]["bintree"].move_ent(update_cmd)
8187                         elif update_cmd[0] == "slotmove":
8188                                 trees["/"]["vartree"].dbapi.move_slot_ent(update_cmd)
8189                                 trees["/"]["bintree"].move_slot_ent(update_cmd)
8190
8191                 # The above global updates proceed quickly, so they
8192                 # are considered a single mtimedb transaction.
8193                 if len(timestamps) > 0:
8194                         # We do not update the mtime in the mtimedb
8195                         # until after _all_ of the above updates have
8196                         # been processed because the mtimedb will
8197                         # automatically commit when killed by ctrl C.
8198                         for mykey, mtime in timestamps.iteritems():
8199                                 prev_mtimes[mykey] = mtime
8200
8201                 # We gotta do the brute force updates for these now.
8202                 if mysettings["PORTAGE_CALLER"] == "fixpackages" or \
8203                 "fixpackages" in mysettings.features:
8204                         trees["/"]["bintree"].update_ents(myupd)
8205                 else:
8206                         do_upgrade_packagesmessage = 1
8207
8208                 # Update progress above is indicated by characters written to stdout so
8209                 # we print a couple new lines here to separate the progress output from
8210                 # what follows.
8211                 print
8212                 print
8213
8214                 if do_upgrade_packagesmessage and \
8215                         listdir(os.path.join(mysettings["PKGDIR"], "All"), EmptyOnError=1):
8216                         writemsg_stdout(" ** Skipping packages. Run 'fixpackages' or set it in FEATURES to fix the")
8217                         writemsg_stdout("\n    tbz2's in the packages directory. "+bold("Note: This can take a very long time."))
8218                         writemsg_stdout("\n")
8219         if myupd:
8220                 return myupd
8221
8222 #continue setting up other trees
8223
8224 class MtimeDB(dict):
8225         def __init__(self, filename):
8226                 dict.__init__(self)
8227                 self.filename = filename
8228                 self._load(filename)
8229
8230         def _load(self, filename):
8231                 try:
8232                         f = open(filename)
8233                         mypickle = cPickle.Unpickler(f)
8234                         mypickle.find_global = None
8235                         d = mypickle.load()
8236                         f.close()
8237                         del f
8238                 except (IOError, OSError, EOFError, cPickle.UnpicklingError):
8239                         d = {}
8240
8241                 if "old" in d:
8242                         d["updates"] = d["old"]
8243                         del d["old"]
8244                 if "cur" in d:
8245                         del d["cur"]
8246
8247                 d.setdefault("starttime", 0)
8248                 d.setdefault("version", "")
8249                 for k in ("info", "ldpath", "updates"):
8250                         d.setdefault(k, {})
8251
8252                 mtimedbkeys = set(("info", "ldpath", "resume", "resume_backup",
8253                         "starttime", "updates", "version"))
8254
8255                 for k in d.keys():
8256                         if k not in mtimedbkeys:
8257                                 writemsg("Deleting invalid mtimedb key: %s\n" % str(k))
8258                                 del d[k]
8259                 self.update(d)
8260                 self._clean_data = copy.deepcopy(d)
8261
8262         def commit(self):
8263                 if not self.filename:
8264                         return
8265                 d = {}
8266                 d.update(self)
8267                 # Only commit if the internal state has changed.
8268                 if d != self._clean_data:
8269                         commit_mtimedb(mydict=d, filename=self.filename)
8270                         self._clean_data = copy.deepcopy(d)
8271
8272 def create_trees(config_root=None, target_root=None, trees=None):
8273         if trees is None:
8274                 trees = {}
8275         else:
8276                 # clean up any existing portdbapi instances
8277                 for myroot in trees:
8278                         portdb = trees[myroot]["porttree"].dbapi
8279                         portdb.close_caches()
8280                         portdbapi.portdbapi_instances.remove(portdb)
8281                         del trees[myroot]["porttree"], myroot, portdb
8282
8283         settings = config(config_root=config_root, target_root=target_root,
8284                 config_incrementals=portage_const.INCREMENTALS)
8285         settings.lock()
8286         settings.validate()
8287
8288         myroots = [(settings["ROOT"], settings)]
8289         if settings["ROOT"] != "/":
8290                 settings = config(config_root=None, target_root=None,
8291                         config_incrementals=portage_const.INCREMENTALS)
8292                 settings.lock()
8293                 settings.validate()
8294                 myroots.append((settings["ROOT"], settings))
8295
8296         for myroot, mysettings in myroots:
8297                 trees[myroot] = portage_util.LazyItemsDict(trees.get(myroot, None))
8298                 trees[myroot].addLazySingleton("virtuals", mysettings.getvirtuals, myroot)
8299                 trees[myroot].addLazySingleton(
8300                         "vartree", vartree, myroot, categories=mysettings.categories,
8301                                 settings=mysettings)
8302                 trees[myroot].addLazySingleton("porttree",
8303                         portagetree, myroot, settings=mysettings)
8304                 trees[myroot].addLazySingleton("bintree",
8305                         binarytree, myroot, mysettings["PKGDIR"], settings=mysettings)
8306         return trees
8307
8308 # Initialization of legacy globals.  No functions/classes below this point
8309 # please!  When the above functions and classes become independent of the
8310 # below global variables, it will be possible to make the below code
8311 # conditional on a backward compatibility flag (backward compatibility could
8312 # be disabled via an environment variable, for example).  This will enable new
8313 # code that is aware of this flag to import portage without the unnecessary
8314 # overhead (and other issues!) of initializing the legacy globals.
8315
8316 def init_legacy_globals():
8317         global db, settings, root, portdb, selinux_enabled, mtimedbfile, mtimedb, \
8318         archlist, features, groups, pkglines, thirdpartymirrors, usedefaults, \
8319         profiledir, flushmtimedb
8320
8321         # Portage needs to ensure a sane umask for the files it creates.
8322         os.umask(022)
8323
8324         kwargs = {}
8325         for k, envvar in (("config_root", "PORTAGE_CONFIGROOT"), ("target_root", "ROOT")):
8326                 kwargs[k] = os.environ.get(envvar, "/")
8327
8328         db = create_trees(**kwargs)
8329
8330         settings = db["/"]["vartree"].settings
8331         portdb = db["/"]["porttree"].dbapi
8332
8333         for myroot in db:
8334                 if myroot != "/":
8335                         settings = db[myroot]["vartree"].settings
8336                         portdb = db[myroot]["porttree"].dbapi
8337                         break
8338
8339         root = settings["ROOT"]
8340
8341         mtimedbfile = os.path.join("/", CACHE_PATH.lstrip(os.path.sep), "mtimedb")
8342         mtimedb = MtimeDB(mtimedbfile)
8343
8344         # ========================================================================
8345         # COMPATIBILITY
8346         # These attributes should not be used
8347         # within Portage under any circumstances.
8348         # ========================================================================
8349         archlist    = settings.archlist()
8350         features    = settings.features
8351         groups      = settings["ACCEPT_KEYWORDS"].split()
8352         pkglines    = settings.packages
8353         selinux_enabled   = settings.selinux_enabled()
8354         thirdpartymirrors = settings.thirdpartymirrors()
8355         usedefaults       = settings.use_defs
8356         profiledir  = None
8357         if os.path.isdir(PROFILE_PATH):
8358                 profiledir = PROFILE_PATH
8359         def flushmtimedb(record):
8360                 writemsg("portage.flushmtimedb() is DEPRECATED\n")
8361         # ========================================================================
8362         # COMPATIBILITY
8363         # These attributes should not be used
8364         # within Portage under any circumstances.
8365         # ========================================================================
8366
8367 # WARNING!
8368 # The PORTAGE_LEGACY_GLOBALS environment variable is reserved for internal
8369 # use within Portage.  External use of this variable is unsupported because
8370 # it is experimental and it's behavior is likely to change.
8371 if "PORTAGE_LEGACY_GLOBALS" not in os.environ:
8372         init_legacy_globals()
8373
8374 # Clear the cache
8375 dircache={}
8376
8377 # ============================================================================
8378 # ============================================================================
8379