8cc167dbf438dbb20d898721bf7cd1532a98fee5
[portage.git] / pym / portage.py
1 # portage.py -- core Portage functionality
2 # Copyright 1998-2004 Gentoo Foundation
3 # Distributed under the terms of the GNU General Public License v2
4 # $Id$
5
6
7 VERSION="$Rev$"[6:-2] + "-svn"
8
9 # ===========================================================================
10 # START OF IMPORTS -- START OF IMPORTS -- START OF IMPORTS -- START OF IMPORT
11 # ===========================================================================
12
13 try:
14         import sys
15 except ImportError:
16         print "Failed to import sys! Something is _VERY_ wrong with python."
17         raise
18
19 try:
20         import copy, errno, os, re, shutil, time, types
21         try:
22                 import cPickle
23         except ImportError:
24                 import pickle as cPickle
25
26         import stat
27         import commands
28         from time import sleep
29         from random import shuffle
30         import UserDict
31         if getattr(__builtins__, "set", None) is None:
32                 from sets import Set as set
33         from itertools import chain, izip
34 except ImportError, e:
35         sys.stderr.write("\n\n")
36         sys.stderr.write("!!! Failed to complete python imports. These are internal modules for\n")
37         sys.stderr.write("!!! python and failure here indicates that you have a problem with python\n")
38         sys.stderr.write("!!! itself and thus portage is not able to continue processing.\n\n")
39
40         sys.stderr.write("!!! You might consider starting python with verbose flags to see what has\n")
41         sys.stderr.write("!!! gone wrong. Here is the information we got for this exception:\n")
42         sys.stderr.write("    "+str(e)+"\n\n");
43         raise
44
45 try:
46         # XXX: This should get renamed to bsd_chflags, I think.
47         import chflags
48         bsd_chflags = chflags
49 except ImportError:
50         bsd_chflags = None
51
52 try:
53         from cache.cache_errors import CacheError
54         import cvstree
55         import xpak
56         import getbinpkg
57         import portage_dep
58         from portage_dep import dep_getcpv, dep_getkey, get_operator, \
59                 isjustname, isspecific, isvalidatom, \
60                 match_from_list, match_to_list, best_match_to_list
61
62         # XXX: This needs to get cleaned up.
63         import output
64         from output import bold, colorize, green, red, yellow
65
66         import portage_const
67         from portage_const import VDB_PATH, PRIVATE_PATH, CACHE_PATH, DEPCACHE_PATH, \
68           USER_CONFIG_PATH, MODULES_FILE_PATH, CUSTOM_PROFILE_PATH, PORTAGE_BASE_PATH, \
69           PORTAGE_BIN_PATH, PORTAGE_PYM_PATH, PROFILE_PATH, LOCALE_DATA_PATH, \
70           EBUILD_SH_BINARY, SANDBOX_BINARY, BASH_BINARY, \
71           MOVE_BINARY, PRELINK_BINARY, WORLD_FILE, MAKE_CONF_FILE, MAKE_DEFAULTS_FILE, \
72           DEPRECATED_PROFILE_FILE, USER_VIRTUALS_FILE, EBUILD_SH_ENV_FILE, \
73           INVALID_ENV_FILE, CUSTOM_MIRRORS_FILE, CONFIG_MEMORY_FILE,\
74           INCREMENTALS, EAPI, MISC_SH_BINARY
75
76         from portage_data import ostype, lchown, userland, secpass, uid, wheelgid, \
77                                  portage_uid, portage_gid, userpriv_groups
78         from portage_manifest import Manifest
79
80         import portage_util
81         from portage_util import atomic_ofstream, apply_secpass_permissions, apply_recursive_permissions, \
82                 dump_traceback, getconfig, grabdict, grabdict_package, grabfile, grabfile_package, \
83                 map_dictlist_vals, new_protect_filename, normalize_path, \
84                 pickle_read, pickle_write, stack_dictlist, stack_dicts, stack_lists, \
85                 unique_array, varexpand, writedict, writemsg, writemsg_stdout, write_atomic
86         import portage_exception
87         import portage_gpg
88         import portage_locks
89         import portage_exec
90         from portage_exec import atexit_register, run_exitfuncs
91         from portage_locks import unlockfile,unlockdir,lockfile,lockdir
92         import portage_checksum
93         from portage_checksum import perform_md5,perform_checksum,prelink_capable
94         import eclass_cache
95         from portage_localization import _
96         from portage_update import dep_transform, fixdbentries, grab_updates, \
97                 parse_updates, update_config_files, update_dbentries
98
99         # Need these functions directly in portage namespace to not break every external tool in existence
100         from portage_versions import best, catpkgsplit, catsplit, pkgcmp, \
101                 pkgsplit, vercmp, ververify
102
103         # endversion and endversion_keys are for backward compatibility only.
104         from portage_versions import endversion_keys
105         from portage_versions import suffix_value as endversion
106
107 except ImportError, e:
108         sys.stderr.write("\n\n")
109         sys.stderr.write("!!! Failed to complete portage imports. There are internal modules for\n")
110         sys.stderr.write("!!! portage and failure here indicates that you have a problem with your\n")
111         sys.stderr.write("!!! installation of portage. Please try a rescue portage located in the\n")
112         sys.stderr.write("!!! portage tree under '/usr/portage/sys-apps/portage/files/' (default).\n")
113         sys.stderr.write("!!! There is a README.RESCUE file that details the steps required to perform\n")
114         sys.stderr.write("!!! a recovery of portage.\n")
115         sys.stderr.write("    "+str(e)+"\n\n")
116         raise
117
118
119 try:
120         import portage_selinux as selinux
121 except OSError, e:
122         writemsg("!!! SELinux not loaded: %s\n" % str(e), noiselevel=-1)
123         del e
124 except ImportError:
125         pass
126
127 # ===========================================================================
128 # END OF IMPORTS -- END OF IMPORTS -- END OF IMPORTS -- END OF IMPORTS -- END
129 # ===========================================================================
130
131
132 def load_mod(name):
133         modname = ".".join(name.split(".")[:-1])
134         mod = __import__(modname)
135         components = name.split('.')
136         for comp in components[1:]:
137                 mod = getattr(mod, comp)
138         return mod
139
140 def best_from_dict(key, top_dict, key_order, EmptyOnError=1, FullCopy=1, AllowEmpty=1):
141         for x in key_order:
142                 if top_dict.has_key(x) and top_dict[x].has_key(key):
143                         if FullCopy:
144                                 return copy.deepcopy(top_dict[x][key])
145                         else:
146                                 return top_dict[x][key]
147         if EmptyOnError:
148                 return ""
149         else:
150                 raise KeyError, "Key not found in list; '%s'" % key
151
152 def getcwd():
153         "this fixes situations where the current directory doesn't exist"
154         try:
155                 return os.getcwd()
156         except OSError: #dir doesn't exist
157                 os.chdir("/")
158                 return "/"
159 getcwd()
160
161 def abssymlink(symlink):
162         "This reads symlinks, resolving the relative symlinks, and returning the absolute."
163         mylink=os.readlink(symlink)
164         if mylink[0] != '/':
165                 mydir=os.path.dirname(symlink)
166                 mylink=mydir+"/"+mylink
167         return os.path.normpath(mylink)
168
169 dircache = {}
170 cacheHit=0
171 cacheMiss=0
172 cacheStale=0
173 def cacheddir(my_original_path, ignorecvs, ignorelist, EmptyOnError, followSymlinks=True):
174         global cacheHit,cacheMiss,cacheStale
175         mypath = normalize_path(my_original_path)
176         if dircache.has_key(mypath):
177                 cacheHit += 1
178                 cached_mtime, list, ftype = dircache[mypath]
179         else:
180                 cacheMiss += 1
181                 cached_mtime, list, ftype = -1, [], []
182         try:
183                 pathstat = os.stat(mypath)
184                 if stat.S_ISDIR(pathstat[stat.ST_MODE]):
185                         mtime = pathstat[stat.ST_MTIME]
186                 else:
187                         raise portage_exception.DirectoryNotFound(mypath)
188         except (IOError,OSError,portage_exception.PortageException):
189                 if EmptyOnError:
190                         return [], []
191                 return None, None
192         # Python retuns mtime in seconds, so if it was changed in the last few seconds, it could be invalid
193         if mtime != cached_mtime or time.time() - mtime < 4:
194                 if dircache.has_key(mypath):
195                         cacheStale += 1
196                 list = os.listdir(mypath)
197                 ftype = []
198                 for x in list:
199                         try:
200                                 if followSymlinks:
201                                         pathstat = os.stat(mypath+"/"+x)
202                                 else:
203                                         pathstat = os.lstat(mypath+"/"+x)
204
205                                 if stat.S_ISREG(pathstat[stat.ST_MODE]):
206                                         ftype.append(0)
207                                 elif stat.S_ISDIR(pathstat[stat.ST_MODE]):
208                                         ftype.append(1)
209                                 elif stat.S_ISLNK(pathstat[stat.ST_MODE]):
210                                         ftype.append(2)
211                                 else:
212                                         ftype.append(3)
213                         except (IOError, OSError):
214                                 ftype.append(3)
215                 dircache[mypath] = mtime, list, ftype
216
217         ret_list = []
218         ret_ftype = []
219         for x in range(0, len(list)):
220                 if(ignorecvs and (len(list[x]) > 2) and (list[x][:2]!=".#")):
221                         ret_list.append(list[x])
222                         ret_ftype.append(ftype[x])
223                 elif (list[x] not in ignorelist):
224                         ret_list.append(list[x])
225                         ret_ftype.append(ftype[x])
226
227         writemsg("cacheddirStats: H:%d/M:%d/S:%d\n" % (cacheHit, cacheMiss, cacheStale),10)
228         return ret_list, ret_ftype
229
230 def listdir(mypath, recursive=False, filesonly=False, ignorecvs=False, ignorelist=[], followSymlinks=True,
231         EmptyOnError=False, dirsonly=False):
232         """
233         Portage-specific implementation of os.listdir
234
235         @param mypath: Path whose contents you wish to list
236         @type mypath: String
237         @param recursive: Recursively scan directories contained within mypath
238         @type recursive: Boolean
239         @param filesonly; Only return files, not more directories
240         @type filesonly: Boolean
241         @param ignorecvs: Ignore CVS directories ('CVS','.svn','SCCS')
242         @type ignorecvs: Boolean
243         @param ignorelist: List of filenames/directories to exclude
244         @type ignorelist: List
245         @param followSymlinks: Follow Symlink'd files and directories
246         @type followSymlinks: Boolean
247         @param EmptyOnError: Return [] if an error occurs.
248         @type EmptyOnError: Boolean
249         @param dirsonly: Only return directories.
250         @type dirsonly: Boolean
251         @rtype: List
252         @returns: A list of files and directories (or just files or just directories) or an empty list.
253         """
254
255         list, ftype = cacheddir(mypath, ignorecvs, ignorelist, EmptyOnError, followSymlinks)
256
257         if list is None:
258                 list=[]
259         if ftype is None:
260                 ftype=[]
261
262         if not (filesonly or dirsonly or recursive):
263                 return list
264
265         if recursive:
266                 x=0
267                 while x<len(ftype):
268                         if ftype[x]==1 and not (ignorecvs and os.path.basename(list[x]) in ('CVS','.svn','SCCS')):
269                                 l,f = cacheddir(mypath+"/"+list[x], ignorecvs, ignorelist, EmptyOnError,
270                                         followSymlinks)
271
272                                 l=l[:]
273                                 for y in range(0,len(l)):
274                                         l[y]=list[x]+"/"+l[y]
275                                 list=list+l
276                                 ftype=ftype+f
277                         x+=1
278         if filesonly:
279                 rlist=[]
280                 for x in range(0,len(ftype)):
281                         if ftype[x]==0:
282                                 rlist=rlist+[list[x]]
283         elif dirsonly:
284                 rlist = []
285                 for x in range(0, len(ftype)):
286                         if ftype[x] == 1:
287                                 rlist = rlist + [list[x]]       
288         else:
289                 rlist=list
290
291         return rlist
292
293 def flatten(mytokens):
294         """this function now turns a [1,[2,3]] list into
295         a [1,2,3] list and returns it."""
296         newlist=[]
297         for x in mytokens:
298                 if type(x)==types.ListType:
299                         newlist.extend(flatten(x))
300                 else:
301                         newlist.append(x)
302         return newlist
303
304 #beautiful directed graph object
305
306 class digraph:
307         def __init__(self):
308                 """Create an empty digraph"""
309                 
310                 # { node : ( { child : priority } , { parent : priority } ) }
311                 self.nodes = {}
312                 self.order = []
313
314         def add(self, node, parent, priority=0):
315                 """Adds the specified node with the specified parent.
316                 
317                 If the dep is a soft-dep and the node already has a hard
318                 relationship to the parent, the relationship is left as hard."""
319                 
320                 if node not in self.nodes:
321                         self.nodes[node] = ({}, {})
322                         self.order.append(node)
323                 
324                 if not parent:
325                         return
326                 
327                 if parent not in self.nodes:
328                         self.nodes[parent] = ({}, {})
329                         self.order.append(parent)
330                 
331                 if parent in self.nodes[node][1]:
332                         if priority > self.nodes[node][1][parent]:
333                                 self.nodes[node][1][parent] = priority
334                 else:
335                         self.nodes[node][1][parent] = priority
336                 
337                 if node in self.nodes[parent][0]:
338                         if priority > self.nodes[parent][0][node]:
339                                 self.nodes[parent][0][node] = priority
340                 else:
341                         self.nodes[parent][0][node] = priority
342
343         def remove(self, node):
344                 """Removes the specified node from the digraph, also removing
345                 and ties to other nodes in the digraph. Raises KeyError if the
346                 node doesn't exist."""
347                 
348                 if node not in self.nodes:
349                         raise KeyError(node)
350                 
351                 for parent in self.nodes[node][1]:
352                         del self.nodes[parent][0][node]
353                 for child in self.nodes[node][0]:
354                         del self.nodes[child][1][node]
355                 
356                 del self.nodes[node]
357                 self.order.remove(node)
358
359         def contains(self, node):
360                 """Checks if the digraph contains mynode"""
361                 return node in self.nodes
362
363         def all_nodes(self):
364                 """Return a list of all nodes in the graph"""
365                 return self.order[:]
366
367         def child_nodes(self, node, ignore_priority=None):
368                 """Return all children of the specified node"""
369                 if ignore_priority is None:
370                         return self.nodes[node][0].keys()
371                 children = []
372                 for child, priority in self.nodes[node][0].iteritems():
373                         if priority > ignore_priority:
374                                 children.append(child)
375                 return children
376
377         def parent_nodes(self, node):
378                 """Return all parents of the specified node"""
379                 return self.nodes[node][1].keys()
380
381         def leaf_nodes(self, ignore_priority=None):
382                 """Return all nodes that have no children
383                 
384                 If ignore_soft_deps is True, soft deps are not counted as
385                 children in calculations."""
386                 
387                 leaf_nodes = []
388                 for node in self.order:
389                         is_leaf_node = True
390                         for child in self.nodes[node][0]:
391                                 if self.nodes[node][0][child] > ignore_priority:
392                                         is_leaf_node = False
393                                         break
394                         if is_leaf_node:
395                                 leaf_nodes.append(node)
396                 return leaf_nodes
397
398         def root_nodes(self, ignore_priority=None):
399                 """Return all nodes that have no parents.
400                 
401                 If ignore_soft_deps is True, soft deps are not counted as
402                 parents in calculations."""
403                 
404                 root_nodes = []
405                 for node in self.order:
406                         is_root_node = True
407                         for parent in self.nodes[node][1]:
408                                 if self.nodes[node][1][parent] > ignore_priority:
409                                         is_root_node = False
410                                         break
411                         if is_root_node:
412                                 root_nodes.append(node)
413                 return root_nodes
414
415         def is_empty(self):
416                 """Checks if the digraph is empty"""
417                 return len(self.nodes) == 0
418
419         def clone(self):
420                 clone = digraph()
421                 clone.nodes = copy.deepcopy(self.nodes)
422                 clone.order = self.order[:]
423                 return clone
424
425         # Backward compatibility
426         addnode = add
427         allnodes = all_nodes
428         allzeros = leaf_nodes
429         hasnode = contains
430         empty = is_empty
431         copy = clone
432
433         def delnode(self, node):
434                 try:
435                         self.remove(node)
436                 except KeyError:
437                         pass
438
439         def firstzero(self):
440                 leaf_nodes = self.leaf_nodes()
441                 if leaf_nodes:
442                         return leaf_nodes[0]
443                 return None
444
445         def hasallzeros(self, ignore_priority=None):
446                 return len(self.leaf_nodes(ignore_priority=ignore_priority)) == \
447                         len(self.order)
448
449         def debug_print(self):
450                 for node in self.nodes:
451                         print node,
452                         if self.nodes[node][0]:
453                                 print "depends on"
454                         else:
455                                 print "(no children)"
456                         for child in self.nodes[node][0]:
457                                 print "  ",child,
458                                 print "(%s)" % self.nodes[node][0][child]
459
460
461 _elog_atexit_handlers = []
462 def elog_process(cpv, mysettings):
463         mylogfiles = listdir(mysettings["T"]+"/logging/")
464         # shortcut for packages without any messages
465         if len(mylogfiles) == 0:
466                 return
467         # exploit listdir() file order so we process log entries in chronological order
468         mylogfiles.reverse()
469         mylogentries = {}
470         my_elog_classes = set(mysettings.get("PORTAGE_ELOG_CLASSES", "").split())
471         for f in mylogfiles:
472                 msgfunction, msgtype = f.split(".")
473                 if msgtype.upper() not in my_elog_classes \
474                                 and msgtype.lower() not in my_elog_classes:
475                         continue
476                 if msgfunction not in portage_const.EBUILD_PHASES:
477                         writemsg("!!! can't process invalid log file: %s\n" % f,
478                                 noiselevel=-1)
479                         continue
480                 if not msgfunction in mylogentries:
481                         mylogentries[msgfunction] = []
482                 msgcontent = open(mysettings["T"]+"/logging/"+f, "r").readlines()
483                 mylogentries[msgfunction].append((msgtype, msgcontent))
484
485         # in case the filters matched all messages
486         if len(mylogentries) == 0:
487                 return
488
489         # generate a single string with all log messages
490         fulllog = ""
491         for phase in portage_const.EBUILD_PHASES:
492                 if not phase in mylogentries:
493                         continue
494                 for msgtype,msgcontent in mylogentries[phase]:
495                         fulllog += "%s: %s\n" % (msgtype, phase)
496                         for line in msgcontent:
497                                 fulllog += line
498                         fulllog += "\n"
499
500         # pass the processing to the individual modules
501         logsystems = mysettings["PORTAGE_ELOG_SYSTEM"].split()
502         for s in logsystems:
503                 # - is nicer than _ for module names, so allow people to use it.
504                 s = s.replace("-", "_")
505                 try:
506                         # FIXME: ugly ad.hoc import code
507                         # TODO:  implement a common portage module loader
508                         logmodule = __import__("elog_modules.mod_"+s)
509                         m = getattr(logmodule, "mod_"+s)
510                         def timeout_handler(signum, frame):
511                                 raise portage_exception.PortageException(
512                                         "Timeout in elog_process for system '%s'" % s)
513                         import signal
514                         signal.signal(signal.SIGALRM, timeout_handler)
515                         # Timeout after one minute (in case something like the mail
516                         # module gets hung).
517                         signal.alarm(60)
518                         try:
519                                 m.process(mysettings, cpv, mylogentries, fulllog)
520                         finally:
521                                 signal.alarm(0)
522                         if hasattr(m, "finalize") and not m.finalize in _elog_atexit_handlers:
523                                 _elog_atexit_handlers.append(m.finalize)
524                                 atexit_register(m.finalize, mysettings)
525                 except (ImportError, AttributeError), e:
526                         writemsg("!!! Error while importing logging modules " + \
527                                 "while loading \"mod_%s\":\n" % str(s))
528                         writemsg("%s\n" % str(e), noiselevel=-1)
529                 except portage_exception.PortageException, e:
530                         writemsg("%s\n" % str(e), noiselevel=-1)
531
532         # clean logfiles to avoid repetitions
533         for f in mylogfiles:
534                 try:
535                         os.unlink(os.path.join(mysettings["T"], "logging", f))
536                 except OSError:
537                         pass
538
539 #parse /etc/env.d and generate /etc/profile.env
540
541 def env_update(makelinks=1, target_root=None, prev_mtimes=None, contents=None):
542         if target_root is None:
543                 global root
544                 target_root = root
545         if prev_mtimes is None:
546                 global mtimedb
547                 prev_mtimes = mtimedb["ldpath"]
548         envd_dir = os.path.join(target_root, "etc", "env.d")
549         portage_util.ensure_dirs(envd_dir, mode=0755)
550         fns = listdir(envd_dir, EmptyOnError=1)
551         fns.sort()
552         templist = []
553         for x in fns:
554                 if len(x) < 3:
555                         continue
556                 if not x[0].isdigit() or not x[1].isdigit():
557                         continue
558                 if x.startswith(".") or x.endswith("~") or x.endswith(".bak"):
559                         continue
560                 templist.append(x)
561         fns = templist
562         del templist
563
564         space_separated = set(["CONFIG_PROTECT", "CONFIG_PROTECT_MASK"])
565         colon_separated = set(["ADA_INCLUDE_PATH", "ADA_OBJECTS_PATH",
566                 "CLASSPATH", "INFODIR", "INFOPATH", "KDEDIRS", "LDPATH", "MANPATH",
567                   "PATH", "PKG_CONFIG_PATH", "PRELINK_PATH", "PRELINK_PATH_MASK",
568                   "PYTHONPATH", "ROOTPATH"])
569
570         config_list = []
571
572         for x in fns:
573                 file_path = os.path.join(envd_dir, x)
574                 try:
575                         myconfig = getconfig(file_path, expand=False)
576                 except portage_exception.ParseError, e:
577                         writemsg("!!! '%s'\n" % str(e), noiselevel=-1)
578                         del e
579                         continue
580                 if myconfig is None:
581                         # broken symlink or file removed by a concurrent process
582                         writemsg("!!! File Not Found: '%s'\n" % file_path, noiselevel=-1)
583                         continue
584                 config_list.append(myconfig)
585                 if "SPACE_SEPARATED" in myconfig:
586                         space_separated.update(myconfig["SPACE_SEPARATED"].split())
587                         del myconfig["SPACE_SEPARATED"]
588                 if "COLON_SEPARATED" in myconfig:
589                         colon_separated.update(myconfig["COLON_SEPARATED"].split())
590                         del myconfig["COLON_SEPARATED"]
591
592         env = {}
593         specials = {}
594         for var in space_separated:
595                 mylist = []
596                 for myconfig in config_list:
597                         if var in myconfig:
598                                 mylist.extend(filter(None, myconfig[var].split()))
599                                 del myconfig[var] # prepare for env.update(myconfig)
600                 if mylist:
601                         env[var] = " ".join(mylist)
602                 specials[var] = mylist
603
604         for var in colon_separated:
605                 mylist = []
606                 for myconfig in config_list:
607                         if var in myconfig:
608                                 mylist.extend(filter(None, myconfig[var].split(":")))
609                                 del myconfig[var] # prepare for env.update(myconfig)
610                 if mylist:
611                         env[var] = ":".join(mylist)
612                 specials[var] = mylist
613
614         for myconfig in config_list:
615                 """Cumulative variables have already been deleted from myconfig so that
616                 they won't be overwritten by this dict.update call."""
617                 env.update(myconfig)
618
619         ldsoconf_path = os.path.join(target_root, "etc", "ld.so.conf")
620         try:
621                 myld = open(ldsoconf_path)
622                 myldlines=myld.readlines()
623                 myld.close()
624                 oldld=[]
625                 for x in myldlines:
626                         #each line has at least one char (a newline)
627                         if x[0]=="#":
628                                 continue
629                         oldld.append(x[:-1])
630         except (IOError, OSError), e:
631                 if e.errno != errno.ENOENT:
632                         raise
633                 oldld = None
634
635         ld_cache_update=False
636
637         newld = specials["LDPATH"]
638         if (oldld!=newld):
639                 #ld.so.conf needs updating and ldconfig needs to be run
640                 myfd = atomic_ofstream(ldsoconf_path)
641                 myfd.write("# ld.so.conf autogenerated by env-update; make all changes to\n")
642                 myfd.write("# contents of /etc/env.d directory\n")
643                 for x in specials["LDPATH"]:
644                         myfd.write(x+"\n")
645                 myfd.close()
646                 ld_cache_update=True
647
648         # Update prelink.conf if we are prelink-enabled
649         if prelink_capable:
650                 newprelink = atomic_ofstream(
651                         os.path.join(target_root, "etc", "prelink.conf"))
652                 newprelink.write("# prelink.conf autogenerated by env-update; make all changes to\n")
653                 newprelink.write("# contents of /etc/env.d directory\n")
654
655                 for x in ["/bin","/sbin","/usr/bin","/usr/sbin","/lib","/usr/lib"]:
656                         newprelink.write("-l "+x+"\n");
657                 for x in specials["LDPATH"]+specials["PATH"]+specials["PRELINK_PATH"]:
658                         if not x:
659                                 continue
660                         if x[-1]!='/':
661                                 x=x+"/"
662                         plmasked=0
663                         for y in specials["PRELINK_PATH_MASK"]:
664                                 if not y:
665                                         continue
666                                 if y[-1]!='/':
667                                         y=y+"/"
668                                 if y==x[0:len(y)]:
669                                         plmasked=1
670                                         break
671                         if not plmasked:
672                                 newprelink.write("-h "+x+"\n")
673                 for x in specials["PRELINK_PATH_MASK"]:
674                         newprelink.write("-b "+x+"\n")
675                 newprelink.close()
676
677         # Portage stores mtimes with 1 second granularity but in >=python-2.5 finer
678         # granularity is possible.  In order to avoid the potential ambiguity of
679         # mtimes that differ by less than 1 second, sleep here if any of the
680         # directories have been modified during the current second.
681         sleep_for_mtime_granularity = False
682         current_time = long(time.time())
683         mtime_changed = False
684         lib_dirs = set()
685         for lib_dir in portage_util.unique_array(specials["LDPATH"]+['usr/lib','usr/lib64','usr/lib32','lib','lib64','lib32']):
686                 x = os.path.join(target_root, lib_dir.lstrip(os.sep))
687                 try:
688                         newldpathtime = long(os.stat(x).st_mtime)
689                         lib_dirs.add(normalize_path(x))
690                 except OSError, oe:
691                         if oe.errno == errno.ENOENT:
692                                 try:
693                                         del prev_mtimes[x]
694                                 except KeyError:
695                                         pass
696                                 # ignore this path because it doesn't exist
697                                 continue
698                         raise
699                 if newldpathtime == current_time:
700                         sleep_for_mtime_granularity = True
701                 if x in prev_mtimes:
702                         if prev_mtimes[x] == newldpathtime:
703                                 pass
704                         else:
705                                 prev_mtimes[x] = newldpathtime
706                                 mtime_changed = True
707                 else:
708                         prev_mtimes[x] = newldpathtime
709                         mtime_changed = True
710
711         if mtime_changed:
712                 ld_cache_update = True
713
714         if makelinks and \
715                 not ld_cache_update and \
716                 contents is not None:
717                 libdir_contents_changed = False
718                 for mypath, mydata in contents.iteritems():
719                         if mydata[0] not in ("obj","sym"):
720                                 continue
721                         head, tail = os.path.split(mypath)
722                         if head in lib_dirs:
723                                 libdir_contents_changed = True
724                                 break
725                 if not libdir_contents_changed:
726                         makelinks = False
727
728         # Only run ldconfig as needed
729         if (ld_cache_update or makelinks):
730                 # ldconfig has very different behaviour between FreeBSD and Linux
731                 if ostype=="Linux" or ostype.lower().endswith("gnu"):
732                         # We can't update links if we haven't cleaned other versions first, as
733                         # an older package installed ON TOP of a newer version will cause ldconfig
734                         # to overwrite the symlinks we just made. -X means no links. After 'clean'
735                         # we can safely create links.
736                         writemsg(">>> Regenerating %setc/ld.so.cache...\n" % target_root)
737                         if makelinks:
738                                 commands.getstatusoutput("cd / ; /sbin/ldconfig -r '%s'" % target_root)
739                         else:
740                                 commands.getstatusoutput("cd / ; /sbin/ldconfig -X -r '%s'" % target_root)
741                 elif ostype in ("FreeBSD","DragonFly"):
742                         writemsg(">>> Regenerating %svar/run/ld-elf.so.hints...\n" % target_root)
743                         commands.getstatusoutput(
744                                 "cd / ; /sbin/ldconfig -elf -i -f '%svar/run/ld-elf.so.hints' '%setc/ld.so.conf'" % \
745                                 (target_root, target_root))
746
747         del specials["LDPATH"]
748
749         penvnotice  = "# THIS FILE IS AUTOMATICALLY GENERATED BY env-update.\n"
750         penvnotice += "# DO NOT EDIT THIS FILE. CHANGES TO STARTUP PROFILES\n"
751         cenvnotice  = penvnotice[:]
752         penvnotice += "# GO INTO /etc/profile NOT /etc/profile.env\n\n"
753         cenvnotice += "# GO INTO /etc/csh.cshrc NOT /etc/csh.env\n\n"
754
755         #create /etc/profile.env for bash support
756         outfile = atomic_ofstream(os.path.join(target_root, "etc", "profile.env"))
757         outfile.write(penvnotice)
758
759         env_keys = [ x for x in env if x != "LDPATH" ]
760         env_keys.sort()
761         for x in env_keys:
762                 outfile.write("export %s='%s'\n" % (x, env[x]))
763         outfile.close()
764
765         #create /etc/csh.env for (t)csh support
766         outfile = atomic_ofstream(os.path.join(target_root, "etc", "csh.env"))
767         outfile.write(cenvnotice)
768         for x in env_keys:
769                 outfile.write("setenv %s '%s'\n" % (x, env[x]))
770         outfile.close()
771
772         if sleep_for_mtime_granularity:
773                 while current_time == long(time.time()):
774                         sleep(1)
775
776 def ExtractKernelVersion(base_dir):
777         """
778         Try to figure out what kernel version we are running
779         @param base_dir: Path to sources (usually /usr/src/linux)
780         @type base_dir: string
781         @rtype: tuple( version[string], error[string])
782         @returns:
783         1. tuple( version[string], error[string])
784         Either version or error is populated (but never both)
785
786         """
787         lines = []
788         pathname = os.path.join(base_dir, 'Makefile')
789         try:
790                 f = open(pathname, 'r')
791         except OSError, details:
792                 return (None, str(details))
793         except IOError, details:
794                 return (None, str(details))
795
796         try:
797                 for i in range(4):
798                         lines.append(f.readline())
799         except OSError, details:
800                 return (None, str(details))
801         except IOError, details:
802                 return (None, str(details))
803
804         lines = [l.strip() for l in lines]
805
806         version = ''
807
808         #XXX: The following code relies on the ordering of vars within the Makefile
809         for line in lines:
810                 # split on the '=' then remove annoying whitespace
811                 items = line.split("=")
812                 items = [i.strip() for i in items]
813                 if items[0] == 'VERSION' or \
814                         items[0] == 'PATCHLEVEL':
815                         version += items[1]
816                         version += "."
817                 elif items[0] == 'SUBLEVEL':
818                         version += items[1]
819                 elif items[0] == 'EXTRAVERSION' and \
820                         items[-1] != items[0]:
821                         version += items[1]
822
823         # Grab a list of files named localversion* and sort them
824         localversions = os.listdir(base_dir)
825         for x in range(len(localversions)-1,-1,-1):
826                 if localversions[x][:12] != "localversion":
827                         del localversions[x]
828         localversions.sort()
829
830         # Append the contents of each to the version string, stripping ALL whitespace
831         for lv in localversions:
832                 version += "".join( " ".join( grabfile( base_dir+ "/" + lv ) ).split() )
833
834         # Check the .config for a CONFIG_LOCALVERSION and append that too, also stripping whitespace
835         kernelconfig = getconfig(base_dir+"/.config")
836         if kernelconfig and kernelconfig.has_key("CONFIG_LOCALVERSION"):
837                 version += "".join(kernelconfig["CONFIG_LOCALVERSION"].split())
838
839         return (version,None)
840
841 def autouse(myvartree, use_cache=1, mysettings=None):
842         """
843         autuse returns a list of USE variables auto-enabled to packages being installed
844
845         @param myvartree: Instance of the vartree class (from /var/db/pkg...)
846         @type myvartree: vartree
847         @param use_cache: read values from cache
848         @type use_cache: Boolean
849         @param mysettings: Instance of config
850         @type mysettings: config
851         @rtype: string
852         @returns: A string containing a list of USE variables that are enabled via use.defaults
853         """
854         if mysettings is None:
855                 global settings
856                 mysettings = settings
857         if mysettings.profile_path is None:
858                 return ""
859         myusevars=""
860         usedefaults = mysettings.use_defs
861         for myuse in usedefaults:
862                 dep_met = True
863                 for mydep in usedefaults[myuse]:
864                         if not myvartree.dep_match(mydep,use_cache=True):
865                                 dep_met = False
866                                 break
867                 if dep_met:
868                         myusevars += " "+myuse
869         return myusevars
870
871 def check_config_instance(test):
872         if not test or (str(test.__class__) != 'portage.config'):
873                 raise TypeError, "Invalid type for config object: %s" % test.__class__
874
875 class config:
876         """
877         This class encompasses the main portage configuration.  Data is pulled from
878         ROOT/PORTDIR/profiles/, from ROOT/etc/make.profile incrementally through all 
879         parent profiles as well as from ROOT/PORTAGE_CONFIGROOT/* for user specified
880         overrides.
881         
882         Generally if you need data like USE flags, FEATURES, environment variables,
883         virtuals ...etc you look in here.
884         """
885         
886         def __init__(self, clone=None, mycpv=None, config_profile_path=None,
887                 config_incrementals=None, config_root=None, target_root=None,
888                 local_config=True):
889                 """
890                 @param clone: If provided, init will use deepcopy to copy by value the instance.
891                 @type clone: Instance of config class.
892                 @param mycpv: CPV to load up (see setcpv), this is the same as calling init with mycpv=None
893                 and then calling instance.setcpv(mycpv).
894                 @type mycpv: String
895                 @param config_profile_path: Configurable path to the profile (usually PROFILE_PATH from portage_const)
896                 @type config_profile_path: String
897                 @param config_incrementals: List of incremental variables (usually portage_const.INCREMENTALS)
898                 @type config_incrementals: List
899                 @param config_root: path to read local config from (defaults to "/", see PORTAGE_CONFIGROOT)
900                 @type config_root: String
901                 @param target_root: __init__ override of $ROOT env variable.
902                 @type target_root: String
903                 @param local_config: Enables loading of local config (/etc/portage); used most by repoman to
904                 ignore local config (keywording and unmasking)
905                 @type local_config: Boolean
906                 """
907
908                 debug = os.environ.get("PORTAGE_DEBUG") == "1"
909
910                 self.already_in_regenerate = 0
911
912                 self.locked   = 0
913                 self.mycpv    = None
914                 self.puse     = []
915                 self.modifiedkeys = []
916                 self.uvlist = []
917
918                 self.virtuals = {}
919                 self.virts_p = {}
920                 self.dirVirtuals = None
921                 self.v_count  = 0
922
923                 # Virtuals obtained from the vartree
924                 self.treeVirtuals = {}
925                 # Virtuals by user specification. Includes negatives.
926                 self.userVirtuals = {}
927                 # Virtual negatives from user specifications.
928                 self.negVirtuals  = {}
929
930                 self.user_profile_dir = None
931                 self.local_config = local_config
932
933                 if clone:
934                         self.incrementals = copy.deepcopy(clone.incrementals)
935                         self.profile_path = copy.deepcopy(clone.profile_path)
936                         self.user_profile_dir = copy.deepcopy(clone.user_profile_dir)
937                         self.local_config = copy.deepcopy(clone.local_config)
938
939                         self.module_priority = copy.deepcopy(clone.module_priority)
940                         self.modules         = copy.deepcopy(clone.modules)
941
942                         self.depcachedir = copy.deepcopy(clone.depcachedir)
943
944                         self.packages = copy.deepcopy(clone.packages)
945                         self.virtuals = copy.deepcopy(clone.virtuals)
946
947                         self.treeVirtuals = copy.deepcopy(clone.treeVirtuals)
948                         self.userVirtuals = copy.deepcopy(clone.userVirtuals)
949                         self.negVirtuals  = copy.deepcopy(clone.negVirtuals)
950
951                         self.use_defs = copy.deepcopy(clone.use_defs)
952                         self.usemask  = copy.deepcopy(clone.usemask)
953                         self.usemask_list = copy.deepcopy(clone.usemask_list)
954                         self.pusemask_list = copy.deepcopy(clone.pusemask_list)
955                         self.useforce      = copy.deepcopy(clone.useforce)
956                         self.useforce_list = copy.deepcopy(clone.useforce_list)
957                         self.puseforce_list = copy.deepcopy(clone.puseforce_list)
958                         self.puse     = copy.deepcopy(clone.puse)
959                         self.make_defaults_use = copy.deepcopy(clone.make_defaults_use)
960                         self.pkgprofileuse = copy.deepcopy(clone.pkgprofileuse)
961                         self.mycpv    = copy.deepcopy(clone.mycpv)
962
963                         self.configlist = copy.deepcopy(clone.configlist)
964                         self.lookuplist = self.configlist[:]
965                         self.lookuplist.reverse()
966                         self.configdict = {
967                                 "env.d":     self.configlist[0],
968                                 "pkginternal": self.configlist[1],
969                                 "globals":     self.configlist[2],
970                                 "defaults":    self.configlist[3],
971                                 "conf":        self.configlist[4],
972                                 "pkg":         self.configlist[5],
973                                 "auto":        self.configlist[6],
974                                 "backupenv":   self.configlist[7],
975                                 "env":         self.configlist[8] }
976                         self.profiles = copy.deepcopy(clone.profiles)
977                         self.backupenv  = self.configdict["backupenv"]
978                         self.pusedict   = copy.deepcopy(clone.pusedict)
979                         self.categories = copy.deepcopy(clone.categories)
980                         self.pkeywordsdict = copy.deepcopy(clone.pkeywordsdict)
981                         self.pmaskdict = copy.deepcopy(clone.pmaskdict)
982                         self.punmaskdict = copy.deepcopy(clone.punmaskdict)
983                         self.prevmaskdict = copy.deepcopy(clone.prevmaskdict)
984                         self.pprovideddict = copy.deepcopy(clone.pprovideddict)
985                         self.dirVirtuals = copy.deepcopy(clone.dirVirtuals)
986                         self.treeVirtuals = copy.deepcopy(clone.treeVirtuals)
987                         self.features = copy.deepcopy(clone.features)
988                 else:
989
990                         # backupenv is for calculated incremental variables.
991                         self.backupenv = os.environ.copy()
992                         if not local_config:
993                                 # Clean up pollution from portage_data so that it doesn't
994                                 # interfere with repoman.
995                                 self.backupenv.pop("USERLAND", None)
996
997                         def check_var_directory(varname, var):
998                                 if not os.path.isdir(var):
999                                         writemsg(("!!! Error: %s='%s' is not a directory. " + \
1000                                                 "Please correct this.\n") % (varname, var),
1001                                                 noiselevel=-1)
1002                                         raise portage_exception.DirectoryNotFound(var)
1003
1004                         if config_root is None:
1005                                 config_root = "/"
1006
1007                         config_root = normalize_path(os.path.abspath(
1008                                 config_root)).rstrip(os.path.sep) + os.path.sep
1009
1010                         check_var_directory("PORTAGE_CONFIGROOT", config_root)
1011
1012                         self.depcachedir = DEPCACHE_PATH
1013
1014                         if not config_profile_path:
1015                                 config_profile_path = \
1016                                         os.path.join(config_root, PROFILE_PATH.lstrip(os.path.sep))
1017                                 if os.path.isdir(config_profile_path):
1018                                         self.profile_path = config_profile_path
1019                                 else:
1020                                         self.profile_path = None
1021                         else:
1022                                 self.profile_path = config_profile_path[:]
1023
1024                         if not config_incrementals:
1025                                 writemsg("incrementals not specified to class config\n")
1026                                 self.incrementals = copy.deepcopy(portage_const.INCREMENTALS)
1027                         else:
1028                                 self.incrementals = copy.deepcopy(config_incrementals)
1029
1030                         self.module_priority    = ["user","default"]
1031                         self.modules            = {}
1032                         self.modules["user"] = getconfig(
1033                                 os.path.join(config_root, MODULES_FILE_PATH.lstrip(os.path.sep)))
1034                         if self.modules["user"] is None:
1035                                 self.modules["user"] = {}
1036                         self.modules["default"] = {
1037                                 "portdbapi.metadbmodule": "cache.metadata.database",
1038                                 "portdbapi.auxdbmodule":  "cache.flat_hash.database",
1039                         }
1040
1041                         self.usemask=[]
1042                         self.configlist=[]
1043
1044                         # back up our incremental variables:
1045                         self.configdict={}
1046                         # configlist will contain: [ env.d, globals, defaults, conf, pkg, auto, backupenv, env ]
1047                         self.configlist.append({})
1048                         self.configdict["env.d"] = self.configlist[-1]
1049
1050                         self.configlist.append({})
1051                         self.configdict["pkginternal"] = self.configlist[-1]
1052
1053                         # The symlink might not exist or might not be a symlink.
1054                         if self.profile_path is None:
1055                                 self.profiles = []
1056                         else:
1057                                 self.profiles = []
1058                                 def addProfile(currentPath):
1059                                         parentsFile = os.path.join(currentPath, "parent")
1060                                         if os.path.exists(parentsFile):
1061                                                 parents = grabfile(parentsFile)
1062                                                 if not parents:
1063                                                         raise portage_exception.ParseError(
1064                                                                 "Empty parent file: '%s'" % parents_file)
1065                                                 for parentPath in parents:
1066                                                         parentPath = normalize_path(os.path.join(
1067                                                                 currentPath, parentPath))
1068                                                         if os.path.exists(parentPath):
1069                                                                 addProfile(parentPath)
1070                                                         else:
1071                                                                 raise portage_exception.ParseError(
1072                                                                         "Parent '%s' not found: '%s'" %  \
1073                                                                         (parentPath, parentsFile))
1074                                         self.profiles.append(currentPath)
1075                                 addProfile(os.path.realpath(self.profile_path))
1076                         if local_config:
1077                                 custom_prof = os.path.join(
1078                                         config_root, CUSTOM_PROFILE_PATH.lstrip(os.path.sep))
1079                                 if os.path.exists(custom_prof):
1080                                         self.user_profile_dir = custom_prof
1081                                         self.profiles.append(custom_prof)
1082                                 del custom_prof
1083
1084                         self.packages_list = [grabfile_package(os.path.join(x, "packages")) for x in self.profiles]
1085                         self.packages      = stack_lists(self.packages_list, incremental=1)
1086                         del self.packages_list
1087                         #self.packages = grab_stacked("packages", self.profiles, grabfile, incremental_lines=1)
1088
1089                         # revmaskdict
1090                         self.prevmaskdict={}
1091                         for x in self.packages:
1092                                 mycatpkg=dep_getkey(x)
1093                                 if not self.prevmaskdict.has_key(mycatpkg):
1094                                         self.prevmaskdict[mycatpkg]=[x]
1095                                 else:
1096                                         self.prevmaskdict[mycatpkg].append(x)
1097
1098                         # get profile-masked use flags -- INCREMENTAL Child over parent
1099                         self.usemask_list = [grabfile(os.path.join(x, "use.mask")) \
1100                                 for x in self.profiles]
1101                         self.usemask  = set(stack_lists(
1102                                 self.usemask_list, incremental=True))
1103                         use_defs_lists = [grabdict(os.path.join(x, "use.defaults")) for x in self.profiles]
1104                         self.use_defs  = stack_dictlist(use_defs_lists, incremental=True)
1105                         del use_defs_lists
1106
1107                         self.pusemask_list = []
1108                         rawpusemask = [grabdict_package(
1109                                 os.path.join(x, "package.use.mask")) \
1110                                 for x in self.profiles]
1111                         for i in xrange(len(self.profiles)):
1112                                 cpdict = {}
1113                                 for k, v in rawpusemask[i].iteritems():
1114                                         cpdict.setdefault(dep_getkey(k), {})[k] = v
1115                                 self.pusemask_list.append(cpdict)
1116                         del rawpusemask
1117
1118                         self.pkgprofileuse = []
1119                         rawprofileuse = [grabdict_package(
1120                                 os.path.join(x, "package.use"), juststrings=True) \
1121                                 for x in self.profiles]
1122                         for i in xrange(len(self.profiles)):
1123                                 cpdict = {}
1124                                 for k, v in rawprofileuse[i].iteritems():
1125                                         cpdict.setdefault(dep_getkey(k), {})[k] = v
1126                                 self.pkgprofileuse.append(cpdict)
1127                         del rawprofileuse
1128
1129                         self.useforce_list = [grabfile(os.path.join(x, "use.force")) \
1130                                 for x in self.profiles]
1131                         self.useforce  = set(stack_lists(
1132                                 self.useforce_list, incremental=True))
1133
1134                         self.puseforce_list = []
1135                         rawpuseforce = [grabdict_package(
1136                                 os.path.join(x, "package.use.force")) \
1137                                 for x in self.profiles]
1138                         for i in xrange(len(self.profiles)):
1139                                 cpdict = {}
1140                                 for k, v in rawpuseforce[i].iteritems():
1141                                         cpdict.setdefault(dep_getkey(k), {})[k] = v
1142                                 self.puseforce_list.append(cpdict)
1143                         del rawpuseforce
1144
1145                         try:
1146                                 self.mygcfg   = getconfig(os.path.join(config_root, "etc", "make.globals"))
1147
1148                                 if self.mygcfg is None:
1149                                         self.mygcfg = {}
1150                         except SystemExit, e:
1151                                 raise
1152                         except Exception, e:
1153                                 if debug:
1154                                         raise
1155                                 writemsg("!!! %s\n" % (e), noiselevel=-1)
1156                                 if not isinstance(e, EnvironmentError):
1157                                         writemsg("!!! Incorrect multiline literals can cause " + \
1158                                                 "this. Do not use them.\n", noiselevel=-1)
1159                                 sys.exit(1)
1160                         self.configlist.append(self.mygcfg)
1161                         self.configdict["globals"]=self.configlist[-1]
1162
1163                         self.make_defaults_use = []
1164                         self.mygcfg = {}
1165                         if self.profiles:
1166                                 try:
1167                                         mygcfg_dlists = [getconfig(os.path.join(x, "make.defaults")) for x in self.profiles]
1168                                         for cfg in mygcfg_dlists:
1169                                                 if cfg:
1170                                                         self.make_defaults_use.append(cfg.get("USE", ""))
1171                                                 else:
1172                                                         self.make_defaults_use.append("")
1173                                         self.mygcfg   = stack_dicts(mygcfg_dlists, incrementals=portage_const.INCREMENTALS, ignore_none=1)
1174                                         #self.mygcfg = grab_stacked("make.defaults", self.profiles, getconfig)
1175                                         if self.mygcfg is None:
1176                                                 self.mygcfg = {}
1177                                 except SystemExit, e:
1178                                         raise
1179                                 except Exception, e:
1180                                         if debug:
1181                                                 raise
1182                                         writemsg("!!! %s\n" % (e), noiselevel=-1)
1183                                         if not isinstance(e, EnvironmentError):
1184                                                 writemsg("!!! 'rm -Rf /usr/portage/profiles; " + \
1185                                                         "emerge sync' may fix this. If it does\n",
1186                                                         noiselevel=-1)
1187                                                 writemsg("!!! not then please report this to " + \
1188                                                         "bugs.gentoo.org and, if possible, a dev\n",
1189                                                                 noiselevel=-1)
1190                                                 writemsg("!!! on #gentoo (irc.freenode.org)\n",
1191                                                         noiselevel=-1)
1192                                         sys.exit(1)
1193                         self.configlist.append(self.mygcfg)
1194                         self.configdict["defaults"]=self.configlist[-1]
1195
1196                         try:
1197                                 self.mygcfg = getconfig(
1198                                         os.path.join(config_root, MAKE_CONF_FILE.lstrip(os.path.sep)),
1199                                         allow_sourcing=True)
1200                                 if self.mygcfg is None:
1201                                         self.mygcfg = {}
1202                         except SystemExit, e:
1203                                 raise
1204                         except Exception, e:
1205                                 if debug:
1206                                         raise
1207                                 writemsg("!!! %s\n" % (e), noiselevel=-1)
1208                                 if not isinstance(e, EnvironmentError):
1209                                         writemsg("!!! Incorrect multiline literals can cause " + \
1210                                                 "this. Do not use them.\n", noiselevel=-1)
1211                                 sys.exit(1)
1212
1213                         # Allow ROOT setting to come from make.conf if it's not overridden
1214                         # by the constructor argument (from the calling environment).  As a
1215                         # special exception for a very common use case, config_root == "/"
1216                         # implies that ROOT in make.conf should be ignored.  That way, the
1217                         # user can chroot into $ROOT and the ROOT setting in make.conf will
1218                         # be automatically ignored (unless config_root is other than "/").
1219                         if config_root != "/" and \
1220                                 target_root is None and "ROOT" in self.mygcfg:
1221                                 target_root = self.mygcfg["ROOT"]
1222                         
1223                         self.configlist.append(self.mygcfg)
1224                         self.configdict["conf"]=self.configlist[-1]
1225
1226                         self.configlist.append({})
1227                         self.configdict["pkg"]=self.configlist[-1]
1228
1229                         #auto-use:
1230                         self.configlist.append({})
1231                         self.configdict["auto"]=self.configlist[-1]
1232
1233                         self.configlist.append(self.backupenv) # XXX Why though?
1234                         self.configdict["backupenv"]=self.configlist[-1]
1235
1236                         self.configlist.append(os.environ.copy())
1237                         self.configdict["env"]=self.configlist[-1]
1238                         if not local_config:
1239                                 # Clean up pollution from portage_data so that it doesn't
1240                                 # interfere with repoman.
1241                                 self.configdict["env"].pop("USERLAND", None)
1242
1243                         # make lookuplist for loading package.*
1244                         self.lookuplist=self.configlist[:]
1245                         self.lookuplist.reverse()
1246
1247                         # Blacklist vars that could interfere with portage internals.
1248                         for blacklisted in "CATEGORY", "PKGUSE", "PORTAGE_CONFIGROOT", \
1249                                 "ROOT":
1250                                 for cfg in self.lookuplist:
1251                                         cfg.pop(blacklisted, None)
1252                         del blacklisted, cfg
1253
1254                         if target_root is None:
1255                                 target_root = "/"
1256
1257                         target_root = normalize_path(os.path.abspath(
1258                                 target_root)).rstrip(os.path.sep) + os.path.sep
1259
1260                         check_var_directory("ROOT", target_root)
1261
1262                         env_d = getconfig(
1263                                 os.path.join(target_root, "etc", "profile.env"), expand=False)
1264                         # env_d will be None if profile.env doesn't exist.
1265                         if env_d:
1266                                 self.configdict["env.d"].update(env_d)
1267                                 # Remove duplicate values so they don't override updated
1268                                 # profile.env values later (profile.env is reloaded in each
1269                                 # call to self.regenerate).
1270                                 for cfg in (self.configdict["backupenv"],
1271                                         self.configdict["env"]):
1272                                         for k, v in env_d.iteritems():
1273                                                 try:
1274                                                         if cfg[k] == v:
1275                                                                 del cfg[k]
1276                                                 except KeyError:
1277                                                         pass
1278                                 del cfg, k, v
1279
1280                         self["PORTAGE_CONFIGROOT"] = config_root
1281                         self.backup_changes("PORTAGE_CONFIGROOT")
1282                         self["ROOT"] = target_root
1283                         self.backup_changes("ROOT")
1284
1285                         self.pusedict = {}
1286                         self.pkeywordsdict = {}
1287                         self.punmaskdict = {}
1288                         abs_user_config = os.path.join(config_root,
1289                                 USER_CONFIG_PATH.lstrip(os.path.sep))
1290
1291                         # locations for "categories" and "arch.list" files
1292                         locations = [os.path.join(self["PORTDIR"], "profiles")]
1293                         pmask_locations = [os.path.join(self["PORTDIR"], "profiles")]
1294                         pmask_locations.extend(self.profiles)
1295
1296                         """ repoman controls PORTDIR_OVERLAY via the environment, so no
1297                         special cases are needed here."""
1298                         overlay_profiles = []
1299                         for ov in self["PORTDIR_OVERLAY"].split():
1300                                 ov = normalize_path(ov)
1301                                 profiles_dir = os.path.join(ov, "profiles")
1302                                 if os.path.isdir(profiles_dir):
1303                                         overlay_profiles.append(profiles_dir)
1304                         locations += overlay_profiles
1305                         
1306                         pmask_locations.extend(overlay_profiles)
1307
1308                         if local_config:
1309                                 locations.append(abs_user_config)
1310                                 pmask_locations.append(abs_user_config)
1311                                 pusedict = grabdict_package(
1312                                         os.path.join(abs_user_config, "package.use"), recursive=1)
1313                                 for key in pusedict.keys():
1314                                         cp = dep_getkey(key)
1315                                         if not self.pusedict.has_key(cp):
1316                                                 self.pusedict[cp] = {}
1317                                         self.pusedict[cp][key] = pusedict[key]
1318
1319                                 #package.keywords
1320                                 pkgdict = grabdict_package(
1321                                         os.path.join(abs_user_config, "package.keywords"),
1322                                         recursive=1)
1323                                 for key in pkgdict.keys():
1324                                         # default to ~arch if no specific keyword is given
1325                                         if not pkgdict[key]:
1326                                                 mykeywordlist = []
1327                                                 if self.configdict["defaults"] and self.configdict["defaults"].has_key("ACCEPT_KEYWORDS"):
1328                                                         groups = self.configdict["defaults"]["ACCEPT_KEYWORDS"].split()
1329                                                 else:
1330                                                         groups = []
1331                                                 for keyword in groups:
1332                                                         if not keyword[0] in "~-":
1333                                                                 mykeywordlist.append("~"+keyword)
1334                                                 pkgdict[key] = mykeywordlist
1335                                         cp = dep_getkey(key)
1336                                         if not self.pkeywordsdict.has_key(cp):
1337                                                 self.pkeywordsdict[cp] = {}
1338                                         self.pkeywordsdict[cp][key] = pkgdict[key]
1339
1340                                 #package.unmask
1341                                 pkgunmasklines = grabfile_package(
1342                                         os.path.join(abs_user_config, "package.unmask"),
1343                                         recursive=1)
1344                                 for x in pkgunmasklines:
1345                                         mycatpkg=dep_getkey(x)
1346                                         if self.punmaskdict.has_key(mycatpkg):
1347                                                 self.punmaskdict[mycatpkg].append(x)
1348                                         else:
1349                                                 self.punmaskdict[mycatpkg]=[x]
1350
1351                         #getting categories from an external file now
1352                         categories = [grabfile(os.path.join(x, "categories")) for x in locations]
1353                         self.categories = stack_lists(categories, incremental=1)
1354                         del categories
1355
1356                         archlist = [grabfile(os.path.join(x, "arch.list")) for x in locations]
1357                         archlist = stack_lists(archlist, incremental=1)
1358                         self.configdict["conf"]["PORTAGE_ARCHLIST"] = " ".join(archlist)
1359
1360                         #package.mask
1361                         pkgmasklines = []
1362                         for x in pmask_locations:
1363                                 pkgmasklines.append(grabfile_package(
1364                                         os.path.join(x, "package.mask"), recursive=1))
1365                         pkgmasklines = stack_lists(pkgmasklines, incremental=1)
1366
1367                         self.pmaskdict = {}
1368                         for x in pkgmasklines:
1369                                 mycatpkg=dep_getkey(x)
1370                                 if self.pmaskdict.has_key(mycatpkg):
1371                                         self.pmaskdict[mycatpkg].append(x)
1372                                 else:
1373                                         self.pmaskdict[mycatpkg]=[x]
1374
1375                         pkgprovidedlines = [grabfile(os.path.join(x, "package.provided")) for x in self.profiles]
1376                         pkgprovidedlines = stack_lists(pkgprovidedlines, incremental=1)
1377                         has_invalid_data = False
1378                         for x in range(len(pkgprovidedlines)-1, -1, -1):
1379                                 myline = pkgprovidedlines[x]
1380                                 if not isvalidatom("=" + myline):
1381                                         writemsg("Invalid package name in package.provided:" + \
1382                                                 " %s\n" % myline, noiselevel=-1)
1383                                         has_invalid_data = True
1384                                         del pkgprovidedlines[x]
1385                                         continue
1386                                 cpvr = catpkgsplit(pkgprovidedlines[x])
1387                                 if not cpvr or cpvr[0] == "null":
1388                                         writemsg("Invalid package name in package.provided: "+pkgprovidedlines[x]+"\n",
1389                                                 noiselevel=-1)
1390                                         has_invalid_data = True
1391                                         del pkgprovidedlines[x]
1392                                         continue
1393                                 if cpvr[0] == "virtual":
1394                                         writemsg("Virtual package in package.provided: %s\n" % \
1395                                                 myline, noiselevel=-1)
1396                                         has_invalid_data = True
1397                                         del pkgprovidedlines[x]
1398                                         continue
1399                         if has_invalid_data:
1400                                 writemsg("See portage(5) for correct package.provided usage.\n",
1401                                         noiselevel=-1)
1402                         self.pprovideddict = {}
1403                         for x in pkgprovidedlines:
1404                                 cpv=catpkgsplit(x)
1405                                 if not x:
1406                                         continue
1407                                 mycatpkg=dep_getkey(x)
1408                                 if self.pprovideddict.has_key(mycatpkg):
1409                                         self.pprovideddict[mycatpkg].append(x)
1410                                 else:
1411                                         self.pprovideddict[mycatpkg]=[x]
1412
1413                         # reasonable defaults; this is important as without USE_ORDER,
1414                         # USE will always be "" (nothing set)!
1415                         if "USE_ORDER" not in self:
1416                                 self.backupenv["USE_ORDER"] = "env:pkg:conf:defaults:pkginternal"
1417
1418                         self["PORTAGE_GID"] = str(portage_gid)
1419                         self.backup_changes("PORTAGE_GID")
1420
1421                         if self.get("PORTAGE_DEPCACHEDIR", None):
1422                                 self.depcachedir = self["PORTAGE_DEPCACHEDIR"]
1423                         self["PORTAGE_DEPCACHEDIR"] = self.depcachedir
1424                         self.backup_changes("PORTAGE_DEPCACHEDIR")
1425
1426                         overlays = self.get("PORTDIR_OVERLAY","").split()
1427                         if overlays:
1428                                 new_ov = []
1429                                 for ov in overlays:
1430                                         ov = normalize_path(ov)
1431                                         if os.path.isdir(ov):
1432                                                 new_ov.append(ov)
1433                                         else:
1434                                                 writemsg("!!! Invalid PORTDIR_OVERLAY" + \
1435                                                         " (not a dir): '%s'\n" % ov, noiselevel=-1)
1436                                 self["PORTDIR_OVERLAY"] = " ".join(new_ov)
1437                                 self.backup_changes("PORTDIR_OVERLAY")
1438
1439                         if "CBUILD" not in self and "CHOST" in self:
1440                                 self["CBUILD"] = self["CHOST"]
1441                                 self.backup_changes("CBUILD")
1442
1443                         self["PORTAGE_BIN_PATH"] = PORTAGE_BIN_PATH
1444                         self.backup_changes("PORTAGE_BIN_PATH")
1445                         self["PORTAGE_PYM_PATH"] = PORTAGE_PYM_PATH
1446                         self.backup_changes("PORTAGE_PYM_PATH")
1447
1448                         for var in ("PORTAGE_INST_UID", "PORTAGE_INST_GID"):
1449                                 try:
1450                                         self[var] = str(int(self.get(var, "0")))
1451                                 except ValueError:
1452                                         writemsg(("!!! %s='%s' is not a valid integer.  " + \
1453                                                 "Falling back to '0'.\n") % (var, self[var]),
1454                                                 noiselevel=-1)
1455                                         self[var] = "0"
1456                                 self.backup_changes(var)
1457
1458                         self.regenerate()
1459                         self.features = portage_util.unique_array(self["FEATURES"].split())
1460
1461                         if "gpg" in self.features:
1462                                 if not os.path.exists(self["PORTAGE_GPG_DIR"]) or \
1463                                         not os.path.isdir(self["PORTAGE_GPG_DIR"]):
1464                                         writemsg(colorize("BAD", "PORTAGE_GPG_DIR is invalid." + \
1465                                                 " Removing gpg from FEATURES.\n"), noiselevel=-1)
1466                                         self.features.remove("gpg")
1467
1468                         if not portage_exec.sandbox_capable and \
1469                                 ("sandbox" in self.features or "usersandbox" in self.features):
1470                                 if self.profile_path is not None and \
1471                                         os.path.realpath(self.profile_path) == \
1472                                         os.path.realpath(PROFILE_PATH):
1473                                         """ Don't show this warning when running repoman and the
1474                                         sandbox feature came from a profile that doesn't belong to
1475                                         the user."""
1476                                         writemsg(colorize("BAD", "!!! Problem with sandbox" + \
1477                                                 " binary. Disabling...\n\n"), noiselevel=-1)
1478                                 if "sandbox" in self.features:
1479                                         self.features.remove("sandbox")
1480                                 if "usersandbox" in self.features:
1481                                         self.features.remove("usersandbox")
1482
1483                         self.features.sort()
1484                         self["FEATURES"] = " ".join(self.features)
1485                         self.backup_changes("FEATURES")
1486
1487                         self._init_dirs()
1488
1489                 if mycpv:
1490                         self.setcpv(mycpv)
1491
1492         def _init_dirs(self):
1493                 """
1494                 Create a few directories that are critical to portage operation
1495                 """
1496                 if not os.access(self["ROOT"], os.W_OK):
1497                         return
1498
1499                 dir_mode_map = {
1500                         "tmp"             :(-1,          01777, 0),
1501                         "var/tmp"         :(-1,          01777, 0),
1502                         "var/lib/portage" :(portage_gid, 02750, 02),
1503                         "var/cache/edb"   :(portage_gid,  0755, 02)
1504                 }
1505
1506                 for mypath, (gid, mode, modemask) in dir_mode_map.iteritems():
1507                         try:
1508                                 mydir = os.path.join(self["ROOT"], mypath)
1509                                 portage_util.ensure_dirs(mydir, gid=gid, mode=mode, mask=modemask)
1510                         except portage_exception.PortageException, e:
1511                                 writemsg("!!! Directory initialization failed: '%s'\n" % mydir,
1512                                         noiselevel=-1)
1513                                 writemsg("!!! %s\n" % str(e),
1514                                         noiselevel=-1)
1515
1516         def validate(self):
1517                 """Validate miscellaneous settings and display warnings if necessary.
1518                 (This code was previously in the global scope of portage.py)"""
1519
1520                 groups = self["ACCEPT_KEYWORDS"].split()
1521                 archlist = self.archlist()
1522                 if not archlist:
1523                         writemsg("--- 'profiles/arch.list' is empty or not available. Empty portage tree?\n")
1524                 else:
1525                         for group in groups:
1526                                 if group not in archlist and group[0] != '-':
1527                                         writemsg("!!! INVALID ACCEPT_KEYWORDS: %s\n" % str(group),
1528                                                 noiselevel=-1)
1529
1530                 abs_profile_path = os.path.join(self["PORTAGE_CONFIGROOT"],
1531                         PROFILE_PATH.lstrip(os.path.sep))
1532                 if not os.path.islink(abs_profile_path) and \
1533                         not os.path.exists(os.path.join(abs_profile_path, "parent")) and \
1534                         os.path.exists(os.path.join(self["PORTDIR"], "profiles")):
1535                         writemsg("\a\n\n!!! %s is not a symlink and will probably prevent most merges.\n" % abs_profile_path,
1536                                 noiselevel=-1)
1537                         writemsg("!!! It should point into a profile within %s/profiles/\n" % self["PORTDIR"])
1538                         writemsg("!!! (You can safely ignore this message when syncing. It's harmless.)\n\n\n")
1539
1540                 abs_user_virtuals = os.path.join(self["PORTAGE_CONFIGROOT"],
1541                         USER_VIRTUALS_FILE.lstrip(os.path.sep))
1542                 if os.path.exists(abs_user_virtuals):
1543                         writemsg("\n!!! /etc/portage/virtuals is deprecated in favor of\n")
1544                         writemsg("!!! /etc/portage/profile/virtuals. Please move it to\n")
1545                         writemsg("!!! this new location.\n\n")
1546
1547         def loadVirtuals(self,root):
1548                 """Not currently used by portage."""
1549                 writemsg("DEPRECATED: portage.config.loadVirtuals\n")
1550                 self.getvirtuals(root)
1551
1552         def load_best_module(self,property_string):
1553                 best_mod = best_from_dict(property_string,self.modules,self.module_priority)
1554                 try:
1555                         mod = load_mod(best_mod)
1556                 except ImportError:
1557                         dump_traceback(red("Error: Failed to import module '%s'") % best_mod, noiselevel=0)
1558                         sys.exit(1)
1559                 return mod
1560
1561         def lock(self):
1562                 self.locked = 1
1563
1564         def unlock(self):
1565                 self.locked = 0
1566
1567         def modifying(self):
1568                 if self.locked:
1569                         raise Exception, "Configuration is locked."
1570
1571         def backup_changes(self,key=None):
1572                 self.modifying()
1573                 if key and self.configdict["env"].has_key(key):
1574                         self.backupenv[key] = copy.deepcopy(self.configdict["env"][key])
1575                 else:
1576                         raise KeyError, "No such key defined in environment: %s" % key
1577
1578         def reset(self,keeping_pkg=0,use_cache=1):
1579                 """
1580                 Restore environment from self.backupenv, call self.regenerate()
1581                 @param keeping_pkg: Should we keep the set_cpv() data or delete it.
1582                 @type keeping_pkg: Boolean
1583                 @param use_cache: Should self.regenerate use the cache or not
1584                 @type use_cache: Boolean
1585                 @rype: None
1586                 """
1587                 self.modifying()
1588                 self.configdict["env"].clear()
1589                 self.configdict["env"].update(self.backupenv)
1590
1591                 self.modifiedkeys = []
1592                 if not keeping_pkg:
1593                         self.mycpv = None
1594                         self.puse = ""
1595                         self.configdict["pkg"].clear()
1596                         self.configdict["pkginternal"].clear()
1597                         self.configdict["defaults"]["USE"] = \
1598                                 " ".join(self.make_defaults_use)
1599                         self.usemask  = set(stack_lists(
1600                                 self.usemask_list, incremental=True))
1601                         self.useforce  = set(stack_lists(
1602                                 self.useforce_list, incremental=True))
1603                 self.regenerate(use_cache=use_cache)
1604
1605         def load_infodir(self,infodir):
1606                 self.modifying()
1607                 if self.configdict.has_key("pkg"):
1608                         for x in self.configdict["pkg"].keys():
1609                                 del self.configdict["pkg"][x]
1610                 else:
1611                         writemsg("No pkg setup for settings instance?\n",
1612                                 noiselevel=-1)
1613                         sys.exit(17)
1614
1615                 if os.path.exists(infodir):
1616                         if os.path.exists(infodir+"/environment"):
1617                                 self.configdict["pkg"]["PORT_ENV_FILE"] = infodir+"/environment"
1618
1619                         myre = re.compile('^[A-Z]+$')
1620                         null_byte = "\0"
1621                         for filename in listdir(infodir,filesonly=1,EmptyOnError=1):
1622                                 if filename == "FEATURES":
1623                                         # FEATURES from the build host shouldn't be interpreted as
1624                                         # FEATURES on the client system.
1625                                         continue
1626                                 if myre.match(filename):
1627                                         try:
1628                                                 file_path = os.path.join(infodir, filename)
1629                                                 mydata = open(file_path).read().strip()
1630                                                 if len(mydata) < 2048 or filename == "USE":
1631                                                         if null_byte in mydata:
1632                                                                 writemsg("!!! Null byte found in metadata " + \
1633                                                                         "file: '%s'\n" % file_path, noiselevel=-1)
1634                                                                 continue
1635                                                         if filename == "USE":
1636                                                                 binpkg_flags = "-* " + mydata
1637                                                                 self.configdict["pkg"][filename] = binpkg_flags
1638                                                                 self.configdict["env"][filename] = mydata
1639                                                         else:
1640                                                                 self.configdict["pkg"][filename] = mydata
1641                                                                 self.configdict["env"][filename] = mydata
1642                                                 # CATEGORY is important because it's used in doebuild
1643                                                 # to infer the cpv.  If it's corrupted, it leads to
1644                                                 # strange errors later on, so we'll validate it and
1645                                                 # print a warning if necessary.
1646                                                 if filename == "CATEGORY":
1647                                                         matchobj = re.match("[-a-zA-Z0-9_.+]+", mydata)
1648                                                         if not matchobj or matchobj.start() != 0 or \
1649                                                                 matchobj.end() != len(mydata):
1650                                                                 writemsg("!!! CATEGORY file is corrupt: %s\n" % \
1651                                                                         os.path.join(infodir, filename), noiselevel=-1)
1652                                         except (OSError, IOError):
1653                                                 writemsg("!!! Unable to read file: %s\n" % infodir+"/"+filename,
1654                                                         noiselevel=-1)
1655                                                 pass
1656                         return 1
1657                 return 0
1658
1659         def setcpv(self, mycpv, use_cache=1, mydb=None):
1660                 """
1661                 Load a particular CPV into the config, this lets us see the
1662                 Default USE flags for a particular ebuild as well as the USE
1663                 flags from package.use.
1664
1665                 @param mycpv: A cpv to load
1666                 @type mycpv: string
1667                 @param use_cache: Enables caching
1668                 @type use_cache: Boolean
1669                 @param mydb: a dbapi instance that supports aux_get with the IUSE key.
1670                 @type mydb: dbapi or derivative.
1671                 @rtype: None
1672                 """
1673
1674                 self.modifying()
1675                 if self.mycpv == mycpv:
1676                         return
1677                 has_changed = False
1678                 self.mycpv = mycpv
1679                 cp = dep_getkey(mycpv)
1680                 cpv_slot = self.mycpv
1681                 pkginternaluse = ""
1682                 if mydb:
1683                         slot, iuse = mydb.aux_get(self.mycpv, ["SLOT", "IUSE"])
1684                         cpv_slot = "%s:%s" % (self.mycpv, slot)
1685                         pkginternaluse = []
1686                         for x in iuse.split():
1687                                 if x.startswith("+"):
1688                                         pkginternaluse.append(x[1:])
1689                                 elif x.startswith("-"):
1690                                         pkginternaluse.append(x)
1691                         pkginternaluse = " ".join(pkginternaluse)
1692                 if pkginternaluse != self.configdict["pkginternal"].get("USE", ""):
1693                         self.configdict["pkginternal"]["USE"] = pkginternaluse
1694                         has_changed = True
1695                 defaults = []
1696                 for i in xrange(len(self.profiles)):
1697                         defaults.append(self.make_defaults_use[i])
1698                         cpdict = self.pkgprofileuse[i].get(cp, None)
1699                         if cpdict:
1700                                 best_match = best_match_to_list(cpv_slot, cpdict.keys())
1701                                 if best_match:
1702                                         defaults.append(cpdict[best_match])
1703                 defaults = " ".join(defaults)
1704                 if defaults != self.configdict["defaults"].get("USE",""):
1705                         self.configdict["defaults"]["USE"] = defaults
1706                         has_changed = True
1707                 useforce = []
1708                 for i in xrange(len(self.profiles)):
1709                         useforce.append(self.useforce_list[i])
1710                         cpdict = self.puseforce_list[i].get(cp, None)
1711                         if cpdict:
1712                                 best_match = best_match_to_list(cpv_slot, cpdict.keys())
1713                                 if best_match:
1714                                         useforce.append(cpdict[best_match])
1715                 useforce = set(stack_lists(useforce, incremental=True))
1716                 if useforce != self.useforce:
1717                         self.useforce = useforce
1718                         has_changed = True
1719                 usemask = []
1720                 for i in xrange(len(self.profiles)):
1721                         usemask.append(self.usemask_list[i])
1722                         cpdict = self.pusemask_list[i].get(cp, None)
1723                         if cpdict:
1724                                 best_match = best_match_to_list(cpv_slot, cpdict.keys())
1725                                 if best_match:
1726                                         usemask.append(cpdict[best_match])
1727                 usemask = set(stack_lists(usemask, incremental=True))
1728                 if usemask != self.usemask:
1729                         self.usemask = usemask
1730                         has_changed = True
1731                 oldpuse = self.puse
1732                 self.puse = ""
1733                 cpdict = self.pusedict.get(cp)
1734                 if cpdict:
1735                         self.pusekey = best_match_to_list(cpv_slot, cpdict.keys())
1736                         if self.pusekey:
1737                                 self.puse = " ".join(cpdict[self.pusekey])
1738                 if oldpuse != self.puse:
1739                         has_changed = True
1740                 self.configdict["pkg"]["PKGUSE"] = self.puse[:] # For saving to PUSE file
1741                 self.configdict["pkg"]["USE"]    = self.puse[:] # this gets appended to USE
1742                 # CATEGORY is essential for doebuild calls
1743                 self.configdict["pkg"]["CATEGORY"] = mycpv.split("/")[0]
1744                 if has_changed:
1745                         self.reset(keeping_pkg=1,use_cache=use_cache)
1746
1747         def setinst(self,mycpv,mydbapi):
1748                 self.modifying()
1749                 if len(self.virtuals) == 0:
1750                         self.getvirtuals()
1751                 # Grab the virtuals this package provides and add them into the tree virtuals.
1752                 provides = mydbapi.aux_get(mycpv, ["PROVIDE"])[0]
1753                 if isinstance(mydbapi, portdbapi):
1754                         myuse = self["USE"]
1755                 else:
1756                         myuse = mydbapi.aux_get(mycpv, ["USE"])[0]
1757                 virts = flatten(portage_dep.use_reduce(portage_dep.paren_reduce(provides), uselist=myuse.split()))
1758
1759                 cp = dep_getkey(mycpv)
1760                 for virt in virts:
1761                         virt = dep_getkey(virt)
1762                         if not self.treeVirtuals.has_key(virt):
1763                                 self.treeVirtuals[virt] = []
1764                         # XXX: Is this bad? -- It's a permanent modification
1765                         if cp not in self.treeVirtuals[virt]:
1766                                 self.treeVirtuals[virt].append(cp)
1767
1768                 self.virtuals = self.__getvirtuals_compile()
1769
1770
1771         def regenerate(self,useonly=0,use_cache=1):
1772                 """
1773                 Regenerate settings
1774                 This involves regenerating valid USE flags, re-expanding USE_EXPAND flags
1775                 re-stacking USE flags (-flag and -*), as well as any other INCREMENTAL
1776                 variables.  This also updates the env.d configdict; useful in case an ebuild
1777                 changes the environment.
1778
1779                 If FEATURES has already stacked, it is not stacked twice.
1780
1781                 @param useonly: Only regenerate USE flags (not any other incrementals)
1782                 @type useonly: Boolean
1783                 @param use_cache: Enable Caching (only for autouse)
1784                 @type use_cache: Boolean
1785                 @rtype: None
1786                 """
1787
1788                 self.modifying()
1789                 if self.already_in_regenerate:
1790                         # XXX: THIS REALLY NEEDS TO GET FIXED. autouse() loops.
1791                         writemsg("!!! Looping in regenerate.\n",1)
1792                         return
1793                 else:
1794                         self.already_in_regenerate = 1
1795
1796                 # We grab the latest profile.env here since it changes frequently.
1797                 self.configdict["env.d"].clear()
1798                 env_d = getconfig(
1799                         os.path.join(self["ROOT"], "etc", "profile.env"), expand=False)
1800                 if env_d:
1801                         # env_d will be None if profile.env doesn't exist.
1802                         self.configdict["env.d"].update(env_d)
1803
1804                 if useonly:
1805                         myincrementals=["USE"]
1806                 else:
1807                         myincrementals = self.incrementals
1808                 myincrementals = set(myincrementals)
1809                 # If self.features exists, it has already been stacked and may have
1810                 # been mutated, so don't stack it again or else any mutations will be
1811                 # reverted.
1812                 if "FEATURES" in myincrementals and hasattr(self, "features"):
1813                         myincrementals.remove("FEATURES")
1814
1815                 if "USE" in myincrementals:
1816                         # Process USE last because it depends on USE_EXPAND which is also
1817                         # an incremental!
1818                         myincrementals.remove("USE")
1819
1820                 for mykey in myincrementals:
1821
1822                         mydbs=self.configlist[:-1]
1823
1824                         myflags=[]
1825                         for curdb in mydbs:
1826                                 if mykey not in curdb:
1827                                         continue
1828                                 #variables are already expanded
1829                                 mysplit = curdb[mykey].split()
1830
1831                                 for x in mysplit:
1832                                         if x=="-*":
1833                                                 # "-*" is a special "minus" var that means "unset all settings".
1834                                                 # so USE="-* gnome" will have *just* gnome enabled.
1835                                                 myflags = []
1836                                                 continue
1837
1838                                         if x[0]=="+":
1839                                                 # Not legal. People assume too much. Complain.
1840                                                 writemsg(red("USE flags should not start with a '+': %s\n" % x),
1841                                                         noiselevel=-1)
1842                                                 x=x[1:]
1843                                                 if not x:
1844                                                         continue
1845
1846                                         if (x[0]=="-"):
1847                                                 if (x[1:] in myflags):
1848                                                         # Unset/Remove it.
1849                                                         del myflags[myflags.index(x[1:])]
1850                                                 continue
1851
1852                                         # We got here, so add it now.
1853                                         if x not in myflags:
1854                                                 myflags.append(x)
1855
1856                         myflags.sort()
1857                         #store setting in last element of configlist, the original environment:
1858                         if myflags or mykey in self:
1859                                 self.configlist[-1][mykey] = " ".join(myflags)
1860                         del myflags
1861
1862                 # Do the USE calculation last because it depends on USE_EXPAND.
1863                 if "auto" in self["USE_ORDER"].split(":"):
1864                         self.configdict["auto"]["USE"] = autouse(
1865                                 vartree(root=self["ROOT"], categories=self.categories,
1866                                         settings=self),
1867                                 use_cache=use_cache, mysettings=self)
1868                 else:
1869                         self.configdict["auto"]["USE"] = ""
1870
1871                 use_expand = self.get("USE_EXPAND", "").split()
1872
1873                 if not self.uvlist:
1874                         for x in self["USE_ORDER"].split(":"):
1875                                 if x in self.configdict:
1876                                         self.uvlist.append(self.configdict[x])
1877                         self.uvlist.reverse()
1878
1879                 myflags = set()
1880                 for curdb in self.uvlist:
1881                         cur_use_expand = [x for x in use_expand if x in curdb]
1882                         mysplit = curdb.get("USE", "").split()
1883                         if not mysplit and not cur_use_expand:
1884                                 continue
1885                         for x in mysplit:
1886                                 if x == "-*":
1887                                         myflags.clear()
1888                                         continue
1889
1890                                 if x[0] == "+":
1891                                         writemsg(colorize("BAD", "USE flags should not start " + \
1892                                                 "with a '+': %s\n" % x), noiselevel=-1)
1893                                         x = x[1:]
1894                                         if not x:
1895                                                 continue
1896
1897                                 if x[0] == "-":
1898                                         myflags.discard(x[1:])
1899                                         continue
1900
1901                                 myflags.add(x)
1902
1903                         for var in cur_use_expand:
1904                                 var_lower = var.lower()
1905                                 is_not_incremental = var not in myincrementals
1906                                 if is_not_incremental:
1907                                         prefix = var_lower + "_"
1908                                         for x in list(myflags):
1909                                                 if x.startswith(prefix):
1910                                                         myflags.remove(x)
1911                                 for x in curdb[var].split():
1912                                         if x[0] == "+":
1913                                                 if is_not_incremental:
1914                                                         writemsg(colorize("BAD", "Invalid '+' " + \
1915                                                                 "operator in non-incremental variable " + \
1916                                                                  "'%s': '%s'\n" % (var, x)), noiselevel=-1)
1917                                                         continue
1918                                                 else:
1919                                                         writemsg(colorize("BAD", "Invalid '+' " + \
1920                                                                 "operator in incremental variable " + \
1921                                                                  "'%s': '%s'\n" % (var, x)), noiselevel=-1)
1922                                                 x = x[1:]
1923                                         if x[0] == "-":
1924                                                 if is_not_incremental:
1925                                                         writemsg(colorize("BAD", "Invalid '-' " + \
1926                                                                 "operator in non-incremental variable " + \
1927                                                                  "'%s': '%s'\n" % (var, x)), noiselevel=-1)
1928                                                         continue
1929                                                 myflags.discard(var_lower + "_" + x[1:])
1930                                                 continue
1931                                         myflags.add(var_lower + "_" + x)
1932
1933                 myflags.update(self.useforce)
1934
1935                 # FEATURES=test should imply USE=test
1936                 if "test" in self.configlist[-1].get("FEATURES","").split():
1937                         myflags.add("test")
1938                         if self.get("EBUILD_FORCE_TEST") == "1":
1939                                 self.usemask.discard("test")
1940
1941                 usesplit = [ x for x in myflags if \
1942                         x not in self.usemask]
1943
1944                 usesplit.sort()
1945
1946                 # Use the calculated USE flags to regenerate the USE_EXPAND flags so
1947                 # that they are consistent.
1948                 for var in use_expand:
1949                         prefix = var.lower() + "_"
1950                         prefix_len = len(prefix)
1951                         expand_flags = set([ x[prefix_len:] for x in usesplit \
1952                                 if x.startswith(prefix) ])
1953                         var_split = self.get(var, "").split()
1954                         # Preserve the order of var_split because it can matter for things
1955                         # like LINGUAS.
1956                         var_split = [ x for x in var_split if x in expand_flags ]
1957                         var_split.extend(expand_flags.difference(var_split))
1958                         if var_split or var in self:
1959                                 # Don't export empty USE_EXPAND vars unless the user config
1960                                 # exports them as empty.  This is required for vars such as
1961                                 # LINGUAS, where unset and empty have different meanings.
1962                                 self[var] = " ".join(var_split)
1963
1964                 # Pre-Pend ARCH variable to USE settings so '-*' in env doesn't kill arch.
1965                 if self.configdict["defaults"].has_key("ARCH"):
1966                         if self.configdict["defaults"]["ARCH"]:
1967                                 if self.configdict["defaults"]["ARCH"] not in usesplit:
1968                                         usesplit.insert(0,self.configdict["defaults"]["ARCH"])
1969
1970                 self.configlist[-1]["USE"]= " ".join(usesplit)
1971
1972                 self.already_in_regenerate = 0
1973
1974         def get_virts_p(self, myroot):
1975                 if self.virts_p:
1976                         return self.virts_p
1977                 virts = self.getvirtuals(myroot)
1978                 if virts:
1979                         myvkeys = virts.keys()
1980                         for x in myvkeys:
1981                                 vkeysplit = x.split("/")
1982                                 if not self.virts_p.has_key(vkeysplit[1]):
1983                                         self.virts_p[vkeysplit[1]] = virts[x]
1984                 return self.virts_p
1985
1986         def getvirtuals(self, myroot=None):
1987                 """myroot is now ignored because, due to caching, it has always been
1988                 broken for all but the first call."""
1989                 myroot = self["ROOT"]
1990                 if self.virtuals:
1991                         return self.virtuals
1992
1993                 virtuals_list = []
1994                 for x in self.profiles:
1995                         virtuals_file = os.path.join(x, "virtuals")
1996                         virtuals_dict = grabdict(virtuals_file)
1997                         for k in virtuals_dict.keys():
1998                                 if not isvalidatom(k) or dep_getkey(k) != k:
1999                                         writemsg("--- Invalid virtuals atom in %s: %s\n" % \
2000                                                 (virtuals_file, k), noiselevel=-1)
2001                                         del virtuals_dict[k]
2002                                         continue
2003                                 myvalues = virtuals_dict[k]
2004                                 for x in myvalues:
2005                                         myatom = x
2006                                         if x.startswith("-"):
2007                                                 # allow incrementals
2008                                                 myatom = x[1:]
2009                                         if not isvalidatom(myatom):
2010                                                 writemsg("--- Invalid atom in %s: %s\n" % \
2011                                                         (virtuals_file, x), noiselevel=-1)
2012                                                 myvalues.remove(x)
2013                                 if not myvalues:
2014                                         del virtuals_dict[k]
2015                         if virtuals_dict:
2016                                 virtuals_list.append(virtuals_dict)
2017
2018                 self.dirVirtuals = stack_dictlist(virtuals_list, incremental=True)
2019                 del virtuals_list
2020
2021                 for virt in self.dirVirtuals:
2022                         # Preference for virtuals decreases from left to right.
2023                         self.dirVirtuals[virt].reverse()
2024
2025                 # Repoman does not use user or tree virtuals.
2026                 if self.local_config and not self.treeVirtuals:
2027                         temp_vartree = vartree(myroot, None,
2028                                 categories=self.categories, settings=self)
2029                         # Reduce the provides into a list by CP.
2030                         self.treeVirtuals = map_dictlist_vals(getCPFromCPV,temp_vartree.get_all_provides())
2031
2032                 self.virtuals = self.__getvirtuals_compile()
2033                 return self.virtuals
2034
2035         def __getvirtuals_compile(self):
2036                 """Stack installed and profile virtuals.  Preference for virtuals
2037                 decreases from left to right.
2038                 Order of preference:
2039                 1. installed and in profile
2040                 2. installed only
2041                 3. profile only
2042                 """
2043
2044                 # Virtuals by profile+tree preferences.
2045                 ptVirtuals   = {}
2046
2047                 for virt, installed_list in self.treeVirtuals.iteritems():
2048                         profile_list = self.dirVirtuals.get(virt, None)
2049                         if not profile_list:
2050                                 continue
2051                         for cp in installed_list:
2052                                 if cp in profile_list:
2053                                         ptVirtuals.setdefault(virt, [])
2054                                         ptVirtuals[virt].append(cp)
2055
2056                 virtuals = stack_dictlist([ptVirtuals, self.treeVirtuals,
2057                         self.dirVirtuals])
2058                 return virtuals
2059
2060         def __delitem__(self,mykey):
2061                 self.modifying()
2062                 for x in self.lookuplist:
2063                         if x != None:
2064                                 if mykey in x:
2065                                         del x[mykey]
2066
2067         def __getitem__(self,mykey):
2068                 match = ''
2069                 for x in self.lookuplist:
2070                         if x is None:
2071                                 writemsg("!!! lookuplist is null.\n")
2072                         elif x.has_key(mykey):
2073                                 match = x[mykey]
2074                                 break
2075                 return match
2076
2077         def has_key(self,mykey):
2078                 for x in self.lookuplist:
2079                         if x.has_key(mykey):
2080                                 return 1
2081                 return 0
2082
2083         def __contains__(self, mykey):
2084                 """Called to implement membership test operators (in and not in)."""
2085                 return bool(self.has_key(mykey))
2086
2087         def setdefault(self, k, x=None):
2088                 if k in self:
2089                         return self[k]
2090                 else:
2091                         self[k] = x
2092                         return x
2093
2094         def get(self, k, x=None):
2095                 if k in self:
2096                         return self[k]
2097                 else:
2098                         return x
2099
2100         def keys(self):
2101                 return unique_array(flatten([x.keys() for x in self.lookuplist]))
2102
2103         def __setitem__(self,mykey,myvalue):
2104                 "set a value; will be thrown away at reset() time"
2105                 if type(myvalue) != types.StringType:
2106                         raise ValueError("Invalid type being used as a value: '%s': '%s'" % (str(mykey),str(myvalue)))
2107                 self.modifying()
2108                 self.modifiedkeys += [mykey]
2109                 self.configdict["env"][mykey]=myvalue
2110
2111         def environ(self):
2112                 "return our locally-maintained environment"
2113                 mydict={}
2114                 for x in self.keys():
2115                         myvalue = self[x]
2116                         if not isinstance(myvalue, basestring):
2117                                 writemsg("!!! Non-string value in config: %s=%s\n" % \
2118                                         (x, myvalue), noiselevel=-1)
2119                                 continue
2120                         mydict[x] = myvalue
2121                 if not mydict.has_key("HOME") and mydict.has_key("BUILD_PREFIX"):
2122                         writemsg("*** HOME not set. Setting to "+mydict["BUILD_PREFIX"]+"\n")
2123                         mydict["HOME"]=mydict["BUILD_PREFIX"][:]
2124
2125                 return mydict
2126
2127         def thirdpartymirrors(self):
2128                 if getattr(self, "_thirdpartymirrors", None) is None:
2129                         profileroots = [os.path.join(self["PORTDIR"], "profiles")]
2130                         for x in self["PORTDIR_OVERLAY"].split():
2131                                 profileroots.insert(0, os.path.join(x, "profiles"))
2132                         thirdparty_lists = [grabdict(os.path.join(x, "thirdpartymirrors")) for x in profileroots]
2133                         self._thirdpartymirrors = stack_dictlist(thirdparty_lists, incremental=True)
2134                 return self._thirdpartymirrors
2135
2136         def archlist(self):
2137                 return flatten([[myarch, "~" + myarch] \
2138                         for myarch in self["PORTAGE_ARCHLIST"].split()])
2139
2140         def selinux_enabled(self):
2141                 if getattr(self, "_selinux_enabled", None) is None:
2142                         self._selinux_enabled = 0
2143                         if "selinux" in self["USE"].split():
2144                                 if "selinux" in globals():
2145                                         if selinux.is_selinux_enabled() == 1:
2146                                                 self._selinux_enabled = 1
2147                                         else:
2148                                                 self._selinux_enabled = 0
2149                                 else:
2150                                         writemsg("!!! SELinux module not found. Please verify that it was installed.\n",
2151                                                 noiselevel=-1)
2152                                         self._selinux_enabled = 0
2153                         if self._selinux_enabled == 0:
2154                                 try:    
2155                                         del sys.modules["selinux"]
2156                                 except KeyError:
2157                                         pass
2158                 return self._selinux_enabled
2159
2160 # XXX This would be to replace getstatusoutput completely.
2161 # XXX Issue: cannot block execution. Deadlock condition.
2162 def spawn(mystring, mysettings, debug=0, free=0, droppriv=0, sesandbox=0, **keywords):
2163         """
2164         Spawn a subprocess with extra portage-specific options.
2165         Optiosn include:
2166
2167         Sandbox: Sandbox means the spawned process will be limited in its ability t
2168         read and write files (normally this means it is restricted to ${IMAGE}/)
2169         SElinux Sandbox: Enables sandboxing on SElinux
2170         Reduced Privileges: Drops privilages such that the process runs as portage:portage
2171         instead of as root.
2172
2173         Notes: os.system cannot be used because it messes with signal handling.  Instead we
2174         use the portage_exec spawn* family of functions.
2175
2176         This function waits for the process to terminate.
2177
2178         @param mystring: Command to run
2179         @type mystring: String
2180         @param mysettings: Either a Dict of Key,Value pairs or an instance of portage.config
2181         @type mysettings: Dictionary or config instance
2182         @param debug: Ignored
2183         @type debug: Boolean
2184         @param free: Enable sandboxing for this process
2185         @type free: Boolean
2186         @param droppriv: Drop to portage:portage when running this command
2187         @type droppriv: Boolean
2188         @param sesandbox: Enable SELinux Sandboxing (toggles a context switch)
2189         @type sesandbox: Boolean
2190         @param keywords: Extra options encoded as a dict, to be passed to spawn
2191         @type keywords: Dictionary
2192         @rtype: Integer
2193         @returns:
2194         1. The return code of the spawned process.
2195         """
2196
2197         if type(mysettings) == types.DictType:
2198                 env=mysettings
2199                 keywords["opt_name"]="[ %s ]" % "portage"
2200         else:
2201                 check_config_instance(mysettings)
2202                 env=mysettings.environ()
2203                 keywords["opt_name"]="[%s]" % mysettings["PF"]
2204
2205         # The default policy for the sesandbox domain only allows entry (via exec)
2206         # from shells and from binaries that belong to portage (the number of entry
2207         # points is minimized).  The "tee" binary is not among the allowed entry
2208         # points, so it is spawned outside of the sesandbox domain and reads from a
2209         # pipe between two domains.
2210         logfile = keywords.get("logfile")
2211         mypids = []
2212         pw = None
2213         if logfile:
2214                 del keywords["logfile"]
2215                 fd_pipes = keywords.get("fd_pipes")
2216                 if fd_pipes is None:
2217                         fd_pipes = {0:0, 1:1, 2:2}
2218                 elif 1 not in fd_pipes or 2 not in fd_pipes:
2219                         raise ValueError(fd_pipes)
2220                 pr, pw = os.pipe()
2221                 mypids.extend(portage_exec.spawn(('tee', '-i', '-a', logfile),
2222                          returnpid=True, fd_pipes={0:pr, 1:fd_pipes[1], 2:fd_pipes[2]}))
2223                 os.close(pr)
2224                 fd_pipes[1] = pw
2225                 fd_pipes[2] = pw
2226                 keywords["fd_pipes"] = fd_pipes
2227
2228         features = mysettings.features
2229         # XXX: Negative RESTRICT word
2230         droppriv=(droppriv and ("userpriv" in features) and not \
2231                 (("nouserpriv" in mysettings["RESTRICT"].split()) or \
2232                  ("userpriv" in mysettings["RESTRICT"].split())))
2233
2234         if droppriv and not uid and portage_gid and portage_uid:
2235                 keywords.update({"uid":portage_uid,"gid":portage_gid,"groups":userpriv_groups,"umask":002})
2236
2237         if not free:
2238                 free=((droppriv and "usersandbox" not in features) or \
2239                         (not droppriv and "sandbox" not in features and "usersandbox" not in features))
2240
2241         if free:
2242                 keywords["opt_name"] += " bash"
2243                 spawn_func = portage_exec.spawn_bash
2244         else:
2245                 keywords["opt_name"] += " sandbox"
2246                 spawn_func = portage_exec.spawn_sandbox
2247
2248         if sesandbox:
2249                 con = selinux.getcontext()
2250                 con = con.replace(mysettings["PORTAGE_T"], mysettings["PORTAGE_SANDBOX_T"])
2251                 selinux.setexec(con)
2252
2253         returnpid = keywords.get("returnpid")
2254         keywords["returnpid"] = True
2255         try:
2256                 mypids.extend(spawn_func(mystring, env=env, **keywords))
2257         finally:
2258                 if pw:
2259                         os.close(pw)
2260                 if sesandbox:
2261                         selinux.setexec(None)
2262
2263         if returnpid:
2264                 return mypids
2265
2266         while mypids:
2267                 pid = mypids.pop(0)
2268                 retval = os.waitpid(pid, 0)[1]
2269                 portage_exec.spawned_pids.remove(pid)
2270                 if retval != os.EX_OK:
2271                         for pid in mypids:
2272                                 if os.waitpid(pid, os.WNOHANG) == (0,0):
2273                                         import signal
2274                                         os.kill(pid, signal.SIGTERM)
2275                                         os.waitpid(pid, 0)
2276                                 portage_exec.spawned_pids.remove(pid)
2277                         if retval & 0xff:
2278                                 return (retval & 0xff) << 8
2279                         return retval >> 8
2280         return os.EX_OK
2281
2282 def fetch(myuris, mysettings, listonly=0, fetchonly=0, locks_in_subdir=".locks",use_locks=1, try_mirrors=1):
2283         "fetch files.  Will use digest file if available."
2284
2285         features = mysettings.features
2286         # 'nomirror' is bad/negative logic. You Restrict mirroring, not no-mirroring.
2287         if ("mirror" in mysettings["RESTRICT"].split()) or \
2288            ("nomirror" in mysettings["RESTRICT"].split()):
2289                 if ("mirror" in features) and ("lmirror" not in features):
2290                         # lmirror should allow you to bypass mirror restrictions.
2291                         # XXX: This is not a good thing, and is temporary at best.
2292                         print ">>> \"mirror\" mode desired and \"mirror\" restriction found; skipping fetch."
2293                         return 1
2294
2295         thirdpartymirrors = mysettings.thirdpartymirrors()
2296
2297         check_config_instance(mysettings)
2298
2299         custommirrors = grabdict(os.path.join(mysettings["PORTAGE_CONFIGROOT"],
2300                 CUSTOM_MIRRORS_FILE.lstrip(os.path.sep)), recursive=1)
2301
2302         mymirrors=[]
2303
2304         if listonly or ("distlocks" not in features):
2305                 use_locks = 0
2306
2307         fetch_to_ro = 0
2308         if "skiprocheck" in features:
2309                 fetch_to_ro = 1
2310
2311         if not os.access(mysettings["DISTDIR"],os.W_OK) and fetch_to_ro:
2312                 if use_locks:
2313                         writemsg(red("!!! For fetching to a read-only filesystem, " + \
2314                                 "locking should be turned off.\n"), noiselevel=-1)
2315                         writemsg("!!! This can be done by adding -distlocks to " + \
2316                                 "FEATURES in /etc/make.conf\n", noiselevel=-1)
2317 #                       use_locks = 0
2318
2319         # local mirrors are always added
2320         if custommirrors.has_key("local"):
2321                 mymirrors += custommirrors["local"]
2322
2323         if ("nomirror" in mysettings["RESTRICT"].split()) or \
2324            ("mirror"   in mysettings["RESTRICT"].split()):
2325                 # We don't add any mirrors.
2326                 pass
2327         else:
2328                 if try_mirrors:
2329                         mymirrors += [x.rstrip("/") for x in mysettings["GENTOO_MIRRORS"].split() if x]
2330
2331         pkgdir = mysettings.get("O")
2332         if pkgdir:
2333                 mydigests = Manifest(
2334                         pkgdir, mysettings["DISTDIR"]).getTypeDigests("DIST")
2335         else:
2336                 # no digests because fetch was not called for a specific package
2337                 mydigests = {}
2338
2339         fsmirrors = []
2340         for x in range(len(mymirrors)-1,-1,-1):
2341                 if mymirrors[x] and mymirrors[x][0]=='/':
2342                         fsmirrors += [mymirrors[x]]
2343                         del mymirrors[x]
2344
2345         restrict_fetch = "fetch" in mysettings["RESTRICT"].split()
2346         custom_local_mirrors = custommirrors.get("local", [])
2347         if restrict_fetch:
2348                 # With fetch restriction, a normal uri may only be fetched from
2349                 # custom local mirrors (if available).  A mirror:// uri may also
2350                 # be fetched from specific mirrors (effectively overriding fetch
2351                 # restriction, but only for specific mirrors).
2352                 locations = custom_local_mirrors
2353         else:
2354                 locations = mymirrors
2355
2356         filedict={}
2357         primaryuri_indexes={}
2358         for myuri in myuris:
2359                 myfile=os.path.basename(myuri)
2360                 if not filedict.has_key(myfile):
2361                         filedict[myfile]=[]
2362                         for y in range(0,len(locations)):
2363                                 filedict[myfile].append(locations[y]+"/distfiles/"+myfile)
2364                 if myuri[:9]=="mirror://":
2365                         eidx = myuri.find("/", 9)
2366                         if eidx != -1:
2367                                 mirrorname = myuri[9:eidx]
2368
2369                                 # Try user-defined mirrors first
2370                                 if custommirrors.has_key(mirrorname):
2371                                         for cmirr in custommirrors[mirrorname]:
2372                                                 filedict[myfile].append(cmirr+"/"+myuri[eidx+1:])
2373                                                 # remove the mirrors we tried from the list of official mirrors
2374                                                 if cmirr.strip() in thirdpartymirrors[mirrorname]:
2375                                                         thirdpartymirrors[mirrorname].remove(cmirr)
2376                                 # now try the official mirrors
2377                                 if thirdpartymirrors.has_key(mirrorname):
2378                                         shuffle(thirdpartymirrors[mirrorname])
2379
2380                                         for locmirr in thirdpartymirrors[mirrorname]:
2381                                                 filedict[myfile].append(locmirr+"/"+myuri[eidx+1:])
2382
2383                                 if not filedict[myfile]:
2384                                         writemsg("No known mirror by the name: %s\n" % (mirrorname))
2385                         else:
2386                                 writemsg("Invalid mirror definition in SRC_URI:\n", noiselevel=-1)
2387                                 writemsg("  %s\n" % (myuri), noiselevel=-1)
2388                 else:
2389                         if restrict_fetch:
2390                                 # Only fetch from specific mirrors is allowed.
2391                                 continue
2392                         if "primaryuri" in mysettings["RESTRICT"].split():
2393                                 # Use the source site first.
2394                                 if primaryuri_indexes.has_key(myfile):
2395                                         primaryuri_indexes[myfile] += 1
2396                                 else:
2397                                         primaryuri_indexes[myfile] = 0
2398                                 filedict[myfile].insert(primaryuri_indexes[myfile], myuri)
2399                         else:
2400                                 filedict[myfile].append(myuri)
2401
2402         can_fetch=True
2403
2404         if listonly:
2405                 can_fetch = False
2406
2407         for var_name in ("FETCHCOMMAND", "RESUMECOMMAND"):
2408                 if not mysettings.get(var_name, None):
2409                         can_fetch = False
2410
2411         if can_fetch:
2412                 dirmode  = 02070
2413                 filemode =   060
2414                 modemask =    02
2415                 distdir_dirs = [""]
2416                 if "distlocks" in features:
2417                         distdir_dirs.append(".locks")
2418                 try:
2419                         
2420                         for x in distdir_dirs:
2421                                 mydir = os.path.join(mysettings["DISTDIR"], x)
2422                                 if portage_util.ensure_dirs(mydir, gid=portage_gid, mode=dirmode, mask=modemask):
2423                                         writemsg("Adjusting permissions recursively: '%s'\n" % mydir,
2424                                                 noiselevel=-1)
2425                                         def onerror(e):
2426                                                 raise # bail out on the first error that occurs during recursion
2427                                         if not apply_recursive_permissions(mydir,
2428                                                 gid=portage_gid, dirmode=dirmode, dirmask=modemask,
2429                                                 filemode=filemode, filemask=modemask, onerror=onerror):
2430                                                 raise portage_exception.OperationNotPermitted(
2431                                                         "Failed to apply recursive permissions for the portage group.")
2432                 except portage_exception.PortageException, e:
2433                         if not os.path.isdir(mysettings["DISTDIR"]):
2434                                 writemsg("!!! %s\n" % str(e), noiselevel=-1)
2435                                 writemsg("!!! Directory Not Found: DISTDIR='%s'\n" % mysettings["DISTDIR"], noiselevel=-1)
2436                                 writemsg("!!! Fetching will fail!\n", noiselevel=-1)
2437
2438         if can_fetch and \
2439                 not fetch_to_ro and \
2440                 not os.access(mysettings["DISTDIR"], os.W_OK):
2441                 writemsg("!!! No write access to '%s'\n" % mysettings["DISTDIR"],
2442                         noiselevel=-1)
2443                 can_fetch = False
2444
2445         if can_fetch and use_locks and locks_in_subdir:
2446                         distlocks_subdir = os.path.join(mysettings["DISTDIR"], locks_in_subdir)
2447                         if not os.access(distlocks_subdir, os.W_OK):
2448                                 writemsg("!!! No write access to write to %s.  Aborting.\n" % distlocks_subdir,
2449                                         noiselevel=-1)
2450                                 return 0
2451                         del distlocks_subdir
2452         for myfile in filedict.keys():
2453                 """
2454                 fetched  status
2455                 0        nonexistent
2456                 1        partially downloaded
2457                 2        completely downloaded
2458                 """
2459                 myfile_path = os.path.join(mysettings["DISTDIR"], myfile)
2460                 fetched=0
2461                 file_lock = None
2462                 if listonly:
2463                         writemsg_stdout("\n", noiselevel=-1)
2464                 else:
2465                         if use_locks and can_fetch:
2466                                 waiting_msg = None
2467                                 if "parallel-fetch" in features:
2468                                         waiting_msg = ("Downloading '%s'... " + \
2469                                                 "see /var/log/emerge-fetch.log for details.") % myfile
2470                                 if locks_in_subdir:
2471                                         file_lock = portage_locks.lockfile(
2472                                                 os.path.join(mysettings["DISTDIR"],
2473                                                 locks_in_subdir, myfile), wantnewlockfile=1,
2474                                                 waiting_msg=waiting_msg)
2475                                 else:
2476                                         file_lock = portage_locks.lockfile(
2477                                                 myfile_path, wantnewlockfile=1,
2478                                                 waiting_msg=waiting_msg)
2479                 try:
2480                         if not listonly:
2481                                 if fsmirrors and not os.path.exists(myfile_path):
2482                                         for mydir in fsmirrors:
2483                                                 mirror_file = os.path.join(mydir, myfile)
2484                                                 try:
2485                                                         shutil.copyfile(mirror_file, myfile_path)
2486                                                         writemsg(_("Local mirror has file:" + \
2487                                                                 " %(file)s\n" % {"file":myfile}))
2488                                                         break
2489                                                 except (IOError, OSError), e:
2490                                                         if e.errno != errno.ENOENT:
2491                                                                 raise
2492                                                         del e
2493
2494                                 try:
2495                                         mystat = os.stat(myfile_path)
2496                                 except OSError, e:
2497                                         if e.errno != errno.ENOENT:
2498                                                 raise
2499                                         del e
2500                                 else:
2501                                         try:
2502                                                 apply_secpass_permissions(
2503                                                         myfile_path, gid=portage_gid, mode=0664, mask=02,
2504                                                         stat_cached=mystat)
2505                                         except portage_exception.PortageException, e:
2506                                                 if not os.access(myfile_path, os.R_OK):
2507                                                         writemsg("!!! Failed to adjust permissions:" + \
2508                                                                 " %s\n" % str(e), noiselevel=-1)
2509                                         if myfile not in mydigests:
2510                                                 # We don't have a digest, but the file exists.  We must
2511                                                 # assume that it is fully downloaded.
2512                                                 continue
2513                                         else:
2514                                                 if mystat.st_size < mydigests[myfile]["size"] and \
2515                                                         not restrict_fetch:
2516                                                         fetched = 1 # Try to resume this download.
2517                                                 else:
2518                                                         verified_ok, reason = portage_checksum.verify_all(
2519                                                                 myfile_path, mydigests[myfile])
2520                                                         if not verified_ok:
2521                                                                 writemsg("!!! Previously fetched" + \
2522                                                                         " file: '%s'\n" % myfile, noiselevel=-1)
2523                                                                 writemsg("!!! Reason: %s\n" % reason[0],
2524                                                                         noiselevel=-1)
2525                                                                 writemsg(("!!! Got:      %s\n" + \
2526                                                                         "!!! Expected: %s\n") % \
2527                                                                         (reason[1], reason[2]), noiselevel=-1)
2528                                                                 if reason[0] == "Insufficient data for checksum verification":
2529                                                                         return 0
2530                                                                 if can_fetch and not restrict_fetch:
2531                                                                         writemsg("Refetching...\n\n",
2532                                                                                 noiselevel=-1)
2533                                                                         os.unlink(myfile_path)
2534                                                         else:
2535                                                                 eout = output.EOutput()
2536                                                                 eout.quiet = \
2537                                                                         mysettings.get("PORTAGE_QUIET", None) == "1"
2538                                                                 for digest_name in mydigests[myfile]:
2539                                                                         eout.ebegin(
2540                                                                                 "%s %s ;-)" % (myfile, digest_name))
2541                                                                         eout.eend(0)
2542                                                                 continue # fetch any remaining files
2543
2544                         for loc in filedict[myfile]:
2545                                 if listonly:
2546                                         writemsg_stdout(loc+" ", noiselevel=-1)
2547                                         continue
2548                                 # allow different fetchcommands per protocol
2549                                 protocol = loc[0:loc.find("://")]
2550                                 if mysettings.has_key("FETCHCOMMAND_"+protocol.upper()):
2551                                         fetchcommand=mysettings["FETCHCOMMAND_"+protocol.upper()]
2552                                 else:
2553                                         fetchcommand=mysettings["FETCHCOMMAND"]
2554                                 if mysettings.has_key("RESUMECOMMAND_"+protocol.upper()):
2555                                         resumecommand=mysettings["RESUMECOMMAND_"+protocol.upper()]
2556                                 else:
2557                                         resumecommand=mysettings["RESUMECOMMAND"]
2558
2559                                 if not can_fetch:
2560                                         if fetched != 2:
2561                                                 if fetched == 0:
2562                                                         writemsg("!!! File %s isn't fetched but unable to get it.\n" % myfile,
2563                                                                 noiselevel=-1)
2564                                                 else:
2565                                                         writemsg("!!! File %s isn't fully fetched, but unable to complete it\n" % myfile,
2566                                                                 noiselevel=-1)
2567                                                 for var_name in ("FETCHCOMMAND", "RESUMECOMMAND"):
2568                                                         if not mysettings.get(var_name, None):
2569                                                                 writemsg(("!!! %s is unset.  It should " + \
2570                                                                 "have been defined in /etc/make.globals.\n") \
2571                                                                  % var_name, noiselevel=-1)
2572                                                 return 0
2573                                         else:
2574                                                 continue
2575
2576                                 if fetched != 2:
2577                                         #we either need to resume or start the download
2578                                         #you can't use "continue" when you're inside a "try" block
2579                                         if fetched==1:
2580                                                 #resume mode:
2581                                                 writemsg(">>> Resuming download...\n")
2582                                                 locfetch=resumecommand
2583                                         else:
2584                                                 #normal mode:
2585                                                 locfetch=fetchcommand
2586                                         writemsg_stdout(">>> Downloading '%s'\n" % \
2587                                                 re.sub(r'//(.+):.+@(.+)/',r'//\1:*password*@\2/', loc))
2588                                         variables = {
2589                                                 "DISTDIR": mysettings["DISTDIR"],
2590                                                 "URI":     loc,
2591                                                 "FILE":    myfile
2592                                         }
2593                                         import shlex, StringIO
2594                                         lexer = shlex.shlex(StringIO.StringIO(locfetch), posix=True)
2595                                         lexer.whitespace_split = True
2596                                         myfetch = [varexpand(x, mydict=variables) for x in lexer]
2597
2598                                         spawn_keywords = {}
2599                                         if "userfetch" in mysettings.features and \
2600                                                 os.getuid() == 0 and portage_gid and portage_uid:
2601                                                 spawn_keywords.update({
2602                                                         "uid"    : portage_uid,
2603                                                         "gid"    : portage_gid,
2604                                                         "groups" : userpriv_groups,
2605                                                         "umask"  : 002})
2606
2607                                         try:
2608
2609                                                 if mysettings.selinux_enabled():
2610                                                         con = selinux.getcontext()
2611                                                         con = con.replace(mysettings["PORTAGE_T"], mysettings["PORTAGE_FETCH_T"])
2612                                                         selinux.setexec(con)
2613                                                         # bash is an allowed entrypoint, while most binaries are not
2614                                                         myfetch = ["bash", "-c", "exec \"$@\"", myfetch[0]] + myfetch
2615
2616                                                 myret = portage_exec.spawn(myfetch,
2617                                                         env=mysettings.environ(), **spawn_keywords)
2618
2619                                                 if mysettings.selinux_enabled():
2620                                                         selinux.setexec(None)
2621
2622                                         finally:
2623                                                 try:
2624                                                         apply_secpass_permissions(myfile_path,
2625                                                                 gid=portage_gid, mode=0664, mask=02)
2626                                                 except portage_exception.FileNotFound, e:
2627                                                         pass
2628                                                 except portage_exception.PortageException, e:
2629                                                         if not os.access(myfile_path, os.R_OK):
2630                                                                 writemsg("!!! Failed to adjust permissions:" + \
2631                                                                         " %s\n" % str(e), noiselevel=-1)
2632
2633                                         if mydigests!=None and mydigests.has_key(myfile):
2634                                                 try:
2635                                                         mystat = os.stat(myfile_path)
2636                                                 except OSError, e:
2637                                                         if e.errno != errno.ENOENT:
2638                                                                 raise
2639                                                         del e
2640                                                         fetched = 0
2641                                                 else:
2642                                                         # no exception?  file exists. let digestcheck() report
2643                                                         # an appropriately for size or checksum errors
2644                                                         if (mystat[stat.ST_SIZE]<mydigests[myfile]["size"]):
2645                                                                 # Fetch failed... Try the next one... Kill 404 files though.
2646                                                                 if (mystat[stat.ST_SIZE]<100000) and (len(myfile)>4) and not ((myfile[-5:]==".html") or (myfile[-4:]==".htm")):
2647                                                                         html404=re.compile("<title>.*(not found|404).*</title>",re.I|re.M)
2648                                                                         if html404.search(open(mysettings["DISTDIR"]+"/"+myfile).read()):
2649                                                                                 try:
2650                                                                                         os.unlink(mysettings["DISTDIR"]+"/"+myfile)
2651                                                                                         writemsg(">>> Deleting invalid distfile. (Improper 404 redirect from server.)\n")
2652                                                                                         fetched = 0
2653                                                                                         continue
2654                                                                                 except (IOError, OSError):
2655                                                                                         pass
2656                                                                 fetched = 1
2657                                                                 continue
2658                                                         if not fetchonly:
2659                                                                 fetched=2
2660                                                                 break
2661                                                         else:
2662                                                                 # File is the correct size--check the checksums for the fetched
2663                                                                 # file NOW, for those users who don't have a stable/continuous
2664                                                                 # net connection. This way we have a chance to try to download
2665                                                                 # from another mirror...
2666                                                                 verified_ok,reason = portage_checksum.verify_all(mysettings["DISTDIR"]+"/"+myfile, mydigests[myfile])
2667                                                                 if not verified_ok:
2668                                                                         print reason
2669                                                                         writemsg("!!! Fetched file: "+str(myfile)+" VERIFY FAILED!\n",
2670                                                                                 noiselevel=-1)
2671                                                                         writemsg("!!! Reason: "+reason[0]+"\n",
2672                                                                                 noiselevel=-1)
2673                                                                         writemsg("!!! Got:      %s\n!!! Expected: %s\n" % \
2674                                                                                 (reason[1], reason[2]), noiselevel=-1)
2675                                                                         if reason[0] == "Insufficient data for checksum verification":
2676                                                                                 return 0
2677                                                                         writemsg("Removing corrupt distfile...\n", noiselevel=-1)
2678                                                                         os.unlink(mysettings["DISTDIR"]+"/"+myfile)
2679                                                                         fetched=0
2680                                                                 else:
2681                                                                         eout = output.EOutput()
2682                                                                         eout.quiet = mysettings.get("PORTAGE_QUIET", None) == "1"
2683                                                                         for x_key in mydigests[myfile].keys():
2684                                                                                 eout.ebegin("%s %s ;-)" % (myfile, x_key))
2685                                                                                 eout.eend(0)
2686                                                                         fetched=2
2687                                                                         break
2688                                         else:
2689                                                 if not myret:
2690                                                         fetched=2
2691                                                         break
2692                                                 elif mydigests!=None:
2693                                                         writemsg("No digest file available and download failed.\n\n",
2694                                                                 noiselevel=-1)
2695                 finally:
2696                         if use_locks and file_lock:
2697                                 portage_locks.unlockfile(file_lock)
2698
2699                 if listonly:
2700                         writemsg_stdout("\n", noiselevel=-1)
2701                 if fetched != 2:
2702                         if restrict_fetch:
2703                                 print "\n!!!", mysettings["CATEGORY"] + "/" + \
2704                                         mysettings["PF"], "has fetch restriction turned on."
2705                                 print "!!! This probably means that this " + \
2706                                         "ebuild's files must be downloaded"
2707                                 print "!!! manually.  See the comments in" + \
2708                                         " the ebuild for more information.\n"
2709                                 spawn(EBUILD_SH_BINARY + " nofetch", mysettings)
2710                         elif listonly:
2711                                 continue
2712                         elif not filedict[myfile]:
2713                                 writemsg("Warning: No mirrors available for file" + \
2714                                         " '%s'\n" % (myfile), noiselevel=-1)
2715                         else:
2716                                 writemsg("!!! Couldn't download '%s'. Aborting.\n" % myfile,
2717                                         noiselevel=-1)
2718                         return 0
2719         return 1
2720
2721 def digestgen(myarchives, mysettings, overwrite=1, manifestonly=0, myportdb=None):
2722         """
2723         Generates a digest file if missing.  Assumes all files are available.
2724         DEPRECATED: this now only is a compability wrapper for 
2725                     portage_manifest.Manifest()
2726         NOTE: manifestonly and overwrite are useless with manifest2 and
2727               are therefore ignored."""
2728         if myportdb is None:
2729                 writemsg("Warning: myportdb not specified to digestgen\n")
2730                 global portdb
2731                 myportdb = portdb
2732         global _doebuild_manifest_exempt_depend
2733         try:
2734                 _doebuild_manifest_exempt_depend += 1
2735                 distfiles_map = {}
2736                 fetchlist_dict = FetchlistDict(mysettings["O"], mysettings, myportdb)
2737                 for cpv in fetchlist_dict:
2738                         try:
2739                                 for myfile in fetchlist_dict[cpv]:
2740                                         distfiles_map.setdefault(myfile, []).append(cpv)
2741                         except portage_exception.InvalidDependString, e:
2742                                 writemsg("!!! %s\n" % str(e), noiselevel=-1)
2743                                 writemsg("!!! Invalid SRC_URI for '%s'.\n" % cpv, noiselevel=-1)
2744                                 del e
2745                                 return 0
2746                 mytree = os.path.dirname(os.path.dirname(mysettings["O"]))
2747                 manifest1_compat = not os.path.exists(
2748                         os.path.join(mytree, "manifest1_obsolete"))
2749                 mf = Manifest(mysettings["O"], mysettings["DISTDIR"],
2750                         fetchlist_dict=fetchlist_dict, manifest1_compat=manifest1_compat)
2751                 # Don't require all hashes since that can trigger excessive
2752                 # fetches when sufficient digests already exist.  To ease transition
2753                 # while Manifest 1 is being removed, only require hashes that will
2754                 # exist before and after the transition.
2755                 required_hash_types = set()
2756                 required_hash_types.add("size")
2757                 required_hash_types.add(portage_const.MANIFEST2_REQUIRED_HASH)
2758                 dist_hashes = mf.fhashdict.get("DIST", {})
2759                 missing_hashes = set()
2760                 for myfile in distfiles_map:
2761                         myhashes = dist_hashes.get(myfile)
2762                         if not myhashes:
2763                                 missing_hashes.add(myfile)
2764                                 continue
2765                         if required_hash_types.difference(myhashes):
2766                                 missing_hashes.add(myfile)
2767                 if missing_hashes:
2768                         missing_files = []
2769                         for myfile in missing_hashes:
2770                                 try:
2771                                         os.stat(os.path.join(mysettings["DISTDIR"], myfile))
2772                                 except OSError, e:
2773                                         if e.errno != errno.ENOENT:
2774                                                 raise
2775                                         del e
2776                                         missing_files.append(myfile)
2777                         if missing_files:
2778                                 mytree = os.path.realpath(os.path.dirname(
2779                                         os.path.dirname(mysettings["O"])))
2780                                 fetch_settings = config(clone=mysettings)
2781                                 debug = mysettings.get("PORTAGE_DEBUG") == "1"
2782                                 for myfile in missing_files:
2783                                         success = False
2784                                         for cpv in distfiles_map[myfile]:
2785                                                 myebuild = os.path.join(mysettings["O"],
2786                                                         catsplit(cpv)[1] + ".ebuild")
2787                                                 # for RESTRICT=fetch, mirror, etc...
2788                                                 doebuild_environment(myebuild, "fetch",
2789                                                         mysettings["ROOT"], fetch_settings,
2790                                                         debug, 1, myportdb)
2791                                                 alluris, aalist = myportdb.getfetchlist(
2792                                                         cpv, mytree=mytree, all=True,
2793                                                         mysettings=fetch_settings)
2794                                                 myuris = [uri for uri in alluris \
2795                                                         if os.path.basename(uri) == myfile]
2796                                                 fetch_settings["A"] = myfile # for use by pkg_nofetch()
2797                                                 if fetch(myuris, fetch_settings):
2798                                                         success = True
2799                                                         break
2800                                         if not success:
2801                                                 writemsg(("!!! File %s doesn't exist, can't update " + \
2802                                                         "Manifest\n") % myfile, noiselevel=-1)
2803                                                 return 0
2804                 writemsg_stdout(">>> Creating Manifest for %s\n" % mysettings["O"])
2805                 try:
2806                         mf.create(requiredDistfiles=myarchives,
2807                                 assumeDistHashesSometimes=True,
2808                                 assumeDistHashesAlways=(
2809                                 "assume-digests" in mysettings.features))
2810                 except portage_exception.FileNotFound, e:
2811                         writemsg(("!!! File %s doesn't exist, can't update " + \
2812                                 "Manifest\n") % e, noiselevel=-1)
2813                         return 0
2814                 mf.write(sign=False)
2815                 if "assume-digests" not in mysettings.features:
2816                         distlist = mf.fhashdict.get("DIST", {}).keys()
2817                         distlist.sort()
2818                         auto_assumed = []
2819                         for filename in distlist:
2820                                 if not os.path.exists(
2821                                         os.path.join(mysettings["DISTDIR"], filename)):
2822                                         auto_assumed.append(filename)
2823                         if auto_assumed:
2824                                 mytree = os.path.realpath(
2825                                         os.path.dirname(os.path.dirname(mysettings["O"])))
2826                                 cp = os.path.sep.join(mysettings["O"].split(os.path.sep)[-2:])
2827                                 pkgs = myportdb.cp_list(cp, mytree=mytree)
2828                                 pkgs.sort()
2829                                 writemsg_stdout("  digest.assumed" + output.colorize("WARN",
2830                                         str(len(auto_assumed)).rjust(18)) + "\n")
2831                                 for pkg_key in pkgs:
2832                                         fetchlist = myportdb.getfetchlist(pkg_key,
2833                                                 mysettings=mysettings, all=True, mytree=mytree)[1]
2834                                         pv = pkg_key.split("/")[1]
2835                                         for filename in auto_assumed:
2836                                                 if filename in fetchlist:
2837                                                         writemsg_stdout(
2838                                                                 "   digest-%s::%s\n" % (pv, filename))
2839                 return 1
2840         finally:
2841                 _doebuild_manifest_exempt_depend -= 1
2842
2843 def digestParseFile(myfilename, mysettings=None):
2844         """(filename) -- Parses a given file for entries matching:
2845         <checksumkey> <checksum_hex_string> <filename> <filesize>
2846         Ignores lines that don't start with a valid checksum identifier
2847         and returns a dict with the filenames as keys and {checksumkey:checksum}
2848         as the values.
2849         DEPRECATED: this function is now only a compability wrapper for
2850                     portage_manifest.Manifest()."""
2851
2852         mysplit = myfilename.split(os.sep)
2853         if mysplit[-2] == "files" and mysplit[-1].startswith("digest-"):
2854                 pkgdir = os.sep + os.sep.join(mysplit[:-2]).strip(os.sep)
2855         elif mysplit[-1] == "Manifest":
2856                 pkgdir = os.sep + os.sep.join(mysplit[:-1]).strip(os.sep)
2857
2858         if mysettings is None:
2859                 global settings
2860                 mysettings = config(clone=settings)
2861
2862         return Manifest(pkgdir, mysettings["DISTDIR"]).getDigests()
2863
2864 def digestcheck(myfiles, mysettings, strict=0, justmanifest=0):
2865         """Verifies checksums.  Assumes all files have been downloaded.
2866         DEPRECATED: this is now only a compability wrapper for 
2867                     portage_manifest.Manifest()."""
2868         if not strict:
2869                 return 1
2870         pkgdir = mysettings["O"]
2871         manifest_path = os.path.join(pkgdir, "Manifest")
2872         if not os.path.exists(manifest_path):
2873                 writemsg("!!! Manifest file not found: '%s'\n" % manifest_path,
2874                         noiselevel=-1)
2875                 if strict:
2876                         return 0
2877         mf = Manifest(pkgdir, mysettings["DISTDIR"])
2878         eout = output.EOutput()
2879         eout.quiet = mysettings.get("PORTAGE_QUIET", None) == "1"
2880         try:
2881                 eout.ebegin("checking ebuild checksums ;-)")
2882                 mf.checkTypeHashes("EBUILD")
2883                 eout.eend(0)
2884                 eout.ebegin("checking auxfile checksums ;-)")
2885                 mf.checkTypeHashes("AUX")
2886                 eout.eend(0)
2887                 eout.ebegin("checking miscfile checksums ;-)")
2888                 mf.checkTypeHashes("MISC", ignoreMissingFiles=True)
2889                 eout.eend(0)
2890                 for f in myfiles:
2891                         eout.ebegin("checking %s ;-)" % f)
2892                         mf.checkFileHashes(mf.findFile(f), f)
2893                         eout.eend(0)
2894         except KeyError, e:
2895                 eout.eend(1)
2896                 writemsg("\n!!! Missing digest for %s\n" % str(e), noiselevel=-1)
2897                 return 0
2898         except portage_exception.FileNotFound, e:
2899                 eout.eend(1)
2900                 writemsg("\n!!! A file listed in the Manifest could not be found: %s\n" % str(e),
2901                         noiselevel=-1)
2902                 return 0
2903         except portage_exception.DigestException, e:
2904                 eout.eend(1)
2905                 writemsg("\n!!! Digest verification failed:\n", noiselevel=-1)
2906                 writemsg("!!! %s\n" % e.value[0], noiselevel=-1)
2907                 writemsg("!!! Reason: %s\n" % e.value[1], noiselevel=-1)
2908                 writemsg("!!! Got: %s\n" % e.value[2], noiselevel=-1)
2909                 writemsg("!!! Expected: %s\n" % e.value[3], noiselevel=-1)
2910                 return 0
2911         # Make sure that all of the ebuilds are actually listed in the Manifest.
2912         for f in os.listdir(pkgdir):
2913                 if f.endswith(".ebuild") and not mf.hasFile("EBUILD", f):
2914                         writemsg("!!! A file is not listed in the Manifest: '%s'\n" % \
2915                                 os.path.join(pkgdir, f), noiselevel=-1)
2916                         return 0
2917         """ epatch will just grab all the patches out of a directory, so we have to
2918         make sure there aren't any foreign files that it might grab."""
2919         filesdir = os.path.join(pkgdir, "files")
2920         for parent, dirs, files in os.walk(filesdir):
2921                 for d in dirs:
2922                         if d.startswith(".") or d == "CVS":
2923                                 dirs.remove(d)
2924                 for f in files:
2925                         if f.startswith("."):
2926                                 continue
2927                         f = os.path.join(parent, f)[len(filesdir) + 1:]
2928                         file_type = mf.findFile(f)
2929                         if file_type != "AUX" and not f.startswith("digest-"):
2930                                 writemsg("!!! A file is not listed in the Manifest: '%s'\n" % \
2931                                         os.path.join(filesdir, f), noiselevel=-1)
2932                                 return 0
2933         return 1
2934
2935 # parse actionmap to spawn ebuild with the appropriate args
2936 def spawnebuild(mydo,actionmap,mysettings,debug,alwaysdep=0,logfile=None):
2937         if alwaysdep or "noauto" not in mysettings.features:
2938                 # process dependency first
2939                 if "dep" in actionmap[mydo].keys():
2940                         retval=spawnebuild(actionmap[mydo]["dep"],actionmap,mysettings,debug,alwaysdep=alwaysdep,logfile=logfile)
2941                         if retval:
2942                                 return retval
2943         kwargs = actionmap[mydo]["args"]
2944         mysettings["EBUILD_PHASE"] = mydo
2945         phase_retval = spawn(actionmap[mydo]["cmd"] % mydo, mysettings, debug=debug, logfile=logfile, **kwargs)
2946         mysettings["EBUILD_PHASE"] = ""
2947
2948         if not kwargs["droppriv"] and secpass >= 2:
2949                 """ Privileged phases may have left files that need to be made
2950                 writable to a less privileged user."""
2951                 apply_recursive_permissions(mysettings["T"],
2952                         uid=portage_uid, gid=portage_gid, dirmode=070, dirmask=0,
2953                         filemode=060, filemask=0)
2954
2955         if phase_retval == os.EX_OK:
2956                 if mydo == "install":
2957                         # User and group bits that match the "portage" user or group are
2958                         # automatically mapped to PORTAGE_INST_UID and PORTAGE_INST_GID if
2959                         # necessary.  The chown system call may clear S_ISUID and S_ISGID
2960                         # bits, so those bits are restored if necessary.
2961                         inst_uid = int(mysettings["PORTAGE_INST_UID"])
2962                         inst_gid = int(mysettings["PORTAGE_INST_GID"])
2963                         for parent, dirs, files in os.walk(mysettings["D"]):
2964                                 for fname in chain(dirs, files):
2965                                         fpath = os.path.join(parent, fname)
2966                                         mystat = os.lstat(fpath)
2967                                         if mystat.st_uid != portage_uid and \
2968                                                 mystat.st_gid != portage_gid:
2969                                                 continue
2970                                         myuid = -1
2971                                         mygid = -1
2972                                         if mystat.st_uid == portage_uid:
2973                                                 myuid = inst_uid
2974                                         if mystat.st_gid == portage_gid:
2975                                                 mygid = inst_gid
2976                                         apply_secpass_permissions(fpath, uid=myuid, gid=mygid,
2977                                                 mode=mystat.st_mode, stat_cached=mystat,
2978                                                 follow_links=False)
2979                         mycommand = " ".join([MISC_SH_BINARY, "install_qa_check", "install_symlink_html_docs"])
2980                         qa_retval = spawn(mycommand, mysettings, debug=debug, logfile=logfile, **kwargs)
2981                         if qa_retval:
2982                                 writemsg("!!! install_qa_check failed; exiting.\n",
2983                                         noiselevel=-1)
2984                         return qa_retval
2985         return phase_retval
2986
2987
2988 def eapi_is_supported(eapi):
2989         return str(eapi).strip() == str(portage_const.EAPI).strip()
2990
2991 def doebuild_environment(myebuild, mydo, myroot, mysettings, debug, use_cache, mydbapi):
2992
2993         ebuild_path = os.path.abspath(myebuild)
2994         pkg_dir     = os.path.dirname(ebuild_path)
2995
2996         if mysettings.configdict["pkg"].has_key("CATEGORY"):
2997                 cat = mysettings.configdict["pkg"]["CATEGORY"]
2998         else:
2999                 cat = os.path.basename(normalize_path(os.path.join(pkg_dir, "..")))
3000         mypv = os.path.basename(ebuild_path)[:-7]       
3001         mycpv = cat+"/"+mypv
3002         mysplit=pkgsplit(mypv,silent=0)
3003         if mysplit is None:
3004                 raise portage_exception.IncorrectParameter(
3005                         "Invalid ebuild path: '%s'" % myebuild)
3006
3007         if mydo != "depend":
3008                 """For performance reasons, setcpv only triggers reset when it
3009                 detects a package-specific change in config.  For the ebuild
3010                 environment, a reset call is forced in order to ensure that the
3011                 latest env.d variables are used."""
3012                 mysettings.reset(use_cache=use_cache)
3013                 mysettings.setcpv(mycpv, use_cache=use_cache, mydb=mydbapi)
3014
3015         mysettings["EBUILD_PHASE"] = mydo
3016
3017         mysettings["PORTAGE_MASTER_PID"] = str(os.getpid())
3018
3019         # We are disabling user-specific bashrc files.
3020         mysettings["BASH_ENV"] = INVALID_ENV_FILE
3021
3022         if debug: # Otherwise it overrides emerge's settings.
3023                 # We have no other way to set debug... debug can't be passed in
3024                 # due to how it's coded... Don't overwrite this so we can use it.
3025                 mysettings["PORTAGE_DEBUG"] = "1"
3026
3027         mysettings["ROOT"]     = myroot
3028         mysettings["STARTDIR"] = getcwd()
3029
3030         mysettings["EBUILD"]   = ebuild_path
3031         mysettings["O"]        = pkg_dir
3032         mysettings.configdict["pkg"]["CATEGORY"] = cat
3033         mysettings["FILESDIR"] = pkg_dir+"/files"
3034         mysettings["PF"]       = mypv
3035
3036         mysettings["ECLASSDIR"]   = mysettings["PORTDIR"]+"/eclass"
3037         mysettings["SANDBOX_LOG"] = mycpv.replace("/", "_-_")
3038
3039         mysettings["PROFILE_PATHS"] = "\n".join(mysettings.profiles)+"\n"+CUSTOM_PROFILE_PATH
3040         mysettings["P"]  = mysplit[0]+"-"+mysplit[1]
3041         mysettings["PN"] = mysplit[0]
3042         mysettings["PV"] = mysplit[1]
3043         mysettings["PR"] = mysplit[2]
3044
3045         if portage_util.noiselimit < 0:
3046                 mysettings["PORTAGE_QUIET"] = "1"
3047
3048         if mydo != "depend":
3049                 eapi, mysettings["INHERITED"], mysettings["SLOT"], mysettings["RESTRICT"]  = \
3050                         mydbapi.aux_get(mycpv, ["EAPI", "INHERITED", "SLOT", "RESTRICT"])
3051                 if not eapi_is_supported(eapi):
3052                         # can't do anything with this.
3053                         raise portage_exception.UnsupportedAPIException(mycpv, eapi)
3054                 mysettings["PORTAGE_RESTRICT"] = " ".join(flatten(
3055                         portage_dep.use_reduce(portage_dep.paren_reduce(
3056                         mysettings["RESTRICT"]), uselist=mysettings["USE"].split())))
3057
3058         if mysplit[2] == "r0":
3059                 mysettings["PVR"]=mysplit[1]
3060         else:
3061                 mysettings["PVR"]=mysplit[1]+"-"+mysplit[2]
3062
3063         if mysettings.has_key("PATH"):
3064                 mysplit=mysettings["PATH"].split(":")
3065         else:
3066                 mysplit=[]
3067         if PORTAGE_BIN_PATH not in mysplit:
3068                 mysettings["PATH"]=PORTAGE_BIN_PATH+":"+mysettings["PATH"]
3069
3070         # Sandbox needs cannonical paths.
3071         mysettings["PORTAGE_TMPDIR"] = os.path.realpath(
3072                 mysettings["PORTAGE_TMPDIR"])
3073         mysettings["BUILD_PREFIX"] = mysettings["PORTAGE_TMPDIR"]+"/portage"
3074         mysettings["PKG_TMPDIR"]   = mysettings["PORTAGE_TMPDIR"]+"/binpkgs"
3075         
3076         # Package {pre,post}inst and {pre,post}rm may overlap, so they must have separate
3077         # locations in order to prevent interference.
3078         if mydo in ("unmerge", "prerm", "postrm", "cleanrm"):
3079                 mysettings["PORTAGE_BUILDDIR"] = os.path.join(
3080                         mysettings["PKG_TMPDIR"],
3081                         mysettings["CATEGORY"], mysettings["PF"])
3082         else:
3083                 mysettings["PORTAGE_BUILDDIR"] = os.path.join(
3084                         mysettings["BUILD_PREFIX"],
3085                         mysettings["CATEGORY"], mysettings["PF"])
3086
3087         mysettings["HOME"] = os.path.join(mysettings["PORTAGE_BUILDDIR"], "homedir")
3088         mysettings["WORKDIR"] = os.path.join(mysettings["PORTAGE_BUILDDIR"], "work")
3089         mysettings["D"] = os.path.join(mysettings["PORTAGE_BUILDDIR"], "image") + os.sep
3090         mysettings["T"] = os.path.join(mysettings["PORTAGE_BUILDDIR"], "temp")
3091
3092         mysettings["PORTAGE_BASHRC"] = os.path.join(
3093                 mysettings["PORTAGE_CONFIGROOT"], EBUILD_SH_ENV_FILE.lstrip(os.path.sep))
3094
3095         #set up KV variable -- DEP SPEEDUP :: Don't waste time. Keep var persistent.
3096         if (mydo!="depend") or not mysettings.has_key("KV"):
3097                 mykv,err1=ExtractKernelVersion(os.path.join(myroot, "usr/src/linux"))
3098                 if mykv:
3099                         # Regular source tree
3100                         mysettings["KV"]=mykv
3101                 else:
3102                         mysettings["KV"]=""
3103
3104         # Allow color.map to control colors associated with einfo, ewarn, etc...
3105         mycolors = []
3106         for c in ("GOOD", "WARN", "BAD", "HILITE", "BRACKET"):
3107                 mycolors.append("%s=$'%s'" % (c, output.codes[c]))
3108         mysettings["PORTAGE_COLORMAP"] = "\n".join(mycolors)
3109
3110 def prepare_build_dirs(myroot, mysettings, cleanup):
3111
3112         clean_dirs = [mysettings["HOME"]]
3113
3114         # We enable cleanup when we want to make sure old cruft (such as the old
3115         # environment) doesn't interfere with the current phase.
3116         if cleanup:
3117                 clean_dirs.append(mysettings["T"])
3118
3119         for clean_dir in clean_dirs:
3120                 try:
3121                         shutil.rmtree(clean_dir)
3122                 except OSError, oe:
3123                         if errno.ENOENT == oe.errno:
3124                                 pass
3125                         elif errno.EPERM == oe.errno:
3126                                 writemsg("%s\n" % oe, noiselevel=-1)
3127                                 writemsg("Operation Not Permitted: rmtree('%s')\n" % \
3128                                         clean_dir, noiselevel=-1)
3129                                 return 1
3130                         else:
3131                                 raise
3132
3133         def makedirs(dir_path):
3134                 try:
3135                         os.makedirs(dir_path)
3136                 except OSError, oe:
3137                         if errno.EEXIST == oe.errno:
3138                                 pass
3139                         elif errno.EPERM == oe.errno:
3140                                 writemsg("%s\n" % oe, noiselevel=-1)
3141                                 writemsg("Operation Not Permitted: makedirs('%s')\n" % \
3142                                         dir_path, noiselevel=-1)
3143                                 return False
3144                         else:
3145                                 raise
3146                 return True
3147
3148         mysettings["PKG_LOGDIR"] = os.path.join(mysettings["T"], "logging")
3149
3150         mydirs = [os.path.dirname(mysettings["PORTAGE_BUILDDIR"])]
3151         mydirs.append(os.path.dirname(mydirs[-1]))
3152
3153         try:
3154                 for mydir in mydirs:
3155                         portage_util.ensure_dirs(mydir)
3156                         portage_util.apply_secpass_permissions(mydir,
3157                                 gid=portage_gid, uid=portage_uid, mode=070, mask=0)
3158                 for dir_key in ("PORTAGE_BUILDDIR", "HOME", "PKG_LOGDIR", "T"):
3159                         """These directories don't necessarily need to be group writable.
3160                         However, the setup phase is commonly run as a privileged user prior
3161                         to the other phases being run by an unprivileged user.  Currently,
3162                         we use the portage group to ensure that the unprivleged user still
3163                         has write access to these directories in any case."""
3164                         portage_util.ensure_dirs(mysettings[dir_key], mode=0775)
3165                         portage_util.apply_secpass_permissions(mysettings[dir_key],
3166                                 uid=portage_uid, gid=portage_gid)
3167         except portage_exception.PermissionDenied, e:
3168                 writemsg("Permission Denied: %s\n" % str(e), noiselevel=-1)
3169                 return 1
3170         except portage_exception.OperationNotPermitted, e:
3171                 writemsg("Operation Not Permitted: %s\n" % str(e), noiselevel=-1)
3172                 return 1
3173         except portage_exception.FileNotFound, e:
3174                 writemsg("File Not Found: '%s'\n" % str(e), noiselevel=-1)
3175                 return 1
3176
3177         features_dirs = {
3178                 "ccache":{
3179                         "basedir_var":"CCACHE_DIR",
3180                         "default_dir":os.path.join(mysettings["PORTAGE_TMPDIR"], "ccache"),
3181                         "always_recurse":False},
3182                 "confcache":{
3183                         "basedir_var":"CONFCACHE_DIR",
3184                         "default_dir":os.path.join(mysettings["PORTAGE_TMPDIR"], "confcache"),
3185                         "always_recurse":False},
3186                 "distcc":{
3187                         "basedir_var":"DISTCC_DIR",
3188                         "default_dir":os.path.join(mysettings["BUILD_PREFIX"], ".distcc"),
3189                         "subdirs":("lock", "state"),
3190                         "always_recurse":True}
3191         }
3192         dirmode  = 02070
3193         filemode =   060
3194         modemask =    02
3195         for myfeature, kwargs in features_dirs.iteritems():
3196                 if myfeature in mysettings.features:
3197                         basedir = mysettings[kwargs["basedir_var"]]
3198                         if basedir == "":
3199                                 basedir = kwargs["default_dir"]
3200                                 mysettings[kwargs["basedir_var"]] = basedir
3201                         try:
3202                                 mydirs = [mysettings[kwargs["basedir_var"]]]
3203                                 if "subdirs" in kwargs:
3204                                         for subdir in kwargs["subdirs"]:
3205                                                 mydirs.append(os.path.join(basedir, subdir))
3206                                 for mydir in mydirs:
3207                                         modified = portage_util.ensure_dirs(mydir)
3208                                         # Generally, we only want to apply permissions for
3209                                         # initial creation.  Otherwise, we don't know exactly what
3210                                         # permissions the user wants, so should leave them as-is.
3211                                         if modified or kwargs["always_recurse"]:
3212                                                 if modified:
3213                                                         writemsg("Adjusting permissions recursively: '%s'\n" % mydir,
3214                                                                 noiselevel=-1)
3215                                                 def onerror(e):
3216                                                         raise   # The feature is disabled if a single error
3217                                                                         # occurs during permissions adjustment.
3218                                                 if not apply_recursive_permissions(mydir,
3219                                                 gid=portage_gid, dirmode=dirmode, dirmask=modemask,
3220                                                 filemode=filemode, filemask=modemask, onerror=onerror):
3221                                                         raise portage_exception.OperationNotPermitted(
3222                                                                 "Failed to apply recursive permissions for the portage group.")
3223                         except portage_exception.PortageException, e:
3224                                 mysettings.features.remove(myfeature)
3225                                 mysettings["FEATURES"] = " ".join(mysettings.features)
3226                                 writemsg("!!! %s\n" % str(e), noiselevel=-1)
3227                                 writemsg("!!! Failed resetting perms on %s='%s'\n" % \
3228                                         (kwargs["basedir_var"], basedir), noiselevel=-1)
3229                                 writemsg("!!! Disabled FEATURES='%s'\n" % myfeature,
3230                                         noiselevel=-1)
3231                                 time.sleep(5)
3232
3233         workdir_mode = 0700
3234         try:
3235                 mode = mysettings["PORTAGE_WORKDIR_MODE"]
3236                 if mode.isdigit():
3237                         parsed_mode = int(mode, 8)
3238                 elif mode == "":
3239                         raise KeyError()
3240                 else:
3241                         raise ValueError()
3242                 if parsed_mode & 07777 != parsed_mode:
3243                         raise ValueError("Invalid file mode: %s" % mode)
3244                 else:
3245                         workdir_mode = parsed_mode
3246         except KeyError, e:
3247                 writemsg("!!! PORTAGE_WORKDIR_MODE is unset, using %s.\n" % oct(workdir_mode))
3248         except ValueError, e:
3249                 if len(str(e)) > 0:
3250                         writemsg("%s\n" % e)
3251                 writemsg("!!! Unable to parse PORTAGE_WORKDIR_MODE='%s', using %s.\n" % \
3252                 (mysettings["PORTAGE_WORKDIR_MODE"], oct(workdir_mode)))
3253         mysettings["PORTAGE_WORKDIR_MODE"] = oct(workdir_mode)
3254         try:
3255                 apply_secpass_permissions(mysettings["WORKDIR"],
3256                 uid=portage_uid, gid=portage_gid, mode=workdir_mode)
3257         except portage_exception.FileNotFound:
3258                 pass # ebuild.sh will create it
3259
3260         if mysettings.get("PORT_LOGDIR", "") == "":
3261                 while "PORT_LOGDIR" in mysettings:
3262                         del mysettings["PORT_LOGDIR"]
3263         if "PORT_LOGDIR" in mysettings:
3264                 try:
3265                         portage_util.ensure_dirs(mysettings["PORT_LOGDIR"],
3266                                 uid=portage_uid, gid=portage_gid, mode=02770)
3267                 except portage_exception.PortageException, e:
3268                         writemsg("!!! %s\n" % str(e), noiselevel=-1)
3269                         writemsg("!!! Permission issues with PORT_LOGDIR='%s'\n" % \
3270                                 mysettings["PORT_LOGDIR"], noiselevel=-1)
3271                         writemsg("!!! Disabling logging.\n", noiselevel=-1)
3272                         while "PORT_LOGDIR" in mysettings:
3273                                 del mysettings["PORT_LOGDIR"]
3274         if "PORT_LOGDIR" in mysettings:
3275                 logid_path = os.path.join(mysettings["PORTAGE_BUILDDIR"], ".logid")
3276                 if not os.path.exists(logid_path):
3277                         f = open(logid_path, "w")
3278                         f.close()
3279                         del f
3280                 logid_time = time.strftime("%Y%m%d-%H%M%S",
3281                         time.gmtime(os.stat(logid_path).st_mtime))
3282                 mysettings["PORTAGE_LOG_FILE"] = os.path.join(
3283                         mysettings["PORT_LOGDIR"], "%s:%s:%s.log" % \
3284                         (mysettings["CATEGORY"], mysettings["PF"], logid_time))
3285                 del logid_path, logid_time
3286         else:
3287                 # When sesandbox is enabled, only log if PORT_LOGDIR is explicitly
3288                 # enabled since it is possible that local SELinux security policies
3289                 # do not allow ouput to be piped out of the sesandbox domain.
3290                 if not (mysettings.selinux_enabled() and \
3291                         "sesandbox" in mysettings.features):
3292                         mysettings["PORTAGE_LOG_FILE"] = os.path.join(
3293                                 mysettings["T"], "build.log")
3294
3295 _doebuild_manifest_exempt_depend = 0
3296 _doebuild_manifest_checked = None
3297
3298 def doebuild(myebuild, mydo, myroot, mysettings, debug=0, listonly=0,
3299         fetchonly=0, cleanup=0, dbkey=None, use_cache=1, fetchall=0, tree=None,
3300         mydbapi=None, vartree=None, prev_mtimes=None):
3301         
3302         """
3303         Wrapper function that invokes specific ebuild phases through the spawning
3304         of ebuild.sh
3305         
3306         @param myebuild: name of the ebuild to invoke the phase on (CPV)
3307         @type myebuild: String
3308         @param mydo: Phase to run
3309         @type mydo: String
3310         @param myroot: $ROOT (usually '/', see man make.conf)
3311         @type myroot: String
3312         @param mysettings: Portage Configuration
3313         @type mysettings: instance of portage.config
3314         @param debug: Turns on various debug information (eg, debug for spawn)
3315         @type debug: Boolean
3316         @param listonly: Used to wrap fetch(); passed such that fetch only lists files required.
3317         @type listonly: Boolean
3318         @param fetchonly: Used to wrap fetch(); passed such that files are only fetched (no other actions)
3319         @type fetchonly: Boolean
3320         @param cleanup: Passed to prepare_build_dirs (TODO: what does it do?)
3321         @type cleanup: Boolean
3322         @param dbkey: A dict (usually keys and values from the depend phase, such as KEYWORDS, USE, etc..)
3323         @type dbkey: Dict or String
3324         @param use_cache: Enables the cache
3325         @type use_cache: Boolean
3326         @param fetchall: Used to wrap fetch(), fetches all URI's (even ones invalid due to USE conditionals)
3327         @type fetchall: Boolean
3328         @param tree: Which tree to use ('vartree','porttree','bintree', etc..), defaults to 'porttree'
3329         @type tree: String
3330         @param mydbapi: a dbapi instance to pass to various functions; this should be a portdbapi instance.
3331         @type mydbapi: portdbapi instance
3332         @param vartree: A instance of vartree; used for aux_get calls, defaults to db[myroot]['vartree']
3333         @type vartree: vartree instance
3334         @param prev_mtimes: A dict of { filename:mtime } keys used by merge() to do config_protection
3335         @type prev_mtimes: dictionary
3336         @rtype: Boolean
3337         @returns:
3338         1. 0 for success
3339         2. 1 for error
3340         
3341         Most errors have an accompanying error message.
3342         
3343         listonly and fetchonly are only really necessary for operations involving 'fetch'
3344         prev_mtimes are only necessary for merge operations.
3345         Other variables may not be strictly required, many have defaults that are set inside of doebuild.
3346         
3347         """
3348         
3349         if not tree:
3350                 writemsg("Warning: tree not specified to doebuild\n")
3351                 tree = "porttree"
3352         global db
3353         
3354         # chunked out deps for each phase, so that ebuild binary can use it 
3355         # to collapse targets down.
3356         actionmap_deps={
3357         "depend": [],
3358         "setup":  [],
3359         "unpack": ["setup"],
3360         "compile":["unpack"],
3361         "test":   ["compile"],
3362         "install":["test"],
3363         "rpm":    ["install"],
3364         "package":["install"],
3365         }
3366         
3367         if mydbapi is None:
3368                 mydbapi = db[myroot][tree].dbapi
3369
3370         if vartree is None and mydo in ("merge", "qmerge", "unmerge"):
3371                 vartree = db[myroot]["vartree"]
3372
3373         features = mysettings.features
3374
3375         validcommands = ["help","clean","prerm","postrm","cleanrm","preinst","postinst",
3376                         "config","setup","depend","fetch","digest",
3377                         "unpack","compile","test","install","rpm","qmerge","merge",
3378                         "package","unmerge", "manifest"]
3379
3380         if mydo not in validcommands:
3381                 validcommands.sort()
3382                 writemsg("!!! doebuild: '%s' is not one of the following valid commands:" % mydo,
3383                         noiselevel=-1)
3384                 for vcount in range(len(validcommands)):
3385                         if vcount%6 == 0:
3386                                 writemsg("\n!!! ", noiselevel=-1)
3387                         writemsg(validcommands[vcount].ljust(11), noiselevel=-1)
3388                 writemsg("\n", noiselevel=-1)
3389                 return 1
3390
3391         if not os.path.exists(myebuild):
3392                 writemsg("!!! doebuild: %s not found for %s\n" % (myebuild, mydo),
3393                         noiselevel=-1)
3394                 return 1
3395
3396         global _doebuild_manifest_exempt_depend
3397
3398         if "strict" in features and \
3399                 "digest" not in features and \
3400                 tree == "porttree" and \
3401                 mydo not in ("digest", "manifest", "help") and \
3402                 not _doebuild_manifest_exempt_depend:
3403                 # Always verify the ebuild checksums before executing it.
3404                 pkgdir = os.path.dirname(myebuild)
3405                 manifest_path = os.path.join(pkgdir, "Manifest")
3406                 global _doebuild_manifest_checked
3407                 # Avoid checking the same Manifest several times in a row during a
3408                 # regen with an empty cache.
3409                 if _doebuild_manifest_checked != manifest_path:
3410                         if not os.path.exists(manifest_path):
3411                                 writemsg("!!! Manifest file not found: '%s'\n" % manifest_path,
3412                                         noiselevel=-1)
3413                                 return 1
3414                         mf = Manifest(pkgdir, mysettings["DISTDIR"])
3415                         try:
3416                                 mf.checkTypeHashes("EBUILD")
3417                         except portage_exception.FileNotFound, e:
3418                                 writemsg("!!! A file listed in the Manifest " + \
3419                                         "could not be found: %s\n" % str(e), noiselevel=-1)
3420                                 return 1
3421                         except portage_exception.DigestException, e:
3422                                 writemsg("!!! Digest verification failed:\n", noiselevel=-1)
3423                                 writemsg("!!! %s\n" % e.value[0], noiselevel=-1)
3424                                 writemsg("!!! Reason: %s\n" % e.value[1], noiselevel=-1)
3425                                 writemsg("!!! Got: %s\n" % e.value[2], noiselevel=-1)
3426                                 writemsg("!!! Expected: %s\n" % e.value[3], noiselevel=-1)
3427                                 return 1
3428                         # Make sure that all of the ebuilds are actually listed in the
3429                         # Manifest.
3430                         for f in os.listdir(pkgdir):
3431                                 if f.endswith(".ebuild") and not mf.hasFile("EBUILD", f):
3432                                         writemsg("!!! A file is not listed in the " + \
3433                                         "Manifest: '%s'\n" % os.path.join(pkgdir, f),
3434                                         noiselevel=-1)
3435                                         return 1
3436                         _doebuild_manifest_checked = manifest_path
3437
3438         logfile=None
3439         builddir_lock = None
3440         try:
3441                 if mydo in ("digest", "manifest", "help"):
3442                         # Temporarily exempt the depend phase from manifest checks, in case
3443                         # aux_get calls trigger cache generation.
3444                         _doebuild_manifest_exempt_depend += 1
3445
3446                 doebuild_environment(myebuild, mydo, myroot, mysettings, debug,
3447                         use_cache, mydbapi)
3448
3449                 # get possible slot information from the deps file
3450                 if mydo == "depend":
3451                         writemsg("!!! DEBUG: dbkey: %s\n" % str(dbkey), 2)
3452                         if isinstance(dbkey, dict):
3453                                 mysettings["dbkey"] = ""
3454                                 pr, pw = os.pipe()
3455                                 fd_pipes = {0:0, 1:1, 2:2, 9:pw}
3456                                 mypids = spawn(EBUILD_SH_BINARY + " depend", mysettings,
3457                                         fd_pipes=fd_pipes, returnpid=True)
3458                                 os.close(pw) # belongs exclusively to the child process now
3459                                 maxbytes = 1024
3460                                 mybytes = []
3461                                 while True:
3462                                         mybytes.append(os.read(pr, maxbytes))
3463                                         if not mybytes[-1]:
3464                                                 break
3465                                 os.close(pr)
3466                                 mybytes = "".join(mybytes)
3467                                 global auxdbkeys
3468                                 for k, v in izip(auxdbkeys, mybytes.splitlines()):
3469                                         dbkey[k] = v
3470                                 retval = os.waitpid(mypids[0], 0)[1]
3471                                 portage_exec.spawned_pids.remove(mypids[0])
3472                                 # If it got a signal, return the signal that was sent, but
3473                                 # shift in order to distinguish it from a return value. (just
3474                                 # like portage_exec.spawn() would do).
3475                                 if retval & 0xff:
3476                                         return (retval & 0xff) << 8
3477                                 # Otherwise, return its exit code.
3478                                 return retval >> 8
3479                         elif dbkey:
3480                                 mysettings["dbkey"] = dbkey
3481                         else:
3482                                 mysettings["dbkey"] = \
3483                                         os.path.join(mysettings.depcachedir, "aux_db_key_temp")
3484
3485                         return spawn(EBUILD_SH_BINARY + " depend", mysettings)
3486
3487                 # Validate dependency metadata here to ensure that ebuilds with invalid
3488                 # data are never installed (even via the ebuild command).
3489                 invalid_dep_exempt_phases = \
3490                         set(["clean", "cleanrm", "help", "prerm", "postrm"])
3491                 mycpv = mysettings["CATEGORY"] + "/" + mysettings["PF"]
3492                 dep_keys = ["DEPEND", "RDEPEND", "PDEPEND"]
3493                 misc_keys = ["LICENSE", "PROVIDE", "RESTRICT", "SRC_URI"]
3494                 all_keys = dep_keys + misc_keys
3495                 metadata = dict(izip(all_keys, mydbapi.aux_get(mycpv, all_keys)))
3496                 class FakeTree(object):
3497                         def __init__(self, mydb):
3498                                 self.dbapi = mydb
3499                 dep_check_trees = {myroot:{}}
3500                 dep_check_trees[myroot]["porttree"] = \
3501                         FakeTree(fakedbapi(settings=mysettings))
3502                 for dep_type in dep_keys:
3503                         mycheck = dep_check(metadata[dep_type], None, mysettings,
3504                                 myuse="all", myroot=myroot, trees=dep_check_trees)
3505                         if not mycheck[0]:
3506                                 writemsg("%s: %s\n%s\n" % (
3507                                         dep_type, metadata[dep_type], mycheck[1]), noiselevel=-1)
3508                                 if mydo not in invalid_dep_exempt_phases:
3509                                         return 1
3510                         del dep_type, mycheck
3511                 for k in misc_keys:
3512                         try:
3513                                 portage_dep.use_reduce(
3514                                         portage_dep.paren_reduce(metadata[k]), matchall=True)
3515                         except portage_exception.InvalidDependString, e:
3516                                 writemsg("%s: %s\n%s\n" % (
3517                                         k, metadata[k], str(e)), noiselevel=-1)
3518                                 del e
3519                                 if mydo not in invalid_dep_exempt_phases:
3520                                         return 1
3521                         del k
3522                 del mycpv, dep_keys, metadata, misc_keys, FakeTree, dep_check_trees
3523
3524                 if "PORTAGE_TMPDIR" not in mysettings or \
3525                         not os.path.isdir(mysettings["PORTAGE_TMPDIR"]):
3526                         writemsg("The directory specified in your " + \
3527                                 "PORTAGE_TMPDIR variable, '%s',\n" % \
3528                                 mysettings.get("PORTAGE_TMPDIR", ""), noiselevel=-1)
3529                         writemsg("does not exist.  Please create this directory or " + \
3530                                 "correct your PORTAGE_TMPDIR setting.\n", noiselevel=-1)
3531                         return 1
3532
3533                 # Build directory creation isn't required for any of these.
3534                 if mydo not in ("digest", "fetch", "help", "manifest"):
3535                         mystatus = prepare_build_dirs(myroot, mysettings, cleanup)
3536                         if mystatus:
3537                                 return mystatus
3538                         # PORTAGE_LOG_FILE is set above by the prepare_build_dirs() call.
3539                         logfile = mysettings.get("PORTAGE_LOG_FILE", None)
3540                 if mydo == "unmerge":
3541                         return unmerge(mysettings["CATEGORY"],
3542                                 mysettings["PF"], myroot, mysettings, vartree=vartree)
3543
3544                 # if any of these are being called, handle them -- running them out of
3545                 # the sandbox -- and stop now.
3546                 if mydo in ["clean","cleanrm"]:
3547                         return spawn(EBUILD_SH_BINARY + " clean", mysettings,
3548                                 debug=debug, free=1, logfile=None)
3549                 elif mydo == "help":
3550                         return spawn(EBUILD_SH_BINARY + " " + mydo, mysettings,
3551                                 debug=debug, free=1, logfile=logfile)
3552                 elif mydo == "setup":
3553                         infodir = os.path.join(
3554                                 mysettings["PORTAGE_BUILDDIR"], "build-info")
3555                         if os.path.isdir(infodir):
3556                                 """Load USE flags for setup phase of a binary package.
3557                                 Ideally, the environment.bz2 would be used instead."""
3558                                 mysettings.load_infodir(infodir)
3559                         retval = spawn(EBUILD_SH_BINARY + " " + mydo, mysettings,
3560                                 debug=debug, free=1, logfile=logfile)
3561                         if secpass >= 2:
3562                                 """ Privileged phases may have left files that need to be made
3563                                 writable to a less privileged user."""
3564                                 apply_recursive_permissions(mysettings["T"],
3565                                         uid=portage_uid, gid=portage_gid, dirmode=070, dirmask=0,
3566                                         filemode=060, filemask=0)
3567                         return retval
3568                 elif mydo == "preinst":
3569                         mysettings["IMAGE"] = mysettings["D"]
3570                         phase_retval = spawn(" ".join((EBUILD_SH_BINARY, mydo)),
3571                                 mysettings, debug=debug, free=1, logfile=logfile)
3572                         if phase_retval == os.EX_OK:
3573                                 # Post phase logic and tasks that have been factored out of
3574                                 # ebuild.sh.
3575                                 myargs = [MISC_SH_BINARY, "preinst_bsdflags", "preinst_mask",
3576                                         "preinst_sfperms", "preinst_selinux_labels",
3577                                         "preinst_suid_scan"]
3578                                 mysettings["EBUILD_PHASE"] = ""
3579                                 phase_retval = spawn(" ".join(myargs),
3580                                         mysettings, debug=debug, free=1, logfile=logfile)
3581                                 if phase_retval != os.EX_OK:
3582                                         writemsg("!!! post preinst failed; exiting.\n",
3583                                                 noiselevel=-1)
3584                         del mysettings["IMAGE"]
3585                         return phase_retval
3586                 elif mydo == "postinst":
3587                         mysettings.load_infodir(mysettings["O"])
3588                         phase_retval = spawn(" ".join((EBUILD_SH_BINARY, mydo)),
3589                                 mysettings, debug=debug, free=1, logfile=logfile)
3590                         if phase_retval == os.EX_OK:
3591                                 # Post phase logic and tasks that have been factored out of
3592                                 # ebuild.sh.
3593                                 myargs = [MISC_SH_BINARY, "postinst_bsdflags"]
3594                                 mysettings["EBUILD_PHASE"] = ""
3595                                 phase_retval = spawn(" ".join(myargs),
3596                                         mysettings, debug=debug, free=1, logfile=logfile)
3597                                 if phase_retval != os.EX_OK:
3598                                         writemsg("!!! post postinst failed; exiting.\n",
3599                                                 noiselevel=-1)
3600                         return phase_retval
3601                 elif mydo in ["prerm","postrm","config"]:
3602                         mysettings.load_infodir(mysettings["O"])
3603                         return spawn(EBUILD_SH_BINARY + " " + mydo,
3604                                 mysettings, debug=debug, free=1, logfile=logfile)
3605
3606                 mycpv = "/".join((mysettings["CATEGORY"], mysettings["PF"]))
3607
3608                 # Make sure we get the correct tree in case there are overlays.
3609                 mytree = os.path.realpath(
3610                         os.path.dirname(os.path.dirname(mysettings["O"])))
3611                 try:
3612                         newuris, alist = mydbapi.getfetchlist(
3613                                 mycpv, mytree=mytree, mysettings=mysettings)
3614                         alluris, aalist = mydbapi.getfetchlist(
3615                                 mycpv, mytree=mytree, all=True, mysettings=mysettings)
3616                 except portage_exception.InvalidDependString, e:
3617                         writemsg("!!! %s\n" % str(e), noiselevel=-1)
3618                         writemsg("!!! Invalid SRC_URI for '%s'.\n" % mycpv, noiselevel=-1)
3619                         del e
3620                         return 1
3621                 mysettings["A"] = " ".join(alist)
3622                 mysettings["AA"] = " ".join(aalist)
3623                 if ("mirror" in features) or fetchall:
3624                         fetchme = alluris[:]
3625                         checkme = aalist[:]
3626                 elif mydo == "digest":
3627                         fetchme = alluris[:]
3628                         checkme = aalist[:]
3629                         # Skip files that we already have digests for.
3630                         mf = Manifest(mysettings["O"], mysettings["DISTDIR"])
3631                         mydigests = mf.getTypeDigests("DIST")
3632                         required_hash_types = set()
3633                         required_hash_types.add("size")
3634                         required_hash_types.add(portage_const.MANIFEST2_REQUIRED_HASH)
3635                         for filename, hashes in mydigests.iteritems():
3636                                 if not required_hash_types.difference(hashes):
3637                                         checkme = [i for i in checkme if i != filename]
3638                                         fetchme = [i for i in fetchme \
3639                                                 if os.path.basename(i) != filename]
3640                                 del filename, hashes
3641                 else:
3642                         fetchme = newuris[:]
3643                         checkme = alist[:]
3644
3645                 # Only try and fetch the files if we are going to need them ...
3646                 # otherwise, if user has FEATURES=noauto and they run `ebuild clean
3647                 # unpack compile install`, we will try and fetch 4 times :/
3648                 need_distfiles = (mydo in ("fetch", "unpack") or \
3649                         mydo not in ("digest", "manifest") and "noauto" not in features)
3650                 if need_distfiles and not fetch(
3651                         fetchme, mysettings, listonly=listonly, fetchonly=fetchonly):
3652                         return 1
3653
3654                 if mydo == "fetch" and listonly:
3655                         return 0
3656
3657                 try:
3658                         if mydo == "manifest":
3659                                 return not digestgen(aalist, mysettings, overwrite=1,
3660                                         manifestonly=1, myportdb=mydbapi)
3661                         elif mydo == "digest":
3662                                 return not digestgen(aalist, mysettings, overwrite=1,
3663                                         myportdb=mydbapi)
3664                         elif "digest" in mysettings.features:
3665                                 digestgen(aalist, mysettings, overwrite=0, myportdb=mydbapi)
3666                 except portage_exception.PermissionDenied, e:
3667                         writemsg("!!! %s\n" % str(e), noiselevel=-1)
3668                         if mydo in ("digest", "manifest"):
3669                                 return 1
3670
3671                 # See above comment about fetching only when needed
3672                 if not digestcheck(checkme, mysettings, ("strict" in features),
3673                         (mydo not in ["digest","fetch","unpack"] and \
3674                         mysettings.get("PORTAGE_CALLER", None) == "ebuild" and \
3675                         "noauto" in features)):
3676                         return 1
3677
3678                 if mydo == "fetch":
3679                         return 0
3680
3681                 # remove PORTAGE_ACTUAL_DISTDIR once cvs/svn is supported via SRC_URI
3682                 if (mydo != "setup" and "noauto" not in features) or mydo == "unpack":
3683                         orig_distdir = mysettings["DISTDIR"]
3684                         mysettings["PORTAGE_ACTUAL_DISTDIR"] = orig_distdir
3685                         edpath = mysettings["DISTDIR"] = \
3686                                 os.path.join(mysettings["PORTAGE_BUILDDIR"], "distdir")
3687                         if os.path.exists(edpath):
3688                                 try:
3689                                         if os.path.isdir(edpath) and not os.path.islink(edpath):
3690                                                 shutil.rmtree(edpath)
3691                                         else:
3692                                                 os.unlink(edpath)
3693                                 except OSError:
3694                                         print "!!! Failed reseting ebuild distdir path, " + edpath
3695                                         raise
3696                         os.mkdir(edpath)
3697                         apply_secpass_permissions(edpath, uid=portage_uid, mode=0755)
3698                         try:
3699                                 for file in alist:
3700                                         os.symlink(os.path.join(orig_distdir, file),
3701                                                 os.path.join(edpath, file))
3702                         except OSError:
3703                                 print "!!! Failed symlinking in '%s' to ebuild distdir" % file
3704                                 raise
3705
3706                 #initial dep checks complete; time to process main commands
3707
3708                 nosandbox = (("userpriv" in features) and \
3709                         ("usersandbox" not in features) and \
3710                         ("userpriv" not in mysettings["RESTRICT"]) and \
3711                         ("nouserpriv" not in mysettings["RESTRICT"]))
3712                 if nosandbox and ("userpriv" not in features or \
3713                         "userpriv" in mysettings["RESTRICT"] or \
3714                         "nouserpriv" in mysettings["RESTRICT"]):
3715                         nosandbox = ("sandbox" not in features and \
3716                                 "usersandbox" not in features)
3717
3718                 sesandbox = mysettings.selinux_enabled() and \
3719                         "sesandbox" in mysettings.features
3720                 ebuild_sh = EBUILD_SH_BINARY + " %s"
3721                 misc_sh = MISC_SH_BINARY + " dyn_%s"
3722
3723                 # args are for the to spawn function
3724                 actionmap = {
3725 "depend": {"cmd":ebuild_sh, "args":{"droppriv":1, "free":0,         "sesandbox":0}},
3726 "setup":  {"cmd":ebuild_sh, "args":{"droppriv":0, "free":1,         "sesandbox":0}},
3727 "unpack": {"cmd":ebuild_sh, "args":{"droppriv":1, "free":0,         "sesandbox":sesandbox}},
3728 "compile":{"cmd":ebuild_sh, "args":{"droppriv":1, "free":nosandbox, "sesandbox":sesandbox}},
3729 "test":   {"cmd":ebuild_sh, "args":{"droppriv":1, "free":nosandbox, "sesandbox":sesandbox}},
3730 "install":{"cmd":ebuild_sh, "args":{"droppriv":0, "free":0,         "sesandbox":sesandbox}},
3731 "rpm":    {"cmd":misc_sh,   "args":{"droppriv":0, "free":0,         "sesandbox":0}},
3732 "package":{"cmd":misc_sh,   "args":{"droppriv":0, "free":0,         "sesandbox":0}},
3733                 }
3734
3735                 # merge the deps in so we have again a 'full' actionmap
3736                 # be glad when this can die.
3737                 for x in actionmap.keys():
3738                         if len(actionmap_deps.get(x, [])):
3739                                 actionmap[x]["dep"] = ' '.join(actionmap_deps[x])
3740
3741                 if mydo in actionmap.keys():
3742                         if mydo=="package":
3743                                 portage_util.ensure_dirs(
3744                                         os.path.join(mysettings["PKGDIR"], mysettings["CATEGORY"]))
3745                                 portage_util.ensure_dirs(
3746                                         os.path.join(mysettings["PKGDIR"], "All"))
3747                         retval = spawnebuild(mydo,
3748                                 actionmap, mysettings, debug, logfile=logfile)
3749                 elif mydo=="qmerge":
3750                         # check to ensure install was run.  this *only* pops up when users
3751                         # forget it and are using ebuild
3752                         if not os.path.exists(
3753                                 os.path.join(mysettings["PORTAGE_BUILDDIR"], ".installed")):
3754                                 writemsg("!!! mydo=qmerge, but install phase hasn't been ran\n",
3755                                         noiselevel=-1)
3756                                 return 1
3757                         # qmerge is a special phase that implies noclean.
3758                         if "noclean" not in mysettings.features:
3759                                 mysettings.features.append("noclean")
3760                         #qmerge is specifically not supposed to do a runtime dep check
3761                         retval = merge(
3762                                 mysettings["CATEGORY"], mysettings["PF"], mysettings["D"],
3763                                 os.path.join(mysettings["PORTAGE_BUILDDIR"], "build-info"),
3764                                 myroot, mysettings, myebuild=mysettings["EBUILD"], mytree=tree,
3765                                 mydbapi=mydbapi, vartree=vartree, prev_mtimes=prev_mtimes)
3766                 elif mydo=="merge":
3767                         retval = spawnebuild("install", actionmap, mysettings, debug,
3768                                 alwaysdep=1, logfile=logfile)
3769                         if retval == os.EX_OK:
3770                                 retval = merge(mysettings["CATEGORY"], mysettings["PF"],
3771                                         mysettings["D"], os.path.join(mysettings["PORTAGE_BUILDDIR"],
3772                                         "build-info"), myroot, mysettings,
3773                                         myebuild=mysettings["EBUILD"], mytree=tree, mydbapi=mydbapi,
3774                                         vartree=vartree, prev_mtimes=prev_mtimes)
3775                 else:
3776                         print "!!! Unknown mydo:",mydo
3777                         return 1
3778
3779                 if retval != os.EX_OK and tree == "porttree":
3780                         for i in xrange(len(mydbapi.porttrees)-1):
3781                                 t = mydbapi.porttrees[i+1]
3782                                 if myebuild.startswith(t):
3783                                         # Display the non-cannonical path, in case it's different, to
3784                                         # prevent confusion.
3785                                         overlays = mysettings["PORTDIR_OVERLAY"].split()
3786                                         try:
3787                                                 writemsg("!!! This ebuild is from an overlay: '%s'\n" % \
3788                                                         overlays[i], noiselevel=-1)
3789                                         except IndexError:
3790                                                 pass
3791                                         break
3792                 return retval
3793
3794         finally:
3795                 if builddir_lock:
3796                         portage_locks.unlockdir(builddir_lock)
3797
3798                 # Make sure that DISTDIR is restored to it's normal value before we return!
3799                 if "PORTAGE_ACTUAL_DISTDIR" in mysettings:
3800                         mysettings["DISTDIR"] = mysettings["PORTAGE_ACTUAL_DISTDIR"]
3801                         del mysettings["PORTAGE_ACTUAL_DISTDIR"]
3802
3803                 if logfile:
3804                         try:
3805                                 if os.stat(logfile).st_size == 0:
3806                                         os.unlink(logfile)
3807                         except OSError:
3808                                 pass
3809
3810                 if mydo in ("digest", "manifest", "help"):
3811                         # If necessary, depend phase has been triggered by aux_get calls
3812                         # and the exemption is no longer needed.
3813                         _doebuild_manifest_exempt_depend -= 1
3814
3815 expandcache={}
3816
3817 def movefile(src,dest,newmtime=None,sstat=None,mysettings=None):
3818         """moves a file from src to dest, preserving all permissions and attributes; mtime will
3819         be preserved even when moving across filesystems.  Returns true on success and false on
3820         failure.  Move is atomic."""
3821         #print "movefile("+str(src)+","+str(dest)+","+str(newmtime)+","+str(sstat)+")"
3822         global lchown
3823         if mysettings is None:
3824                 global settings
3825                 mysettings = settings
3826         selinux_enabled = mysettings.selinux_enabled()
3827         try:
3828                 if not sstat:
3829                         sstat=os.lstat(src)
3830
3831         except SystemExit, e:
3832                 raise
3833         except Exception, e:
3834                 print "!!! Stating source file failed... movefile()"
3835                 print "!!!",e
3836                 return None
3837
3838         destexists=1
3839         try:
3840                 dstat=os.lstat(dest)
3841         except (OSError, IOError):
3842                 dstat=os.lstat(os.path.dirname(dest))
3843                 destexists=0
3844
3845         if bsd_chflags:
3846                 # Check that we can actually unset schg etc flags...
3847                 # Clear the flags on source and destination; we'll reinstate them after merging
3848                 if destexists and dstat.st_flags != 0:
3849                         if bsd_chflags.lchflags(dest, 0) < 0:
3850                                 writemsg("!!! Couldn't clear flags on file being merged: \n ",
3851                                         noiselevel=-1)
3852                 # We might have an immutable flag on the parent dir; save and clear.
3853                 pflags=bsd_chflags.lgetflags(os.path.dirname(dest))
3854                 if pflags != 0:
3855                         bsd_chflags.lchflags(os.path.dirname(dest), 0)
3856
3857                 if (destexists and bsd_chflags.lhasproblems(dest) > 0) or \
3858                         bsd_chflags.lhasproblems(os.path.dirname(dest)) > 0:
3859                         # This is bad: we can't merge the file with these flags set.
3860                         writemsg("!!! Can't merge file "+dest+" because of flags set\n",
3861                                 noiselevel=-1)
3862                         return None
3863
3864         if destexists:
3865                 if stat.S_ISLNK(dstat[stat.ST_MODE]):
3866                         try:
3867                                 os.unlink(dest)
3868                                 destexists=0
3869                         except SystemExit, e:
3870                                 raise
3871                         except Exception, e:
3872                                 pass
3873
3874         if stat.S_ISLNK(sstat[stat.ST_MODE]):
3875                 try:
3876                         target=os.readlink(src)
3877                         if mysettings and mysettings["D"]:
3878                                 if target.find(mysettings["D"])==0:
3879                                         target=target[len(mysettings["D"]):]
3880                         if destexists and not stat.S_ISDIR(dstat[stat.ST_MODE]):
3881                                 os.unlink(dest)
3882                         if selinux_enabled:
3883                                 sid = selinux.get_lsid(src)
3884                                 selinux.secure_symlink(target,dest,sid)
3885                         else:
3886                                 os.symlink(target,dest)
3887                         lchown(dest,sstat[stat.ST_UID],sstat[stat.ST_GID])
3888                         return os.lstat(dest)[stat.ST_MTIME]
3889                 except SystemExit, e:
3890                         raise
3891                 except Exception, e:
3892                         print "!!! failed to properly create symlink:"
3893                         print "!!!",dest,"->",target
3894                         print "!!!",e
3895                         return None
3896
3897         renamefailed=1
3898         if sstat[stat.ST_DEV]==dstat[stat.ST_DEV] or selinux_enabled:
3899                 try:
3900                         if selinux_enabled:
3901                                 ret=selinux.secure_rename(src,dest)
3902                         else:
3903                                 ret=os.rename(src,dest)
3904                         renamefailed=0
3905                 except SystemExit, e:
3906                         raise
3907                 except Exception, e:
3908                         if e[0]!=errno.EXDEV:
3909                                 # Some random error.
3910                                 print "!!! Failed to move",src,"to",dest
3911                                 print "!!!",e
3912                                 return None
3913                         # Invalid cross-device-link 'bind' mounted or actually Cross-Device
3914         if renamefailed:
3915                 didcopy=0
3916                 if stat.S_ISREG(sstat[stat.ST_MODE]):
3917                         try: # For safety copy then move it over.
3918                                 if selinux_enabled:
3919                                         selinux.secure_copy(src,dest+"#new")
3920                                         selinux.secure_rename(dest+"#new",dest)
3921                                 else:
3922                                         shutil.copyfile(src,dest+"#new")
3923                                         os.rename(dest+"#new",dest)
3924                                 didcopy=1
3925                         except SystemExit, e:
3926                                 raise
3927                         except Exception, e:
3928                                 print '!!! copy',src,'->',dest,'failed.'
3929                                 print "!!!",e
3930                                 return None
3931                 else:
3932                         #we don't yet handle special, so we need to fall back to /bin/mv
3933                         if selinux_enabled:
3934                                 a=commands.getstatusoutput(MOVE_BINARY+" -c -f "+"'"+src+"' '"+dest+"'")
3935                         else:
3936                                 a=commands.getstatusoutput(MOVE_BINARY+" -f "+"'"+src+"' '"+dest+"'")
3937                                 if a[0]!=0:
3938                                         print "!!! Failed to move special file:"
3939                                         print "!!! '"+src+"' to '"+dest+"'"
3940                                         print "!!!",a
3941                                         return None # failure
3942                 try:
3943                         if didcopy:
3944                                 if stat.S_ISLNK(sstat[stat.ST_MODE]):
3945                                         lchown(dest,sstat[stat.ST_UID],sstat[stat.ST_GID])
3946                                 else:
3947                                         os.chown(dest,sstat[stat.ST_UID],sstat[stat.ST_GID])
3948                                 os.chmod(dest, stat.S_IMODE(sstat[stat.ST_MODE])) # Sticky is reset on chown
3949                                 os.unlink(src)
3950                 except SystemExit, e:
3951                         raise
3952                 except Exception, e:
3953                         print "!!! Failed to chown/chmod/unlink in movefile()"
3954                         print "!!!",dest
3955                         print "!!!",e
3956                         return None
3957
3958         if newmtime:
3959                 os.utime(dest,(newmtime,newmtime))
3960         else:
3961                 os.utime(dest, (sstat[stat.ST_ATIME], sstat[stat.ST_MTIME]))
3962                 newmtime=sstat[stat.ST_MTIME]
3963
3964         if bsd_chflags:
3965                 # Restore the flags we saved before moving
3966                 if pflags and bsd_chflags.lchflags(os.path.dirname(dest), pflags) < 0:
3967                         writemsg("!!! Couldn't restore flags (%s) on '%s'\n" % \
3968                                 (str(pflags), os.path.dirname(dest)), noiselevel=-1)
3969                         return None
3970
3971         return newmtime
3972
3973 def merge(mycat, mypkg, pkgloc, infloc, myroot, mysettings, myebuild=None,
3974         mytree=None, mydbapi=None, vartree=None, prev_mtimes=None):
3975         if not os.access(myroot, os.W_OK):
3976                 writemsg("Permission denied: access('%s', W_OK)\n" % myroot,
3977                         noiselevel=-1)
3978                 return errno.EACCES
3979         mylink = dblink(mycat, mypkg, myroot, mysettings, treetype=mytree,
3980                 vartree=vartree)
3981         return mylink.merge(pkgloc, infloc, myroot, myebuild,
3982                 mydbapi=mydbapi, prev_mtimes=prev_mtimes)
3983
3984 def unmerge(cat, pkg, myroot, mysettings, mytrimworld=1, vartree=None, ldpath_mtimes=None):
3985         mylink = dblink(
3986                 cat, pkg, myroot, mysettings, treetype="vartree", vartree=vartree)
3987         try:
3988                 mylink.lockdb()
3989                 if mylink.exists():
3990                         retval = mylink.unmerge(trimworld=mytrimworld, cleanup=1,
3991                                 ldpath_mtimes=ldpath_mtimes)
3992                         if retval == os.EX_OK:
3993                                 mylink.delete()
3994                         return retval
3995                 return os.EX_OK
3996         finally:
3997                 mylink.unlockdb()
3998
3999 def getCPFromCPV(mycpv):
4000         """Calls pkgsplit on a cpv and returns only the cp."""
4001         return pkgsplit(mycpv)[0]
4002
4003 def dep_virtual(mysplit, mysettings):
4004         "Does virtual dependency conversion"
4005         newsplit=[]
4006         myvirtuals = mysettings.getvirtuals()
4007         for x in mysplit:
4008                 if type(x)==types.ListType:
4009                         newsplit.append(dep_virtual(x, mysettings))
4010                 else:
4011                         mykey=dep_getkey(x)
4012                         mychoices = myvirtuals.get(mykey, None)
4013                         if mychoices:
4014                                 if len(mychoices) == 1:
4015                                         a = x.replace(mykey, mychoices[0])
4016                                 else:
4017                                         if x[0]=="!":
4018                                                 # blocker needs "and" not "or(||)".
4019                                                 a=[]
4020                                         else:
4021                                                 a=['||']
4022                                         for y in mychoices:
4023                                                 a.append(x.replace(mykey, y))
4024                                 newsplit.append(a)
4025                         else:
4026                                 newsplit.append(x)
4027         return newsplit
4028
4029 def _expand_new_virtuals(mysplit, edebug, mydbapi, mysettings, myroot="/",
4030         trees=None, **kwargs):
4031         """Recursively expand new-style virtuals so as to collapse one or more
4032         levels of indirection.  In dep_zapdeps, new-style virtuals will be assigned
4033         zero cost regardless of whether or not they are currently installed. Virtual
4034         blockers are supported but only when the virtual expands to a single
4035         atom because it wouldn't necessarily make sense to block all the components
4036         of a compound virtual.  When more than one new-style virtual is matched,
4037         the matches are sorted from highest to lowest versions and the atom is
4038         expanded to || ( highest match ... lowest match )."""
4039         newsplit = []
4040         # According to GLEP 37, RDEPEND is the only dependency type that is valid
4041         # for new-style virtuals.  Repoman should enforce this.
4042         dep_keys = ["RDEPEND", "DEPEND", "PDEPEND"]
4043         def compare_pkgs(a, b):
4044                 return pkgcmp(b[1], a[1])
4045         portdb = trees[myroot]["porttree"].dbapi
4046         if kwargs["use_binaries"]:
4047                 portdb = trees[myroot]["bintree"].dbapi
4048         myvirtuals = mysettings.getvirtuals()
4049         for x in mysplit:
4050                 if x == "||":
4051                         newsplit.append(x)
4052                         continue
4053                 elif isinstance(x, list):
4054                         newsplit.append(_expand_new_virtuals(x, edebug, mydbapi,
4055                                 mysettings, myroot=myroot, trees=trees, **kwargs))
4056                         continue
4057                 if portage_dep._dep_check_strict and \
4058                         not isvalidatom(x, allow_blockers=True):
4059                         raise portage_exception.ParseError(
4060                                 "invalid atom: '%s'" % x)
4061                 mykey = dep_getkey(x)
4062                 if not mykey.startswith("virtual/"):
4063                         newsplit.append(x)
4064                         continue
4065                 mychoices = myvirtuals.get(mykey, [])
4066                 isblocker = x.startswith("!")
4067                 match_atom = x
4068                 if isblocker:
4069                         match_atom = x[1:]
4070                 pkgs = {}
4071                 for cpv in portdb.match(match_atom):
4072                         # only use new-style matches
4073                         if cpv.startswith("virtual/"):
4074                                 pkgs[cpv] = (cpv, catpkgsplit(cpv)[1:], portdb)
4075                 if kwargs["use_binaries"] and "vartree" in trees[myroot]:
4076                         vardb = trees[myroot]["vartree"].dbapi
4077                         for cpv in vardb.match(match_atom):
4078                                 # only use new-style matches
4079                                 if cpv.startswith("virtual/"):
4080                                         if cpv in pkgs:
4081                                                 continue
4082                                         pkgs[cpv] = (cpv, catpkgsplit(cpv)[1:], vardb)
4083                 if not (pkgs or mychoices):
4084                         # This one couldn't be expanded as a new-style virtual.  Old-style
4085                         # virtuals have already been expanded by dep_virtual, so this one
4086                         # is unavailable and dep_zapdeps will identify it as such.  The
4087                         # atom is not eliminated here since it may still represent a
4088                         # dependency that needs to be satisfied.
4089                         newsplit.append(x)
4090                         continue
4091                 if not pkgs and len(mychoices) == 1:
4092                         newsplit.append(x.replace(mykey, mychoices[0]))
4093                         continue
4094                 pkgs = pkgs.values()
4095                 pkgs.sort(compare_pkgs) # Prefer higher versions.
4096                 if isblocker:
4097                         a = []
4098                 else:
4099                         a = ['||']
4100                 for y in pkgs:
4101                         depstring = " ".join(y[2].aux_get(y[0], dep_keys))
4102                         if edebug:
4103                                 print "Virtual Parent:   ", y[0]
4104                                 print "Virtual Depstring:", depstring
4105                         mycheck = dep_check(depstring, mydbapi, mysettings, myroot=myroot,
4106                                 trees=trees, **kwargs)
4107                         if not mycheck[0]:
4108                                 raise portage_exception.ParseError(
4109                                         "%s: %s '%s'" % (y[0], mycheck[1], depstring))
4110                         if isblocker:
4111                                 virtual_atoms = [atom for atom in mycheck[1] \
4112                                         if not atom.startswith("!")]
4113                                 if len(virtual_atoms) == 1:
4114                                         # It wouldn't make sense to block all the components of a
4115                                         # compound virtual, so only a single atom block is allowed.
4116                                         a.append("!" + virtual_atoms[0])
4117                         else:
4118                                 mycheck[1].append("="+y[0]) # pull in the new-style virtual
4119                                 a.append(mycheck[1])
4120                 # Plain old-style virtuals.  New-style virtuals are preferred.
4121                 for y in mychoices:
4122                         a.append(x.replace(mykey, y))
4123                 if isblocker and not a:
4124                         # Probably a compound virtual.  Pass the atom through unprocessed.
4125                         newsplit.append(x)
4126                         continue
4127                 newsplit.append(a)
4128         return newsplit
4129
4130 def dep_eval(deplist):
4131         if not deplist:
4132                 return 1
4133         if deplist[0]=="||":
4134                 #or list; we just need one "1"
4135                 for x in deplist[1:]:
4136                         if type(x)==types.ListType:
4137                                 if dep_eval(x)==1:
4138                                         return 1
4139                         elif x==1:
4140                                         return 1
4141                 #XXX: unless there's no available atoms in the list
4142                 #in which case we need to assume that everything is
4143                 #okay as some ebuilds are relying on an old bug.
4144                 if len(deplist) == 1:
4145                         return 1
4146                 return 0
4147         else:
4148                 for x in deplist:
4149                         if type(x)==types.ListType:
4150                                 if dep_eval(x)==0:
4151                                         return 0
4152                         elif x==0 or x==2:
4153                                 return 0
4154                 return 1
4155
4156 def dep_zapdeps(unreduced, reduced, myroot, use_binaries=0, trees=None):
4157         """Takes an unreduced and reduced deplist and removes satisfied dependencies.
4158         Returned deplist contains steps that must be taken to satisfy dependencies."""
4159         if trees is None:
4160                 global db
4161                 trees = db
4162         writemsg("ZapDeps -- %s\n" % (use_binaries), 2)
4163         if not reduced or unreduced == ["||"] or dep_eval(reduced):
4164                 return []
4165
4166         if unreduced[0] != "||":
4167                 unresolved = []
4168                 for dep, satisfied in izip(unreduced, reduced):
4169                         if isinstance(dep, list):
4170                                 unresolved += dep_zapdeps(dep, satisfied, myroot,
4171                                         use_binaries=use_binaries, trees=trees)
4172                         elif not satisfied:
4173                                 unresolved.append(dep)
4174                 return unresolved
4175
4176         # We're at a ( || atom ... ) type level and need to make a choice
4177         deps = unreduced[1:]
4178         satisfieds = reduced[1:]
4179
4180         # Our preference order is for an the first item that:
4181         # a) contains all unmasked packages with the same key as installed packages
4182         # b) contains all unmasked packages
4183         # c) contains masked installed packages
4184         # d) is the first item
4185
4186         preferred = []
4187         preferred_any_slot = []
4188         possible_upgrades = []
4189         other = []
4190
4191         # Alias the trees we'll be checking availability against
4192         vardb = None
4193         if "vartree" in trees[myroot]:
4194                 vardb = trees[myroot]["vartree"].dbapi
4195         if use_binaries:
4196                 mydbapi = trees[myroot]["bintree"].dbapi
4197         else:
4198                 mydbapi = trees[myroot]["porttree"].dbapi
4199
4200         # Sort the deps into preferred (installed) and other
4201         # with values of [[required_atom], availablility]
4202         for dep, satisfied in izip(deps, satisfieds):
4203                 if isinstance(dep, list):
4204                         atoms = dep_zapdeps(dep, satisfied, myroot,
4205                                 use_binaries=use_binaries, trees=trees)
4206                 else:
4207                         atoms = [dep]
4208
4209                 if not vardb:
4210                         # called by repoman
4211                         other.append((atoms, None, False))
4212                         continue
4213
4214                 all_available = True
4215                 versions = {}
4216                 for atom in atoms:
4217                         avail_pkg = best(mydbapi.match(atom))
4218                         if avail_pkg:
4219                                 avail_slot = "%s:%s" % (dep_getkey(atom),
4220                                         mydbapi.aux_get(avail_pkg, ["SLOT"])[0])
4221                         elif not avail_pkg and use_binaries:
4222                                 # With --usepkgonly, count installed packages as "available".
4223                                 # Note that --usepkgonly currently has no package.mask support.
4224                                 # See bug #149816.
4225                                 avail_pkg = best(vardb.match(atom))
4226                                 if avail_pkg:
4227                                         avail_slot = "%s:%s" % (dep_getkey(atom),
4228                                                 vardb.aux_get(avail_pkg, ["SLOT"])[0])
4229                         if not avail_pkg:
4230                                 all_available = False
4231                                 break
4232
4233                         versions[avail_slot] = avail_pkg
4234
4235                 this_choice = (atoms, versions, all_available)
4236                 if all_available:
4237                         # The "all installed" criterion is not version or slot specific.
4238                         # If any version of a package is installed then we assume that it
4239                         # is preferred over other possible packages choices.
4240                         all_installed = True
4241                         for atom in set([dep_getkey(atom) for atom in atoms]):
4242                                 # New-style virtuals have zero cost to install.
4243                                 if not vardb.match(atom) and not atom.startswith("virtual/"):
4244                                         all_installed = False
4245                                         break
4246                         all_installed_slots = False
4247                         if all_installed:
4248                                 all_installed_slots = True
4249                                 for slot_atom in versions:
4250                                         # New-style virtuals have zero cost to install.
4251                                         if not vardb.match(slot_atom) and \
4252                                                 not slot_atom.startswith("virtual/"):
4253                                                 all_installed_slots = False
4254                                                 break
4255                         if all_installed:
4256                                 if all_installed_slots:
4257                                         preferred.append(this_choice)
4258                                 else:
4259                                         preferred_any_slot.append(this_choice)
4260                         else:
4261                                 possible_upgrades.append(this_choice)
4262                 else:
4263                         other.append(this_choice)
4264
4265         # Compare the "all_installed" choices against the "all_available" choices
4266         # for possible missed upgrades.  The main purpose of this code is to find
4267         # upgrades of new-style virtuals since _expand_new_virtuals() expands them
4268         # into || ( highest version ... lowest version ).  We want to prefer the
4269         # highest all_available version of the new-style virtual when there is a
4270         # lower all_installed version.
4271         preferred.extend(preferred_any_slot)
4272         preferred.extend(possible_upgrades)
4273         possible_upgrades = preferred[1:]
4274         for possible_upgrade in possible_upgrades:
4275                 atoms, versions, all_available = possible_upgrade
4276                 myslots = set(versions)
4277                 for other_choice in preferred:
4278                         if possible_upgrade is other_choice:
4279                                 # possible_upgrade will not be promoted, so move on
4280                                 break
4281                         o_atoms, o_versions, o_all_available = other_choice
4282                         intersecting_slots = myslots.intersection(o_versions)
4283                         if not intersecting_slots:
4284                                 continue
4285                         has_upgrade = False
4286                         has_downgrade = False
4287                         for myslot in intersecting_slots:
4288                                 myversion = versions[myslot]
4289                                 o_version = o_versions[myslot]
4290                                 if myversion != o_version:
4291                                         if myversion == best([myversion, o_version]):
4292                                                 has_upgrade = True
4293                                         else:
4294                                                 has_downgrade = True
4295                                                 break
4296                         if has_upgrade and not has_downgrade:
4297                                 preferred.remove(possible_upgrade)
4298                                 o_index = preferred.index(other_choice)
4299                                 preferred.insert(o_index, possible_upgrade)
4300                                 break
4301
4302         # preferred now contains a) and c) from the order above with
4303         # the masked flag differentiating the two. other contains b)
4304         # and d) so adding other to preferred will give us a suitable
4305         # list to iterate over.
4306         preferred.extend(other)
4307
4308         for allow_masked in (False, True):
4309                 for atoms, versions, all_available in preferred:
4310                         if all_available or allow_masked:
4311                                 return atoms
4312
4313         assert(False) # This point should not be reachable
4314
4315
4316 def dep_expand(mydep, mydb=None, use_cache=1, settings=None):
4317         if not len(mydep):
4318                 return mydep
4319         if mydep[0]=="*":
4320                 mydep=mydep[1:]
4321         orig_dep = mydep
4322         mydep = dep_getcpv(orig_dep)
4323         myindex = orig_dep.index(mydep)
4324         prefix = orig_dep[:myindex]
4325         postfix = orig_dep[myindex+len(mydep):]
4326         return prefix + cpv_expand(
4327                 mydep, mydb=mydb, use_cache=use_cache, settings=settings) + postfix
4328
4329 def dep_check(depstring, mydbapi, mysettings, use="yes", mode=None, myuse=None,
4330         use_cache=1, use_binaries=0, myroot="/", trees=None):
4331         """Takes a depend string and parses the condition."""
4332         edebug = mysettings.get("PORTAGE_DEBUG", None) == "1"
4333         #check_config_instance(mysettings)
4334         if trees is None:
4335                 trees = globals()["db"]
4336         if use=="yes":
4337                 if myuse is None:
4338                         #default behavior
4339                         myusesplit = mysettings["USE"].split()
4340                 else:
4341                         myusesplit = myuse
4342                         # We've been given useflags to use.
4343                         #print "USE FLAGS PASSED IN."
4344                         #print myuse
4345                         #if "bindist" in myusesplit:
4346                         #       print "BINDIST is set!"
4347                         #else:
4348                         #       print "BINDIST NOT set."
4349         else:
4350                 #we are being run by autouse(), don't consult USE vars yet.
4351                 # WE ALSO CANNOT USE SETTINGS
4352                 myusesplit=[]
4353
4354         #convert parenthesis to sublists
4355         mysplit = portage_dep.paren_reduce(depstring)
4356
4357         mymasks = set()
4358         useforce = set()
4359         useforce.add(mysettings["ARCH"])
4360         if use == "all":
4361                 # This masking/forcing is only for repoman.  In other cases, relevant
4362                 # masking/forcing should have already been applied via
4363                 # config.regenerate().  Also, binary or installed packages may have
4364                 # been built with flags that are now masked, and it would be
4365                 # inconsistent to mask them now.  Additionally, myuse may consist of
4366                 # flags from a parent package that is being merged to a $ROOT that is
4367                 # different from the one that mysettings represents.
4368                 mymasks.update(mysettings.usemask)
4369                 mymasks.update(mysettings.archlist())
4370                 mymasks.discard(mysettings["ARCH"])
4371                 useforce.update(mysettings.useforce)
4372                 useforce.difference_update(mymasks)
4373         try:
4374                 mysplit = portage_dep.use_reduce(mysplit, uselist=myusesplit,
4375                         masklist=mymasks, matchall=(use=="all"), excludeall=useforce)
4376         except portage_exception.InvalidDependString, e:
4377                 return [0, str(e)]
4378
4379         # Do the || conversions
4380         mysplit=portage_dep.dep_opconvert(mysplit)
4381
4382         if mysplit == []:
4383                 #dependencies were reduced to nothing
4384                 return [1,[]]
4385
4386         # Recursively expand new-style virtuals so as to
4387         # collapse one or more levels of indirection.
4388         try:
4389                 mysplit = _expand_new_virtuals(mysplit, edebug, mydbapi, mysettings,
4390                         use=use, mode=mode, myuse=myuse, use_cache=use_cache,
4391                         use_binaries=use_binaries, myroot=myroot, trees=trees)
4392         except portage_exception.ParseError, e:
4393                 return [0, str(e)]
4394
4395         mysplit2=mysplit[:]
4396         mysplit2=dep_wordreduce(mysplit2,mysettings,mydbapi,mode,use_cache=use_cache)
4397         if mysplit2 is None:
4398                 return [0,"Invalid token"]
4399
4400         writemsg("\n\n\n", 1)
4401         writemsg("mysplit:  %s\n" % (mysplit), 1)
4402         writemsg("mysplit2: %s\n" % (mysplit2), 1)
4403
4404         myzaps = dep_zapdeps(mysplit, mysplit2, myroot,
4405                 use_binaries=use_binaries, trees=trees)
4406         mylist = flatten(myzaps)
4407         writemsg("myzaps:   %s\n" % (myzaps), 1)
4408         writemsg("mylist:   %s\n" % (mylist), 1)
4409         #remove duplicates
4410         mydict={}
4411         for x in mylist:
4412                 mydict[x]=1
4413         writemsg("mydict:   %s\n" % (mydict), 1)
4414         return [1,mydict.keys()]
4415
4416 def dep_wordreduce(mydeplist,mysettings,mydbapi,mode,use_cache=1):
4417         "Reduces the deplist to ones and zeros"
4418         deplist=mydeplist[:]
4419         for mypos in xrange(len(deplist)):
4420                 if type(deplist[mypos])==types.ListType:
4421                         #recurse
4422                         deplist[mypos]=dep_wordreduce(deplist[mypos],mysettings,mydbapi,mode,use_cache=use_cache)
4423                 elif deplist[mypos]=="||":
4424                         pass
4425                 else:
4426                         mykey = dep_getkey(deplist[mypos])
4427                         if mysettings and mysettings.pprovideddict.has_key(mykey) and \
4428                                 match_from_list(deplist[mypos], mysettings.pprovideddict[mykey]):
4429                                 deplist[mypos]=True
4430                         elif mydbapi is None:
4431                                 # Assume nothing is satisfied.  This forces dep_zapdeps to
4432                                 # return all of deps the deps that have been selected
4433                                 # (excluding those satisfied by package.provided).
4434                                 deplist[mypos] = False
4435                         else:
4436                                 if mode:
4437                                         mydep=mydbapi.xmatch(mode,deplist[mypos])
4438                                 else:
4439                                         mydep=mydbapi.match(deplist[mypos],use_cache=use_cache)
4440                                 if mydep!=None:
4441                                         tmp=(len(mydep)>=1)
4442                                         if deplist[mypos][0]=="!":
4443                                                 tmp=False
4444                                         deplist[mypos]=tmp
4445                                 else:
4446                                         #encountered invalid string
4447                                         return None
4448         return deplist
4449
4450 def cpv_getkey(mycpv):
4451         myslash=mycpv.split("/")
4452         mysplit=pkgsplit(myslash[-1])
4453         mylen=len(myslash)
4454         if mylen==2:
4455                 return myslash[0]+"/"+mysplit[0]
4456         elif mylen==1:
4457                 return mysplit[0]
4458         else:
4459                 return mysplit
4460
4461 def key_expand(mykey, mydb=None, use_cache=1, settings=None):
4462         mysplit=mykey.split("/")
4463         if settings is None:
4464                 settings = globals()["settings"]
4465         virts = settings.getvirtuals("/")
4466         virts_p = settings.get_virts_p("/")
4467         if len(mysplit)==1:
4468                 if mydb and type(mydb)==types.InstanceType:
4469                         for x in settings.categories:
4470                                 if mydb.cp_list(x+"/"+mykey,use_cache=use_cache):
4471                                         return x+"/"+mykey
4472                         if virts_p.has_key(mykey):
4473                                 return(virts_p[mykey][0])
4474                 return "null/"+mykey
4475         elif mydb:
4476                 if type(mydb)==types.InstanceType:
4477                         if (not mydb.cp_list(mykey,use_cache=use_cache)) and virts and virts.has_key(mykey):
4478                                 return virts[mykey][0]
4479                 return mykey
4480
4481 def cpv_expand(mycpv, mydb=None, use_cache=1, settings=None):
4482         """Given a string (packagename or virtual) expand it into a valid
4483         cat/package string. Virtuals use the mydb to determine which provided
4484         virtual is a valid choice and defaults to the first element when there
4485         are no installed/available candidates."""
4486         myslash=mycpv.split("/")
4487         mysplit=pkgsplit(myslash[-1])
4488         if settings is None:
4489                 settings = globals()["settings"]
4490         virts = settings.getvirtuals("/")
4491         virts_p = settings.get_virts_p("/")
4492         if len(myslash)>2:
4493                 # this is illegal case.
4494                 mysplit=[]
4495                 mykey=mycpv
4496         elif len(myslash)==2:
4497                 if mysplit:
4498                         mykey=myslash[0]+"/"+mysplit[0]
4499                 else:
4500                         mykey=mycpv
4501                 if mydb and virts and mykey in virts:
4502                         writemsg("mydb.__class__: %s\n" % (mydb.__class__), 1)
4503                         if hasattr(mydb, "cp_list"):
4504                                 if not mydb.cp_list(mykey, use_cache=use_cache):
4505                                         writemsg("virts[%s]: %s\n" % (str(mykey),virts[mykey]), 1)
4506                                         mykey_orig = mykey[:]
4507                                         for vkey in virts[mykey]:
4508                                                 if mydb.cp_list(vkey,use_cache=use_cache):
4509                                                         mykey = vkey
4510                                                         writemsg("virts chosen: %s\n" % (mykey), 1)
4511                                                         break
4512                                         if mykey == mykey_orig:
4513                                                 mykey=virts[mykey][0]
4514                                                 writemsg("virts defaulted: %s\n" % (mykey), 1)
4515                         #we only perform virtual expansion if we are passed a dbapi
4516         else:
4517                 #specific cpv, no category, ie. "foo-1.0"
4518                 if mysplit:
4519                         myp=mysplit[0]
4520                 else:
4521                         # "foo" ?
4522                         myp=mycpv
4523                 mykey=None
4524                 matches=[]
4525                 if mydb:
4526                         for x in settings.categories:
4527                                 if mydb.cp_list(x+"/"+myp,use_cache=use_cache):
4528                                         matches.append(x+"/"+myp)
4529                 if len(matches) > 1:
4530                         virtual_name_collision = False
4531                         if len(matches) == 2:
4532                                 for x in matches:
4533                                         if not x.startswith("virtual/"):
4534                                                 # Assume that the non-virtual is desired.  This helps
4535                                                 # avoid the ValueError for invalid deps that come from
4536                                                 # installed packages (during reverse blocker detection,
4537                                                 # for example).
4538                                                 mykey = x
4539                                         else:
4540                                                 virtual_name_collision = True
4541                         if not virtual_name_collision:
4542                                 raise ValueError, matches
4543                 elif matches:
4544                         mykey=matches[0]
4545
4546                 if not mykey and type(mydb)!=types.ListType:
4547                         if virts_p.has_key(myp):
4548                                 mykey=virts_p[myp][0]
4549                         #again, we only perform virtual expansion if we have a dbapi (not a list)
4550                 if not mykey:
4551                         mykey="null/"+myp
4552         if mysplit:
4553                 if mysplit[2]=="r0":
4554                         return mykey+"-"+mysplit[1]
4555                 else:
4556                         return mykey+"-"+mysplit[1]+"-"+mysplit[2]
4557         else:
4558                 return mykey
4559
4560 def getmaskingreason(mycpv, settings=None, portdb=None):
4561         from portage_util import grablines
4562         if settings is None:
4563                 settings = globals()["settings"]
4564         if portdb is None:
4565                 portdb = globals()["portdb"]
4566         mysplit = catpkgsplit(mycpv)
4567         if not mysplit:
4568                 raise ValueError("invalid CPV: %s" % mycpv)
4569         if not portdb.cpv_exists(mycpv):
4570                 raise KeyError("CPV %s does not exist" % mycpv)
4571         mycp=mysplit[0]+"/"+mysplit[1]
4572
4573         # XXX- This is a temporary duplicate of code from the config constructor.
4574         locations = [os.path.join(settings["PORTDIR"], "profiles")]
4575         locations.extend(settings.profiles)
4576         for ov in settings["PORTDIR_OVERLAY"].split():
4577                 profdir = os.path.join(normalize_path(ov), "profiles")
4578                 if os.path.isdir(profdir):
4579                         locations.append(profdir)
4580         locations.append(os.path.join(settings["PORTAGE_CONFIGROOT"],
4581                 USER_CONFIG_PATH.lstrip(os.path.sep)))
4582         locations.reverse()
4583         pmasklists = [grablines(os.path.join(x, "package.mask"), recursive=1) for x in locations]
4584         pmasklines = []
4585         while pmasklists: # stack_lists doesn't preserve order so it can't be used
4586                 pmasklines.extend(pmasklists.pop(0))
4587         del pmasklists
4588
4589         if settings.pmaskdict.has_key(mycp):
4590                 for x in settings.pmaskdict[mycp]:
4591                         if mycpv in portdb.xmatch("match-all", x):
4592                                 comment = ""
4593                                 l = "\n"
4594                                 comment_valid = -1
4595                                 for i in xrange(len(pmasklines)):
4596                                         l = pmasklines[i].strip()
4597                                         if l == "":
4598                                                 comment = ""
4599                                                 comment_valid = -1
4600                                         elif l[0] == "#":
4601                                                 comment += (l+"\n")
4602                                                 comment_valid = i + 1
4603                                         elif l == x:
4604                                                 if comment_valid != i:
4605                                                         comment = ""
4606                                                 return comment
4607                                         elif comment_valid != -1:
4608                                                 # Apparently this comment applies to muliple masks, so
4609                                                 # it remains valid until a blank line is encountered.
4610                                                 comment_valid += 1
4611         return None
4612
4613 def getmaskingstatus(mycpv, settings=None, portdb=None):
4614         if settings is None:
4615                 settings = globals()["settings"]
4616         if portdb is None:
4617                 portdb = globals()["portdb"]
4618         mysplit = catpkgsplit(mycpv)
4619         if not mysplit:
4620                 raise ValueError("invalid CPV: %s" % mycpv)
4621         if not portdb.cpv_exists(mycpv):
4622                 raise KeyError("CPV %s does not exist" % mycpv)
4623         mycp=mysplit[0]+"/"+mysplit[1]
4624
4625         rValue = []
4626
4627         # profile checking
4628         revmaskdict=settings.prevmaskdict
4629         if revmaskdict.has_key(mycp):
4630                 for x in revmaskdict[mycp]:
4631                         if x[0]=="*":
4632                                 myatom = x[1:]
4633                         else:
4634                                 myatom = x
4635                         if not match_to_list(mycpv, [myatom]):
4636                                 rValue.append("profile")
4637                                 break
4638
4639         # package.mask checking
4640         maskdict=settings.pmaskdict
4641         unmaskdict=settings.punmaskdict
4642         if maskdict.has_key(mycp):
4643                 for x in maskdict[mycp]:
4644                         if mycpv in portdb.xmatch("match-all", x):
4645                                 unmask=0
4646                                 if unmaskdict.has_key(mycp):
4647                                         for z in unmaskdict[mycp]:
4648                                                 if mycpv in portdb.xmatch("match-all",z):
4649                                                         unmask=1
4650                                                         break
4651                                 if unmask==0:
4652                                         rValue.append("package.mask")
4653
4654         # keywords checking
4655         try:
4656                 mygroups, eapi = portdb.aux_get(mycpv, ["KEYWORDS", "EAPI"])
4657         except KeyError:
4658                 # The "depend" phase apparently failed for some reason.  An associated
4659                 # error message will have already been printed to stderr.
4660                 return ["corruption"]
4661         if not eapi_is_supported(eapi):
4662                 return ["required EAPI %s, supported EAPI %s" % (eapi, portage_const.EAPI)]
4663         mygroups = mygroups.split()
4664         pgroups = settings["ACCEPT_KEYWORDS"].split()
4665         myarch = settings["ARCH"]
4666         if pgroups and myarch not in pgroups:
4667                 """For operating systems other than Linux, ARCH is not necessarily a
4668                 valid keyword."""
4669                 myarch = pgroups[0].lstrip("~")
4670         pkgdict = settings.pkeywordsdict
4671
4672         cp = dep_getkey(mycpv)
4673         if pkgdict.has_key(cp):
4674                 matches = match_to_list(mycpv, pkgdict[cp].keys())
4675                 for match in matches:
4676                         pgroups.extend(pkgdict[cp][match])
4677                 if matches:
4678                         inc_pgroups = []
4679                         for x in pgroups:
4680                                 if x != "-*" and x.startswith("-"):
4681                                         try:
4682                                                 inc_pgroups.remove(x[1:])
4683                                         except ValueError:
4684                                                 pass
4685                                 if x not in inc_pgroups:
4686                                         inc_pgroups.append(x)
4687                         pgroups = inc_pgroups
4688                         del inc_pgroups
4689
4690         kmask = "missing"
4691
4692         for keyword in pgroups:
4693                 if keyword in mygroups:
4694                         kmask=None
4695
4696         if kmask:
4697                 fallback = None
4698                 for gp in mygroups:
4699                         if gp=="*":
4700                                 kmask=None
4701                                 break
4702                         elif gp=="-"+myarch:
4703                                 kmask="-"+myarch
4704                                 break
4705                         elif gp=="~"+myarch:
4706                                 kmask="~"+myarch
4707                                 break
4708
4709         if kmask:
4710                 rValue.append(kmask+" keyword")
4711         return rValue
4712
4713 class portagetree:
4714         def __init__(self, root="/", virtual=None, clone=None, settings=None):
4715
4716                 if clone:
4717                         writemsg("portagetree.__init__(): deprecated " + \
4718                                 "use of clone parameter\n", noiselevel=-1)
4719                         self.root=clone.root
4720                         self.portroot=clone.portroot
4721                         self.pkglines=clone.pkglines
4722                 else:
4723                         self.root=root
4724                         if settings is None:
4725                                 settings = globals()["settings"]
4726                         self.settings = settings
4727                         self.portroot=settings["PORTDIR"]
4728                         self.virtual=virtual
4729                         self.dbapi = portdbapi(
4730                                 settings["PORTDIR"], mysettings=settings)
4731
4732         def dep_bestmatch(self,mydep):
4733                 "compatibility method"
4734                 mymatch=self.dbapi.xmatch("bestmatch-visible",mydep)
4735                 if mymatch is None:
4736                         return ""
4737                 return mymatch
4738
4739         def dep_match(self,mydep):
4740                 "compatibility method"
4741                 mymatch=self.dbapi.xmatch("match-visible",mydep)
4742                 if mymatch is None:
4743                         return []
4744                 return mymatch
4745
4746         def exists_specific(self,cpv):
4747                 return self.dbapi.cpv_exists(cpv)
4748
4749         def getallnodes(self):
4750                 """new behavior: these are all *unmasked* nodes.  There may or may not be available
4751                 masked package for nodes in this nodes list."""
4752                 return self.dbapi.cp_all()
4753
4754         def getname(self,pkgname):
4755                 "returns file location for this particular package (DEPRECATED)"
4756                 if not pkgname:
4757                         return ""
4758                 mysplit=pkgname.split("/")
4759                 psplit=pkgsplit(mysplit[1])
4760                 return self.portroot+"/"+mysplit[0]+"/"+psplit[0]+"/"+mysplit[1]+".ebuild"
4761
4762         def resolve_specific(self,myspec):
4763                 cps=catpkgsplit(myspec)
4764                 if not cps:
4765                         return None
4766                 mykey = key_expand(cps[0]+"/"+cps[1], mydb=self.dbapi,
4767                         settings=self.settings)
4768                 mykey=mykey+"-"+cps[2]
4769                 if cps[3]!="r0":
4770                         mykey=mykey+"-"+cps[3]
4771                 return mykey
4772
4773         def depcheck(self,mycheck,use="yes",myusesplit=None):
4774                 return dep_check(mycheck,self.dbapi,use=use,myuse=myusesplit)
4775
4776         def getslot(self,mycatpkg):
4777                 "Get a slot for a catpkg; assume it exists."
4778                 myslot = ""
4779                 try:
4780                         myslot=self.dbapi.aux_get(mycatpkg,["SLOT"])[0]
4781                 except SystemExit, e:
4782                         raise
4783                 except Exception, e:
4784                         pass
4785                 return myslot
4786
4787
4788 class dbapi:
4789         def __init__(self):
4790                 pass
4791
4792         def close_caches(self):
4793                 pass
4794
4795         def cp_list(self,cp,use_cache=1):
4796                 return
4797
4798         def cpv_all(self):
4799                 cpv_list = []
4800                 for cp in self.cp_all():
4801                         cpv_list.extend(self.cp_list(cp))
4802                 return cpv_list
4803
4804         def aux_get(self,mycpv,mylist):
4805                 "stub code for returning auxiliary db information, such as SLOT, DEPEND, etc."
4806                 'input: "sys-apps/foo-1.0",["SLOT","DEPEND","HOMEPAGE"]'
4807                 'return: ["0",">=sys-libs/bar-1.0","http://www.foo.com"] or [] if mycpv not found'
4808                 raise NotImplementedError
4809
4810         def match(self,origdep,use_cache=1):
4811                 mydep = dep_expand(origdep, mydb=self, settings=self.settings)
4812                 mykey=dep_getkey(mydep)
4813                 mylist = match_from_list(mydep,self.cp_list(mykey,use_cache=use_cache))
4814                 myslot = portage_dep.dep_getslot(mydep)
4815                 if myslot is not None:
4816                         mylist = [cpv for cpv in mylist \
4817                                 if self.aux_get(cpv, ["SLOT"])[0] == myslot]
4818                 return mylist
4819
4820         def match2(self,mydep,mykey,mylist):
4821                 writemsg("DEPRECATED: dbapi.match2\n")
4822                 match_from_list(mydep,mylist)
4823
4824         def invalidentry(self, mypath):
4825                 if re.search("portage_lockfile$",mypath):
4826                         if not os.environ.has_key("PORTAGE_MASTER_PID"):
4827                                 writemsg("Lockfile removed: %s\n" % mypath, 1)
4828                                 portage_locks.unlockfile((mypath,None,None))
4829                         else:
4830                                 # Nothing we can do about it. We're probably sandboxed.
4831                                 pass
4832                 elif re.search(".*/-MERGING-(.*)",mypath):
4833                         if os.path.exists(mypath):
4834                                 writemsg(red("INCOMPLETE MERGE:")+" "+mypath+"\n", noiselevel=-1)
4835                 else:
4836                         writemsg("!!! Invalid db entry: %s\n" % mypath, noiselevel=-1)
4837
4838
4839
4840 class fakedbapi(dbapi):
4841         "This is a dbapi to use for the emptytree function.  It's empty, but things can be added to it."
4842         def __init__(self, settings=None):
4843                 self.cpvdict={}
4844                 self.cpdict={}
4845                 if settings is None:
4846                         settings = globals()["settings"]
4847                 self.settings = settings
4848                 self._match_cache = {}
4849
4850         def _clear_cache(self):
4851                 if self._match_cache:
4852                         self._match_cache = {}
4853
4854         def match(self, origdep, use_cache=1):
4855                 result = self._match_cache.get(origdep, None)
4856                 if result is not None:
4857                         return result[:]
4858                 result = dbapi.match(self, origdep, use_cache=use_cache)
4859                 self._match_cache[origdep] = result
4860                 return result[:]
4861
4862         def cpv_exists(self,mycpv):
4863                 return self.cpvdict.has_key(mycpv)
4864
4865         def cp_list(self,mycp,use_cache=1):
4866                 if not self.cpdict.has_key(mycp):
4867                         return []
4868                 else:
4869                         return self.cpdict[mycp]
4870
4871         def cp_all(self):
4872                 returnme=[]
4873                 for x in self.cpdict.keys():
4874                         returnme.extend(self.cpdict[x])
4875                 return returnme
4876
4877         def cpv_all(self):
4878                 return self.cpvdict.keys()
4879
4880         def cpv_inject(self, mycpv, metadata=None):
4881                 """Adds a cpv from the list of available packages."""
4882                 self._clear_cache()
4883                 mycp=cpv_getkey(mycpv)
4884                 self.cpvdict[mycpv] = metadata
4885                 myslot = None
4886                 if metadata:
4887                         myslot = metadata.get("SLOT", None)
4888                 if myslot and mycp in self.cpdict:
4889                         # If necessary, remove another package in the same SLOT.
4890                         for cpv in self.cpdict[mycp]:
4891                                 if mycpv != cpv:
4892                                         other_metadata = self.cpvdict[cpv]
4893                                         if other_metadata:
4894                                                 if myslot == other_metadata.get("SLOT", None):
4895                                                         self.cpv_remove(cpv)
4896                                                         break
4897                 if mycp not in self.cpdict:
4898                         self.cpdict[mycp] = []
4899                 if not mycpv in self.cpdict[mycp]:
4900                         self.cpdict[mycp].append(mycpv)
4901
4902         def cpv_remove(self,mycpv):
4903                 """Removes a cpv from the list of available packages."""
4904                 self._clear_cache()
4905                 mycp=cpv_getkey(mycpv)
4906                 if self.cpvdict.has_key(mycpv):
4907                         del     self.cpvdict[mycpv]
4908                 if not self.cpdict.has_key(mycp):
4909                         return
4910                 while mycpv in self.cpdict[mycp]:
4911                         del self.cpdict[mycp][self.cpdict[mycp].index(mycpv)]
4912                 if not len(self.cpdict[mycp]):
4913                         del self.cpdict[mycp]
4914
4915         def aux_get(self, mycpv, wants):
4916                 if not self.cpv_exists(mycpv):
4917                         raise KeyError(mycpv)
4918                 metadata = self.cpvdict[mycpv]
4919                 if not metadata:
4920                         return ["" for x in wants]
4921                 return [metadata.get(x, "") for x in wants]
4922
4923         def aux_update(self, cpv, values):
4924                 self._clear_cache()
4925                 self.cpvdict[cpv].update(values)
4926
4927 class bindbapi(fakedbapi):
4928         def __init__(self, mybintree=None, settings=None):
4929                 self.bintree = mybintree
4930                 self.cpvdict={}
4931                 self.cpdict={}
4932                 if settings is None:
4933                         settings = globals()["settings"]
4934                 self.settings = settings
4935                 self._match_cache = {}
4936                 # Selectively cache metadata in order to optimize dep matching.
4937                 self._aux_cache_keys = set(["SLOT"])
4938                 self._aux_cache = {}
4939
4940         def match(self, *pargs, **kwargs):
4941                 if self.bintree and not self.bintree.populated:
4942                         self.bintree.populate()
4943                 return fakedbapi.match(self, *pargs, **kwargs)
4944
4945         def aux_get(self,mycpv,wants):
4946                 if self.bintree and not self.bintree.populated:
4947                         self.bintree.populate()
4948                 cache_me = False
4949                 if not set(wants).difference(self._aux_cache_keys):
4950                         aux_cache = self._aux_cache.get(mycpv)
4951                         if aux_cache is not None:
4952                                 return [aux_cache[x] for x in wants]
4953                         cache_me = True
4954                 mysplit = mycpv.split("/")
4955                 mylist  = []
4956                 tbz2name = mysplit[1]+".tbz2"
4957                 if not self.bintree.remotepkgs or \
4958                         not self.bintree.isremote(mycpv):
4959                         tbz2_path = self.bintree.getname(mycpv)
4960                         if not os.path.exists(tbz2_path):
4961                                 raise KeyError(mycpv)
4962                         getitem = xpak.tbz2(tbz2_path).getfile
4963                 else:
4964                         getitem = self.bintree.remotepkgs[tbz2name].get
4965                 mydata = {}
4966                 mykeys = wants
4967                 if cache_me:
4968                         mykeys = self._aux_cache_keys.union(wants)
4969                 for x in mykeys:
4970                         myval = getitem(x)
4971                         # myval is None if the key doesn't exist
4972                         # or the tbz2 is corrupt.
4973                         if myval:
4974                                 mydata[x] = " ".join(myval.split())
4975                 if "EAPI" in mykeys:
4976                         if not mydata.setdefault("EAPI", "0"):
4977                                 mydata["EAPI"] = "0"
4978                 if cache_me:
4979                         aux_cache = {}
4980                         for x in self._aux_cache_keys:
4981                                 aux_cache[x] = mydata.get(x, "")
4982                         self._aux_cache[mycpv] = aux_cache
4983                 return [mydata.get(x, "") for x in wants]
4984
4985         def aux_update(self, cpv, values):
4986                 if not self.bintree.populated:
4987                         self.bintree.populate()
4988                 tbz2path = self.bintree.getname(cpv)
4989                 if not os.path.exists(tbz2path):
4990                         raise KeyError(cpv)
4991                 mytbz2 = xpak.tbz2(tbz2path)
4992                 mydata = mytbz2.get_data()
4993                 mydata.update(values)
4994                 mytbz2.recompose_mem(xpak.xpak_mem(mydata))
4995
4996         def cp_list(self, *pargs, **kwargs):
4997                 if not self.bintree.populated:
4998                         self.bintree.populate()
4999                 return fakedbapi.cp_list(self, *pargs, **kwargs)
5000
5001         def cpv_all(self):
5002                 if not self.bintree.populated:
5003                         self.bintree.populate()
5004                 return fakedbapi.cpv_all(self)
5005
5006 class vardbapi(dbapi):
5007         def __init__(self, root, categories=None, settings=None, vartree=None):
5008                 self.root       = root[:]
5009                 #cache for category directory mtimes
5010                 self.mtdircache = {}
5011                 #cache for dependency checks
5012                 self.matchcache = {}
5013                 #cache for cp_list results
5014                 self.cpcache    = {}
5015                 self.blockers   = None
5016                 if settings is None:
5017                         settings = globals()["settings"]
5018                 self.settings = settings
5019                 if categories is None:
5020                         categories = settings.categories
5021                 self.categories = categories[:]
5022                 if vartree is None:
5023                         vartree = globals()["db"][root]["vartree"]
5024                 self.vartree = vartree
5025                 self._aux_cache_keys = set(["SLOT", "COUNTER", "PROVIDE", "USE",
5026                         "IUSE", "DEPEND", "RDEPEND", "PDEPEND"])
5027                 self._aux_cache = None
5028                 self._aux_cache_version = "1"
5029                 self._aux_cache_filename = os.path.join(self.root,
5030                         CACHE_PATH.lstrip(os.path.sep), "vdb_metadata.pickle")
5031
5032         def cpv_exists(self,mykey):
5033                 "Tells us whether an actual ebuild exists on disk (no masking)"
5034                 return os.path.exists(self.root+VDB_PATH+"/"+mykey)
5035
5036         def cpv_counter(self,mycpv):
5037                 "This method will grab the COUNTER. Returns a counter value."
5038                 try:
5039                         return long(self.aux_get(mycpv, ["COUNTER"])[0])
5040                 except (KeyError, ValueError):
5041                         pass
5042                 cdir=self.root+VDB_PATH+"/"+mycpv
5043                 cpath=self.root+VDB_PATH+"/"+mycpv+"/COUNTER"
5044
5045                 # We write our new counter value to a new file that gets moved into
5046                 # place to avoid filesystem corruption on XFS (unexpected reboot.)
5047                 corrupted=0
5048                 if os.path.exists(cpath):
5049                         cfile=open(cpath, "r")
5050                         try:
5051                                 counter=long(cfile.readline())
5052                         except ValueError:
5053                                 print "portage: COUNTER for",mycpv,"was corrupted; resetting to value of 0"
5054                                 counter=long(0)
5055                                 corrupted=1
5056                         cfile.close()
5057                 elif os.path.exists(cdir):
5058                         mys = pkgsplit(mycpv)
5059                         myl = self.match(mys[0],use_cache=0)
5060                         print mys,myl
5061                         if len(myl) == 1:
5062                                 try:
5063                                         # Only one package... Counter doesn't matter.
5064                                         write_atomic(cpath, "1")
5065                                         counter = 1
5066                                 except SystemExit, e:
5067                                         raise
5068                                 except Exception, e:
5069                                         writemsg("!!! COUNTER file is missing for "+str(mycpv)+" in /var/db.\n",
5070                                                 noiselevel=-1)
5071                                         writemsg("!!! Please run %s/fix-db.py or\n" % PORTAGE_BIN_PATH,
5072                                                 noiselevel=-1)
5073                                         writemsg("!!! unmerge this exact version.\n", noiselevel=-1)
5074                                         writemsg("!!! %s\n" % e, noiselevel=-1)
5075                                         sys.exit(1)
5076                         else:
5077                                 writemsg("!!! COUNTER file is missing for "+str(mycpv)+" in /var/db.\n",
5078                                         noiselevel=-1)
5079                                 writemsg("!!! Please run %s/fix-db.py or\n" % PORTAGE_BIN_PATH,
5080                                         noiselevel=-1)
5081                                 writemsg("!!! remerge the package.\n", noiselevel=-1)
5082                                 sys.exit(1)
5083                 else:
5084                         counter=long(0)
5085                 if corrupted:
5086                         # update new global counter file
5087                         write_atomic(cpath, str(counter))
5088                 return counter
5089
5090         def cpv_inject(self,mycpv):
5091                 "injects a real package into our on-disk database; assumes mycpv is valid and doesn't already exist"
5092                 os.makedirs(self.root+VDB_PATH+"/"+mycpv)
5093                 counter = self.counter_tick(self.root, mycpv=mycpv)
5094                 # write local package counter so that emerge clean does the right thing
5095                 write_atomic(os.path.join(self.root, VDB_PATH, mycpv, "COUNTER"), str(counter))
5096
5097         def isInjected(self,mycpv):
5098                 if self.cpv_exists(mycpv):
5099                         if os.path.exists(self.root+VDB_PATH+"/"+mycpv+"/INJECTED"):
5100                                 return True
5101                         if not os.path.exists(self.root+VDB_PATH+"/"+mycpv+"/CONTENTS"):
5102                                 return True
5103                 return False
5104
5105         def move_ent(self,mylist):
5106                 origcp=mylist[1]
5107                 newcp=mylist[2]
5108
5109                 # sanity check
5110                 for cp in [origcp,newcp]:
5111                         if not (isvalidatom(cp) and isjustname(cp)):
5112                                 raise portage_exception.InvalidPackageName(cp)
5113                 origmatches=self.match(origcp,use_cache=0)
5114                 if not origmatches:
5115                         return
5116                 for mycpv in origmatches:
5117                         mycpsplit=catpkgsplit(mycpv)
5118                         mynewcpv=newcp+"-"+mycpsplit[2]
5119                         mynewcat=newcp.split("/")[0]
5120                         if mycpsplit[3]!="r0":
5121                                 mynewcpv += "-"+mycpsplit[3]
5122                         mycpsplit_new = catpkgsplit(mynewcpv)
5123                         origpath=self.root+VDB_PATH+"/"+mycpv
5124                         if not os.path.exists(origpath):
5125                                 continue
5126                         writemsg_stdout("@")
5127                         if not os.path.exists(self.root+VDB_PATH+"/"+mynewcat):
5128                                 #create the directory
5129                                 os.makedirs(self.root+VDB_PATH+"/"+mynewcat)
5130                         newpath=self.root+VDB_PATH+"/"+mynewcpv
5131                         if os.path.exists(newpath):
5132                                 #dest already exists; keep this puppy where it is.
5133                                 continue
5134                         os.rename(origpath, newpath)
5135
5136                         # We need to rename the ebuild now.
5137                         old_pf = catsplit(mycpv)[1]
5138                         new_pf = catsplit(mynewcpv)[1]
5139                         if new_pf != old_pf:
5140                                 try:
5141                                         os.rename(os.path.join(newpath, old_pf + ".ebuild"),
5142                                                 os.path.join(newpath, new_pf + ".ebuild"))
5143                                 except OSError, e:
5144                                         if e.errno != errno.ENOENT:
5145                                                 raise
5146                                         del e
5147                                 write_atomic(os.path.join(newpath, "PF"), new_pf+"\n")
5148
5149                         write_atomic(os.path.join(newpath, "CATEGORY"), mynewcat+"\n")
5150                         fixdbentries([mylist], newpath)
5151
5152         def update_ents(self, update_iter):
5153                 """Run fixdbentries on all installed packages (time consuming).  Like
5154                 fixpackages, this should be run from a helper script and display
5155                 a progress indicator."""
5156                 dbdir = os.path.join(self.root, VDB_PATH)
5157                 for catdir in listdir(dbdir):
5158                         catdir = dbdir+"/"+catdir
5159                         if os.path.isdir(catdir):
5160                                 for pkgdir in listdir(catdir):
5161                                         pkgdir = catdir+"/"+pkgdir
5162                                         if os.path.isdir(pkgdir):
5163                                                 fixdbentries(update_iter, pkgdir)
5164
5165         def move_slot_ent(self,mylist):
5166                 pkg=mylist[1]
5167                 origslot=mylist[2]
5168                 newslot=mylist[3]
5169
5170                 if not isvalidatom(pkg):
5171                         raise portage_exception.InvalidAtom(pkg)
5172
5173                 origmatches=self.match(pkg,use_cache=0)
5174                 
5175                 if not origmatches:
5176                         return
5177                 for mycpv in origmatches:
5178                         origpath=self.root+VDB_PATH+"/"+mycpv
5179                         if not os.path.exists(origpath):
5180                                 continue
5181
5182                         slot=grabfile(origpath+"/SLOT");
5183                         if (not slot):
5184                                 continue
5185
5186                         if (slot[0]!=origslot):
5187                                 continue
5188
5189                         writemsg_stdout("s")
5190                         write_atomic(os.path.join(origpath, "SLOT"), newslot+"\n")
5191
5192         def cp_list(self,mycp,use_cache=1):
5193                 mysplit=mycp.split("/")
5194                 if mysplit[0] == '*':
5195                         mysplit[0] = mysplit[0][1:]
5196                 try:
5197                         mystat=os.stat(self.root+VDB_PATH+"/"+mysplit[0])[stat.ST_MTIME]
5198                 except OSError:
5199                         mystat=0
5200                 if use_cache and self.cpcache.has_key(mycp):
5201                         cpc=self.cpcache[mycp]
5202                         if cpc[0]==mystat:
5203                                 return cpc[1]
5204                 list=listdir(self.root+VDB_PATH+"/"+mysplit[0],EmptyOnError=1)
5205
5206                 if (list is None):
5207                         return []
5208                 returnme=[]
5209                 for x in list:
5210                         if x.startswith("."):
5211                                 continue
5212                         if x[0] == '-':
5213                                 #writemsg(red("INCOMPLETE MERGE:")+str(x[len("-MERGING-"):])+"\n")
5214                                 continue
5215                         ps=pkgsplit(x)
5216                         if not ps:
5217                                 self.invalidentry(self.root+VDB_PATH+"/"+mysplit[0]+"/"+x)
5218                                 continue
5219                         if len(mysplit) > 1:
5220                                 if ps[0]==mysplit[1]:
5221                                         returnme.append(mysplit[0]+"/"+x)
5222                 if use_cache:
5223                         self.cpcache[mycp]=[mystat,returnme]
5224                 elif self.cpcache.has_key(mycp):
5225                         del self.cpcache[mycp]
5226                 return returnme
5227
5228         def cpv_all(self,use_cache=1):
5229                 returnme=[]
5230                 basepath = self.root+VDB_PATH+"/"
5231
5232                 for x in self.categories:
5233                         for y in listdir(basepath+x,EmptyOnError=1):
5234                                 if y.startswith("."):
5235                                         continue
5236                                 subpath = x+"/"+y
5237                                 # -MERGING- should never be a cpv, nor should files.
5238                                 if os.path.isdir(basepath+subpath) and (pkgsplit(y) is not None):
5239                                         returnme += [subpath]
5240                 return returnme
5241
5242         def cp_all(self,use_cache=1):
5243                 mylist = self.cpv_all(use_cache=use_cache)
5244                 d={}
5245                 for y in mylist:
5246                         if y[0] == '*':
5247                                 y = y[1:]
5248                         mysplit=catpkgsplit(y)
5249                         if not mysplit:
5250                                 self.invalidentry(self.root+VDB_PATH+"/"+y)
5251                                 continue
5252                         d[mysplit[0]+"/"+mysplit[1]] = None
5253                 return d.keys()
5254
5255         def checkblockers(self,origdep):
5256                 pass
5257
5258         def match(self,origdep,use_cache=1):
5259                 "caching match function"
5260                 mydep = dep_expand(
5261                         origdep, mydb=self, use_cache=use_cache, settings=self.settings)
5262                 mykey=dep_getkey(mydep)
5263                 mycat=mykey.split("/")[0]
5264                 if not use_cache:
5265                         if self.matchcache.has_key(mycat):
5266                                 del self.mtdircache[mycat]
5267                                 del self.matchcache[mycat]
5268                         mymatch = match_from_list(mydep,
5269                                 self.cp_list(mykey, use_cache=use_cache))
5270                         myslot = portage_dep.dep_getslot(mydep)
5271                         if myslot is not None:
5272                                 mymatch = [cpv for cpv in mymatch \
5273                                         if self.aux_get(cpv, ["SLOT"])[0] == myslot]
5274                         return mymatch
5275                 try:
5276                         curmtime=os.stat(self.root+VDB_PATH+"/"+mycat)[stat.ST_MTIME]
5277                 except (IOError, OSError):
5278                         curmtime=0
5279
5280                 if not self.matchcache.has_key(mycat) or not self.mtdircache[mycat]==curmtime:
5281                         # clear cache entry
5282                         self.mtdircache[mycat]=curmtime
5283                         self.matchcache[mycat]={}
5284                 if not self.matchcache[mycat].has_key(mydep):
5285                         mymatch=match_from_list(mydep,self.cp_list(mykey,use_cache=use_cache))
5286                         myslot = portage_dep.dep_getslot(mydep)
5287                         if myslot is not None:
5288                                 mymatch = [cpv for cpv in mymatch \
5289                                         if self.aux_get(cpv, ["SLOT"])[0] == myslot]
5290                         self.matchcache[mycat][mydep]=mymatch
5291                 return self.matchcache[mycat][mydep][:]
5292
5293         def findname(self, mycpv):
5294                 return self.root+VDB_PATH+"/"+str(mycpv)+"/"+mycpv.split("/")[1]+".ebuild"
5295
5296         def flush_cache(self):
5297                 """If the current user has permission and the internal aux_get cache has
5298                 been updated, save it to disk and mark it unmodified.  This is called
5299                 by emerge after it has loaded the full vdb for use in dependency
5300                 calculations.  Currently, the cache is only written if the user has
5301                 superuser privileges (since that's required to obtain a lock), but all
5302                 users have read access and benefit from faster metadata lookups (as
5303                 long as at least part of the cache is still valid)."""
5304                 if self._aux_cache is not None and \
5305                         self._aux_cache["modified"] and \
5306                         secpass >= 2:
5307                         valid_nodes = set(self.cpv_all())
5308                         for cpv in self._aux_cache["packages"].keys():
5309                                 if cpv not in valid_nodes:
5310                                         del self._aux_cache["packages"][cpv]
5311                         del self._aux_cache["modified"]
5312                         try:
5313                                 f = atomic_ofstream(self._aux_cache_filename)
5314                                 cPickle.dump(self._aux_cache, f, -1)
5315                                 f.close()
5316                                 portage_util.apply_secpass_permissions(
5317                                         self._aux_cache_filename, gid=portage_gid, mode=0644)
5318                         except (IOError, OSError), e:
5319                                 pass
5320                         self._aux_cache["modified"] = False
5321
5322         def aux_get(self, mycpv, wants):
5323                 """This automatically caches selected keys that are frequently needed
5324                 by emerge for dependency calculations.  The cached metadata is
5325                 considered valid if the mtime of the package directory has not changed
5326                 since the data was cached.  The cache is stored in a pickled dict
5327                 object with the following format:
5328
5329                 {version:"1", "packages":{cpv1:(mtime,{k1,v1, k2,v2, ...}), cpv2...}}
5330
5331                 If an error occurs while loading the cache pickle or the version is
5332                 unrecognized, the cache will simple be recreated from scratch (it is
5333                 completely disposable).
5334                 """
5335                 if not self._aux_cache_keys.intersection(wants):
5336                         return self._aux_get(mycpv, wants)
5337                 if self._aux_cache is None:
5338                         try:
5339                                 f = open(self._aux_cache_filename)
5340                                 mypickle = cPickle.Unpickler(f)
5341                                 mypickle.find_global = None
5342                                 self._aux_cache = mypickle.load()
5343                                 f.close()
5344                                 del f
5345                         except (IOError, OSError, EOFError, cPickle.UnpicklingError):
5346                                 pass
5347                         if not self._aux_cache or \
5348                                 not isinstance(self._aux_cache, dict) or \
5349                                 self._aux_cache.get("version") != self._aux_cache_version or \
5350                                 not self._aux_cache.get("packages"):
5351                                 self._aux_cache = {"version":self._aux_cache_version}
5352                                 self._aux_cache["packages"] = {}
5353                         self._aux_cache["modified"] = False
5354                 mydir = os.path.join(self.root, VDB_PATH, mycpv)
5355                 mydir_stat = None
5356                 try:
5357                         mydir_stat = os.stat(mydir)
5358                 except OSError, e:
5359                         if e.errno != errno.ENOENT:
5360                                 raise
5361                         raise KeyError(mycpv)
5362                 mydir_mtime = long(mydir_stat.st_mtime)
5363                 pkg_data = self._aux_cache["packages"].get(mycpv)
5364                 mydata = {}
5365                 cache_valid = False
5366                 if pkg_data:
5367                         cache_mtime, metadata = pkg_data
5368                         cache_valid = cache_mtime == mydir_mtime
5369                 if cache_valid:
5370                         cache_incomplete = self._aux_cache_keys.difference(metadata)
5371                         if cache_incomplete:
5372                                 # Allow self._aux_cache_keys to change without a cache version
5373                                 # bump and efficiently recycle partial cache whenever possible.
5374                                 cache_valid = False
5375                                 pull_me = cache_incomplete.union(wants)
5376                         else:
5377                                 pull_me = set(wants).difference(self._aux_cache_keys)
5378                         mydata.update(metadata)
5379                 else:
5380                         pull_me = self._aux_cache_keys.union(wants)
5381                 if pull_me:
5382                         # pull any needed data and cache it
5383                         aux_keys = list(pull_me)
5384                         for k, v in izip(aux_keys, self._aux_get(mycpv, aux_keys)):
5385                                 mydata[k] = v
5386                         if not cache_valid:
5387                                 cache_data = {}
5388                                 for aux_key in self._aux_cache_keys:
5389                                         cache_data[aux_key] = mydata[aux_key]
5390                                 self._aux_cache["packages"][mycpv] = (mydir_mtime, cache_data)
5391                                 self._aux_cache["modified"] = True
5392                 return [mydata[x] for x in wants]
5393
5394         def _aux_get(self, mycpv, wants):
5395                 mydir = os.path.join(self.root, VDB_PATH, mycpv)
5396                 try:
5397                         if not stat.S_ISDIR(os.stat(mydir).st_mode):
5398                                 raise KeyError(mycpv)
5399                 except OSError, e:
5400                         if e.errno == errno.ENOENT:
5401                                 raise KeyError(mycpv)
5402                         del e
5403                         raise
5404                 results = []
5405                 for x in wants:
5406                         try:
5407                                 myf = open(os.path.join(mydir, x), "r")
5408                                 try:
5409                                         myd = myf.read()
5410                                 finally:
5411                                         myf.close()
5412                                 myd = " ".join(myd.split())
5413                         except IOError:
5414                                 myd = ""
5415                         if x == "EAPI" and not myd:
5416                                 results.append("0")
5417                         else:
5418                                 results.append(myd)
5419                 return results
5420
5421         def aux_update(self, cpv, values):
5422                 cat, pkg = cpv.split("/")
5423                 mylink = dblink(cat, pkg, self.root, self.settings,
5424                 treetype="vartree", vartree=self.vartree)
5425                 if not mylink.exists():
5426                         raise KeyError(cpv)
5427                 for k, v in values.iteritems():
5428                         mylink.setfile(k, v)
5429
5430         def counter_tick(self,myroot,mycpv=None):
5431                 return self.counter_tick_core(myroot,incrementing=1,mycpv=mycpv)
5432
5433         def get_counter_tick_core(self,myroot,mycpv=None):
5434                 return self.counter_tick_core(myroot,incrementing=0,mycpv=mycpv)+1
5435
5436         def counter_tick_core(self,myroot,incrementing=1,mycpv=None):
5437                 "This method will grab the next COUNTER value and record it back to the global file.  Returns new counter value."
5438                 cpath=myroot+"var/cache/edb/counter"
5439                 changed=0
5440                 min_counter = 0
5441                 if mycpv:
5442                         mysplit = pkgsplit(mycpv)
5443                         for x in self.match(mysplit[0],use_cache=0):
5444                                 if x==mycpv:
5445                                         continue
5446                                 try:
5447                                         old_counter = long(self.aux_get(x,["COUNTER"])[0])
5448                                         writemsg("COUNTER '%d' '%s'\n" % (old_counter, x),1)
5449                                 except (ValueError, KeyError): # valueError from long(), KeyError from aux_get
5450                                         old_counter = 0
5451                                         writemsg("!!! BAD COUNTER in '%s'\n" % (x), noiselevel=-1)
5452                                 if old_counter > min_counter:
5453                                         min_counter = old_counter
5454
5455                 # We write our new counter value to a new file that gets moved into
5456                 # place to avoid filesystem corruption.
5457                 find_counter = ("find '%s' -type f -name COUNTER | " + \
5458                         "while read f; do echo $(<\"${f}\"); done | " + \
5459                         "sort -n | tail -n1") % os.path.join(self.root, VDB_PATH)
5460                 if os.path.exists(cpath):
5461                         cfile=open(cpath, "r")
5462                         try:
5463                                 counter=long(cfile.readline())
5464                         except (ValueError,OverflowError):
5465                                 try:
5466                                         counter = long(commands.getoutput(find_counter).strip())
5467                                         writemsg("!!! COUNTER was corrupted; resetting to value of %d\n" % counter,
5468                                                 noiselevel=-1)
5469                                         changed=1
5470                                 except (ValueError,OverflowError):
5471                                         writemsg("!!! COUNTER data is corrupt in pkg db. The values need to be\n",
5472                                                 noiselevel=-1)
5473                                         writemsg("!!! corrected/normalized so that portage can operate properly.\n",
5474                                                 noiselevel=-1)
5475                                         writemsg("!!! A simple solution is not yet available so try #gentoo on IRC.\n")
5476                                         sys.exit(2)
5477                         cfile.close()
5478                 else:
5479                         try:
5480                                 counter = long(commands.getoutput(find_counter).strip())
5481                                 writemsg("!!! Global counter missing. Regenerated from counter files to: %s\n" % counter,
5482                                         noiselevel=-1)
5483                         except ValueError: # Value Error for long(), probably others for commands.getoutput
5484                                 writemsg("!!! Initializing global counter.\n", noiselevel=-1)
5485                                 counter=long(0)
5486                         changed=1
5487
5488                 if counter < min_counter:
5489                         counter = min_counter+1000
5490                         changed = 1
5491
5492                 if incrementing or changed:
5493
5494                         #increment counter
5495                         counter += 1
5496                         # update new global counter file
5497                         write_atomic(cpath, str(counter))
5498                 return counter
5499
5500 class vartree(object):
5501         "this tree will scan a var/db/pkg database located at root (passed to init)"
5502         def __init__(self, root="/", virtual=None, clone=None, categories=None,
5503                 settings=None):
5504                 if clone:
5505                         writemsg("vartree.__init__(): deprecated " + \
5506                                 "use of clone parameter\n", noiselevel=-1)
5507                         self.root       = clone.root[:]
5508                         self.dbapi      = copy.deepcopy(clone.dbapi)
5509                         self.populated  = 1
5510                         self.settings   = config(clone=clone.settings)
5511                 else:
5512                         self.root       = root[:]
5513                         if settings is None:
5514                                 settings = globals()["settings"]
5515                         self.settings = settings # for key_expand calls
5516                         if categories is None:
5517                                 categories = settings.categories
5518                         self.dbapi = vardbapi(self.root, categories=categories,
5519                                 settings=settings, vartree=self)
5520                         self.populated  = 1
5521
5522         def zap(self,mycpv):
5523                 return
5524
5525         def inject(self,mycpv):
5526                 return
5527
5528         def get_provide(self,mycpv):
5529                 myprovides=[]
5530                 mylines = None
5531                 try:
5532                         mylines, myuse = self.dbapi.aux_get(mycpv, ["PROVIDE","USE"])
5533                         if mylines:
5534                                 myuse = myuse.split()
5535                                 mylines = flatten(portage_dep.use_reduce(portage_dep.paren_reduce(mylines), uselist=myuse))
5536                                 for myprovide in mylines:
5537                                         mys = catpkgsplit(myprovide)
5538                                         if not mys:
5539                                                 mys = myprovide.split("/")
5540                                         myprovides += [mys[0] + "/" + mys[1]]
5541                         return myprovides
5542                 except SystemExit, e:
5543                         raise
5544                 except Exception, e:
5545                         mydir = os.path.join(self.root, VDB_PATH, mycpv)
5546                         writemsg("\nParse Error reading PROVIDE and USE in '%s'\n" % mydir,
5547                                 noiselevel=-1)
5548                         if mylines:
5549                                 writemsg("Possibly Invalid: '%s'\n" % str(mylines),
5550                                         noiselevel=-1)
5551                         writemsg("Exception: %s\n\n" % str(e), noiselevel=-1)
5552                         return []
5553
5554         def get_all_provides(self):
5555                 myprovides = {}
5556                 for node in self.getallcpv():
5557                         for mykey in self.get_provide(node):
5558                                 if myprovides.has_key(mykey):
5559                                         myprovides[mykey] += [node]
5560                                 else:
5561                                         myprovides[mykey]  = [node]
5562                 return myprovides
5563
5564         def dep_bestmatch(self,mydep,use_cache=1):
5565                 "compatibility method -- all matches, not just visible ones"
5566                 #mymatch=best(match(dep_expand(mydep,self.dbapi),self.dbapi))
5567                 mymatch = best(self.dbapi.match(
5568                         dep_expand(mydep, mydb=self.dbapi, settings=self.settings),
5569                         use_cache=use_cache))
5570                 if mymatch is None:
5571                         return ""
5572                 else:
5573                         return mymatch
5574
5575         def dep_match(self,mydep,use_cache=1):
5576                 "compatibility method -- we want to see all matches, not just visible ones"
5577                 #mymatch=match(mydep,self.dbapi)
5578                 mymatch=self.dbapi.match(mydep,use_cache=use_cache)
5579                 if mymatch is None:
5580                         return []
5581                 else:
5582                         return mymatch
5583
5584         def exists_specific(self,cpv):
5585                 return self.dbapi.cpv_exists(cpv)
5586
5587         def getallcpv(self):
5588                 """temporary function, probably to be renamed --- Gets a list of all
5589                 category/package-versions installed on the system."""
5590                 return self.dbapi.cpv_all()
5591
5592         def getallnodes(self):
5593                 """new behavior: these are all *unmasked* nodes.  There may or may not be available
5594                 masked package for nodes in this nodes list."""
5595                 return self.dbapi.cp_all()
5596
5597         def exists_specific_cat(self,cpv,use_cache=1):
5598                 cpv = key_expand(cpv, mydb=self.dbapi, use_cache=use_cache,
5599                         settings=self.settings)
5600                 a=catpkgsplit(cpv)
5601                 if not a:
5602                         return 0
5603                 mylist=listdir(self.root+VDB_PATH+"/"+a[0],EmptyOnError=1)
5604                 for x in mylist:
5605                         b=pkgsplit(x)
5606                         if not b:
5607                                 self.dbapi.invalidentry(self.root+VDB_PATH+"/"+a[0]+"/"+x)
5608                                 continue
5609                         if a[1]==b[0]:
5610                                 return 1
5611                 return 0
5612
5613         def getebuildpath(self,fullpackage):
5614                 cat,package=fullpackage.split("/")
5615                 return self.root+VDB_PATH+"/"+fullpackage+"/"+package+".ebuild"
5616
5617         def getnode(self,mykey,use_cache=1):
5618                 mykey = key_expand(mykey, mydb=self.dbapi, use_cache=use_cache,
5619                         settings=self.settings)
5620                 if not mykey:
5621                         return []
5622                 mysplit=mykey.split("/")
5623                 mydirlist=listdir(self.root+VDB_PATH+"/"+mysplit[0],EmptyOnError=1)
5624                 returnme=[]
5625                 for x in mydirlist:
5626                         mypsplit=pkgsplit(x)
5627                         if not mypsplit:
5628                                 self.dbapi.invalidentry(self.root+VDB_PATH+"/"+mysplit[0]+"/"+x)
5629                                 continue
5630                         if mypsplit[0]==mysplit[1]:
5631                                 appendme=[mysplit[0]+"/"+x,[mysplit[0],mypsplit[0],mypsplit[1],mypsplit[2]]]
5632                                 returnme.append(appendme)
5633                 return returnme
5634
5635
5636         def getslot(self,mycatpkg):
5637                 "Get a slot for a catpkg; assume it exists."
5638                 try:
5639                         return self.dbapi.aux_get(mycatpkg, ["SLOT"])[0]
5640                 except KeyError:
5641                         return ""
5642
5643         def hasnode(self,mykey,use_cache):
5644                 """Does the particular node (cat/pkg key) exist?"""
5645                 mykey = key_expand(mykey, mydb=self.dbapi, use_cache=use_cache,
5646                         settings=self.settings)
5647                 mysplit=mykey.split("/")
5648                 mydirlist=listdir(self.root+VDB_PATH+"/"+mysplit[0],EmptyOnError=1)
5649                 for x in mydirlist:
5650                         mypsplit=pkgsplit(x)
5651                         if not mypsplit:
5652                                 self.dbapi.invalidentry(self.root+VDB_PATH+"/"+mysplit[0]+"/"+x)
5653                                 continue
5654                         if mypsplit[0]==mysplit[1]:
5655                                 return 1
5656                 return 0
5657
5658         def populate(self):
5659                 self.populated=1
5660
5661 auxdbkeys=[
5662   'DEPEND',    'RDEPEND',   'SLOT',      'SRC_URI',
5663         'RESTRICT',  'HOMEPAGE',  'LICENSE',   'DESCRIPTION',
5664         'KEYWORDS',  'INHERITED', 'IUSE',      'CDEPEND',
5665         'PDEPEND',   'PROVIDE', 'EAPI',
5666         'UNUSED_01', 'UNUSED_02', 'UNUSED_03', 'UNUSED_04',
5667         'UNUSED_05', 'UNUSED_06', 'UNUSED_07',
5668         ]
5669 auxdbkeylen=len(auxdbkeys)
5670
5671 def close_portdbapi_caches():
5672         for i in portdbapi.portdbapi_instances:
5673                 i.close_caches()
5674
5675
5676 class portdbapi(dbapi):
5677         """this tree will scan a portage directory located at root (passed to init)"""
5678         portdbapi_instances = []
5679
5680         def __init__(self,porttree_root,mysettings=None):
5681                 portdbapi.portdbapi_instances.append(self)
5682
5683                 if mysettings:
5684                         self.mysettings = mysettings
5685                 else:
5686                         global settings
5687                         self.mysettings = config(clone=settings)
5688                 self._categories = set(self.mysettings.categories)
5689                 # This is strictly for use in aux_get() doebuild calls when metadata
5690                 # is generated by the depend phase.  It's safest to use a clone for
5691                 # this purpose because doebuild makes many changes to the config
5692                 # instance that is passed in.
5693                 self.doebuild_settings = config(clone=self.mysettings)
5694
5695                 self.manifestVerifyLevel  = None
5696                 self.manifestVerifier     = None
5697                 self.manifestCache        = {}    # {location: [stat, md5]}
5698                 self.manifestMissingCache = []
5699
5700                 if "gpg" in self.mysettings.features:
5701                         self.manifestVerifyLevel   = portage_gpg.EXISTS
5702                         if "strict" in self.mysettings.features:
5703                                 self.manifestVerifyLevel = portage_gpg.MARGINAL
5704                                 self.manifestVerifier = portage_gpg.FileChecker(self.mysettings["PORTAGE_GPG_DIR"], "gentoo.gpg", minimumTrust=self.manifestVerifyLevel)
5705                         elif "severe" in self.mysettings.features:
5706                                 self.manifestVerifyLevel = portage_gpg.TRUSTED
5707                                 self.manifestVerifier = portage_gpg.FileChecker(self.mysettings["PORTAGE_GPG_DIR"], "gentoo.gpg", requireSignedRing=True, minimumTrust=self.manifestVerifyLevel)
5708                         else:
5709                                 self.manifestVerifier = portage_gpg.FileChecker(self.mysettings["PORTAGE_GPG_DIR"], "gentoo.gpg", minimumTrust=self.manifestVerifyLevel)
5710
5711                 #self.root=settings["PORTDIR"]
5712                 self.porttree_root = os.path.realpath(porttree_root)
5713
5714                 self.depcachedir = self.mysettings.depcachedir[:]
5715
5716                 self.tmpfs = self.mysettings["PORTAGE_TMPFS"]
5717                 if self.tmpfs and not os.path.exists(self.tmpfs):
5718                         self.tmpfs = None
5719                 if self.tmpfs and not os.access(self.tmpfs, os.W_OK):
5720                         self.tmpfs = None
5721                 if self.tmpfs and not os.access(self.tmpfs, os.R_OK):
5722                         self.tmpfs = None
5723
5724                 self.eclassdb = eclass_cache.cache(self.porttree_root,
5725                         overlays=self.mysettings["PORTDIR_OVERLAY"].split())
5726
5727                 self.metadbmodule = self.mysettings.load_best_module("portdbapi.metadbmodule")
5728
5729                 #if the portdbapi is "frozen", then we assume that we can cache everything (that no updates to it are happening)
5730                 self.xcache={}
5731                 self.frozen=0
5732
5733                 self.porttrees = [self.porttree_root] + \
5734                         [os.path.realpath(t) for t in self.mysettings["PORTDIR_OVERLAY"].split()]
5735                 self.auxdbmodule  = self.mysettings.load_best_module("portdbapi.auxdbmodule")
5736                 self.auxdb        = {}
5737                 self._init_cache_dirs()
5738                 # XXX: REMOVE THIS ONCE UNUSED_0 IS YANKED FROM auxdbkeys
5739                 # ~harring
5740                 filtered_auxdbkeys = filter(lambda x: not x.startswith("UNUSED_0"), auxdbkeys)
5741                 if secpass < 1:
5742                         from cache import metadata_overlay, volatile
5743                         for x in self.porttrees:
5744                                 db_ro = self.auxdbmodule(self.depcachedir, x,
5745                                         filtered_auxdbkeys, gid=portage_gid, readonly=True)
5746                                 self.auxdb[x] = metadata_overlay.database(
5747                                         self.depcachedir, x, filtered_auxdbkeys,
5748                                         gid=portage_gid, db_rw=volatile.database,
5749                                         db_ro=db_ro)
5750                 else:
5751                         for x in self.porttrees:
5752                                 # location, label, auxdbkeys
5753                                 self.auxdb[x] = self.auxdbmodule(
5754                                         self.depcachedir, x, filtered_auxdbkeys, gid=portage_gid)
5755                 # Selectively cache metadata in order to optimize dep matching.
5756                 self._aux_cache_keys = set(["EAPI", "KEYWORDS", "SLOT"])
5757                 self._aux_cache = {}
5758                 self._broken_ebuilds = set()
5759
5760         def _init_cache_dirs(self):
5761                 """Create /var/cache/edb/dep and adjust permissions for the portage
5762                 group."""
5763
5764                 dirmode  = 02070
5765                 filemode =   060
5766                 modemask =    02
5767
5768                 try:
5769                         for mydir in (self.depcachedir,):
5770                                 if portage_util.ensure_dirs(mydir, gid=portage_gid, mode=dirmode, mask=modemask):
5771                                         writemsg("Adjusting permissions recursively: '%s'\n" % mydir,
5772                                                 noiselevel=-1)
5773                                         def onerror(e):
5774                                                 raise # bail out on the first error that occurs during recursion
5775                                         if not apply_recursive_permissions(mydir,
5776                                                 gid=portage_gid, dirmode=dirmode, dirmask=modemask,
5777                                                 filemode=filemode, filemask=modemask, onerror=onerror):
5778                                                 raise portage_exception.OperationNotPermitted(
5779                                                         "Failed to apply recursive permissions for the portage group.")
5780                 except portage_exception.PortageException, e:
5781                         pass
5782
5783         def close_caches(self):
5784                 for x in self.auxdb.keys():
5785                         self.auxdb[x].sync()
5786                 self.auxdb.clear()
5787
5788         def flush_cache(self):
5789                 for x in self.auxdb.values():
5790                         x.sync()
5791
5792         def finddigest(self,mycpv):
5793                 try:
5794                         mydig   = self.findname2(mycpv)[0]
5795                         if not mydig:
5796                                 return ""
5797                         mydigs  = mydig.split("/")[:-1]
5798                         mydig   = "/".join(mydigs)
5799                         mysplit = mycpv.split("/")
5800                 except OSError:
5801                         return ""
5802                 return mydig+"/files/digest-"+mysplit[-1]
5803
5804         def findname(self,mycpv):
5805                 return self.findname2(mycpv)[0]
5806
5807         def findname2(self, mycpv, mytree=None):
5808                 """ 
5809                 Returns the location of the CPV, and what overlay it was in.
5810                 Searches overlays first, then PORTDIR; this allows us to return the first
5811                 matching file.  As opposed to starting in portdir and then doing overlays
5812                 second, we would have to exhaustively search the overlays until we found
5813                 the file we wanted.
5814                 """
5815                 if not mycpv:
5816                         return "",0
5817                 mysplit=mycpv.split("/")
5818                 psplit=pkgsplit(mysplit[1])
5819
5820                 if mytree:
5821                         mytrees = [mytree]
5822                 else:
5823                         mytrees = self.porttrees[:]
5824                         mytrees.reverse()
5825                 if psplit:
5826                         for x in mytrees:
5827                                 file=x+"/"+mysplit[0]+"/"+psplit[0]+"/"+mysplit[1]+".ebuild"
5828                                 if os.access(file, os.R_OK):
5829                                         return[file, x]
5830                 return None, 0
5831
5832         def aux_get(self, mycpv, mylist, mytree=None):
5833                 "stub code for returning auxilliary db information, such as SLOT, DEPEND, etc."
5834                 'input: "sys-apps/foo-1.0",["SLOT","DEPEND","HOMEPAGE"]'
5835                 'return: ["0",">=sys-libs/bar-1.0","http://www.foo.com"] or raise KeyError if error'
5836                 cache_me = False
5837                 if not mytree and not set(mylist).difference(self._aux_cache_keys):
5838                         aux_cache = self._aux_cache.get(mycpv)
5839                         if aux_cache is not None:
5840                                 return [aux_cache[x] for x in mylist]
5841                         cache_me = True
5842                 global auxdbkeys,auxdbkeylen
5843                 cat,pkg = mycpv.split("/", 1)
5844
5845                 myebuild, mylocation = self.findname2(mycpv, mytree)
5846
5847                 if not myebuild:
5848                         writemsg("!!! aux_get(): ebuild path for '%(cpv)s' not specified:\n" % {"cpv":mycpv},
5849                                 noiselevel=1)
5850                         writemsg("!!!            %s\n" % myebuild, noiselevel=1)
5851                         raise KeyError(mycpv)
5852
5853                 myManifestPath = "/".join(myebuild.split("/")[:-1])+"/Manifest"
5854                 if "gpg" in self.mysettings.features:
5855                         try:
5856                                 mys = portage_gpg.fileStats(myManifestPath)
5857                                 if (myManifestPath in self.manifestCache) and \
5858                                    (self.manifestCache[myManifestPath] == mys):
5859                                         pass
5860                                 elif self.manifestVerifier:
5861                                         if not self.manifestVerifier.verify(myManifestPath):
5862                                                 # Verification failed the desired level.
5863                                                 raise portage_exception.UntrustedSignature, "Untrusted Manifest: %(manifest)s" % {"manifest":myManifestPath}
5864
5865                                 if ("severe" in self.mysettings.features) and \
5866                                    (mys != portage_gpg.fileStats(myManifestPath)):
5867                                         raise portage_exception.SecurityViolation, "Manifest changed: %(manifest)s" % {"manifest":myManifestPath}
5868
5869                         except portage_exception.InvalidSignature, e:
5870                                 if ("strict" in self.mysettings.features) or \
5871                                    ("severe" in self.mysettings.features):
5872                                         raise
5873                                 writemsg("!!! INVALID MANIFEST SIGNATURE DETECTED: %(manifest)s\n" % {"manifest":myManifestPath})
5874                         except portage_exception.MissingSignature, e:
5875                                 if ("severe" in self.mysettings.features):
5876                                         raise
5877                                 if ("strict" in self.mysettings.features):
5878                                         if myManifestPath not in self.manifestMissingCache:
5879                                                 writemsg("!!! WARNING: Missing signature in: %(manifest)s\n" % {"manifest":myManifestPath})
5880                                                 self.manifestMissingCache.insert(0,myManifestPath)
5881                         except (OSError,portage_exception.FileNotFound), e:
5882                                 if ("strict" in self.mysettings.features) or \
5883                                    ("severe" in self.mysettings.features):
5884                                         raise portage_exception.SecurityViolation, "Error in verification of signatures: %(errormsg)s" % {"errormsg":str(e)}
5885                                 writemsg("!!! Manifest is missing or inaccessable: %(manifest)s\n" % {"manifest":myManifestPath},
5886                                         noiselevel=-1)
5887
5888
5889                 if os.access(myebuild, os.R_OK):
5890                         emtime=os.stat(myebuild)[stat.ST_MTIME]
5891                 else:
5892                         writemsg("!!! aux_get(): ebuild for '%(cpv)s' does not exist at:\n" % {"cpv":mycpv},
5893                                 noiselevel=-1)
5894                         writemsg("!!!            %s\n" % myebuild,
5895                                 noiselevel=-1)
5896                         raise KeyError
5897
5898                 try:
5899                         mydata = self.auxdb[mylocation][mycpv]
5900                         if emtime != long(mydata.get("_mtime_", 0)):
5901                                 doregen = True
5902                         elif len(mydata.get("_eclasses_", [])) > 0:
5903                                 doregen = not self.eclassdb.is_eclass_data_valid(mydata["_eclasses_"])
5904                         else:
5905                                 doregen = False
5906                                 
5907                 except KeyError:
5908                         doregen = True
5909                 except CacheError:
5910                         doregen = True
5911                         try:                            del self.auxdb[mylocation][mycpv]
5912                         except KeyError:        pass
5913
5914                 writemsg("auxdb is valid: "+str(not doregen)+" "+str(pkg)+"\n", 2)
5915
5916                 if doregen:
5917                         if myebuild in self._broken_ebuilds:
5918                                 raise KeyError(mycpv)
5919                         writemsg("doregen: %s %s\n" % (doregen,mycpv), 2)
5920                         writemsg("Generating cache entry(0) for: "+str(myebuild)+"\n",1)
5921
5922                         self.doebuild_settings.reset()
5923                         mydata = {}
5924                         myret = doebuild(myebuild, "depend",
5925                                 self.doebuild_settings["ROOT"], self.doebuild_settings,
5926                                 dbkey=mydata, tree="porttree", mydbapi=self)
5927                         if myret != os.EX_OK:
5928                                 self._broken_ebuilds.add(myebuild)
5929                                 raise KeyError(mycpv)
5930
5931                         if "EAPI" not in mydata or not mydata["EAPI"].strip():
5932                                 mydata["EAPI"] = "0"
5933
5934                         if not eapi_is_supported(mydata["EAPI"]):
5935                                 # if newer version, wipe everything and negate eapi
5936                                 eapi = mydata["EAPI"]
5937                                 mydata = {}
5938                                 map(lambda x:mydata.setdefault(x, ""), auxdbkeys)
5939                                 mydata["EAPI"] = "-"+eapi
5940
5941                         if mydata.get("INHERITED", False):
5942                                 mydata["_eclasses_"] = self.eclassdb.get_eclass_data(mydata["INHERITED"].split())
5943                         else:
5944                                 mydata["_eclasses_"] = {}
5945                         
5946                         del mydata["INHERITED"]
5947
5948                         mydata["_mtime_"] = emtime
5949
5950                         self.auxdb[mylocation][mycpv] = mydata
5951
5952                 if not mydata.setdefault("EAPI", "0"):
5953                         mydata["EAPI"] = "0"
5954
5955                 #finally, we look at our internal cache entry and return the requested data.
5956                 returnme = []
5957                 for x in mylist:
5958                         if x == "INHERITED":
5959                                 returnme.append(' '.join(mydata.get("_eclasses_", {}).keys()))
5960                         else:
5961                                 returnme.append(mydata.get(x,""))
5962
5963                 if cache_me:
5964                         aux_cache = {}
5965                         for x in self._aux_cache_keys:
5966                                 aux_cache[x] = mydata.get(x, "")
5967                         self._aux_cache[mycpv] = aux_cache
5968
5969                 return returnme
5970
5971         def getfetchlist(self, mypkg, useflags=None, mysettings=None, all=0, mytree=None):
5972                 if mysettings is None:
5973                         mysettings = self.mysettings
5974                 try:
5975                         myuris = self.aux_get(mypkg, ["SRC_URI"], mytree=mytree)[0]
5976                 except KeyError:
5977                         # Convert this to an InvalidDependString exception since callers
5978                         # already handle it.
5979                         raise portage_exception.InvalidDependString(
5980                                 "getfetchlist(): aux_get() error reading "+mypkg+"; aborting.")
5981
5982                 if useflags is None:
5983                         useflags = mysettings["USE"].split()
5984
5985                 myurilist = portage_dep.paren_reduce(myuris)
5986                 myurilist = portage_dep.use_reduce(myurilist,uselist=useflags,matchall=all)
5987                 newuris = flatten(myurilist)
5988
5989                 myfiles = []
5990                 for x in newuris:
5991                         mya = os.path.basename(x)
5992                         if not mya:
5993                                 raise portage_exception.InvalidDependString("URI has no basename: '%s'" % x)
5994                         if not mya in myfiles:
5995                                 myfiles.append(mya)
5996                 return [newuris, myfiles]
5997
5998         def getfetchsizes(self,mypkg,useflags=None,debug=0):
5999                 # returns a filename:size dictionnary of remaining downloads
6000                 myebuild = self.findname(mypkg)
6001                 pkgdir = os.path.dirname(myebuild)
6002                 mf = Manifest(pkgdir, self.mysettings["DISTDIR"])
6003                 checksums = mf.getDigests()
6004                 if not checksums:
6005                         if debug: print "[empty/missing/bad digest]: "+mypkg
6006                         return None
6007                 filesdict={}
6008                 if useflags is None:
6009                         myuris, myfiles = self.getfetchlist(mypkg,all=1)
6010                 else:
6011                         myuris, myfiles = self.getfetchlist(mypkg,useflags=useflags)
6012                 #XXX: maybe this should be improved: take partial downloads
6013                 # into account? check checksums?
6014                 for myfile in myfiles:
6015                         if myfile not in checksums:
6016                                 if debug:
6017                                         writemsg("[bad digest]: missing %s for %s\n" % (myfile, mypkg))
6018                                 continue
6019                         file_path = os.path.join(self.mysettings["DISTDIR"], myfile)
6020                         mystat = None
6021                         try:
6022                                 mystat = os.stat(file_path)
6023                         except OSError, e:
6024                                 pass
6025                         if mystat is None:
6026                                 existing_size = 0
6027                         else:
6028                                 existing_size = mystat.st_size
6029                         remaining_size = int(checksums[myfile]["size"]) - existing_size
6030                         if remaining_size > 0:
6031                                 # Assume the download is resumable.
6032                                 filesdict[myfile] = remaining_size
6033                         elif remaining_size < 0:
6034                                 # The existing file is too large and therefore corrupt.
6035                                 filesdict[myfile] = int(checksums[myfile]["size"])
6036                 return filesdict
6037
6038         def fetch_check(self, mypkg, useflags=None, mysettings=None, all=False):
6039                 if not useflags:
6040                         if mysettings:
6041                                 useflags = mysettings["USE"].split()
6042                 myuri, myfiles = self.getfetchlist(mypkg, useflags=useflags, mysettings=mysettings, all=all)
6043                 myebuild = self.findname(mypkg)
6044                 pkgdir = os.path.dirname(myebuild)
6045                 mf = Manifest(pkgdir, self.mysettings["DISTDIR"])
6046                 mysums = mf.getDigests()
6047
6048                 failures = {}
6049                 for x in myfiles:
6050                         if not mysums or x not in mysums:
6051                                 ok     = False
6052                                 reason = "digest missing"
6053                         else:
6054                                 try:
6055                                         ok, reason = portage_checksum.verify_all(
6056                                                 os.path.join(self.mysettings["DISTDIR"], x), mysums[x])
6057                                 except portage_exception.FileNotFound, e:
6058                                         ok = False
6059                                         reason = "File Not Found: '%s'" % str(e)
6060                         if not ok:
6061                                 failures[x] = reason
6062                 if failures:
6063                         return False
6064                 return True
6065
6066         def cpv_exists(self,mykey):
6067                 "Tells us whether an actual ebuild exists on disk (no masking)"
6068                 cps2=mykey.split("/")
6069                 cps=catpkgsplit(mykey,silent=0)
6070                 if not cps:
6071                         #invalid cat/pkg-v
6072                         return 0
6073                 if self.findname(cps[0]+"/"+cps2[1]):
6074                         return 1
6075                 else:
6076                         return 0
6077
6078         def cp_all(self):
6079                 "returns a list of all keys in our tree"
6080                 d={}
6081                 for x in self.mysettings.categories:
6082                         for oroot in self.porttrees:
6083                                 for y in listdir(oroot+"/"+x,EmptyOnError=1,ignorecvs=1,dirsonly=1):
6084                                         d[x+"/"+y] = None
6085                 l = d.keys()
6086                 l.sort()
6087                 return l
6088
6089         def p_list(self,mycp):
6090                 d={}
6091                 for oroot in self.porttrees:
6092                         for x in listdir(oroot+"/"+mycp,EmptyOnError=1,ignorecvs=1):
6093                                 if x[-7:]==".ebuild":
6094                                         d[x[:-7]] = None
6095                 return d.keys()
6096
6097         def cp_list(self, mycp, use_cache=1, mytree=None):
6098                 mysplit=mycp.split("/")
6099                 invalid_category = mysplit[0] not in self._categories
6100                 d={}
6101                 if mytree:
6102                         mytrees = [mytree]
6103                 else:
6104                         mytrees = self.porttrees
6105                 for oroot in mytrees:
6106                         for x in listdir(oroot+"/"+mycp,EmptyOnError=1,ignorecvs=1):
6107                                 if x.endswith(".ebuild"):
6108                                         pf = x[:-7]
6109                                         ps = pkgsplit(pf)
6110                                         if not ps:
6111                                                 writemsg("\nInvalid ebuild name: %s\n" % \
6112                                                         os.path.join(oroot, mycp, x), noiselevel=-1)
6113                                                 continue
6114                                         d[mysplit[0]+"/"+pf] = None
6115                 if invalid_category and d:
6116                         writemsg(("\n!!! '%s' has a category that is not listed in " + \
6117                                 "/etc/portage/categories\n") % mycp, noiselevel=-1)
6118                         return []
6119                 return d.keys()
6120
6121         def freeze(self):
6122                 for x in ["list-visible","bestmatch-visible","match-visible","match-all"]:
6123                         self.xcache[x]={}
6124                 self.frozen=1
6125
6126         def melt(self):
6127                 self.xcache={}
6128                 self.frozen=0
6129
6130         def xmatch(self,level,origdep,mydep=None,mykey=None,mylist=None):
6131                 "caching match function; very trick stuff"
6132                 #if no updates are being made to the tree, we can consult our xcache...
6133                 if self.frozen:
6134                         try:
6135                                 return self.xcache[level][origdep][:]
6136                         except KeyError:
6137                                 pass
6138
6139                 if not mydep:
6140                         #this stuff only runs on first call of xmatch()
6141                         #create mydep, mykey from origdep
6142                         mydep = dep_expand(origdep, mydb=self, settings=self.mysettings)
6143                         mykey=dep_getkey(mydep)
6144
6145                 if level=="list-visible":
6146                         #a list of all visible packages, not called directly (just by xmatch())
6147                         #myval=self.visible(self.cp_list(mykey))
6148                         myval=self.gvisible(self.visible(self.cp_list(mykey)))
6149                 elif level=="bestmatch-visible":
6150                         #dep match -- best match of all visible packages
6151                         myval=best(self.xmatch("match-visible",None,mydep=mydep,mykey=mykey))
6152                         #get all visible matches (from xmatch()), then choose the best one
6153                 elif level=="bestmatch-list":
6154                         #dep match -- find best match but restrict search to sublist
6155                         myval=best(match_from_list(mydep,mylist))
6156                         #no point is calling xmatch again since we're not caching list deps
6157                 elif level=="match-list":
6158                         #dep match -- find all matches but restrict search to sublist (used in 2nd half of visible())
6159                         myval=match_from_list(mydep,mylist)
6160                 elif level=="match-visible":
6161                         #dep match -- find all visible matches
6162                         myval = match_from_list(mydep,
6163                                 self.xmatch("list-visible", mykey, mydep=mykey, mykey=mykey))
6164                         #get all visible packages, then get the matching ones
6165                 elif level=="match-all":
6166                         #match *all* visible *and* masked packages
6167                         myval=match_from_list(mydep,self.cp_list(mykey))
6168                 else:
6169                         print "ERROR: xmatch doesn't handle",level,"query!"
6170                         raise KeyError
6171                 myslot = portage_dep.dep_getslot(mydep)
6172                 if myslot is not None:
6173                         slotmatches = []
6174                         for cpv in myval:
6175                                 try:
6176                                         if self.aux_get(cpv, ["SLOT"])[0] == myslot:
6177                                                 slotmatches.append(cpv)
6178                                 except KeyError:
6179                                         pass # ebuild masked by corruption
6180                         myval = slotmatches
6181                 if self.frozen and (level not in ["match-list","bestmatch-list"]):
6182                         self.xcache[level][mydep]=myval
6183                         if origdep and origdep != mydep:
6184                                 self.xcache[level][origdep] = myval
6185                 return myval[:]
6186
6187         def match(self,mydep,use_cache=1):
6188                 return self.xmatch("match-visible",mydep)
6189
6190         def visible(self,mylist):
6191                 """two functions in one.  Accepts a list of cpv values and uses the package.mask *and*
6192                 packages file to remove invisible entries, returning remaining items.  This function assumes
6193                 that all entries in mylist have the same category and package name."""
6194                 if (mylist is None) or (len(mylist)==0):
6195                         return []
6196                 newlist=mylist[:]
6197                 #first, we mask out packages in the package.mask file
6198                 mykey=newlist[0]
6199                 cpv=catpkgsplit(mykey)
6200                 if not cpv:
6201                         #invalid cat/pkg-v
6202                         print "visible(): invalid cat/pkg-v:",mykey
6203                         return []
6204                 mycp=cpv[0]+"/"+cpv[1]
6205                 maskdict=self.mysettings.pmaskdict
6206                 unmaskdict=self.mysettings.punmaskdict
6207                 if maskdict.has_key(mycp):
6208                         for x in maskdict[mycp]:
6209                                 mymatches=self.xmatch("match-all",x)
6210                                 if mymatches is None:
6211                                         #error in package.mask file; print warning and continue:
6212                                         print "visible(): package.mask entry \""+x+"\" is invalid, ignoring..."
6213                                         continue
6214                                 for y in mymatches:
6215                                         unmask=0
6216                                         if unmaskdict.has_key(mycp):
6217                                                 for z in unmaskdict[mycp]:
6218                                                         mymatches_unmask=self.xmatch("match-all",z)
6219                                                         if y in mymatches_unmask:
6220                                                                 unmask=1
6221                                                                 break
6222                                         if unmask==0:
6223                                                 try:
6224                                                         newlist.remove(y)
6225                                                 except ValueError:
6226                                                         pass
6227
6228                 revmaskdict=self.mysettings.prevmaskdict
6229                 if revmaskdict.has_key(mycp):
6230                         for x in revmaskdict[mycp]:
6231                                 #important: only match against the still-unmasked entries...
6232                                 #notice how we pass "newlist" to the xmatch() call below....
6233                                 #Without this, ~ deps in the packages files are broken.
6234                                 mymatches=self.xmatch("match-list",x,mylist=newlist)
6235                                 if mymatches is None:
6236                                         #error in packages file; print warning and continue:
6237                                         print "emerge: visible(): profile packages entry \""+x+"\" is invalid, ignoring..."
6238                                         continue
6239                                 pos=0
6240                                 while pos<len(newlist):
6241                                         if newlist[pos] not in mymatches:
6242                                                 del newlist[pos]
6243                                         else:
6244                                                 pos += 1
6245                 return newlist
6246
6247         def gvisible(self,mylist):
6248                 "strip out group-masked (not in current group) entries"
6249
6250                 if mylist is None:
6251                         return []
6252                 newlist=[]
6253
6254                 accept_keywords = self.mysettings["ACCEPT_KEYWORDS"].split()
6255                 pkgdict = self.mysettings.pkeywordsdict
6256                 for mycpv in mylist:
6257                         try:
6258                                 keys, eapi = self.aux_get(mycpv, ["KEYWORDS", "EAPI"])
6259                         except KeyError:
6260                                 continue
6261                         except portage_exception.PortageException, e:
6262                                 writemsg("!!! Error: aux_get('%s', ['KEYWORDS', 'EAPI'])\n" % \
6263                                         mycpv, noiselevel=-1)
6264                                 writemsg("!!! %s\n" % str(e), noiselevel=-1)
6265                                 del e
6266                                 continue
6267                         mygroups=keys.split()
6268                         # Repoman may modify this attribute as necessary.
6269                         pgroups = accept_keywords[:]
6270                         match=0
6271                         cp = dep_getkey(mycpv)
6272                         if pkgdict.has_key(cp):
6273                                 matches = match_to_list(mycpv, pkgdict[cp].keys())
6274                                 for atom in matches:
6275                                         pgroups.extend(pkgdict[cp][atom])
6276                                 if matches:
6277                                         inc_pgroups = []
6278                                         for x in pgroups:
6279                                                 # The -* special case should be removed once the tree 
6280                                                 # is clean of KEYWORDS=-* crap
6281                                                 if x != "-*" and x.startswith("-"):
6282                                                         try:
6283                                                                 inc_pgroups.remove(x[1:])
6284                                                         except ValueError:
6285                                                                 pass
6286                                                 if x not in inc_pgroups:
6287                                                         inc_pgroups.append(x)
6288                                         pgroups = inc_pgroups
6289                                         del inc_pgroups
6290                         hasstable = False
6291                         hastesting = False
6292                         for gp in mygroups:
6293                                 if gp=="*":
6294                                         writemsg("--- WARNING: Package '%s' uses '*' keyword.\n" % mycpv,
6295                                                 noiselevel=-1)
6296                                         match=1
6297                                         break
6298                                 elif gp in pgroups:
6299                                         match=1
6300                                         break
6301                                 elif gp[0] == "~":
6302                                         hastesting = True
6303                                 elif gp[0] != "-":
6304                                         hasstable = True
6305                         if not match and ((hastesting and "~*" in pgroups) or (hasstable and "*" in pgroups) or "**" in pgroups):
6306                                 match=1
6307                         if match and eapi_is_supported(eapi):
6308                                 newlist.append(mycpv)
6309                 return newlist
6310
6311 class binarytree(object):
6312         "this tree scans for a list of all packages available in PKGDIR"
6313         def __init__(self, root, pkgdir, virtual=None, settings=None, clone=None):
6314                 if clone:
6315                         writemsg("binarytree.__init__(): deprecated " + \
6316                                 "use of clone parameter\n", noiselevel=-1)
6317                         # XXX This isn't cloning. It's an instance of the same thing.
6318                         self.root=clone.root
6319                         self.pkgdir=clone.pkgdir
6320                         self.dbapi=clone.dbapi
6321                         self.populated=clone.populated
6322                         self.tree=clone.tree
6323                         self.remotepkgs=clone.remotepkgs
6324                         self.invalids=clone.invalids
6325                         self.settings = clone.settings
6326                 else:
6327                         self.root=root
6328                         #self.pkgdir=settings["PKGDIR"]
6329                         self.pkgdir = normalize_path(pkgdir)
6330                         self.dbapi = bindbapi(self, settings=settings)
6331                         self.populated=0
6332                         self.tree={}
6333                         self.remotepkgs={}
6334                         self.invalids=[]
6335                         self.settings = settings
6336                         self._pkg_paths = {}
6337
6338         def move_ent(self,mylist):
6339                 if not self.populated:
6340                         self.populate()
6341                 origcp=mylist[1]
6342                 newcp=mylist[2]
6343                 # sanity check
6344                 for cp in [origcp,newcp]:
6345                         if not (isvalidatom(cp) and isjustname(cp)):
6346                                 raise portage_exception.InvalidPackageName(cp)
6347                 origcat = origcp.split("/")[0]
6348                 mynewcat=newcp.split("/")[0]
6349                 origmatches=self.dbapi.cp_list(origcp)
6350                 if not origmatches:
6351                         return
6352                 for mycpv in origmatches:
6353
6354                         mycpsplit=catpkgsplit(mycpv)
6355                         mynewcpv=newcp+"-"+mycpsplit[2]
6356                         if mycpsplit[3]!="r0":
6357                                 mynewcpv += "-"+mycpsplit[3]
6358                         myoldpkg=mycpv.split("/")[1]
6359                         mynewpkg=mynewcpv.split("/")[1]
6360
6361                         if (mynewpkg != myoldpkg) and os.path.exists(self.getname(mynewcpv)):
6362                                 writemsg("!!! Cannot update binary: Destination exists.\n",
6363                                         noiselevel=-1)
6364                                 writemsg("!!! "+mycpv+" -> "+mynewcpv+"\n", noiselevel=-1)
6365                                 continue
6366
6367                         tbz2path=self.getname(mycpv)
6368                         if os.path.exists(tbz2path) and not os.access(tbz2path,os.W_OK):
6369                                 writemsg("!!! Cannot update readonly binary: "+mycpv+"\n",
6370                                         noiselevel=-1)
6371                                 continue
6372
6373                         #print ">>> Updating data in:",mycpv
6374                         writemsg_stdout("%")
6375                         mytbz2 = xpak.tbz2(tbz2path)
6376                         mydata = mytbz2.get_data()
6377                         updated_items = update_dbentries([mylist], mydata)
6378                         mydata.update(updated_items)
6379                         mydata["CATEGORY"] = mynewcat+"\n"
6380                         if mynewpkg != myoldpkg:
6381                                 mydata[mynewpkg+".ebuild"] = mydata[myoldpkg+".ebuild"]
6382                                 del mydata[myoldpkg+".ebuild"]
6383                                 mydata["PF"] = mynewpkg + "\n"
6384                         mytbz2.recompose_mem(xpak.xpak_mem(mydata))
6385
6386                         self.dbapi.cpv_remove(mycpv)
6387                         del self._pkg_paths[mycpv]
6388                         new_path = self.getname(mynewcpv)
6389                         self._pkg_paths[mynewcpv] = os.path.join(
6390                                 *new_path.split(os.path.sep)[-2:])
6391                         if new_path != mytbz2:
6392                                 try:
6393                                         os.makedirs(os.path.dirname(new_path))
6394                                 except OSError, e:
6395                                         if e.errno != errno.EEXIST:
6396                                                 raise
6397                                         del e
6398                                 os.rename(tbz2path, new_path)
6399                                 self._remove_symlink(mycpv)
6400                                 if new_path.split(os.path.sep)[-2] == "All":
6401                                         self._create_symlink(mynewcpv)
6402                         self.dbapi.cpv_inject(mynewcpv)
6403
6404                 return 1
6405
6406         def _remove_symlink(self, cpv):
6407                 """Remove a ${PKGDIR}/${CATEGORY}/${PF}.tbz2 symlink and also remove
6408                 the ${PKGDIR}/${CATEGORY} directory if empty.  The file will not be
6409                 removed if os.path.islink() returns False."""
6410                 mycat, mypkg = catsplit(cpv)
6411                 mylink = os.path.join(self.pkgdir, mycat, mypkg + ".tbz2")
6412                 if os.path.islink(mylink):
6413                         """Only remove it if it's really a link so that this method never
6414                         removes a real package that was placed here to avoid a collision."""
6415                         os.unlink(mylink)
6416                 try:
6417                         os.rmdir(os.path.join(self.pkgdir, mycat))
6418                 except OSError, e:
6419                         if e.errno not in (errno.ENOENT,
6420                                 errno.ENOTEMPTY, errno.EEXIST):
6421                                 raise
6422                         del e
6423
6424         def _create_symlink(self, cpv):
6425                 """Create a ${PKGDIR}/${CATEGORY}/${PF}.tbz2 symlink (and
6426                 ${PKGDIR}/${CATEGORY} directory, if necessary).  Any file that may
6427                 exist in the location of the symlink will first be removed."""
6428                 mycat, mypkg = catsplit(cpv)
6429                 full_path = os.path.join(self.pkgdir, mycat, mypkg + ".tbz2")
6430                 try:
6431                         os.makedirs(os.path.dirname(full_path))
6432                 except OSError, e:
6433                         if e.errno != errno.EEXIST:
6434                                 raise
6435                         del e
6436                 try:
6437                         os.unlink(full_path)
6438                 except OSError, e:
6439                         if e.errno != errno.ENOENT:
6440                                 raise
6441                         del e
6442                 os.symlink(os.path.join("..", "All", mypkg + ".tbz2"), full_path)
6443
6444         def move_slot_ent(self, mylist):
6445                 if not self.populated:
6446                         self.populate()
6447                 pkg=mylist[1]
6448                 origslot=mylist[2]
6449                 newslot=mylist[3]
6450                 
6451                 if not isvalidatom(pkg):
6452                         raise portage_exception.InvalidAtom(pkg)
6453                 
6454                 origmatches=self.dbapi.match(pkg)
6455                 if not origmatches:
6456                         return
6457                 for mycpv in origmatches:
6458                         mycpsplit=catpkgsplit(mycpv)
6459                         myoldpkg=mycpv.split("/")[1]
6460                         tbz2path=self.getname(mycpv)
6461                         if os.path.exists(tbz2path) and not os.access(tbz2path,os.W_OK):
6462                                 writemsg("!!! Cannot update readonly binary: "+mycpv+"\n",
6463                                         noiselevel=-1)
6464                                 continue
6465
6466                         #print ">>> Updating data in:",mycpv
6467                         mytbz2 = xpak.tbz2(tbz2path)
6468                         mydata = mytbz2.get_data()
6469
6470                         slot = mydata["SLOT"]
6471                         if (not slot):
6472                                 continue
6473
6474                         if (slot[0]!=origslot):
6475                                 continue
6476
6477                         writemsg_stdout("S")
6478                         mydata["SLOT"] = newslot+"\n"
6479                         mytbz2.recompose_mem(xpak.xpak_mem(mydata))
6480                 return 1
6481
6482         def update_ents(self, update_iter):
6483                 if len(update_iter) == 0:
6484                         return
6485                 if not self.populated:
6486                         self.populate()
6487
6488                 for mycpv in self.dbapi.cp_all():
6489                         tbz2path=self.getname(mycpv)
6490                         if os.path.exists(tbz2path) and not os.access(tbz2path,os.W_OK):
6491                                 writemsg("!!! Cannot update readonly binary: "+mycpv+"\n",
6492                                         noiselevel=-1)
6493                                 continue
6494                         #print ">>> Updating binary data:",mycpv
6495                         writemsg_stdout("*")
6496                         mytbz2 = xpak.tbz2(tbz2path)
6497                         mydata = mytbz2.get_data()
6498                         updated_items = update_dbentries(update_iter, mydata)
6499                         if len(updated_items) > 0:
6500                                 mydata.update(updated_items)
6501                                 mytbz2.recompose_mem(xpak.xpak_mem(mydata))
6502                 return 1
6503
6504         def prevent_collision(self, cpv):
6505                 """Make sure that the file location ${PKGDIR}/All/${PF}.tbz2 is safe to
6506                 use for a given cpv.  If a collision will occur with an existing
6507                 package from another category, the existing package will be bumped to
6508                 ${PKGDIR}/${CATEGORY}/${PF}.tbz2 so that both can coexist."""
6509                 if not self.populated:
6510                         # Try to avoid the population routine when possible, so that
6511                         # FEATURES=buildpkg doesn't always force population.
6512                         mycat, mypkg = catsplit(cpv)
6513                         myfile = mypkg + ".tbz2"
6514                         full_path = os.path.join(self.pkgdir, "All", myfile)
6515                         if not os.path.exists(full_path):
6516                                 return
6517                         tbz2_cat = xpak.tbz2(full_path).getfile("CATEGORY")
6518                         if tbz2_cat and tbz2_cat.strip() == mycat:
6519                                 return
6520                 full_path = self.getname(cpv)
6521                 if "All" == full_path.split(os.path.sep)[-2]:
6522                         return
6523                 """Move a colliding package if it exists.  Code below this point only
6524                 executes in rare cases."""
6525                 mycat, mypkg = catsplit(cpv)
6526                 myfile = mypkg + ".tbz2"
6527                 mypath = os.path.join("All", myfile)
6528                 dest_path = os.path.join(self.pkgdir, mypath)
6529                 if os.path.exists(dest_path):
6530                         # For invalid packages, other_cat could be None.
6531                         other_cat = xpak.tbz2(dest_path).getfile("CATEGORY")
6532                         if other_cat:
6533                                 other_cat = other_cat.strip()
6534                                 self._move_from_all(other_cat + "/" + mypkg)
6535                 """The file may or may not exist. Move it if necessary and update
6536                 internal state for future calls to getname()."""
6537                 self._move_to_all(cpv)
6538
6539         def _move_to_all(self, cpv):
6540                 """If the file exists, move it.  Whether or not it exists, update state
6541                 for future getname() calls."""
6542                 mycat , mypkg = catsplit(cpv)
6543                 myfile = mypkg + ".tbz2"
6544                 src_path = os.path.join(self.pkgdir, mycat, myfile)
6545                 try:
6546                         mystat = os.lstat(src_path)
6547                 except OSError, e:
6548                         mystat = None
6549                 if mystat and stat.S_ISREG(mystat.st_mode):
6550                         try:
6551                                 os.makedirs(os.path.join(self.pkgdir, "All"))
6552                         except OSError, e:
6553                                 if e.errno != errno.EEXIST:
6554                                         raise
6555                                 del e
6556                         os.rename(src_path, os.path.join(self.pkgdir, "All", myfile))
6557                         self._create_symlink(cpv)
6558                 self._pkg_paths[cpv] = os.path.join("All", myfile)
6559
6560         def _move_from_all(self, cpv):
6561                 """Move a package from ${PKGDIR}/All/${PF}.tbz2 to
6562                 ${PKGDIR}/${CATEGORY}/${PF}.tbz2 and update state from getname calls."""
6563                 self._remove_symlink(cpv)
6564                 mycat , mypkg = catsplit(cpv)
6565                 myfile = mypkg + ".tbz2"
6566                 mypath = os.path.join(mycat, myfile)
6567                 dest_path = os.path.join(self.pkgdir, mypath)
6568                 try:
6569                         os.makedirs(os.path.dirname(dest_path))
6570                 except OSError, e:
6571                         if e.errno != errno.EEXIST:
6572                                 raise
6573                         del e
6574                 os.rename(os.path.join(self.pkgdir, "All", myfile), dest_path)
6575                 self._pkg_paths[cpv] = mypath
6576
6577         def populate(self, getbinpkgs=0,getbinpkgsonly=0):
6578                 "populates the binarytree"
6579                 if (not os.path.isdir(self.pkgdir) and not getbinpkgs):
6580                         return 0
6581
6582                 categories = set(self.settings.categories)
6583
6584                 if not getbinpkgsonly:
6585                         pkg_paths = {}
6586                         dirs = listdir(self.pkgdir, dirsonly=True, EmptyOnError=True)
6587                         if "All" in dirs:
6588                                 dirs.remove("All")
6589                         dirs.sort()
6590                         dirs.insert(0, "All")
6591                         for mydir in dirs:
6592                                 for myfile in listdir(os.path.join(self.pkgdir, mydir)):
6593                                         if not myfile.endswith(".tbz2"):
6594                                                 continue
6595                                         mypath = os.path.join(mydir, myfile)
6596                                         full_path = os.path.join(self.pkgdir, mypath)
6597                                         if os.path.islink(full_path):
6598                                                 continue
6599                                         mytbz2 = xpak.tbz2(full_path)
6600                                         # For invalid packages, mycat could be None.
6601                                         mycat = mytbz2.getfile("CATEGORY")
6602                                         mypf = mytbz2.getfile("PF")
6603                                         mypkg = myfile[:-5]
6604                                         if not mycat or not mypf:
6605                                                 #old-style or corrupt package
6606                                                 writemsg("!!! Invalid binary package: '%s'\n" % full_path,
6607                                                         noiselevel=-1)
6608                                                 writemsg("!!! This binary package is not " + \
6609                                                         "recoverable and should be deleted.\n",
6610                                                         noiselevel=-1)
6611                                                 self.invalids.append(mypkg)
6612                                                 continue
6613                                         mycat = mycat.strip()
6614                                         if mycat != mydir and mydir != "All":
6615                                                 continue
6616                                         if mypkg != mypf.strip():
6617                                                 continue
6618                                         mycpv = mycat + "/" + mypkg
6619                                         if mycpv in pkg_paths:
6620                                                 # All is first, so it's preferred.
6621                                                 continue
6622                                         if mycat not in categories:
6623                                                 writemsg(("!!! Binary package has an " + \
6624                                                         "unrecognized category: '%s'\n") % full_path,
6625                                                         noiselevel=-1)
6626                                                 writemsg(("!!! '%s' has a category that is not" + \
6627                                                         " listed in /etc/portage/categories\n") % mycpv,
6628                                                         noiselevel=-1)
6629                                                 continue
6630                                         pkg_paths[mycpv] = mypath
6631                                         self.dbapi.cpv_inject(mycpv)
6632                         self._pkg_paths = pkg_paths
6633
6634                 if getbinpkgs and not self.settings["PORTAGE_BINHOST"]:
6635                         writemsg(red("!!! PORTAGE_BINHOST unset, but use is requested.\n"),
6636                                 noiselevel=-1)
6637
6638                 if getbinpkgs and \
6639                         self.settings["PORTAGE_BINHOST"] and not self.remotepkgs:
6640                         try:
6641                                 chunk_size = long(self.settings["PORTAGE_BINHOST_CHUNKSIZE"])
6642                                 if chunk_size < 8:
6643                                         chunk_size = 8
6644                         except (ValueError, KeyError):
6645                                 chunk_size = 3000
6646
6647                         writemsg(green("Fetching binary packages info...\n"))
6648                         self.remotepkgs = getbinpkg.dir_get_metadata(
6649                                 self.settings["PORTAGE_BINHOST"], chunk_size=chunk_size)
6650                         writemsg(green("  -- DONE!\n\n"))
6651
6652                         for mypkg in self.remotepkgs.keys():
6653                                 if not self.remotepkgs[mypkg].has_key("CATEGORY"):
6654                                         #old-style or corrupt package
6655                                         writemsg("!!! Invalid remote binary package: "+mypkg+"\n",
6656                                                 noiselevel=-1)
6657                                         del self.remotepkgs[mypkg]
6658                                         continue
6659                                 mycat=self.remotepkgs[mypkg]["CATEGORY"].strip()
6660                                 fullpkg=mycat+"/"+mypkg[:-5]
6661                                 if mycat not in categories:
6662                                         writemsg(("!!! Remote binary package has an " + \
6663                                                 "unrecognized category: '%s'\n") % fullpkg,
6664                                                 noiselevel=-1)
6665                                         writemsg(("!!! '%s' has a category that is not" + \
6666                                                 " listed in /etc/portage/categories\n") % fullpkg,
6667                                                 noiselevel=-1)
6668                                         continue
6669                                 mykey=dep_getkey(fullpkg)
6670                                 try:
6671                                         # invalid tbz2's can hurt things.
6672                                         #print "cpv_inject("+str(fullpkg)+")"
6673                                         self.dbapi.cpv_inject(fullpkg)
6674                                         #print "  -- Injected"
6675                                 except SystemExit, e:
6676                                         raise
6677                                 except:
6678                                         writemsg("!!! Failed to inject remote binary package:"+str(fullpkg)+"\n",
6679                                                 noiselevel=-1)
6680                                         del self.remotepkgs[mypkg]
6681                                         continue
6682                 self.populated=1
6683
6684         def inject(self,cpv):
6685                 return self.dbapi.cpv_inject(cpv)
6686
6687         def exists_specific(self,cpv):
6688                 if not self.populated:
6689                         self.populate()
6690                 return self.dbapi.match(
6691                         dep_expand("="+cpv, mydb=self.dbapi, settings=self.settings))
6692
6693         def dep_bestmatch(self,mydep):
6694                 "compatibility method -- all matches, not just visible ones"
6695                 if not self.populated:
6696                         self.populate()
6697                 writemsg("\n\n", 1)
6698                 writemsg("mydep: %s\n" % mydep, 1)
6699                 mydep = dep_expand(mydep, mydb=self.dbapi, settings=self.settings)
6700                 writemsg("mydep: %s\n" % mydep, 1)
6701                 mykey=dep_getkey(mydep)
6702                 writemsg("mykey: %s\n" % mykey, 1)
6703                 mymatch=best(match_from_list(mydep,self.dbapi.cp_list(mykey)))
6704                 writemsg("mymatch: %s\n" % mymatch, 1)
6705                 if mymatch is None:
6706                         return ""
6707                 return mymatch
6708
6709         def getname(self,pkgname):
6710                 """Returns a file location for this package.  The default location is
6711                 ${PKGDIR}/All/${PF}.tbz2, but will be ${PKGDIR}/${CATEGORY}/${PF}.tbz2
6712                 in the rare event of a collision.  The prevent_collision() method can
6713                 be called to ensure that ${PKGDIR}/All/${PF}.tbz2 is available for a
6714                 specific cpv."""
6715                 if not self.populated:
6716                         self.populate()
6717                 mycpv = pkgname
6718                 mypath = self._pkg_paths.get(mycpv, None)
6719                 if mypath:
6720                         return os.path.join(self.pkgdir, mypath)
6721                 mycat, mypkg = catsplit(mycpv)
6722                 mypath = os.path.join("All", mypkg + ".tbz2")
6723                 if mypath in self._pkg_paths.values():
6724                         mypath = os.path.join(mycat, mypkg + ".tbz2")
6725                 self._pkg_paths[mycpv] = mypath # cache for future lookups
6726                 return os.path.join(self.pkgdir, mypath)
6727
6728         def isremote(self,pkgname):
6729                 "Returns true if the package is kept remotely."
6730                 mysplit=pkgname.split("/")
6731                 remote = (not os.path.exists(self.getname(pkgname))) and self.remotepkgs.has_key(mysplit[1]+".tbz2")
6732                 return remote
6733
6734         def get_use(self,pkgname):
6735                 mysplit=pkgname.split("/")
6736                 if self.isremote(pkgname):
6737                         return self.remotepkgs[mysplit[1]+".tbz2"]["USE"][:].split()
6738                 tbz2=xpak.tbz2(self.getname(pkgname))
6739                 return tbz2.getfile("USE").split()
6740
6741         def gettbz2(self,pkgname):
6742                 "fetches the package from a remote site, if necessary."
6743                 print "Fetching '"+str(pkgname)+"'"
6744                 mysplit  = pkgname.split("/")
6745                 tbz2name = mysplit[1]+".tbz2"
6746                 if not self.isremote(pkgname):
6747                         if (tbz2name not in self.invalids):
6748                                 return
6749                         else:
6750                                 writemsg("Resuming download of this tbz2, but it is possible that it is corrupt.\n",
6751                                         noiselevel=-1)
6752                 mydest = self.pkgdir+"/All/"
6753                 try:
6754                         os.makedirs(mydest, 0775)
6755                 except (OSError, IOError):
6756                         pass
6757                 return getbinpkg.file_get(
6758                         self.settings["PORTAGE_BINHOST"] + "/" + tbz2name,
6759                         mydest, fcmd=self.settings["RESUMECOMMAND"])
6760
6761         def getslot(self,mycatpkg):
6762                 "Get a slot for a catpkg; assume it exists."
6763                 myslot = ""
6764                 try:
6765                         myslot=self.dbapi.aux_get(mycatpkg,["SLOT"])[0]
6766                 except SystemExit, e:
6767                         raise
6768                 except Exception, e:
6769                         pass
6770                 return myslot
6771
6772 class dblink:
6773         """
6774         This class provides an interface to the installed package database
6775         At present this is implemented as a text backend in /var/db/pkg.
6776         """
6777         def __init__(self, cat, pkg, myroot, mysettings, treetype=None,
6778                 vartree=None):
6779                 """
6780                 Creates a DBlink object for a given CPV.
6781                 The given CPV may not be present in the database already.
6782                 
6783                 @param cat: Category
6784                 @type cat: String
6785                 @param pkg: Package (PV)
6786                 @type pkg: String
6787                 @param myroot: Typically ${ROOT}
6788                 @type myroot: String (Path)
6789                 @param mysettings: Typically portage.config
6790                 @type mysettings: An instance of portage.config
6791                 @param treetype: one of ['porttree','bintree','vartree']
6792                 @type treetype: String
6793                 @param vartree: an instance of vartree corresponding to myroot.
6794                 @type vartree: vartree
6795                 """
6796                 
6797                 self.cat     = cat
6798                 self.pkg     = pkg
6799                 self.mycpv   = self.cat+"/"+self.pkg
6800                 self.mysplit = pkgsplit(self.mycpv)
6801                 self.treetype = treetype
6802                 if vartree is None:
6803                         global db
6804                         vartree = db[myroot]["vartree"]
6805                 self.vartree = vartree
6806
6807                 self.dbroot   = normalize_path(os.path.join(myroot, VDB_PATH))
6808                 self.dbcatdir = self.dbroot+"/"+cat
6809                 self.dbpkgdir = self.dbcatdir+"/"+pkg
6810                 self.dbtmpdir = self.dbcatdir+"/-MERGING-"+pkg
6811                 self.dbdir    = self.dbpkgdir
6812
6813                 self._lock_vdb = None
6814
6815                 self.settings = mysettings
6816                 if self.settings==1:
6817                         raise ValueError
6818
6819                 self.myroot=myroot
6820                 protect_obj = portage_util.ConfigProtect(myroot,
6821                         mysettings.get("CONFIG_PROTECT","").split(),
6822                         mysettings.get("CONFIG_PROTECT_MASK","").split())
6823                 self.updateprotect = protect_obj.updateprotect
6824                 self._config_protect = protect_obj
6825                 self._installed_instance = None
6826                 self.contentscache=[]
6827                 self._contents_inodes = None
6828
6829         def lockdb(self):
6830                 if self._lock_vdb:
6831                         raise AssertionError("Lock already held.")
6832                 # At least the parent needs to exist for the lock file.
6833                 portage_util.ensure_dirs(self.dbroot)
6834                 self._lock_vdb = portage_locks.lockdir(self.dbroot)
6835
6836         def unlockdb(self):
6837                 if self._lock_vdb:
6838                         portage_locks.unlockdir(self._lock_vdb)
6839                         self._lock_vdb = None
6840
6841         def getpath(self):
6842                 "return path to location of db information (for >>> informational display)"
6843                 return self.dbdir
6844
6845         def exists(self):
6846                 "does the db entry exist?  boolean."
6847                 return os.path.exists(self.dbdir)
6848
6849         def create(self):
6850                 "create the skeleton db directory structure.  No contents, virtuals, provides or anything.  Also will create /var/db/pkg if necessary."
6851                 """
6852                 This function should never get called (there is no reason to use it).
6853                 """
6854                 # XXXXX Delete this eventually
6855                 raise Exception, "This is bad. Don't use it."
6856                 if not os.path.exists(self.dbdir):
6857                         os.makedirs(self.dbdir)
6858
6859         def delete(self):
6860                 """
6861                 Remove this entry from the database
6862                 """
6863                 if not os.path.exists(self.dbdir):
6864                         return
6865                 try:
6866                         for x in listdir(self.dbdir):
6867                                 os.unlink(self.dbdir+"/"+x)
6868                         os.rmdir(self.dbdir)
6869                 except OSError, e:
6870                         print "!!! Unable to remove db entry for this package."
6871                         print "!!! It is possible that a directory is in this one. Portage will still"
6872                         print "!!! register this package as installed as long as this directory exists."
6873                         print "!!! You may delete this directory with 'rm -Rf "+self.dbdir+"'"
6874                         print "!!! "+str(e)
6875                         print
6876                         sys.exit(1)
6877
6878         def clearcontents(self):
6879                 """
6880                 For a given db entry (self), erase the CONTENTS values.
6881                 """
6882                 if os.path.exists(self.dbdir+"/CONTENTS"):
6883                         os.unlink(self.dbdir+"/CONTENTS")
6884
6885         def getcontents(self):
6886                 """
6887                 Get the installed files of a given package (aka what that package installed)
6888                 """
6889                 if not os.path.exists(self.dbdir+"/CONTENTS"):
6890                         return None
6891                 if self.contentscache != []:
6892                         return self.contentscache
6893                 pkgfiles={}
6894                 myc=open(self.dbdir+"/CONTENTS","r")
6895                 mylines=myc.readlines()
6896                 myc.close()
6897                 null_byte = "\0"
6898                 contents_file = os.path.join(self.dbdir, "CONTENTS")
6899                 pos = 0
6900                 for line in mylines:
6901                         pos += 1
6902                         if null_byte in line:
6903                                 # Null bytes are a common indication of corruption.
6904                                 writemsg("!!! Null byte found in contents " + \
6905                                         "file, line %d: '%s'\n" % (pos, contents_file),
6906                                         noiselevel=-1)
6907                                 continue
6908                         mydat = line.split()
6909                         # we do this so we can remove from non-root filesystems
6910                         # (use the ROOT var to allow maintenance on other partitions)
6911                         try:
6912                                 mydat[1] = normalize_path(os.path.join(
6913                                         self.myroot, mydat[1].lstrip(os.path.sep)))
6914                                 if mydat[0]=="obj":
6915                                         #format: type, mtime, md5sum
6916                                         pkgfiles[" ".join(mydat[1:-2])]=[mydat[0], mydat[-1], mydat[-2]]
6917                                 elif mydat[0]=="dir":
6918                                         #format: type
6919                                         pkgfiles[" ".join(mydat[1:])]=[mydat[0] ]
6920                                 elif mydat[0]=="sym":
6921                                         #format: type, mtime, dest
6922                                         x=len(mydat)-1
6923                                         if (x >= 13) and (mydat[-1][-1]==')'): # Old/Broken symlink entry
6924                                                 mydat = mydat[:-10]+[mydat[-10:][stat.ST_MTIME][:-1]]
6925                                                 writemsg("FIXED SYMLINK LINE: %s\n" % mydat, 1)
6926                                                 x=len(mydat)-1
6927                                         splitter=-1
6928                                         while(x>=0):
6929                                                 if mydat[x]=="->":
6930                                                         splitter=x
6931                                                         break
6932                                                 x=x-1
6933                                         if splitter==-1:
6934                                                 return None
6935                                         pkgfiles[" ".join(mydat[1:splitter])]=[mydat[0], mydat[-1], " ".join(mydat[(splitter+1):-1])]
6936                                 elif mydat[0]=="dev":
6937                                         #format: type
6938                                         pkgfiles[" ".join(mydat[1:])]=[mydat[0] ]
6939                                 elif mydat[0]=="fif":
6940                                         #format: type
6941                                         pkgfiles[" ".join(mydat[1:])]=[mydat[0]]
6942                                 else:
6943                                         return None
6944                         except (KeyError,IndexError):
6945                                 print "portage: CONTENTS line",pos,"corrupt!"
6946                 self.contentscache=pkgfiles
6947                 return pkgfiles
6948
6949         def unmerge(self, pkgfiles=None, trimworld=1, cleanup=1,
6950                 ldpath_mtimes=None):
6951                 """
6952                 Calls prerm
6953                 Unmerges a given package (CPV)
6954                 calls postrm
6955                 calls cleanrm
6956                 calls env_update
6957                 
6958                 @param pkgfiles: files to unmerge (generally self.getcontents() )
6959                 @type pkgfiles: Dictionary
6960                 @param trimworld: Remove CPV from world file if True, not if False
6961                 @type trimworld: Boolean
6962                 @param cleanup: cleanup to pass to doebuild (see doebuild)
6963                 @type cleanup: Boolean
6964                 @param ldpath_mtimes: mtimes to pass to env_update (see env_update)
6965                 @type ldpath_mtimes: Dictionary
6966                 @rtype: Integer
6967                 @returns:
6968                 1. os.EX_OK if everything went well.
6969                 2. return code of the failed phase (for prerm, postrm, cleanrm)
6970                 
6971                 Notes:
6972                 The caller must ensure that lockdb() and unlockdb() are called
6973                 before and after this method.
6974                 """
6975
6976                 contents = self.getcontents()
6977                 # Now, don't assume that the name of the ebuild is the same as the
6978                 # name of the dir; the package may have been moved.
6979                 myebuildpath = None
6980                 mystuff = listdir(self.dbdir, EmptyOnError=1)
6981                 for x in mystuff:
6982                         if x.endswith(".ebuild"):
6983                                 myebuildpath = os.path.join(self.dbdir, self.pkg + ".ebuild")
6984                                 if x[:-7] != self.pkg:
6985                                         # Clean up after vardbapi.move_ent() breakage in
6986                                         # portage versions before 2.1.2
6987                                         os.rename(os.path.join(self.dbdir, x), myebuildpath)
6988                                         write_atomic(os.path.join(self.dbdir, "PF"), self.pkg+"\n")
6989                                 break
6990
6991                 self.settings.load_infodir(self.dbdir)
6992                 if myebuildpath:
6993                         try:
6994                                 doebuild_environment(myebuildpath, "prerm", self.myroot,
6995                                         self.settings, 0, 0, self.vartree.dbapi)
6996                         except portage_exception.UnsupportedAPIException, e:
6997                                 # Sometimes this happens due to corruption of the EAPI file.
6998                                 writemsg("!!! FAILED prerm: %s\n" % \
6999                                         os.path.join(self.dbdir, "EAPI"), noiselevel=-1)
7000                                 writemsg("%s\n" % str(e), noiselevel=-1)
7001                                 return 1
7002                         catdir = os.path.dirname(self.settings["PORTAGE_BUILDDIR"])
7003                         portage_util.ensure_dirs(os.path.dirname(catdir),
7004                                 uid=portage_uid, gid=portage_gid, mode=070, mask=0)
7005                 builddir_lock = None
7006                 catdir_lock = None
7007                 try:
7008                         if myebuildpath:
7009                                 catdir_lock = portage_locks.lockdir(catdir)
7010                                 portage_util.ensure_dirs(catdir,
7011                                         uid=portage_uid, gid=portage_gid,
7012                                         mode=070, mask=0)
7013                                 builddir_lock = portage_locks.lockdir(
7014                                         self.settings["PORTAGE_BUILDDIR"])
7015                                 try:
7016                                         portage_locks.unlockdir(catdir_lock)
7017                                 finally:
7018                                         catdir_lock = None
7019                                 # Eventually, we'd like to pass in the saved ebuild env here...
7020                                 retval = doebuild(myebuildpath, "prerm", self.myroot,
7021                                         self.settings, cleanup=cleanup, use_cache=0,
7022                                         mydbapi=self.vartree.dbapi, tree="vartree",
7023                                         vartree=self.vartree)
7024                                 # XXX: Decide how to handle failures here.
7025                                 if retval != os.EX_OK:
7026                                         writemsg("!!! FAILED prerm: %s\n" % retval, noiselevel=-1)
7027                                         return retval
7028
7029                         self._unmerge_pkgfiles(pkgfiles)
7030
7031                         if myebuildpath:
7032                                 retval = doebuild(myebuildpath, "postrm", self.myroot,
7033                                          self.settings, use_cache=0, tree="vartree",
7034                                          mydbapi=self.vartree.dbapi, vartree=self.vartree)
7035
7036                                 # process logs created during pre/postrm
7037                                 elog_process(self.mycpv, self.settings)
7038
7039                                 # XXX: Decide how to handle failures here.
7040                                 if retval != os.EX_OK:
7041                                         writemsg("!!! FAILED postrm: %s\n" % retval, noiselevel=-1)
7042                                         return retval
7043                                 doebuild(myebuildpath, "cleanrm", self.myroot, self.settings,
7044                                         tree="vartree", mydbapi=self.vartree.dbapi,
7045                                         vartree=self.vartree)
7046
7047                 finally:
7048                         if builddir_lock:
7049                                 portage_locks.unlockdir(builddir_lock)
7050                         try:
7051                                 if myebuildpath and not catdir_lock:
7052                                         # Lock catdir for removal if empty.
7053                                         catdir_lock = portage_locks.lockdir(catdir)
7054                         finally:
7055                                 if catdir_lock:
7056                                         try:
7057                                                 os.rmdir(catdir)
7058                                         except OSError, e:
7059                                                 if e.errno not in (errno.ENOENT,
7060                                                         errno.ENOTEMPTY, errno.EEXIST):
7061                                                         raise
7062                                                 del e
7063                                         portage_locks.unlockdir(catdir_lock)
7064                 env_update(target_root=self.myroot, prev_mtimes=ldpath_mtimes,
7065                         contents=contents)
7066                 return os.EX_OK
7067
7068         def _unmerge_pkgfiles(self, pkgfiles):
7069                 """
7070                 
7071                 Unmerges the contents of a package from the liveFS
7072                 Removes the VDB entry for self
7073                 
7074                 @param pkgfiles: typically self.getcontents()
7075                 @type pkgfiles: Dictionary { filename: [ 'type', '?', 'md5sum' ] }
7076                 @rtype: None
7077                 """
7078                 global dircache
7079                 dircache={}
7080
7081                 if not pkgfiles:
7082                         writemsg_stdout("No package files given... Grabbing a set.\n")
7083                         pkgfiles=self.getcontents()
7084
7085                 if pkgfiles:
7086                         mykeys=pkgfiles.keys()
7087                         mykeys.sort()
7088                         mykeys.reverse()
7089
7090                         #process symlinks second-to-last, directories last.
7091                         mydirs=[]
7092                         modprotect="/lib/modules/"
7093                         for objkey in mykeys:
7094                                 obj = normalize_path(objkey)
7095                                 if obj[:2]=="//":
7096                                         obj=obj[1:]
7097                                 statobj = None
7098                                 try:
7099                                         statobj = os.stat(obj)
7100                                 except OSError:
7101                                         pass
7102                                 lstatobj = None
7103                                 try:
7104                                         lstatobj = os.lstat(obj)
7105                                 except (OSError, AttributeError):
7106                                         pass
7107                                 islink = lstatobj is not None and stat.S_ISLNK(lstatobj.st_mode)
7108                                 if statobj is None:
7109                                         if not islink:
7110                                                 #we skip this if we're dealing with a symlink
7111                                                 #because os.stat() will operate on the
7112                                                 #link target rather than the link itself.
7113                                                 writemsg_stdout("--- !found "+str(pkgfiles[objkey][0])+ " %s\n" % obj)
7114                                                 continue
7115                                 # next line includes a tweak to protect modules from being unmerged,
7116                                 # but we don't protect modules from being overwritten if they are
7117                                 # upgraded. We effectively only want one half of the config protection
7118                                 # functionality for /lib/modules. For portage-ng both capabilities
7119                                 # should be able to be independently specified.
7120                                 if obj.startswith(modprotect):
7121                                         writemsg_stdout("--- cfgpro %s %s\n" % (pkgfiles[objkey][0], obj))
7122                                         continue
7123
7124                                 lmtime=str(lstatobj[stat.ST_MTIME])
7125                                 if (pkgfiles[objkey][0] not in ("dir","fif","dev")) and (lmtime != pkgfiles[objkey][1]):
7126                                         writemsg_stdout("--- !mtime %s %s\n" % (pkgfiles[objkey][0], obj))
7127                                         continue
7128
7129                                 if pkgfiles[objkey][0]=="dir":
7130                                         if statobj is None or not stat.S_ISDIR(statobj.st_mode):
7131                                                 writemsg_stdout("--- !dir   %s %s\n" % ("dir", obj))
7132                                                 continue
7133                                         mydirs.append(obj)
7134                                 elif pkgfiles[objkey][0]=="sym":
7135                                         if not islink:
7136                                                 writemsg_stdout("--- !sym   %s %s\n" % ("sym", obj))
7137                                                 continue
7138                                         try:
7139                                                 os.unlink(obj)
7140                                                 writemsg_stdout("<<<        %s %s\n" % ("sym",obj))
7141                                         except (OSError,IOError),e:
7142                                                 writemsg_stdout("!!!        %s %s\n" % ("sym",obj))
7143                                 elif pkgfiles[objkey][0]=="obj":
7144                                         if statobj is None or not stat.S_ISREG(statobj.st_mode):
7145                                                 writemsg_stdout("--- !obj   %s %s\n" % ("obj", obj))
7146                                                 continue
7147                                         mymd5 = None
7148                                         try:
7149                                                 mymd5 = portage_checksum.perform_md5(obj, calc_prelink=1)
7150                                         except portage_exception.FileNotFound, e:
7151                                                 # the file has disappeared between now and our stat call
7152                                                 writemsg_stdout("--- !obj   %s %s\n" % ("obj", obj))
7153                                                 continue
7154
7155                                         # string.lower is needed because db entries used to be in upper-case.  The
7156                                         # string.lower allows for backwards compatibility.
7157                                         if mymd5 != pkgfiles[objkey][2].lower():
7158                                                 writemsg_stdout("--- !md5   %s %s\n" % ("obj", obj))
7159                                                 continue
7160                                         try:
7161                                                 if statobj.st_mode & (stat.S_ISUID | stat.S_ISGID):
7162                                                         # Always blind chmod 0 before unlinking to avoid race conditions.
7163                                                         os.chmod(obj, 0000)
7164                                                         if statobj.st_nlink > 1:
7165                                                                 writemsg("setXid: "+str(statobj.st_nlink-1)+ \
7166                                                                         " hardlinks to '%s'\n" % obj)
7167                                                 os.unlink(obj)
7168                                         except (OSError,IOError),e:
7169                                                 pass
7170                                         writemsg_stdout("<<<        %s %s\n" % ("obj",obj))
7171                                 elif pkgfiles[objkey][0]=="fif":
7172                                         if not stat.S_ISFIFO(lstatobj[stat.ST_MODE]):
7173                                                 writemsg_stdout("--- !fif   %s %s\n" % ("fif", obj))
7174                                                 continue
7175                                         writemsg_stdout("---        %s %s\n" % ("fif",obj))
7176                                 elif pkgfiles[objkey][0]=="dev":
7177                                         writemsg_stdout("---        %s %s\n" % ("dev",obj))
7178
7179                         mydirs.sort()
7180                         mydirs.reverse()
7181
7182                         for obj in mydirs:
7183                                 try:
7184                                         os.rmdir(obj)
7185                                         writemsg_stdout("<<<        %s %s\n" % ("dir",obj))
7186                                 except (OSError, IOError):
7187                                         writemsg_stdout("--- !empty dir %s\n" % obj)
7188
7189                 #remove self from vartree database so that our own virtual gets zapped if we're the last node
7190                 self.vartree.zap(self.mycpv)
7191
7192         def isowner(self,filename,destroot):
7193                 """ 
7194                 Check if filename is a new file or belongs to this package
7195                 (for this or a previous version)
7196                 
7197                 @param filename:
7198                 @type filename:
7199                 @param destroot:
7200                 @type destroot:
7201                 @rtype: Boolean
7202                 @returns:
7203                 1. True if this package owns the file.
7204                 2. False if this package does not own the file.
7205                 """
7206                 destfile = normalize_path(
7207                         os.path.join(destroot, filename.lstrip(os.path.sep)))
7208                 try:
7209                         mylstat = os.lstat(destfile)
7210                 except (OSError, IOError):
7211                         return True
7212
7213                 pkgfiles = self.getcontents()
7214                 if pkgfiles and filename in pkgfiles:
7215                         return True
7216                 if pkgfiles:
7217                         if self._contents_inodes is None:
7218                                 self._contents_inodes = set()
7219                                 for x in pkgfiles:
7220                                         try:
7221                                                 lstat = os.lstat(x)
7222                                                 self._contents_inodes.add((lstat.st_dev, lstat.st_ino))
7223                                         except OSError:
7224                                                 pass
7225                         if (mylstat.st_dev, mylstat.st_ino) in self._contents_inodes:
7226                                  return True
7227
7228                 return False
7229
7230         def isprotected(self, filename):
7231                 """In cases where an installed package in the same slot owns a
7232                 protected file that will be merged, bump the mtime on the installed
7233                 file in order to ensure that it isn't unmerged."""
7234                 if not self._config_protect.isprotected(filename):
7235                         return False
7236                 if self._installed_instance is None:
7237                         return True
7238                 mydata = self._installed_instance.getcontents().get(filename, None)
7239                 if mydata is None:
7240                         return True
7241
7242                 # Bump the mtime in order to ensure that the old config file doesn't
7243                 # get unmerged.  The user will have an opportunity to merge the new
7244                 # config with the old one.
7245                 try:
7246                         os.utime(filename, None)
7247                 except OSError, e:
7248                         if e.errno != errno.ENOENT:
7249                                 raise
7250                         del e
7251                         # The file has disappeared, so it's not protected.
7252                         return False
7253                 return True
7254
7255         def treewalk(self, srcroot, destroot, inforoot, myebuild, cleanup=0,
7256                 mydbapi=None, prev_mtimes=None):
7257                 """
7258                 
7259                 This function does the following:
7260                 
7261                 Collision Protection.
7262                 calls doebuild(mydo=pkg_preinst)
7263                 Merges the package to the livefs
7264                 unmerges old version (if required)
7265                 calls doebuild(mydo=pkg_postinst)
7266                 calls env_update
7267                 
7268                 @param srcroot: Typically this is ${D}
7269                 @type srcroot: String (Path)
7270                 @param destroot: Path to merge to (usually ${ROOT})
7271                 @type destroot: String (Path)
7272                 @param inforoot: root of the vardb entry ?
7273                 @type inforoot: String (Path)
7274                 @param myebuild: path to the ebuild that we are processing
7275                 @type myebuild: String (Path)
7276                 @param mydbapi: dbapi which is handed to doebuild.
7277                 @type mydbapi: portdbapi instance
7278                 @param prev_mtimes: { Filename:mtime } mapping for env_update
7279                 @type prev_mtimes: Dictionary
7280                 @rtype: Boolean
7281                 @returns:
7282                 1. 0 on success
7283                 2. 1 on failure
7284                 
7285                 secondhand is a list of symlinks that have been skipped due to their target
7286                 not existing; we will merge these symlinks at a later time.
7287                 """
7288                 if not os.path.isdir(srcroot):
7289                         writemsg("!!! Directory Not Found: D='%s'\n" % srcroot,
7290                         noiselevel=-1)
7291                         return 1
7292
7293                 if not os.path.exists(self.dbcatdir):
7294                         os.makedirs(self.dbcatdir)
7295
7296                 otherversions=[]
7297                 for v in self.vartree.dbapi.cp_list(self.mysplit[0]):
7298                         otherversions.append(v.split("/")[1])
7299
7300                 slot_matches = self.vartree.dbapi.match(
7301                         "%s:%s" % (self.mysplit[0], self.settings["SLOT"]))
7302                 if slot_matches:
7303                         # Used by self.isprotected().
7304                         self._installed_instance = dblink(self.cat,
7305                                 catsplit(slot_matches[0])[1], destroot, self.settings,
7306                                 vartree=self.vartree)
7307
7308                 # check for package collisions
7309                 if "collision-protect" in self.settings.features:
7310                         collision_ignore = set([normalize_path(myignore) for myignore in \
7311                                 self.settings.get("COLLISION_IGNORE", "").split()])
7312                         myfilelist = listdir(srcroot, recursive=1, filesonly=1, followSymlinks=False)
7313
7314                         # the linkcheck only works if we are in srcroot
7315                         mycwd = getcwd()
7316                         os.chdir(srcroot)
7317                         mysymlinks = filter(os.path.islink, listdir(srcroot, recursive=1, filesonly=0, followSymlinks=False))
7318                         myfilelist.extend(mysymlinks)
7319                         mysymlinked_directories = [s + os.path.sep for s in mysymlinks]
7320                         del mysymlinks
7321
7322
7323                         stopmerge=False
7324                         starttime=time.time()
7325                         i=0
7326
7327                         otherpkg=[]
7328                         mypkglist=[]
7329
7330                         if self.pkg in otherversions:
7331                                 otherversions.remove(self.pkg)  # we already checked this package
7332
7333                         myslot = self.settings["SLOT"]
7334                         for v in otherversions:
7335                                 # only allow versions with same slot to overwrite files
7336                                 if myslot == self.vartree.dbapi.aux_get("/".join((self.cat, v)), ["SLOT"])[0]:
7337                                         mypkglist.append(
7338                                                 dblink(self.cat, v, destroot, self.settings,
7339                                                         vartree=self.vartree))
7340
7341                         collisions = []
7342
7343                         print green("*")+" checking "+str(len(myfilelist))+" files for package collisions"
7344                         for f in myfilelist:
7345                                 nocheck = False
7346                                 # listdir isn't intelligent enough to exclude symlinked dirs,
7347                                 # so we have to do it ourself
7348                                 for s in mysymlinked_directories:
7349                                         if f.startswith(s):
7350                                                 nocheck = True
7351                                                 break
7352                                 if nocheck:
7353                                         continue
7354                                 i=i+1
7355                                 if i % 1000 == 0:
7356                                         print str(i)+" files checked ..."
7357                                 if f[0] != "/":
7358                                         f="/"+f
7359                                 isowned = False
7360                                 for ver in [self]+mypkglist:
7361                                         if (ver.isowner(f, destroot) or ver.isprotected(f)):
7362                                                 isowned = True
7363                                                 break
7364                                 if not isowned:
7365                                         collisions.append(f)
7366                                         print "existing file "+f+" is not owned by this package"
7367                                         stopmerge=True
7368                                         if collision_ignore:
7369                                                 if f in collision_ignore:
7370                                                         stopmerge = False
7371                                                 else:
7372                                                         for myignore in collision_ignore:
7373                                                                 if f.startswith(myignore + os.path.sep):
7374                                                                         stopmerge = False
7375                                                                         break
7376                         #print green("*")+" spent "+str(time.time()-starttime)+" seconds checking for file collisions"
7377                         if stopmerge:
7378                                 print red("*")+" This package is blocked because it wants to overwrite"
7379                                 print red("*")+" files belonging to other packages (see messages above)."
7380                                 print red("*")+" If you have no clue what this is all about report it "
7381                                 print red("*")+" as a bug for this package on http://bugs.gentoo.org"
7382                                 print
7383                                 print red("package "+self.cat+"/"+self.pkg+" NOT merged")
7384                                 print
7385                                 print
7386                                 print "Searching all installed packages for file collisions..."
7387                                 print "Press Ctrl-C to Stop"
7388                                 print
7389                                 """ Note: The isowner calls result in a stat call for *every*
7390                                 single installed file, since the inode numbers are used to work
7391                                 around the problem of ambiguous paths caused by symlinked files
7392                                 and/or directories.  Though it is slow, it is as accurate as
7393                                 possible."""
7394                                 found_owner = False
7395                                 for cpv in self.vartree.dbapi.cpv_all():
7396                                         cat, pkg = catsplit(cpv)
7397                                         mylink = dblink(cat, pkg, destroot, self.settings,
7398                                                 vartree=self.vartree)
7399                                         mycollisions = []
7400                                         for f in collisions:
7401                                                 if mylink.isowner(f, destroot):
7402                                                         mycollisions.append(f)
7403                                         if mycollisions:
7404                                                 found_owner = True
7405                                                 print " * %s:" % cpv
7406                                                 print
7407                                                 for f in mycollisions:
7408                                                         print "     '%s'" % \
7409                                                                 os.path.join(destroot, f.lstrip(os.path.sep))
7410                                                 print
7411                                 if not found_owner:
7412                                         print "None of the installed packages claim the above file(s)."
7413                                         print
7414                                 sys.exit(1)
7415                         try:
7416                                 os.chdir(mycwd)
7417                         except OSError:
7418                                 pass
7419
7420                 if os.stat(srcroot).st_dev == os.stat(destroot).st_dev:
7421                         """ The merge process may move files out of the image directory,
7422                         which causes invalidation of the .installed flag."""
7423                         try:
7424                                 os.unlink(os.path.join(
7425                                         os.path.dirname(normalize_path(srcroot)), ".installed"))
7426                         except OSError, e:
7427                                 if e.errno != errno.ENOENT:
7428                                         raise
7429                                 del e
7430
7431                 # get old contents info for later unmerging
7432                 oldcontents = self.getcontents()
7433
7434                 self.dbdir = self.dbtmpdir
7435                 self.delete()
7436                 if not os.path.exists(self.dbtmpdir):
7437                         os.makedirs(self.dbtmpdir)
7438
7439                 writemsg_stdout(">>> Merging %s %s %s\n" % (self.mycpv,"to",destroot))
7440
7441                 # run preinst script
7442                 if myebuild is None:
7443                         myebuild = os.path.join(inforoot, self.pkg + ".ebuild")
7444                 a = doebuild(myebuild, "preinst", destroot, self.settings, cleanup=cleanup,
7445                         use_cache=0, tree=self.treetype, mydbapi=mydbapi,
7446                         vartree=self.vartree)
7447
7448                 # XXX: Decide how to handle failures here.
7449                 if a != os.EX_OK:
7450                         writemsg("!!! FAILED preinst: "+str(a)+"\n", noiselevel=-1)
7451                         return a
7452
7453                 # copy "info" files (like SLOT, CFLAGS, etc.) into the database
7454                 for x in listdir(inforoot):
7455                         self.copyfile(inforoot+"/"+x)
7456
7457                 # get current counter value (counter_tick also takes care of incrementing it)
7458                 # XXX Need to make this destroot, but it needs to be initialized first. XXX
7459                 # XXX bis: leads to some invalidentry() call through cp_all().
7460                 counter = self.vartree.dbapi.counter_tick(self.myroot, mycpv=self.mycpv)
7461                 # write local package counter for recording
7462                 lcfile = open(self.dbtmpdir+"/COUNTER","w")
7463                 lcfile.write(str(counter))
7464                 lcfile.close()
7465
7466                 # open CONTENTS file (possibly overwriting old one) for recording
7467                 outfile=open(self.dbtmpdir+"/CONTENTS","w")
7468
7469                 self.updateprotect()
7470
7471                 #if we have a file containing previously-merged config file md5sums, grab it.
7472                 conf_mem_file = os.path.join(destroot, CONFIG_MEMORY_FILE)
7473                 cfgfiledict = grabdict(conf_mem_file)
7474                 if self.settings.has_key("NOCONFMEM"):
7475                         cfgfiledict["IGNORE"]=1
7476                 else:
7477                         cfgfiledict["IGNORE"]=0
7478
7479                 # Timestamp for files being merged.  Use time() - 1 in order to prevent
7480                 # a collision with timestamps that are bumped by the utime() call
7481                 # inside isprotected().  This ensures that the new and old config have
7482                 # different timestamps (for the benefit of programs like rsync that
7483                 # that need distiguishable timestamps to detect file changes).
7484                 mymtime = long(time.time() - 1)
7485
7486                 # set umask to 0 for merging; back up umask, save old one in prevmask (since this is a global change)
7487                 prevmask   = os.umask(0)
7488                 secondhand = []
7489
7490                 # we do a first merge; this will recurse through all files in our srcroot but also build up a
7491                 # "second hand" of symlinks to merge later
7492                 if self.mergeme(srcroot,destroot,outfile,secondhand,"",cfgfiledict,mymtime):
7493                         return 1
7494
7495                 # now, it's time for dealing our second hand; we'll loop until we can't merge anymore.  The rest are
7496                 # broken symlinks.  We'll merge them too.
7497                 lastlen=0
7498                 while len(secondhand) and len(secondhand)!=lastlen:
7499                         # clear the thirdhand.  Anything from our second hand that
7500                         # couldn't get merged will be added to thirdhand.
7501
7502                         thirdhand=[]
7503                         self.mergeme(srcroot,destroot,outfile,thirdhand,secondhand,cfgfiledict,mymtime)
7504
7505                         #swap hands
7506                         lastlen=len(secondhand)
7507
7508                         # our thirdhand now becomes our secondhand.  It's ok to throw
7509                         # away secondhand since thirdhand contains all the stuff that
7510                         # couldn't be merged.
7511                         secondhand = thirdhand
7512
7513                 if len(secondhand):
7514                         # force merge of remaining symlinks (broken or circular; oh well)
7515                         self.mergeme(srcroot,destroot,outfile,None,secondhand,cfgfiledict,mymtime)
7516
7517                 #restore umask
7518                 os.umask(prevmask)
7519
7520                 #if we opened it, close it
7521                 outfile.flush()
7522                 outfile.close()
7523
7524                 if os.path.exists(self.dbpkgdir):
7525                         writemsg_stdout(">>> Safely unmerging already-installed instance...\n")
7526                         self.dbdir = self.dbpkgdir
7527                         self.unmerge(oldcontents, trimworld=0, ldpath_mtimes=prev_mtimes)
7528                         self.dbdir = self.dbtmpdir
7529                         writemsg_stdout(">>> Original instance of package unmerged safely.\n")
7530
7531                 # We hold both directory locks.
7532                 self.dbdir = self.dbpkgdir
7533                 self.delete()
7534                 movefile(self.dbtmpdir, self.dbpkgdir, mysettings=self.settings)
7535                 contents = self.getcontents()
7536
7537                 #write out our collection of md5sums
7538                 if cfgfiledict.has_key("IGNORE"):
7539                         del cfgfiledict["IGNORE"]
7540
7541                 my_private_path = os.path.join(destroot, PRIVATE_PATH)
7542                 if not os.path.exists(my_private_path):
7543                         os.makedirs(my_private_path)
7544                         os.chown(my_private_path, os.getuid(), portage_gid)
7545                         os.chmod(my_private_path, 02770)
7546
7547                 writedict(cfgfiledict, conf_mem_file)
7548                 del conf_mem_file
7549
7550                 #do postinst script
7551                 a = doebuild(myebuild, "postinst", destroot, self.settings, use_cache=0,
7552                         tree=self.treetype, mydbapi=mydbapi, vartree=self.vartree)
7553
7554                 # XXX: Decide how to handle failures here.
7555                 if a != os.EX_OK:
7556                         writemsg("!!! FAILED postinst: "+str(a)+"\n", noiselevel=-1)
7557                         return a
7558
7559                 downgrade = False
7560                 for v in otherversions:
7561                         if pkgcmp(catpkgsplit(self.pkg)[1:], catpkgsplit(v)[1:]) < 0:
7562                                 downgrade = True
7563
7564                 #update environment settings, library paths. DO NOT change symlinks.
7565                 env_update(makelinks=(not downgrade),
7566                         target_root=self.settings["ROOT"], prev_mtimes=prev_mtimes,
7567                         contents=contents)
7568                 #dircache may break autoclean because it remembers the -MERGING-pkg file
7569                 global dircache
7570                 if dircache.has_key(self.dbcatdir):
7571                         del dircache[self.dbcatdir]
7572                 writemsg_stdout(">>> %s %s\n" % (self.mycpv,"merged."))
7573
7574                 # Process ebuild logfiles
7575                 elog_process(self.mycpv, self.settings)
7576                 if "noclean" not in self.settings.features:
7577                         doebuild(myebuild, "clean", destroot, self.settings,
7578                                 tree=self.treetype, mydbapi=mydbapi, vartree=self.vartree)
7579                 return os.EX_OK
7580
7581         def mergeme(self,srcroot,destroot,outfile,secondhand,stufftomerge,cfgfiledict,thismtime):
7582                 """
7583                 
7584                 This function handles actual merging of the package contents to the livefs.
7585                 It also handles config protection.
7586                 
7587                 @param srcroot: Where are we copying files from (usually ${D})
7588                 @type srcroot: String (Path)
7589                 @param destroot: Typically ${ROOT}
7590                 @type destroot: String (Path)
7591                 @param outfile: File to log operations to
7592                 @type outfile: File Object
7593                 @param secondhand: A set of items to merge in pass two (usually
7594                 or symlinks that point to non-existing files that may get merged later)
7595                 @type secondhand: List
7596                 @param stufftomerge: Either a diretory to merge, or a list of items.
7597                 @type stufftomerge: String or List
7598                 @param cfgfiledict: { File:mtime } mapping for config_protected files
7599                 @type cfgfiledict: Dictionary
7600                 @param thismtime: The current time (typically long(time.time())
7601                 @type thismtime: Long
7602                 @rtype: None or Boolean
7603                 @returns:
7604                 1. True on failure
7605                 2. None otherwise
7606                 
7607                 """
7608                 from os.path import sep, join
7609                 srcroot = normalize_path(srcroot).rstrip(sep) + sep
7610                 destroot = normalize_path(destroot).rstrip(sep) + sep
7611                 # this is supposed to merge a list of files.  There will be 2 forms of argument passing.
7612                 if type(stufftomerge)==types.StringType:
7613                         #A directory is specified.  Figure out protection paths, listdir() it and process it.
7614                         mergelist = listdir(join(srcroot, stufftomerge))
7615                         offset=stufftomerge
7616                 else:
7617                         mergelist=stufftomerge
7618                         offset=""
7619                 for x in mergelist:
7620                         mysrc = join(srcroot, offset, x)
7621                         mydest = join(destroot, offset, x)
7622                         # myrealdest is mydest without the $ROOT prefix (makes a difference if ROOT!="/")
7623                         myrealdest = join(sep, offset, x)
7624                         # stat file once, test using S_* macros many times (faster that way)
7625                         try:
7626                                 mystat=os.lstat(mysrc)
7627                         except OSError, e:
7628                                 writemsg("\n")
7629                                 writemsg(red("!!! ERROR: There appears to be ")+bold("FILE SYSTEM CORRUPTION.")+red(" A file that is listed\n"))
7630                                 writemsg(red("!!!        as existing is not capable of being stat'd. If you are using an\n"))
7631                                 writemsg(red("!!!        experimental kernel, please boot into a stable one, force an fsck,\n"))
7632                                 writemsg(red("!!!        and ensure your filesystem is in a sane state. ")+bold("'shutdown -Fr now'\n"))
7633                                 writemsg(red("!!!        File:  ")+str(mysrc)+"\n", noiselevel=-1)
7634                                 writemsg(red("!!!        Error: ")+str(e)+"\n", noiselevel=-1)
7635                                 sys.exit(1)
7636                         except Exception, e:
7637                                 writemsg("\n")
7638                                 writemsg(red("!!! ERROR: An unknown error has occurred during the merge process.\n"))
7639                                 writemsg(red("!!!        A stat call returned the following error for the following file:"))
7640                                 writemsg(    "!!!        Please ensure that your filesystem is intact, otherwise report\n")
7641                                 writemsg(    "!!!        this as a portage bug at bugs.gentoo.org. Append 'emerge info'.\n")
7642                                 writemsg(    "!!!        File:  "+str(mysrc)+"\n", noiselevel=-1)
7643                                 writemsg(    "!!!        Error: "+str(e)+"\n", noiselevel=-1)
7644                                 sys.exit(1)
7645
7646
7647                         mymode=mystat[stat.ST_MODE]
7648                         # handy variables; mydest is the target object on the live filesystems;
7649                         # mysrc is the source object in the temporary install dir
7650                         try:
7651                                 mydmode = os.lstat(mydest).st_mode
7652                         except OSError, e:
7653                                 if e.errno != errno.ENOENT:
7654                                         raise
7655                                 del e
7656                                 #dest file doesn't exist
7657                                 mydmode=None
7658
7659                         if stat.S_ISLNK(mymode):
7660                                 # we are merging a symbolic link
7661                                 myabsto=abssymlink(mysrc)
7662                                 if myabsto.startswith(srcroot):
7663                                         myabsto=myabsto[len(srcroot):]
7664                                 myabsto = myabsto.lstrip(sep)
7665                                 myto=os.readlink(mysrc)
7666                                 if self.settings and self.settings["D"]:
7667                                         if myto.startswith(self.settings["D"]):
7668                                                 myto=myto[len(self.settings["D"]):]
7669                                 # myrealto contains the path of the real file to which this symlink points.
7670                                 # we can simply test for existence of this file to see if the target has been merged yet
7671                                 myrealto = normalize_path(os.path.join(destroot, myabsto))
7672                                 if mydmode!=None:
7673                                         #destination exists
7674                                         if not stat.S_ISLNK(mydmode):
7675                                                 if stat.S_ISDIR(mydmode):
7676                                                         # directory in the way: we can't merge a symlink over a directory
7677                                                         # we won't merge this, continue with next file...
7678                                                         continue
7679
7680                                                 if os.path.exists(mysrc) and stat.S_ISDIR(os.stat(mysrc)[stat.ST_MODE]):
7681                                                         # Kill file blocking installation of symlink to dir #71787
7682                                                         pass
7683                                                 elif self.isprotected(mydest):
7684                                                         # Use md5 of the target in ${D} if it exists...
7685                                                         try:
7686                                                                 newmd5 = portage_checksum.perform_md5(
7687                                                                         join(srcroot, myabsto))
7688                                                         except portage_exception.FileNotFound:
7689                                                                 # Maybe the target is merged already.
7690                                                                 try:
7691                                                                         newmd5 = portage_checksum.perform_md5(
7692                                                                                 myrealto)
7693                                                                 except portage_exception.FileNotFound:
7694                                                                         newmd5 = None
7695                                                         mydest = new_protect_filename(mydest,newmd5=newmd5)
7696
7697                                 # if secondhand is None it means we're operating in "force" mode and should not create a second hand.
7698                                 if (secondhand!=None) and (not os.path.exists(myrealto)):
7699                                         # either the target directory doesn't exist yet or the target file doesn't exist -- or
7700                                         # the target is a broken symlink.  We will add this file to our "second hand" and merge
7701                                         # it later.
7702                                         secondhand.append(mysrc[len(srcroot):])
7703                                         continue
7704                                 # unlinking no longer necessary; "movefile" will overwrite symlinks atomically and correctly
7705                                 mymtime=movefile(mysrc,mydest,newmtime=thismtime,sstat=mystat, mysettings=self.settings)
7706                                 if mymtime!=None:
7707                                         writemsg_stdout(">>> %s -> %s\n" % (mydest, myto))
7708                                         outfile.write("sym "+myrealdest+" -> "+myto+" "+str(mymtime)+"\n")
7709                                 else:
7710                                         print "!!! Failed to move file."
7711                                         print "!!!",mydest,"->",myto
7712                                         sys.exit(1)
7713                         elif stat.S_ISDIR(mymode):
7714                                 # we are merging a directory
7715                                 if mydmode!=None:
7716                                         # destination exists
7717
7718                                         if bsd_chflags:
7719                                                 # Save then clear flags on dest.
7720                                                 dflags=bsd_chflags.lgetflags(mydest)
7721                                                 if dflags != 0 and bsd_chflags.lchflags(mydest, 0) < 0:
7722                                                         writemsg("!!! Couldn't clear flags on '"+mydest+"'.\n",
7723                                                                 noiselevel=-1)
7724
7725                                         if not os.access(mydest, os.W_OK):
7726                                                 pkgstuff = pkgsplit(self.pkg)
7727                                                 writemsg("\n!!! Cannot write to '"+mydest+"'.\n", noiselevel=-1)
7728                                                 writemsg("!!! Please check permissions and directories for broken symlinks.\n")
7729                                                 writemsg("!!! You may start the merge process again by using ebuild:\n")
7730                                                 writemsg("!!! ebuild "+self.settings["PORTDIR"]+"/"+self.cat+"/"+pkgstuff[0]+"/"+self.pkg+".ebuild merge\n")
7731                                                 writemsg("!!! And finish by running this: env-update\n\n")
7732                                                 return 1
7733
7734                                         if stat.S_ISLNK(mydmode) or stat.S_ISDIR(mydmode):
7735                                                 # a symlink to an existing directory will work for us; keep it:
7736                                                 writemsg_stdout("--- %s/\n" % mydest)
7737                                                 if bsd_chflags:
7738                                                         bsd_chflags.lchflags(mydest, dflags)
7739                                         else:
7740                                                 # a non-directory and non-symlink-to-directory.  Won't work for us.  Move out of the way.
7741                                                 if movefile(mydest,mydest+".backup", mysettings=self.settings) is None:
7742                                                         sys.exit(1)
7743                                                 print "bak",mydest,mydest+".backup"
7744                                                 #now create our directory
7745                                                 if self.settings.selinux_enabled():
7746                                                         sid = selinux.get_sid(mysrc)
7747                                                         selinux.secure_mkdir(mydest,sid)
7748                                                 else:
7749                                                         os.mkdir(mydest)
7750                                                 if bsd_chflags:
7751                                                         bsd_chflags.lchflags(mydest, dflags)
7752                                                 os.chmod(mydest,mystat[0])
7753                                                 os.chown(mydest,mystat[4],mystat[5])
7754                                                 writemsg_stdout(">>> %s/\n" % mydest)
7755                                 else:
7756                                         #destination doesn't exist
7757                                         if self.settings.selinux_enabled():
7758                                                 sid = selinux.get_sid(mysrc)
7759                                                 selinux.secure_mkdir(mydest,sid)
7760                                         else:
7761                                                 os.mkdir(mydest)
7762                                         os.chmod(mydest,mystat[0])
7763                                         os.chown(mydest,mystat[4],mystat[5])
7764                                         writemsg_stdout(">>> %s/\n" % mydest)
7765                                 outfile.write("dir "+myrealdest+"\n")
7766                                 # recurse and merge this directory
7767                                 if self.mergeme(srcroot, destroot, outfile, secondhand,
7768                                         join(offset, x), cfgfiledict, thismtime):
7769                                         return 1
7770                         elif stat.S_ISREG(mymode):
7771                                 # we are merging a regular file
7772                                 mymd5=portage_checksum.perform_md5(mysrc,calc_prelink=1)
7773                                 # calculate config file protection stuff
7774                                 mydestdir=os.path.dirname(mydest)
7775                                 moveme=1
7776                                 zing="!!!"
7777                                 if mydmode!=None:
7778                                         # destination file exists
7779                                         if stat.S_ISDIR(mydmode):
7780                                                 # install of destination is blocked by an existing directory with the same name
7781                                                 moveme=0
7782                                                 writemsg_stdout("!!! %s\n" % mydest)
7783                                         elif stat.S_ISREG(mydmode) or (stat.S_ISLNK(mydmode) and os.path.exists(mydest) and stat.S_ISREG(os.stat(mydest)[stat.ST_MODE])):
7784                                                 cfgprot=0
7785                                                 # install of destination is blocked by an existing regular file,
7786                                                 # or by a symlink to an existing regular file;
7787                                                 # now, config file management may come into play.
7788                                                 # we only need to tweak mydest if cfg file management is in play.
7789                                                 if self.isprotected(mydest):
7790                                                         # we have a protection path; enable config file management.
7791                                                         destmd5=portage_checksum.perform_md5(mydest,calc_prelink=1)
7792                                                         if mymd5==destmd5:
7793                                                                 #file already in place; simply update mtimes of destination
7794                                                                 os.utime(mydest,(thismtime,thismtime))
7795                                                                 zing="---"
7796                                                                 moveme=0
7797                                                         else:
7798                                                                 if mymd5 == cfgfiledict.get(myrealdest, [None])[0]:
7799                                                                         """ An identical update has previously been
7800                                                                         merged.  Skip it unless the user has chosen
7801                                                                         --noconfmem."""
7802                                                                         zing = "-o-"
7803                                                                         moveme = cfgfiledict["IGNORE"]
7804                                                                         cfgprot = cfgfiledict["IGNORE"]
7805                                                                 else:
7806                                                                         moveme = 1
7807                                                                         cfgprot = 1
7808                                                         if moveme:
7809                                                                 # Merging a new file, so update confmem.
7810                                                                 cfgfiledict[myrealdest] = [mymd5]
7811                                                         elif destmd5 == cfgfiledict.get(myrealdest, [None])[0]:
7812                                                                 """A previously remembered update has been
7813                                                                 accepted, so it is removed from confmem."""
7814                                                                 del cfgfiledict[myrealdest]
7815                                                 if cfgprot:
7816                                                         mydest = new_protect_filename(mydest, newmd5=mymd5)
7817
7818                                 # whether config protection or not, we merge the new file the
7819                                 # same way.  Unless moveme=0 (blocking directory)
7820                                 if moveme:
7821                                         mymtime=movefile(mysrc,mydest,newmtime=thismtime,sstat=mystat, mysettings=self.settings)
7822                                         if mymtime is None:
7823                                                 sys.exit(1)
7824                                         zing=">>>"
7825                                 else:
7826                                         mymtime = long(time.time())
7827                                         # We need to touch the destination so that on --update the
7828                                         # old package won't yank the file with it. (non-cfgprot related)
7829                                         os.utime(mydest, (mymtime, mymtime))
7830                                         zing="---"
7831                                 if self.settings["USERLAND"] == "Darwin" and myrealdest[-2:] == ".a":
7832
7833                                         # XXX kludge, can be killed when portage stops relying on
7834                                         # md5+mtime, and uses refcounts
7835                                         # alright, we've fooled w/ mtime on the file; this pisses off static archives
7836                                         # basically internal mtime != file's mtime, so the linker (falsely) thinks
7837                                         # the archive is stale, and needs to have it's toc rebuilt.
7838
7839                                         myf = open(mydest, "r+")
7840
7841                                         # ar mtime field is digits padded with spaces, 12 bytes.
7842                                         lms=str(thismtime+5).ljust(12)
7843                                         myf.seek(0)
7844                                         magic=myf.read(8)
7845                                         if magic != "!<arch>\n":
7846                                                 # not an archive (dolib.a from portage.py makes it here fex)
7847                                                 myf.close()
7848                                         else:
7849                                                 st = os.stat(mydest)
7850                                                 while myf.tell() < st.st_size - 12:
7851                                                         # skip object name
7852                                                         myf.seek(16,1)
7853
7854                                                         # update mtime
7855                                                         myf.write(lms)
7856
7857                                                         # skip uid/gid/mperm
7858                                                         myf.seek(20,1)
7859
7860                                                         # read the archive member's size
7861                                                         x=long(myf.read(10))
7862
7863                                                         # skip the trailing newlines, and add the potential
7864                                                         # extra padding byte if it's not an even size
7865                                                         myf.seek(x + 2 + (x % 2),1)
7866
7867                                                 # and now we're at the end. yay.
7868                                                 myf.close()
7869                                                 mymd5 = portage_checksum.perform_md5(mydest, calc_prelink=1)
7870                                         os.utime(mydest,(thismtime,thismtime))
7871
7872                                 if mymtime!=None:
7873                                         zing=">>>"
7874                                         outfile.write("obj "+myrealdest+" "+mymd5+" "+str(mymtime)+"\n")
7875                                 writemsg_stdout("%s %s\n" % (zing,mydest))
7876                         else:
7877                                 # we are merging a fifo or device node
7878                                 zing="!!!"
7879                                 if mydmode is None:
7880                                         # destination doesn't exist
7881                                         if movefile(mysrc,mydest,newmtime=thismtime,sstat=mystat, mysettings=self.settings)!=None:
7882                                                 zing=">>>"
7883                                         else:
7884                                                 sys.exit(1)
7885                                 if stat.S_ISFIFO(mymode):
7886                                         outfile.write("fif %s\n" % myrealdest)
7887                                 else:
7888                                         outfile.write("dev %s\n" % myrealdest)
7889                                 writemsg_stdout(zing+" "+mydest+"\n")
7890
7891         def merge(self, mergeroot, inforoot, myroot, myebuild=None, cleanup=0,
7892                 mydbapi=None, prev_mtimes=None):
7893                 try:
7894                         self.lockdb()
7895                         return self.treewalk(mergeroot, myroot, inforoot, myebuild,
7896                                 cleanup=cleanup, mydbapi=mydbapi, prev_mtimes=prev_mtimes)
7897                 finally:
7898                         self.unlockdb()
7899
7900         def getstring(self,name):
7901                 "returns contents of a file with whitespace converted to spaces"
7902                 if not os.path.exists(self.dbdir+"/"+name):
7903                         return ""
7904                 myfile=open(self.dbdir+"/"+name,"r")
7905                 mydata=myfile.read().split()
7906                 myfile.close()
7907                 return " ".join(mydata)
7908
7909         def copyfile(self,fname):
7910                 shutil.copyfile(fname,self.dbdir+"/"+os.path.basename(fname))
7911
7912         def getfile(self,fname):
7913                 if not os.path.exists(self.dbdir+"/"+fname):
7914                         return ""
7915                 myfile=open(self.dbdir+"/"+fname,"r")
7916                 mydata=myfile.read()
7917                 myfile.close()
7918                 return mydata
7919
7920         def setfile(self,fname,data):
7921                 write_atomic(os.path.join(self.dbdir, fname), data)
7922
7923         def getelements(self,ename):
7924                 if not os.path.exists(self.dbdir+"/"+ename):
7925                         return []
7926                 myelement=open(self.dbdir+"/"+ename,"r")
7927                 mylines=myelement.readlines()
7928                 myreturn=[]
7929                 for x in mylines:
7930                         for y in x[:-1].split():
7931                                 myreturn.append(y)
7932                 myelement.close()
7933                 return myreturn
7934
7935         def setelements(self,mylist,ename):
7936                 myelement=open(self.dbdir+"/"+ename,"w")
7937                 for x in mylist:
7938                         myelement.write(x+"\n")
7939                 myelement.close()
7940
7941         def isregular(self):
7942                 "Is this a regular package (does it have a CATEGORY file?  A dblink can be virtual *and* regular)"
7943                 return os.path.exists(self.dbdir+"/CATEGORY")
7944
7945 class FetchlistDict(UserDict.DictMixin):
7946         """This provide a mapping interface to retrieve fetch lists.  It's used
7947         to allow portage_manifest.Manifest to access fetch lists via a standard
7948         mapping interface rather than use the dbapi directly."""
7949         def __init__(self, pkgdir, settings, mydbapi):
7950                 """pkgdir is a directory containing ebuilds and settings is passed into
7951                 portdbapi.getfetchlist for __getitem__ calls."""
7952                 self.pkgdir = pkgdir
7953                 self.cp = os.sep.join(pkgdir.split(os.sep)[-2:])
7954                 self.settings = settings
7955                 self.mytree = os.path.realpath(os.path.dirname(os.path.dirname(pkgdir)))
7956                 self.portdb = mydbapi
7957         def __getitem__(self, pkg_key):
7958                 """Returns the complete fetch list for a given package."""
7959                 return self.portdb.getfetchlist(pkg_key, mysettings=self.settings,
7960                         all=True, mytree=self.mytree)[1]
7961         def has_key(self, pkg_key):
7962                 """Returns true if the given package exists within pkgdir."""
7963                 return pkg_key in self.keys()
7964         def keys(self):
7965                 """Returns keys for all packages within pkgdir"""
7966                 return self.portdb.cp_list(self.cp, mytree=self.mytree)
7967
7968 def pkgmerge(mytbz2, myroot, mysettings, mydbapi=None, vartree=None, prev_mtimes=None):
7969         """will merge a .tbz2 file, returning a list of runtime dependencies
7970                 that must be satisfied, or None if there was a merge error.     This
7971                 code assumes the package exists."""
7972         global db
7973         if mydbapi is None:
7974                 mydbapi = db[myroot]["bintree"].dbapi
7975         if vartree is None:
7976                 vartree = db[myroot]["vartree"]
7977         if mytbz2[-5:]!=".tbz2":
7978                 print "!!! Not a .tbz2 file"
7979                 return 1
7980
7981         tbz2_lock = None
7982         builddir_lock = None
7983         catdir_lock = None
7984         try:
7985                 """ Don't lock the tbz2 file because the filesytem could be readonly or
7986                 shared by a cluster."""
7987                 #tbz2_lock = portage_locks.lockfile(mytbz2, wantnewlockfile=1)
7988
7989                 mypkg = os.path.basename(mytbz2)[:-5]
7990                 xptbz2 = xpak.tbz2(mytbz2)
7991                 mycat = xptbz2.getfile("CATEGORY")
7992                 if not mycat:
7993                         writemsg("!!! CATEGORY info missing from info chunk, aborting...\n",
7994                                 noiselevel=-1)
7995                         return 1
7996                 mycat = mycat.strip()
7997
7998                 # These are the same directories that would be used at build time.
7999                 builddir = os.path.join(
8000                         mysettings["PORTAGE_TMPDIR"], "portage", mycat, mypkg)
8001                 catdir = os.path.dirname(builddir)
8002                 pkgloc = os.path.join(builddir, "image")
8003                 infloc = os.path.join(builddir, "build-info")
8004                 myebuild = os.path.join(
8005                         infloc, os.path.basename(mytbz2)[:-4] + "ebuild")
8006                 portage_util.ensure_dirs(os.path.dirname(catdir),
8007                         uid=portage_uid, gid=portage_gid, mode=070, mask=0)
8008                 catdir_lock = portage_locks.lockdir(catdir)
8009                 portage_util.ensure_dirs(catdir,
8010                         uid=portage_uid, gid=portage_gid, mode=070, mask=0)
8011                 builddir_lock = portage_locks.lockdir(builddir)
8012                 try:
8013                         portage_locks.unlockdir(catdir_lock)
8014                 finally:
8015                         catdir_lock = None
8016                 try:
8017                         shutil.rmtree(builddir)
8018                 except (IOError, OSError), e:
8019                         if e.errno != errno.ENOENT:
8020                                 raise
8021                         del e
8022                 for mydir in (builddir, pkgloc, infloc):
8023                         portage_util.ensure_dirs(mydir, uid=portage_uid,
8024                                 gid=portage_gid, mode=0755)
8025                 writemsg_stdout(">>> Extracting info\n")
8026                 xptbz2.unpackinfo(infloc)
8027                 mysettings.load_infodir(infloc)
8028                 # Store the md5sum in the vdb.
8029                 fp = open(os.path.join(infloc, "BINPKGMD5"), "w")
8030                 fp.write(str(portage_checksum.perform_md5(mytbz2))+"\n")
8031                 fp.close()
8032
8033                 debug = mysettings.get("PORTAGE_DEBUG", "") == "1"
8034
8035                 # Eventually we'd like to pass in the saved ebuild env here.
8036                 retval = doebuild(myebuild, "setup", myroot, mysettings, debug=debug,
8037                         tree="bintree", mydbapi=mydbapi, vartree=vartree)
8038                 if retval != os.EX_OK:
8039                         writemsg("!!! Setup failed: %s\n" % retval, noiselevel=-1)
8040                         return retval
8041
8042                 writemsg_stdout(">>> Extracting %s\n" % mypkg)
8043                 retval = portage_exec.spawn_bash(
8044                         "bzip2 -dqc -- '%s' | tar -xp -C '%s' -f -" % (mytbz2, pkgloc),
8045                         env=mysettings.environ())
8046                 if retval != os.EX_OK:
8047                         writemsg("!!! Error Extracting '%s'\n" % mytbz2, noiselevel=-1)
8048                         return retval
8049                 #portage_locks.unlockfile(tbz2_lock)
8050                 #tbz2_lock = None
8051
8052                 mylink = dblink(mycat, mypkg, myroot, mysettings, vartree=vartree,
8053                         treetype="bintree")
8054                 retval = mylink.merge(pkgloc, infloc, myroot, myebuild, cleanup=0,
8055                         mydbapi=mydbapi, prev_mtimes=prev_mtimes)
8056                 return retval
8057         finally:
8058                 if tbz2_lock:
8059                         portage_locks.unlockfile(tbz2_lock)
8060                 if builddir_lock:
8061                         try:
8062                                 shutil.rmtree(builddir)
8063                         except (IOError, OSError), e:
8064                                 if e.errno != errno.ENOENT:
8065                                         raise
8066                                 del e
8067                         portage_locks.unlockdir(builddir_lock)
8068                         try:
8069                                 if not catdir_lock:
8070                                         # Lock catdir for removal if empty.
8071                                         catdir_lock = portage_locks.lockdir(catdir)
8072                         finally:
8073                                 if catdir_lock:
8074                                         try:
8075                                                 os.rmdir(catdir)
8076                                         except OSError, e:
8077                                                 if e.errno not in (errno.ENOENT,
8078                                                         errno.ENOTEMPTY, errno.EEXIST):
8079                                                         raise
8080                                                 del e
8081                                         portage_locks.unlockdir(catdir_lock)
8082
8083 def deprecated_profile_check():
8084         if not os.access(DEPRECATED_PROFILE_FILE, os.R_OK):
8085                 return False
8086         deprecatedfile = open(DEPRECATED_PROFILE_FILE, "r")
8087         dcontent = deprecatedfile.readlines()
8088         deprecatedfile.close()
8089         newprofile = dcontent[0]
8090         writemsg(red("\n!!! Your current profile is deprecated and not supported anymore.\n"),
8091                 noiselevel=-1)
8092         writemsg(red("!!! Please upgrade to the following profile if possible:\n"),
8093                 noiselevel=-1)
8094         writemsg(8*" "+green(newprofile)+"\n", noiselevel=-1)
8095         if len(dcontent) > 1:
8096                 writemsg("To upgrade do the following steps:\n", noiselevel=-1)
8097                 for myline in dcontent[1:]:
8098                         writemsg(myline, noiselevel=-1)
8099                 writemsg("\n\n", noiselevel=-1)
8100         return True
8101
8102 # gets virtual package settings
8103 def getvirtuals(myroot):
8104         global settings
8105         writemsg("--- DEPRECATED call to getvirtual\n")
8106         return settings.getvirtuals(myroot)
8107
8108 def commit_mtimedb(mydict=None, filename=None):
8109         if mydict is None:
8110                 global mtimedb
8111                 if "mtimedb" not in globals() or mtimedb is None:
8112                         return
8113                 mtimedb.commit()
8114                 return
8115         if filename is None:
8116                 global mtimedbfile
8117                 filename = mtimedbfile
8118         mydict["version"] = VERSION
8119         d = {} # for full backward compat, pickle it as a plain dict object.
8120         d.update(mydict)
8121         try:
8122                 f = atomic_ofstream(filename)
8123                 cPickle.dump(d, f, -1)
8124                 f.close()
8125                 portage_util.apply_secpass_permissions(filename, uid=uid, gid=portage_gid, mode=0664)
8126         except (IOError, OSError), e:
8127                 pass
8128
8129 def portageexit():
8130         global uid,portage_gid,portdb,db
8131         if secpass and not os.environ.has_key("SANDBOX_ACTIVE"):
8132                 close_portdbapi_caches()
8133                 commit_mtimedb()
8134
8135 atexit_register(portageexit)
8136
8137 def _global_updates(trees, prev_mtimes):
8138         """
8139         Perform new global updates if they exist in $PORTDIR/profiles/updates/.
8140
8141         @param trees: A dictionary containing portage trees.
8142         @type trees: dict
8143         @param prev_mtimes: A dictionary containing mtimes of files located in
8144                 $PORTDIR/profiles/updates/.
8145         @type prev_mtimes: dict
8146         @rtype: None or List
8147         @return: None if no were no updates, otherwise a list of update commands
8148                 that have been performed.
8149         """
8150         # only do this if we're root and not running repoman/ebuild digest
8151         global secpass
8152         if secpass < 2 or "SANDBOX_ACTIVE" in os.environ:
8153                 return
8154         mysettings = trees["/"]["vartree"].settings
8155         updpath = os.path.join(mysettings["PORTDIR"], "profiles", "updates")
8156
8157         try:
8158                 if mysettings["PORTAGE_CALLER"] == "fixpackages":
8159                         update_data = grab_updates(updpath)
8160                 else:
8161                         update_data = grab_updates(updpath, prev_mtimes)
8162         except portage_exception.DirectoryNotFound:
8163                 writemsg("--- 'profiles/updates' is empty or not available. Empty portage tree?\n")
8164                 return
8165         myupd = None
8166         if len(update_data) > 0:
8167                 do_upgrade_packagesmessage = 0
8168                 myupd = []
8169                 timestamps = {}
8170                 for mykey, mystat, mycontent in update_data:
8171                         writemsg_stdout("\n\n")
8172                         writemsg_stdout(green("Performing Global Updates: ")+bold(mykey)+"\n")
8173                         writemsg_stdout("(Could take a couple of minutes if you have a lot of binary packages.)\n")
8174                         writemsg_stdout("  "+bold(".")+"='update pass'  "+bold("*")+"='binary update'  "+bold("@")+"='/var/db move'\n"+"  "+bold("s")+"='/var/db SLOT move' "+bold("S")+"='binary SLOT move' "+bold("p")+"='update /etc/portage/package.*'\n")
8175                         valid_updates, errors = parse_updates(mycontent)
8176                         myupd.extend(valid_updates)
8177                         writemsg_stdout(len(valid_updates) * "." + "\n")
8178                         if len(errors) == 0:
8179                                 # Update our internal mtime since we
8180                                 # processed all of our directives.
8181                                 timestamps[mykey] = long(mystat.st_mtime)
8182                         else:
8183                                 for msg in errors:
8184                                         writemsg("%s\n" % msg, noiselevel=-1)
8185
8186                 update_config_files("/",
8187                         mysettings.get("CONFIG_PROTECT","").split(),
8188                         mysettings.get("CONFIG_PROTECT_MASK","").split(),
8189                         myupd)
8190
8191                 trees["/"]["bintree"] = binarytree("/", mysettings["PKGDIR"],
8192                         settings=mysettings)
8193                 for update_cmd in myupd:
8194                         if update_cmd[0] == "move":
8195                                 trees["/"]["vartree"].dbapi.move_ent(update_cmd)
8196                                 trees["/"]["bintree"].move_ent(update_cmd)
8197                         elif update_cmd[0] == "slotmove":
8198                                 trees["/"]["vartree"].dbapi.move_slot_ent(update_cmd)
8199                                 trees["/"]["bintree"].move_slot_ent(update_cmd)
8200
8201                 # The above global updates proceed quickly, so they
8202                 # are considered a single mtimedb transaction.
8203                 if len(timestamps) > 0:
8204                         # We do not update the mtime in the mtimedb
8205                         # until after _all_ of the above updates have
8206                         # been processed because the mtimedb will
8207                         # automatically commit when killed by ctrl C.
8208                         for mykey, mtime in timestamps.iteritems():
8209                                 prev_mtimes[mykey] = mtime
8210
8211                 # We gotta do the brute force updates for these now.
8212                 if mysettings["PORTAGE_CALLER"] == "fixpackages" or \
8213                 "fixpackages" in mysettings.features:
8214                         trees["/"]["bintree"].update_ents(myupd)
8215                 else:
8216                         do_upgrade_packagesmessage = 1
8217
8218                 # Update progress above is indicated by characters written to stdout so
8219                 # we print a couple new lines here to separate the progress output from
8220                 # what follows.
8221                 print
8222                 print
8223
8224                 if do_upgrade_packagesmessage and \
8225                         listdir(os.path.join(mysettings["PKGDIR"], "All"), EmptyOnError=1):
8226                         writemsg_stdout(" ** Skipping packages. Run 'fixpackages' or set it in FEATURES to fix the")
8227                         writemsg_stdout("\n    tbz2's in the packages directory. "+bold("Note: This can take a very long time."))
8228                         writemsg_stdout("\n")
8229         if myupd:
8230                 return myupd
8231
8232 #continue setting up other trees
8233
8234 class MtimeDB(dict):
8235         def __init__(self, filename):
8236                 dict.__init__(self)
8237                 self.filename = filename
8238                 self._load(filename)
8239
8240         def _load(self, filename):
8241                 try:
8242                         f = open(filename)
8243                         mypickle = cPickle.Unpickler(f)
8244                         mypickle.find_global = None
8245                         d = mypickle.load()
8246                         f.close()
8247                         del f
8248                 except (IOError, OSError, EOFError, cPickle.UnpicklingError):
8249                         d = {}
8250
8251                 if "old" in d:
8252                         d["updates"] = d["old"]
8253                         del d["old"]
8254                 if "cur" in d:
8255                         del d["cur"]
8256
8257                 d.setdefault("starttime", 0)
8258                 d.setdefault("version", "")
8259                 for k in ("info", "ldpath", "updates"):
8260                         d.setdefault(k, {})
8261
8262                 mtimedbkeys = set(("info", "ldpath", "resume", "resume_backup",
8263                         "starttime", "updates", "version"))
8264
8265                 for k in d.keys():
8266                         if k not in mtimedbkeys:
8267                                 writemsg("Deleting invalid mtimedb key: %s\n" % str(k))
8268                                 del d[k]
8269                 self.update(d)
8270                 self._clean_data = copy.deepcopy(d)
8271
8272         def commit(self):
8273                 if not self.filename:
8274                         return
8275                 d = {}
8276                 d.update(self)
8277                 # Only commit if the internal state has changed.
8278                 if d != self._clean_data:
8279                         commit_mtimedb(mydict=d, filename=self.filename)
8280                         self._clean_data = copy.deepcopy(d)
8281
8282 def create_trees(config_root=None, target_root=None, trees=None):
8283         if trees is None:
8284                 trees = {}
8285         else:
8286                 # clean up any existing portdbapi instances
8287                 for myroot in trees:
8288                         portdb = trees[myroot]["porttree"].dbapi
8289                         portdb.close_caches()
8290                         portdbapi.portdbapi_instances.remove(portdb)
8291                         del trees[myroot]["porttree"], myroot, portdb
8292
8293         settings = config(config_root=config_root, target_root=target_root,
8294                 config_incrementals=portage_const.INCREMENTALS)
8295         settings.lock()
8296         settings.validate()
8297
8298         myroots = [(settings["ROOT"], settings)]
8299         if settings["ROOT"] != "/":
8300                 settings = config(config_root=None, target_root=None,
8301                         config_incrementals=portage_const.INCREMENTALS)
8302                 settings.lock()
8303                 settings.validate()
8304                 myroots.append((settings["ROOT"], settings))
8305
8306         for myroot, mysettings in myroots:
8307                 trees[myroot] = portage_util.LazyItemsDict(trees.get(myroot, None))
8308                 trees[myroot].addLazySingleton("virtuals", mysettings.getvirtuals, myroot)
8309                 trees[myroot].addLazySingleton(
8310                         "vartree", vartree, myroot, categories=mysettings.categories,
8311                                 settings=mysettings)
8312                 trees[myroot].addLazySingleton("porttree",
8313                         portagetree, myroot, settings=mysettings)
8314                 trees[myroot].addLazySingleton("bintree",
8315                         binarytree, myroot, mysettings["PKGDIR"], settings=mysettings)
8316         return trees
8317
8318 # Initialization of legacy globals.  No functions/classes below this point
8319 # please!  When the above functions and classes become independent of the
8320 # below global variables, it will be possible to make the below code
8321 # conditional on a backward compatibility flag (backward compatibility could
8322 # be disabled via an environment variable, for example).  This will enable new
8323 # code that is aware of this flag to import portage without the unnecessary
8324 # overhead (and other issues!) of initializing the legacy globals.
8325
8326 def init_legacy_globals():
8327         global db, settings, root, portdb, selinux_enabled, mtimedbfile, mtimedb, \
8328         archlist, features, groups, pkglines, thirdpartymirrors, usedefaults, \
8329         profiledir, flushmtimedb
8330
8331         # Portage needs to ensure a sane umask for the files it creates.
8332         os.umask(022)
8333
8334         kwargs = {}
8335         for k, envvar in (("config_root", "PORTAGE_CONFIGROOT"), ("target_root", "ROOT")):
8336                 kwargs[k] = os.environ.get(envvar, "/")
8337
8338         db = create_trees(**kwargs)
8339
8340         settings = db["/"]["vartree"].settings
8341         portdb = db["/"]["porttree"].dbapi
8342
8343         for myroot in db:
8344                 if myroot != "/":
8345                         settings = db[myroot]["vartree"].settings
8346                         portdb = db[myroot]["porttree"].dbapi
8347                         break
8348
8349         root = settings["ROOT"]
8350
8351         mtimedbfile = os.path.join("/", CACHE_PATH.lstrip(os.path.sep), "mtimedb")
8352         mtimedb = MtimeDB(mtimedbfile)
8353
8354         # ========================================================================
8355         # COMPATIBILITY
8356         # These attributes should not be used
8357         # within Portage under any circumstances.
8358         # ========================================================================
8359         archlist    = settings.archlist()
8360         features    = settings.features
8361         groups      = settings["ACCEPT_KEYWORDS"].split()
8362         pkglines    = settings.packages
8363         selinux_enabled   = settings.selinux_enabled()
8364         thirdpartymirrors = settings.thirdpartymirrors()
8365         usedefaults       = settings.use_defs
8366         profiledir  = None
8367         if os.path.isdir(PROFILE_PATH):
8368                 profiledir = PROFILE_PATH
8369         def flushmtimedb(record):
8370                 writemsg("portage.flushmtimedb() is DEPRECATED\n")
8371         # ========================================================================
8372         # COMPATIBILITY
8373         # These attributes should not be used
8374         # within Portage under any circumstances.
8375         # ========================================================================
8376
8377 # WARNING!
8378 # The PORTAGE_LEGACY_GLOBALS environment variable is reserved for internal
8379 # use within Portage.  External use of this variable is unsupported because
8380 # it is experimental and it's behavior is likely to change.
8381 if "PORTAGE_LEGACY_GLOBALS" not in os.environ:
8382         init_legacy_globals()
8383
8384 # Clear the cache
8385 dircache={}
8386
8387 # ============================================================================
8388 # ============================================================================
8389