Detect potential issues with mtime granularity in env_update() and sleep if necessary...
[portage.git] / pym / portage.py
1 # portage.py -- core Portage functionality
2 # Copyright 1998-2004 Gentoo Foundation
3 # Distributed under the terms of the GNU General Public License v2
4 # $Id$
5
6
7 VERSION="$Rev$"[6:-2] + "-svn"
8
9 # ===========================================================================
10 # START OF IMPORTS -- START OF IMPORTS -- START OF IMPORTS -- START OF IMPORT
11 # ===========================================================================
12
13 try:
14         import sys
15 except ImportError:
16         print "Failed to import sys! Something is _VERY_ wrong with python."
17         raise
18
19 try:
20         import copy, errno, os, re, shutil, time, types
21         try:
22                 import cPickle
23         except ImportError:
24                 import pickle as cPickle
25
26         import stat
27         import commands
28         from time import sleep
29         from random import shuffle
30         import UserDict
31         if getattr(__builtins__, "set", None) is None:
32                 from sets import Set as set
33         from itertools import chain, izip
34 except ImportError, e:
35         sys.stderr.write("\n\n")
36         sys.stderr.write("!!! Failed to complete python imports. These are internal modules for\n")
37         sys.stderr.write("!!! python and failure here indicates that you have a problem with python\n")
38         sys.stderr.write("!!! itself and thus portage is not able to continue processing.\n\n")
39
40         sys.stderr.write("!!! You might consider starting python with verbose flags to see what has\n")
41         sys.stderr.write("!!! gone wrong. Here is the information we got for this exception:\n")
42         sys.stderr.write("    "+str(e)+"\n\n")
43         raise
44
45 try:
46         # XXX: This should get renamed to bsd_chflags, I think.
47         import chflags
48         bsd_chflags = chflags
49 except ImportError:
50         bsd_chflags = None
51
52 try:
53         from cache.cache_errors import CacheError
54         import cvstree
55         import xpak
56         import getbinpkg
57         import portage_dep
58         from portage_dep import dep_getcpv, dep_getkey, get_operator, \
59                 isjustname, isspecific, isvalidatom, \
60                 match_from_list, match_to_list, best_match_to_list
61
62         # XXX: This needs to get cleaned up.
63         import output
64         from output import bold, colorize, green, red, yellow
65
66         import portage_const
67         from portage_const import VDB_PATH, PRIVATE_PATH, CACHE_PATH, DEPCACHE_PATH, \
68           USER_CONFIG_PATH, MODULES_FILE_PATH, CUSTOM_PROFILE_PATH, PORTAGE_BASE_PATH, \
69           PORTAGE_BIN_PATH, PORTAGE_PYM_PATH, PROFILE_PATH, LOCALE_DATA_PATH, \
70           EBUILD_SH_BINARY, SANDBOX_BINARY, BASH_BINARY, \
71           MOVE_BINARY, PRELINK_BINARY, WORLD_FILE, MAKE_CONF_FILE, MAKE_DEFAULTS_FILE, \
72           DEPRECATED_PROFILE_FILE, USER_VIRTUALS_FILE, EBUILD_SH_ENV_FILE, \
73           INVALID_ENV_FILE, CUSTOM_MIRRORS_FILE, CONFIG_MEMORY_FILE,\
74           INCREMENTALS, EAPI, MISC_SH_BINARY
75
76         from portage_data import ostype, lchown, userland, secpass, uid, wheelgid, \
77                                  portage_uid, portage_gid, userpriv_groups
78         from portage_manifest import Manifest
79
80         import portage_util
81         from portage_util import atomic_ofstream, apply_secpass_permissions, apply_recursive_permissions, \
82                 dump_traceback, getconfig, grabdict, grabdict_package, grabfile, grabfile_package, \
83                 map_dictlist_vals, new_protect_filename, normalize_path, \
84                 pickle_read, pickle_write, stack_dictlist, stack_dicts, stack_lists, \
85                 unique_array, varexpand, writedict, writemsg, writemsg_stdout, write_atomic
86         import portage_exception
87         import portage_gpg
88         import portage_locks
89         import portage_exec
90         from portage_exec import atexit_register, run_exitfuncs
91         from portage_locks import unlockfile,unlockdir,lockfile,lockdir
92         import portage_checksum
93         from portage_checksum import perform_md5,perform_checksum,prelink_capable
94         import eclass_cache
95         from portage_localization import _
96         from portage_update import dep_transform, fixdbentries, grab_updates, \
97                 parse_updates, update_config_files, update_dbentries
98
99         # Need these functions directly in portage namespace to not break every external tool in existence
100         from portage_versions import best, catpkgsplit, catsplit, pkgcmp, \
101                 pkgsplit, vercmp, ververify
102
103         # endversion and endversion_keys are for backward compatibility only.
104         from portage_versions import endversion_keys
105         from portage_versions import suffix_value as endversion
106
107 except ImportError, e:
108         sys.stderr.write("\n\n")
109         sys.stderr.write("!!! Failed to complete portage imports. There are internal modules for\n")
110         sys.stderr.write("!!! portage and failure here indicates that you have a problem with your\n")
111         sys.stderr.write("!!! installation of portage. Please try a rescue portage located in the\n")
112         sys.stderr.write("!!! portage tree under '/usr/portage/sys-apps/portage/files/' (default).\n")
113         sys.stderr.write("!!! There is a README.RESCUE file that details the steps required to perform\n")
114         sys.stderr.write("!!! a recovery of portage.\n")
115         sys.stderr.write("    "+str(e)+"\n\n")
116         raise
117
118
119 try:
120         import portage_selinux as selinux
121 except OSError, e:
122         writemsg("!!! SELinux not loaded: %s\n" % str(e), noiselevel=-1)
123         del e
124 except ImportError:
125         pass
126
127 # ===========================================================================
128 # END OF IMPORTS -- END OF IMPORTS -- END OF IMPORTS -- END OF IMPORTS -- END
129 # ===========================================================================
130
131
132 def load_mod(name):
133         modname = ".".join(name.split(".")[:-1])
134         mod = __import__(modname)
135         components = name.split('.')
136         for comp in components[1:]:
137                 mod = getattr(mod, comp)
138         return mod
139
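# Illustrative sketch (not part of the original file): load_mod() resolves a
# dotted "module.attribute" path at runtime.  Module-path settings such as the
# "portdbapi.auxdbmodule" entry in the config class further down are typically
# resolved through it, e.g.
#
#     >>> load_mod("portage_util.writemsg")     # the writemsg function
#     >>> load_mod("cache.flat_hash.database")  # the flat_hash cache class
#
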
140 def best_from_dict(key, top_dict, key_order, EmptyOnError=1, FullCopy=1, AllowEmpty=1):
141         for x in key_order:
142                 if top_dict.has_key(x) and top_dict[x].has_key(key):
143                         if FullCopy:
144                                 return copy.deepcopy(top_dict[x][key])
145                         else:
146                                 return top_dict[x][key]
147         if EmptyOnError:
148                 return ""
149         else:
150                 raise KeyError, "Key not found in list; '%s'" % key
151
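# Illustrative example (hypothetical dictionaries): best_from_dict() walks
# key_order and returns the first value it finds, as a deep copy by default:
#
#     >>> d = {"conf": {"CFLAGS": "-O2"}, "defaults": {"CFLAGS": "-O1"}}
#     >>> best_from_dict("CFLAGS", d, ["conf", "defaults"])
#     '-O2'
#
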
152 def getcwd():
153         "this fixes situations where the current directory doesn't exist"
154         try:
155                 return os.getcwd()
156         except OSError: #dir doesn't exist
157                 os.chdir("/")
158                 return "/"
159 getcwd()
160
161 def abssymlink(symlink):
162         "This reads a symlink, resolving it if relative, and returns the absolute path."
163         mylink=os.readlink(symlink)
164         if mylink[0] != '/':
165                 mydir=os.path.dirname(symlink)
166                 mylink=mydir+"/"+mylink
167         return os.path.normpath(mylink)
168
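# Example (illustrative paths only): given a relative symlink
# /usr/lib/libfoo.so -> "libfoo.so.1", abssymlink("/usr/lib/libfoo.so") joins
# the target with the symlink's own directory and returns "/usr/lib/libfoo.so.1".
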
169 dircache = {}
170 cacheHit=0
171 cacheMiss=0
172 cacheStale=0
173 def cacheddir(my_original_path, ignorecvs, ignorelist, EmptyOnError, followSymlinks=True):
174         global cacheHit,cacheMiss,cacheStale
175         mypath = normalize_path(my_original_path)
176         if dircache.has_key(mypath):
177                 cacheHit += 1
178                 cached_mtime, list, ftype = dircache[mypath]
179         else:
180                 cacheMiss += 1
181                 cached_mtime, list, ftype = -1, [], []
182         try:
183                 pathstat = os.stat(mypath)
184                 if stat.S_ISDIR(pathstat[stat.ST_MODE]):
185                         mtime = pathstat[stat.ST_MTIME]
186                 else:
187                         raise portage_exception.DirectoryNotFound(mypath)
188         except (IOError,OSError,portage_exception.PortageException):
189                 if EmptyOnError:
190                         return [], []
191                 return None, None
192         # Python returns mtime in seconds, so if it was changed in the last few seconds, it could be invalid
193         if mtime != cached_mtime or time.time() - mtime < 4:
194                 if dircache.has_key(mypath):
195                         cacheStale += 1
196                 list = os.listdir(mypath)
197                 ftype = []
198                 for x in list:
199                         try:
200                                 if followSymlinks:
201                                         pathstat = os.stat(mypath+"/"+x)
202                                 else:
203                                         pathstat = os.lstat(mypath+"/"+x)
204
205                                 if stat.S_ISREG(pathstat[stat.ST_MODE]):
206                                         ftype.append(0)
207                                 elif stat.S_ISDIR(pathstat[stat.ST_MODE]):
208                                         ftype.append(1)
209                                 elif stat.S_ISLNK(pathstat[stat.ST_MODE]):
210                                         ftype.append(2)
211                                 else:
212                                         ftype.append(3)
213                         except (IOError, OSError):
214                                 ftype.append(3)
215                 dircache[mypath] = mtime, list, ftype
216
217         ret_list = []
218         ret_ftype = []
219         for x in range(0, len(list)):
220                 if(ignorecvs and (len(list[x]) > 2) and (list[x][:2]!=".#")):
221                         ret_list.append(list[x])
222                         ret_ftype.append(ftype[x])
223                 elif (list[x] not in ignorelist):
224                         ret_list.append(list[x])
225                         ret_ftype.append(ftype[x])
226
227         writemsg("cacheddirStats: H:%d/M:%d/S:%d\n" % (cacheHit, cacheMiss, cacheStale),10)
228         return ret_list, ret_ftype
229
230 def listdir(mypath, recursive=False, filesonly=False, ignorecvs=False, ignorelist=[], followSymlinks=True,
231         EmptyOnError=False, dirsonly=False):
232         """
233         Portage-specific implementation of os.listdir
234
235         @param mypath: Path whose contents you wish to list
236         @type mypath: String
237         @param recursive: Recursively scan directories contained within mypath
238         @type recursive: Boolean
239         @param filesonly: Only return files, not directories
240         @type filesonly: Boolean
241         @param ignorecvs: Ignore CVS directories ('CVS','.svn','SCCS')
242         @type ignorecvs: Boolean
243         @param ignorelist: List of filenames/directories to exclude
244         @type ignorelist: List
245         @param followSymlinks: Follow Symlink'd files and directories
246         @type followSymlinks: Boolean
247         @param EmptyOnError: Return [] if an error occurs.
248         @type EmptyOnError: Boolean
249         @param dirsonly: Only return directories.
250         @type dirsonly: Boolean
251         @rtype: List
252         @returns: A list of files and directories (or just files or just directories) or an empty list.
253         """
254
255         list, ftype = cacheddir(mypath, ignorecvs, ignorelist, EmptyOnError, followSymlinks)
256
257         if list is None:
258                 list=[]
259         if ftype is None:
260                 ftype=[]
261
262         if not (filesonly or dirsonly or recursive):
263                 return list
264
265         if recursive:
266                 x=0
267                 while x<len(ftype):
268                         if ftype[x]==1 and not (ignorecvs and os.path.basename(list[x]) in ('CVS','.svn','SCCS')):
269                                 l,f = cacheddir(mypath+"/"+list[x], ignorecvs, ignorelist, EmptyOnError,
270                                         followSymlinks)
271
272                                 l=l[:]
273                                 for y in range(0,len(l)):
274                                         l[y]=list[x]+"/"+l[y]
275                                 list=list+l
276                                 ftype=ftype+f
277                         x+=1
278         if filesonly:
279                 rlist=[]
280                 for x in range(0,len(ftype)):
281                         if ftype[x]==0:
282                                 rlist=rlist+[list[x]]
283         elif dirsonly:
284                 rlist = []
285                 for x in range(0, len(ftype)):
286                         if ftype[x] == 1:
287                                 rlist = rlist + [list[x]]       
288         else:
289                 rlist=list
290
291         return rlist
292
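# Illustrative call (path, entries and ignore list are made-up values):
#
#     >>> listdir("/etc/env.d", filesonly=True, ignorelist=[".keep"],
#     ...         EmptyOnError=True)
#     ['00basic', '05gcc']
#
# Results come from cacheddir() above, so repeated calls are served from the
# in-memory dircache unless the directory's mtime has changed or is very recent.
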
293 def flatten(mytokens):
294         """this function turns a nested [1,[2,3]] list into
295         a [1,2,3] list and returns it."""
296         newlist=[]
297         for x in mytokens:
298                 if type(x)==types.ListType:
299                         newlist.extend(flatten(x))
300                 else:
301                         newlist.append(x)
302         return newlist
303
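# Doctest-style example (illustrative only):
#
#     >>> flatten([1, [2, [3, 4]], 5])
#     [1, 2, 3, 4, 5]
#
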
304 #beautiful directed graph object
305
306 class digraph:
307         def __init__(self):
308                 """Create an empty digraph"""
309                 
310                 # { node : ( { child : priority } , { parent : priority } ) }
311                 self.nodes = {}
312                 self.order = []
313
314         def add(self, node, parent, priority=0):
315                 """Adds the specified node with the specified parent.
316                 
317                 If the dep is a soft-dep and the node already has a hard
318                 relationship to the parent, the relationship is left as hard."""
319                 
320                 if node not in self.nodes:
321                         self.nodes[node] = ({}, {})
322                         self.order.append(node)
323                 
324                 if not parent:
325                         return
326                 
327                 if parent not in self.nodes:
328                         self.nodes[parent] = ({}, {})
329                         self.order.append(parent)
330                 
331                 if parent in self.nodes[node][1]:
332                         if priority > self.nodes[node][1][parent]:
333                                 self.nodes[node][1][parent] = priority
334                 else:
335                         self.nodes[node][1][parent] = priority
336                 
337                 if node in self.nodes[parent][0]:
338                         if priority > self.nodes[parent][0][node]:
339                                 self.nodes[parent][0][node] = priority
340                 else:
341                         self.nodes[parent][0][node] = priority
342
343         def remove(self, node):
344                 """Removes the specified node from the digraph, also removing
345                 any ties to other nodes in the digraph. Raises KeyError if the
346                 node doesn't exist."""
347                 
348                 if node not in self.nodes:
349                         raise KeyError(node)
350                 
351                 for parent in self.nodes[node][1]:
352                         del self.nodes[parent][0][node]
353                 for child in self.nodes[node][0]:
354                         del self.nodes[child][1][node]
355                 
356                 del self.nodes[node]
357                 self.order.remove(node)
358
359         def contains(self, node):
360                 """Checks if the digraph contains the given node"""
361                 return node in self.nodes
362
363         def all_nodes(self):
364                 """Return a list of all nodes in the graph"""
365                 return self.order[:]
366
367         def child_nodes(self, node, ignore_priority=None):
368                 """Return all children of the specified node"""
369                 if ignore_priority is None:
370                         return self.nodes[node][0].keys()
371                 children = []
372                 for child, priority in self.nodes[node][0].iteritems():
373                         if priority > ignore_priority:
374                                 children.append(child)
375                 return children
376
377         def parent_nodes(self, node):
378                 """Return all parents of the specified node"""
379                 return self.nodes[node][1].keys()
380
381         def leaf_nodes(self, ignore_priority=None):
382                 """Return all nodes that have no children
383                 
384                 If ignore_priority is given, children whose priority is less
385                 than or equal to it are not counted."""
386                 
387                 leaf_nodes = []
388                 for node in self.order:
389                         is_leaf_node = True
390                         for child in self.nodes[node][0]:
391                                 if self.nodes[node][0][child] > ignore_priority:
392                                         is_leaf_node = False
393                                         break
394                         if is_leaf_node:
395                                 leaf_nodes.append(node)
396                 return leaf_nodes
397
398         def root_nodes(self, ignore_priority=None):
399                 """Return all nodes that have no parents.
400                 
401                 If ignore_priority is given, parents whose priority is less
402                 than or equal to it are not counted."""
403                 
404                 root_nodes = []
405                 for node in self.order:
406                         is_root_node = True
407                         for parent in self.nodes[node][1]:
408                                 if self.nodes[node][1][parent] > ignore_priority:
409                                         is_root_node = False
410                                         break
411                         if is_root_node:
412                                 root_nodes.append(node)
413                 return root_nodes
414
415         def is_empty(self):
416                 """Checks if the digraph is empty"""
417                 return len(self.nodes) == 0
418
419         def clone(self):
420                 clone = digraph()
421                 clone.nodes = copy.deepcopy(self.nodes)
422                 clone.order = self.order[:]
423                 return clone
424
425         # Backward compatibility
426         addnode = add
427         allnodes = all_nodes
428         allzeros = leaf_nodes
429         hasnode = contains
430         empty = is_empty
431         copy = clone
432
433         def delnode(self, node):
434                 try:
435                         self.remove(node)
436                 except KeyError:
437                         pass
438
439         def firstzero(self):
440                 leaf_nodes = self.leaf_nodes()
441                 if leaf_nodes:
442                         return leaf_nodes[0]
443                 return None
444
445         def hasallzeros(self, ignore_priority=None):
446                 return len(self.leaf_nodes(ignore_priority=ignore_priority)) == \
447                         len(self.order)
448
449         def debug_print(self):
450                 for node in self.nodes:
451                         print node,
452                         if self.nodes[node][0]:
453                                 print "depends on"
454                         else:
455                                 print "(no children)"
456                         for child in self.nodes[node][0]:
457                                 print "  ",child,
458                                 print "(%s)" % self.nodes[node][0][child]
459
460
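# Usage sketch for the digraph class (package names are made up):
#
#     >>> g = digraph()
#     >>> g.add("sys-libs/zlib", None)
#     >>> g.add("sys-libs/zlib", "app-arch/unzip")  # unzip depends on zlib
#     >>> g.leaf_nodes()                            # zlib has no children
#     ['sys-libs/zlib']
#     >>> g.remove("sys-libs/zlib")
#     >>> g.leaf_nodes()
#     ['app-arch/unzip']
#
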
461 _elog_atexit_handlers = []
462 def elog_process(cpv, mysettings):
463         mylogfiles = listdir(mysettings["T"]+"/logging/")
464         # shortcut for packages without any messages
465         if len(mylogfiles) == 0:
466                 return
467         # exploit listdir() file order so we process log entries in chronological order
468         mylogfiles.reverse()
469         mylogentries = {}
470         my_elog_classes = set(mysettings.get("PORTAGE_ELOG_CLASSES", "").split())
471         for f in mylogfiles:
472                 msgfunction, msgtype = f.split(".")
473                 if msgtype.upper() not in my_elog_classes \
474                                 and msgtype.lower() not in my_elog_classes:
475                         continue
476                 if msgfunction not in portage_const.EBUILD_PHASES:
477                         writemsg("!!! can't process invalid log file: %s\n" % f,
478                                 noiselevel=-1)
479                         continue
480                 if not msgfunction in mylogentries:
481                         mylogentries[msgfunction] = []
482                 msgcontent = open(mysettings["T"]+"/logging/"+f, "r").readlines()
483                 mylogentries[msgfunction].append((msgtype, msgcontent))
484
485         # in case the filters matched all messages
486         if len(mylogentries) == 0:
487                 return
488
489         # generate a single string with all log messages
490         fulllog = ""
491         for phase in portage_const.EBUILD_PHASES:
492                 if not phase in mylogentries:
493                         continue
494                 for msgtype,msgcontent in mylogentries[phase]:
495                         fulllog += "%s: %s\n" % (msgtype, phase)
496                         for line in msgcontent:
497                                 fulllog += line
498                         fulllog += "\n"
499
500         # pass the processing to the individual modules
501         logsystems = mysettings["PORTAGE_ELOG_SYSTEM"].split()
502         for s in logsystems:
503                 # - is nicer than _ for module names, so allow people to use it.
504                 s = s.replace("-", "_")
505                 try:
506                         # FIXME: ugly ad-hoc import code
507                         # TODO:  implement a common portage module loader
508                         logmodule = __import__("elog_modules.mod_"+s)
509                         m = getattr(logmodule, "mod_"+s)
510                         def timeout_handler(signum, frame):
511                                 raise portage_exception.PortageException(
512                                         "Timeout in elog_process for system '%s'" % s)
513                         import signal
514                         signal.signal(signal.SIGALRM, timeout_handler)
515                         # Timeout after one minute (in case something like the mail
516                         # module gets hung).
517                         signal.alarm(60)
518                         try:
519                                 m.process(mysettings, cpv, mylogentries, fulllog)
520                         finally:
521                                 signal.alarm(0)
522                         if hasattr(m, "finalize") and not m.finalize in _elog_atexit_handlers:
523                                 _elog_atexit_handlers.append(m.finalize)
524                                 atexit_register(m.finalize, mysettings)
525                 except (ImportError, AttributeError), e:
526                         writemsg("!!! Error while importing logging modules " + \
527                                 "while loading \"mod_%s\":\n" % str(s))
528                         writemsg("%s\n" % str(e), noiselevel=-1)
529                 except portage_exception.PortageException, e:
530                         writemsg("%s\n" % str(e), noiselevel=-1)
531
532         # clean logfiles to avoid repetitions
533         for f in mylogfiles:
534                 try:
535                         os.unlink(os.path.join(mysettings["T"], "logging", f))
536                 except OSError:
537                         pass
538
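# Illustrative settings that drive elog_process() (values are examples only):
#
#     PORTAGE_ELOG_CLASSES="warn error log"
#     PORTAGE_ELOG_SYSTEM="save mail"
#
# With something like the above, messages written under ${T}/logging during the
# ebuild phases are filtered by class and handed to elog_modules.mod_save and
# elog_modules.mod_mail in turn, each call guarded by the 60 second SIGALRM
# timeout set above.
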
539 #parse /etc/env.d and generate /etc/profile.env
540
541 def env_update(makelinks=1, target_root=None, prev_mtimes=None, contents=None):
542         if target_root is None:
543                 global root
544                 target_root = root
545         if prev_mtimes is None:
546                 global mtimedb
547                 prev_mtimes = mtimedb["ldpath"]
548         envd_dir = os.path.join(target_root, "etc", "env.d")
549         portage_util.ensure_dirs(envd_dir, mode=0755)
550         fns = listdir(envd_dir, EmptyOnError=1)
551         fns.sort()
552         templist = []
553         for x in fns:
554                 if len(x) < 3:
555                         continue
556                 if not x[0].isdigit() or not x[1].isdigit():
557                         continue
558                 if x.startswith(".") or x.endswith("~") or x.endswith(".bak"):
559                         continue
560                 templist.append(x)
561         fns = templist
562         del templist
563
564         space_separated = set(["CONFIG_PROTECT", "CONFIG_PROTECT_MASK"])
565         colon_separated = set(["ADA_INCLUDE_PATH", "ADA_OBJECTS_PATH",
566                 "CLASSPATH", "INFODIR", "INFOPATH", "KDEDIRS", "LDPATH", "MANPATH",
567                   "PATH", "PKG_CONFIG_PATH", "PRELINK_PATH", "PRELINK_PATH_MASK",
568                   "PYTHONPATH", "ROOTPATH"])
569
570         config_list = []
571
572         for x in fns:
573                 file_path = os.path.join(envd_dir, x)
574                 try:
575                         myconfig = getconfig(file_path, expand=False)
576                 except portage_exception.ParseError, e:
577                         writemsg("!!! '%s'\n" % str(e), noiselevel=-1)
578                         del e
579                         continue
580                 if myconfig is None:
581                         # broken symlink or file removed by a concurrent process
582                         writemsg("!!! File Not Found: '%s'\n" % file_path, noiselevel=-1)
583                         continue
584                 config_list.append(myconfig)
585                 if "SPACE_SEPARATED" in myconfig:
586                         space_separated.update(myconfig["SPACE_SEPARATED"].split())
587                         del myconfig["SPACE_SEPARATED"]
588                 if "COLON_SEPARATED" in myconfig:
589                         colon_separated.update(myconfig["COLON_SEPARATED"].split())
590                         del myconfig["COLON_SEPARATED"]
591
592         env = {}
593         specials = {}
594         for var in space_separated:
595                 mylist = []
596                 for myconfig in config_list:
597                         if var in myconfig:
598                                 mylist.extend(filter(None, myconfig[var].split()))
599                                 del myconfig[var] # prepare for env.update(myconfig)
600                 if mylist:
601                         env[var] = " ".join(mylist)
602                 specials[var] = mylist
603
604         for var in colon_separated:
605                 mylist = []
606                 for myconfig in config_list:
607                         if var in myconfig:
608                                 mylist.extend(filter(None, myconfig[var].split(":")))
609                                 del myconfig[var] # prepare for env.update(myconfig)
610                 if mylist:
611                         env[var] = ":".join(mylist)
612                 specials[var] = mylist
613
614         for myconfig in config_list:
615                 # Cumulative variables have already been deleted from myconfig so that
616                 # they won't be overwritten by this dict.update call.
617                 env.update(myconfig)
618
619         ldsoconf_path = os.path.join(target_root, "etc", "ld.so.conf")
620         try:
621                 myld = open(ldsoconf_path)
622                 myldlines=myld.readlines()
623                 myld.close()
624                 oldld=[]
625                 for x in myldlines:
626                         #each line has at least one char (a newline)
627                         if x[0]=="#":
628                                 continue
629                         oldld.append(x[:-1])
630         except (IOError, OSError), e:
631                 if e.errno != errno.ENOENT:
632                         raise
633                 oldld = None
634
635         ld_cache_update=False
636
637         newld = specials["LDPATH"]
638         if (oldld!=newld):
639                 #ld.so.conf needs updating and ldconfig needs to be run
640                 myfd = atomic_ofstream(ldsoconf_path)
641                 myfd.write("# ld.so.conf autogenerated by env-update; make all changes to\n")
642                 myfd.write("# contents of /etc/env.d directory\n")
643                 for x in specials["LDPATH"]:
644                         myfd.write(x+"\n")
645                 myfd.close()
646                 ld_cache_update=True
647
648         # Update prelink.conf if we are prelink-enabled
649         if prelink_capable:
650                 newprelink = atomic_ofstream(
651                         os.path.join(target_root, "etc", "prelink.conf"))
652                 newprelink.write("# prelink.conf autogenerated by env-update; make all changes to\n")
653                 newprelink.write("# contents of /etc/env.d directory\n")
654
655                 for x in ["/bin","/sbin","/usr/bin","/usr/sbin","/lib","/usr/lib"]:
656                         newprelink.write("-l "+x+"\n")
657                 for x in specials["LDPATH"]+specials["PATH"]+specials["PRELINK_PATH"]:
658                         if not x:
659                                 continue
660                         if x[-1]!='/':
661                                 x=x+"/"
662                         plmasked=0
663                         for y in specials["PRELINK_PATH_MASK"]:
664                                 if not y:
665                                         continue
666                                 if y[-1]!='/':
667                                         y=y+"/"
668                                 if y==x[0:len(y)]:
669                                         plmasked=1
670                                         break
671                         if not plmasked:
672                                 newprelink.write("-h "+x+"\n")
673                 for x in specials["PRELINK_PATH_MASK"]:
674                         newprelink.write("-b "+x+"\n")
675                 newprelink.close()
676
677         # Portage stores mtimes with 1 second granularity but in >=python-2.5 finer
678         # granularity is possible.  In order to avoid the potential ambiguity of
679         # mtimes that differ by less than 1 second, sleep here if any of the
680         # directories have been modified during the current second.
681         sleep_for_mtime_granularity = False
682         current_time = long(time.time())
683         mtime_changed = False
684         lib_dirs = set()
685         for lib_dir in portage_util.unique_array(specials["LDPATH"]+['usr/lib','usr/lib64','usr/lib32','lib','lib64','lib32']):
686                 x = os.path.join(target_root, lib_dir.lstrip(os.sep))
687                 try:
688                         newldpathtime = long(os.stat(x).st_mtime)
689                         lib_dirs.add(normalize_path(x))
690                 except OSError, oe:
691                         if oe.errno == errno.ENOENT:
692                                 try:
693                                         del prev_mtimes[x]
694                                 except KeyError:
695                                         pass
696                                 # ignore this path because it doesn't exist
697                                 continue
698                         raise
699                 if newldpathtime == current_time:
700                         sleep_for_mtime_granularity = True
701                 if x in prev_mtimes:
702                         if prev_mtimes[x] == newldpathtime:
703                                 pass
704                         else:
705                                 prev_mtimes[x] = newldpathtime
706                                 mtime_changed = True
707                 else:
708                         prev_mtimes[x] = newldpathtime
709                         mtime_changed = True
710
711         if mtime_changed:
712                 ld_cache_update = True
713
714         if makelinks and \
715                 not ld_cache_update and \
716                 contents is not None:
717                 libdir_contents_changed = False
718                 for mypath, mydata in contents.iteritems():
719                         if mydata[0] not in ("obj","sym"):
720                                 continue
721                         head, tail = os.path.split(mypath)
722                         if head in lib_dirs:
723                                 libdir_contents_changed = True
724                                 break
725                 if not libdir_contents_changed:
726                         makelinks = False
727
728         # Only run ldconfig as needed
729         if (ld_cache_update or makelinks):
730                 # ldconfig has very different behaviour between FreeBSD and Linux
731                 if ostype=="Linux" or ostype.lower().endswith("gnu"):
732                         # We can't update links if we haven't cleaned other versions first, as
733                         # an older package installed ON TOP of a newer version will cause ldconfig
734                         # to overwrite the symlinks we just made. -X means no links. After 'clean'
735                         # we can safely create links.
736                         writemsg(">>> Regenerating %setc/ld.so.cache...\n" % target_root)
737                         if makelinks:
738                                 commands.getstatusoutput("cd / ; /sbin/ldconfig -r '%s'" % target_root)
739                         else:
740                                 commands.getstatusoutput("cd / ; /sbin/ldconfig -X -r '%s'" % target_root)
741                 elif ostype in ("FreeBSD","DragonFly"):
742                         writemsg(">>> Regenerating %svar/run/ld-elf.so.hints...\n" % target_root)
743                         commands.getstatusoutput(
744                                 "cd / ; /sbin/ldconfig -elf -i -f '%svar/run/ld-elf.so.hints' '%setc/ld.so.conf'" % \
745                                 (target_root, target_root))
746
747         del specials["LDPATH"]
748
749         penvnotice  = "# THIS FILE IS AUTOMATICALLY GENERATED BY env-update.\n"
750         penvnotice += "# DO NOT EDIT THIS FILE. CHANGES TO STARTUP PROFILES\n"
751         cenvnotice  = penvnotice[:]
752         penvnotice += "# GO INTO /etc/profile NOT /etc/profile.env\n\n"
753         cenvnotice += "# GO INTO /etc/csh.cshrc NOT /etc/csh.env\n\n"
754
755         #create /etc/profile.env for bash support
756         outfile = atomic_ofstream(os.path.join(target_root, "etc", "profile.env"))
757         outfile.write(penvnotice)
758
759         env_keys = [ x for x in env if x != "LDPATH" ]
760         env_keys.sort()
761         for x in env_keys:
762                 outfile.write("export %s='%s'\n" % (x, env[x]))
763         outfile.close()
764
765         #create /etc/csh.env for (t)csh support
766         outfile = atomic_ofstream(os.path.join(target_root, "etc", "csh.env"))
767         outfile.write(cenvnotice)
768         for x in env_keys:
769                 outfile.write("setenv %s '%s'\n" % (x, env[x]))
770         outfile.close()
771
772         if sleep_for_mtime_granularity:
773                 while current_time == long(time.time()):
774                         sleep(1)
775
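# Worked example of the mtime-granularity guard above (times are made up):
# suppose /lib is modified at t=1171300000.2 and env_update() stats it during
# that same second.  The recorded long(st_mtime) == 1171300000 equals
# long(time.time()), so a later modification at t=1171300000.8 would leave the
# stored 1-second mtime unchanged and a future comparison against prev_mtimes
# would wrongly conclude that nothing changed.  To rule that out, env_update()
# sleeps until the clock ticks over to the next second before returning.
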
776 def ExtractKernelVersion(base_dir):
777         """
778         Try to figure out what kernel version we are running
779         @param base_dir: Path to sources (usually /usr/src/linux)
780         @type base_dir: string
781         @rtype: tuple( version[string], error[string])
782         @returns:
783         tuple( version[string], error[string])
784         Either version or error is populated (but never both)
785
786         """
787         lines = []
788         pathname = os.path.join(base_dir, 'Makefile')
789         try:
790                 f = open(pathname, 'r')
791         except OSError, details:
792                 return (None, str(details))
793         except IOError, details:
794                 return (None, str(details))
795
796         try:
797                 for i in range(4):
798                         lines.append(f.readline())
799         except OSError, details:
800                 return (None, str(details))
801         except IOError, details:
802                 return (None, str(details))
803
804         lines = [l.strip() for l in lines]
805
806         version = ''
807
808         #XXX: The following code relies on the ordering of vars within the Makefile
809         for line in lines:
810                 # split on the '=' then remove annoying whitespace
811                 items = line.split("=")
812                 items = [i.strip() for i in items]
813                 if items[0] == 'VERSION' or \
814                         items[0] == 'PATCHLEVEL':
815                         version += items[1]
816                         version += "."
817                 elif items[0] == 'SUBLEVEL':
818                         version += items[1]
819                 elif items[0] == 'EXTRAVERSION' and \
820                         items[-1] != items[0]:
821                         version += items[1]
822
823         # Grab a list of files named localversion* and sort them
824         localversions = os.listdir(base_dir)
825         for x in range(len(localversions)-1,-1,-1):
826                 if localversions[x][:12] != "localversion":
827                         del localversions[x]
828         localversions.sort()
829
830         # Append the contents of each to the version string, stripping ALL whitespace
831         for lv in localversions:
832                 version += "".join( " ".join( grabfile( base_dir+ "/" + lv ) ).split() )
833
834         # Check the .config for a CONFIG_LOCALVERSION and append that too, also stripping whitespace
835         kernelconfig = getconfig(base_dir+"/.config")
836         if kernelconfig and kernelconfig.has_key("CONFIG_LOCALVERSION"):
837                 version += "".join(kernelconfig["CONFIG_LOCALVERSION"].split())
838
839         return (version,None)
840
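# Illustrative result (hypothetical source tree): with a Makefile whose first
# four lines set VERSION = 2, PATCHLEVEL = 6, SUBLEVEL = 19 and
# EXTRAVERSION = -r5, plus CONFIG_LOCALVERSION="-custom" in .config,
# ExtractKernelVersion() would return ("2.6.19-r5-custom", None).
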
841 def autouse(myvartree, use_cache=1, mysettings=None):
842         """
843         autouse returns a list of USE variables auto-enabled for packages being installed
844
845         @param myvartree: Instance of the vartree class (from /var/db/pkg...)
846         @type myvartree: vartree
847         @param use_cache: read values from cache
848         @type use_cache: Boolean
849         @param mysettings: Instance of config
850         @type mysettings: config
851         @rtype: string
852         @returns: A string containing a list of USE variables that are enabled via use.defaults
853         """
854         if mysettings is None:
855                 global settings
856                 mysettings = settings
857         if mysettings.profile_path is None:
858                 return ""
859         myusevars=""
860         usedefaults = mysettings.use_defs
861         for myuse in usedefaults:
862                 dep_met = True
863                 for mydep in usedefaults[myuse]:
864                         if not myvartree.dep_match(mydep,use_cache=True):
865                                 dep_met = False
866                                 break
867                 if dep_met:
868                         myusevars += " "+myuse
869         return myusevars
870
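# Illustrative use.defaults entry (hypothetical): a profile line such as
#
#     gnome    gnome-base/gnome-session
#
# causes autouse() to append " gnome" to its result whenever the installed
# package database (myvartree) satisfies gnome-base/gnome-session.
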
871 def check_config_instance(test):
872         if not test or (str(test.__class__) != 'portage.config'):
873                 raise TypeError, "Invalid type for config object: %s" % test.__class__
874
875 class config:
876         """
877         This class encompasses the main portage configuration.  Data is pulled from
878         ROOT/PORTDIR/profiles/, from ROOT/etc/make.profile incrementally through all 
879         parent profiles as well as from ROOT/PORTAGE_CONFIGROOT/* for user specified
880         overrides.
881         
882         Generally if you need data like USE flags, FEATURES, environment variables,
883         virtuals ...etc you look in here.
884         """
885         
886         def __init__(self, clone=None, mycpv=None, config_profile_path=None,
887                 config_incrementals=None, config_root=None, target_root=None,
888                 local_config=True):
889                 """
890                 @param clone: If provided, init will use deepcopy to copy by value the instance.
891                 @type clone: Instance of config class.
892                 @param mycpv: CPV to load up (see setcpv), this is the same as calling init with mycpv=None
893                 and then calling instance.setcpv(mycpv).
894                 @type mycpv: String
895                 @param config_profile_path: Configurable path to the profile (usually PROFILE_PATH from portage_const)
896                 @type config_profile_path: String
897                 @param config_incrementals: List of incremental variables (usually portage_const.INCREMENTALS)
898                 @type config_incrementals: List
899                 @param config_root: path to read local config from (defaults to "/", see PORTAGE_CONFIGROOT)
900                 @type config_root: String
901                 @param target_root: __init__ override of $ROOT env variable.
902                 @type target_root: String
903                 @param local_config: Enables loading of local config (/etc/portage); used mostly by repoman to
904                 ignore local config (keywording and unmasking)
905                 @type local_config: Boolean
906                 """
907
908                 debug = os.environ.get("PORTAGE_DEBUG") == "1"
909
910                 self.already_in_regenerate = 0
911
912                 self.locked   = 0
913                 self.mycpv    = None
914                 self.puse     = []
915                 self.modifiedkeys = []
916                 self.uvlist = []
917
918                 self.virtuals = {}
919                 self.virts_p = {}
920                 self.dirVirtuals = None
921                 self.v_count  = 0
922
923                 # Virtuals obtained from the vartree
924                 self.treeVirtuals = {}
925                 # Virtuals by user specification. Includes negatives.
926                 self.userVirtuals = {}
927                 # Virtual negatives from user specifications.
928                 self.negVirtuals  = {}
929
930                 self.user_profile_dir = None
931                 self.local_config = local_config
932
933                 if clone:
934                         self.incrementals = copy.deepcopy(clone.incrementals)
935                         self.profile_path = copy.deepcopy(clone.profile_path)
936                         self.user_profile_dir = copy.deepcopy(clone.user_profile_dir)
937                         self.local_config = copy.deepcopy(clone.local_config)
938
939                         self.module_priority = copy.deepcopy(clone.module_priority)
940                         self.modules         = copy.deepcopy(clone.modules)
941
942                         self.depcachedir = copy.deepcopy(clone.depcachedir)
943
944                         self.packages = copy.deepcopy(clone.packages)
945                         self.virtuals = copy.deepcopy(clone.virtuals)
946
947                         self.treeVirtuals = copy.deepcopy(clone.treeVirtuals)
948                         self.userVirtuals = copy.deepcopy(clone.userVirtuals)
949                         self.negVirtuals  = copy.deepcopy(clone.negVirtuals)
950
951                         self.use_defs = copy.deepcopy(clone.use_defs)
952                         self.usemask  = copy.deepcopy(clone.usemask)
953                         self.usemask_list = copy.deepcopy(clone.usemask_list)
954                         self.pusemask_list = copy.deepcopy(clone.pusemask_list)
955                         self.useforce      = copy.deepcopy(clone.useforce)
956                         self.useforce_list = copy.deepcopy(clone.useforce_list)
957                         self.puseforce_list = copy.deepcopy(clone.puseforce_list)
958                         self.puse     = copy.deepcopy(clone.puse)
959                         self.make_defaults_use = copy.deepcopy(clone.make_defaults_use)
960                         self.pkgprofileuse = copy.deepcopy(clone.pkgprofileuse)
961                         self.mycpv    = copy.deepcopy(clone.mycpv)
962
963                         self.configlist = copy.deepcopy(clone.configlist)
964                         self.lookuplist = self.configlist[:]
965                         self.lookuplist.reverse()
966                         self.configdict = {
967                                 "env.d":     self.configlist[0],
968                                 "pkginternal": self.configlist[1],
969                                 "globals":     self.configlist[2],
970                                 "defaults":    self.configlist[3],
971                                 "conf":        self.configlist[4],
972                                 "pkg":         self.configlist[5],
973                                 "auto":        self.configlist[6],
974                                 "backupenv":   self.configlist[7],
975                                 "env":         self.configlist[8] }
976                         self.profiles = copy.deepcopy(clone.profiles)
977                         self.backupenv  = self.configdict["backupenv"]
978                         self.pusedict   = copy.deepcopy(clone.pusedict)
979                         self.categories = copy.deepcopy(clone.categories)
980                         self.pkeywordsdict = copy.deepcopy(clone.pkeywordsdict)
981                         self.pmaskdict = copy.deepcopy(clone.pmaskdict)
982                         self.punmaskdict = copy.deepcopy(clone.punmaskdict)
983                         self.prevmaskdict = copy.deepcopy(clone.prevmaskdict)
984                         self.pprovideddict = copy.deepcopy(clone.pprovideddict)
985                         self.dirVirtuals = copy.deepcopy(clone.dirVirtuals)
986                         self.treeVirtuals = copy.deepcopy(clone.treeVirtuals)
987                         self.features = copy.deepcopy(clone.features)
988                 else:
989
990                         # backupenv is for calculated incremental variables.
991                         self.backupenv = os.environ.copy()
992
993                         def check_var_directory(varname, var):
994                                 if not os.path.isdir(var):
995                                         writemsg(("!!! Error: %s='%s' is not a directory. " + \
996                                                 "Please correct this.\n") % (varname, var),
997                                                 noiselevel=-1)
998                                         raise portage_exception.DirectoryNotFound(var)
999
1000                         if config_root is None:
1001                                 config_root = "/"
1002
1003                         config_root = \
1004                                 normalize_path(config_root).rstrip(os.path.sep) + os.path.sep
1005
1006                         check_var_directory("PORTAGE_CONFIGROOT", config_root)
1007
1008                         self.depcachedir = DEPCACHE_PATH
1009
1010                         if not config_profile_path:
1011                                 config_profile_path = \
1012                                         os.path.join(config_root, PROFILE_PATH.lstrip(os.path.sep))
1013                                 if os.path.isdir(config_profile_path):
1014                                         self.profile_path = config_profile_path
1015                                 else:
1016                                         self.profile_path = None
1017                         else:
1018                                 self.profile_path = config_profile_path[:]
1019
1020                         if not config_incrementals:
1021                                 writemsg("incrementals not specified to class config\n")
1022                                 self.incrementals = copy.deepcopy(portage_const.INCREMENTALS)
1023                         else:
1024                                 self.incrementals = copy.deepcopy(config_incrementals)
1025
1026                         self.module_priority    = ["user","default"]
1027                         self.modules            = {}
1028                         self.modules["user"] = getconfig(
1029                                 os.path.join(config_root, MODULES_FILE_PATH.lstrip(os.path.sep)))
1030                         if self.modules["user"] is None:
1031                                 self.modules["user"] = {}
1032                         self.modules["default"] = {
1033                                 "portdbapi.metadbmodule": "cache.metadata.database",
1034                                 "portdbapi.auxdbmodule":  "cache.flat_hash.database",
1035                         }
1036
1037                         self.usemask=[]
1038                         self.configlist=[]
1039
1040                         # back up our incremental variables:
1041                         self.configdict={}
1042                         # configlist will contain: [ env.d, pkginternal, globals, defaults, conf, pkg, auto, backupenv, env ]
1043                         self.configlist.append({})
1044                         self.configdict["env.d"] = self.configlist[-1]
1045
1046                         self.configlist.append({})
1047                         self.configdict["pkginternal"] = self.configlist[-1]
1048
1049                         # The symlink might not exist or might not be a symlink.
1050                         if self.profile_path is None:
1051                                 self.profiles = []
1052                         else:
1053                                 self.profiles = []
1054                                 def addProfile(currentPath):
1055                                         parentsFile = os.path.join(currentPath, "parent")
1056                                         if os.path.exists(parentsFile):
1057                                                 parents = grabfile(parentsFile)
1058                                                 if not parents:
1059                                                         raise portage_exception.ParseError(
1060                                                                 "Empty parent file: '%s'" % parentsFile)
1061                                                 for parentPath in parents:
1062                                                         parentPath = normalize_path(os.path.join(
1063                                                                 currentPath, parentPath))
1064                                                         if os.path.exists(parentPath):
1065                                                                 addProfile(parentPath)
1066                                                         else:
1067                                                                 raise portage_exception.ParseError(
1068                                                                         "Parent '%s' not found: '%s'" %  \
1069                                                                         (parentPath, parentsFile))
1070                                         self.profiles.append(currentPath)
1071                                 addProfile(os.path.realpath(self.profile_path))
1072                         if local_config:
1073                                 custom_prof = os.path.join(
1074                                         config_root, CUSTOM_PROFILE_PATH.lstrip(os.path.sep))
1075                                 if os.path.exists(custom_prof):
1076                                         self.user_profile_dir = custom_prof
1077                                         self.profiles.append(custom_prof)
1078                                 del custom_prof
1079
1080                         self.packages_list = [grabfile_package(os.path.join(x, "packages")) for x in self.profiles]
1081                         self.packages      = stack_lists(self.packages_list, incremental=1)
1082                         del self.packages_list
1083                         #self.packages = grab_stacked("packages", self.profiles, grabfile, incremental_lines=1)
1084
1085                         # prevmaskdict
1086                         self.prevmaskdict={}
1087                         for x in self.packages:
1088                                 mycatpkg=dep_getkey(x)
1089                                 if not self.prevmaskdict.has_key(mycatpkg):
1090                                         self.prevmaskdict[mycatpkg]=[x]
1091                                 else:
1092                                         self.prevmaskdict[mycatpkg].append(x)
1093
1094                         # get profile-masked use flags -- INCREMENTAL Child over parent
1095                         self.usemask_list = [grabfile(os.path.join(x, "use.mask")) \
1096                                 for x in self.profiles]
1097                         self.usemask  = set(stack_lists(
1098                                 self.usemask_list, incremental=True))
1099                         use_defs_lists = [grabdict(os.path.join(x, "use.defaults")) for x in self.profiles]
1100                         self.use_defs  = stack_dictlist(use_defs_lists, incremental=True)
1101                         del use_defs_lists
1102
1103                         self.pusemask_list = []
1104                         rawpusemask = [grabdict_package(
1105                                 os.path.join(x, "package.use.mask")) \
1106                                 for x in self.profiles]
1107                         for i in xrange(len(self.profiles)):
1108                                 cpdict = {}
1109                                 for k, v in rawpusemask[i].iteritems():
1110                                         cpdict.setdefault(dep_getkey(k), {})[k] = v
1111                                 self.pusemask_list.append(cpdict)
1112                         del rawpusemask
1113
1114                         self.pkgprofileuse = []
1115                         rawprofileuse = [grabdict_package(
1116                                 os.path.join(x, "package.use"), juststrings=True) \
1117                                 for x in self.profiles]
1118                         for i in xrange(len(self.profiles)):
1119                                 cpdict = {}
1120                                 for k, v in rawprofileuse[i].iteritems():
1121                                         cpdict.setdefault(dep_getkey(k), {})[k] = v
1122                                 self.pkgprofileuse.append(cpdict)
1123                         del rawprofileuse
1124
1125                         self.useforce_list = [grabfile(os.path.join(x, "use.force")) \
1126                                 for x in self.profiles]
1127                         self.useforce  = set(stack_lists(
1128                                 self.useforce_list, incremental=True))
1129
1130                         self.puseforce_list = []
1131                         rawpuseforce = [grabdict_package(
1132                                 os.path.join(x, "package.use.force")) \
1133                                 for x in self.profiles]
1134                         for i in xrange(len(self.profiles)):
1135                                 cpdict = {}
1136                                 for k, v in rawpuseforce[i].iteritems():
1137                                         cpdict.setdefault(dep_getkey(k), {})[k] = v
1138                                 self.puseforce_list.append(cpdict)
1139                         del rawpuseforce
1140
1141                         try:
1142                                 self.mygcfg   = getconfig(os.path.join(config_root, "etc", "make.globals"))
1143
1144                                 if self.mygcfg is None:
1145                                         self.mygcfg = {}
1146                         except SystemExit, e:
1147                                 raise
1148                         except Exception, e:
1149                                 if debug:
1150                                         raise
1151                                 writemsg("!!! %s\n" % (e), noiselevel=-1)
1152                                 if not isinstance(e, EnvironmentError):
1153                                         writemsg("!!! Incorrect multiline literals can cause " + \
1154                                                 "this. Do not use them.\n", noiselevel=-1)
1155                                 sys.exit(1)
1156                         self.configlist.append(self.mygcfg)
1157                         self.configdict["globals"]=self.configlist[-1]
1158
1159                         self.make_defaults_use = []
1160                         self.mygcfg = {}
1161                         if self.profiles:
1162                                 try:
1163                                         mygcfg_dlists = [getconfig(os.path.join(x, "make.defaults")) for x in self.profiles]
1164                                         for cfg in mygcfg_dlists:
1165                                                 if cfg:
1166                                                         self.make_defaults_use.append(cfg.get("USE", ""))
1167                                                 else:
1168                                                         self.make_defaults_use.append("")
1169                                         self.mygcfg   = stack_dicts(mygcfg_dlists, incrementals=portage_const.INCREMENTALS, ignore_none=1)
1170                                         #self.mygcfg = grab_stacked("make.defaults", self.profiles, getconfig)
1171                                         if self.mygcfg is None:
1172                                                 self.mygcfg = {}
1173                                 except SystemExit, e:
1174                                         raise
1175                                 except Exception, e:
1176                                         if debug:
1177                                                 raise
1178                                         writemsg("!!! %s\n" % (e), noiselevel=-1)
1179                                         if not isinstance(e, EnvironmentError):
1180                                                 writemsg("!!! 'rm -Rf /usr/portage/profiles; " + \
1181                                                         "emerge sync' may fix this. If it does\n",
1182                                                         noiselevel=-1)
1183                                                 writemsg("!!! not then please report this to " + \
1184                                                         "bugs.gentoo.org and, if possible, a dev\n",
1185                                                         noiselevel=-1)
1186                                                 writemsg("!!! on #gentoo (irc.freenode.org)\n",
1187                                                         noiselevel=-1)
1188                                         sys.exit(1)
1189                         self.configlist.append(self.mygcfg)
1190                         self.configdict["defaults"]=self.configlist[-1]
1191
1192                         try:
1193                                 self.mygcfg = getconfig(
1194                                         os.path.join(config_root, MAKE_CONF_FILE.lstrip(os.path.sep)),
1195                                         allow_sourcing=True)
1196                                 if self.mygcfg is None:
1197                                         self.mygcfg = {}
1198                         except SystemExit, e:
1199                                 raise
1200                         except Exception, e:
1201                                 if debug:
1202                                         raise
1203                                 writemsg("!!! %s\n" % (e), noiselevel=-1)
1204                                 if not isinstance(e, EnvironmentError):
1205                                         writemsg("!!! Incorrect multiline literals can cause " + \
1206                                                 "this. Do not use them.\n", noiselevel=-1)
1207                                 sys.exit(1)
1208
1209                         # Allow ROOT setting to come from make.conf if it's not overridden
1210                         # by the constructor argument (from the calling environment).  As a
1211                         # special exception for a very common use case, config_root == "/"
1212                         # implies that ROOT in make.conf should be ignored.  That way, the
1213                         # user can chroot into $ROOT and the ROOT setting in make.conf will
1214                         # be automatically ignored (unless config_root is other than "/").
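                             # Illustrative case (paths assumed): with config_root "/chroot"
                             # and no ROOT passed in from the calling environment, a ROOT
                             # setting in /chroot/etc/make.conf is honored; with config_root
                             # "/", that same setting is ignored so chrooting into $ROOT
                             # behaves as described above.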
1215                         if config_root != "/" and \
1216                                 target_root is None and "ROOT" in self.mygcfg:
1217                                 target_root = self.mygcfg["ROOT"]
1218                         
1219                         self.configlist.append(self.mygcfg)
1220                         self.configdict["conf"]=self.configlist[-1]
1221
1222                         self.configlist.append({})
1223                         self.configdict["pkg"]=self.configlist[-1]
1224
1225                         #auto-use:
1226                         self.configlist.append({})
1227                         self.configdict["auto"]=self.configlist[-1]
1228
1229                         self.configlist.append(self.backupenv) # XXX Why though?
1230                         self.configdict["backupenv"]=self.configlist[-1]
1231
1232                         self.configlist.append(os.environ.copy())
1233                         self.configdict["env"]=self.configlist[-1]
1234
1235
1236                         # make lookuplist for loading package.*
1237                         self.lookuplist=self.configlist[:]
1238                         self.lookuplist.reverse()
1239
1240                         # Blacklist vars that could interfere with portage internals.
1241                         for blacklisted in ["PKGUSE", "PORTAGE_CONFIGROOT", "ROOT"]:
1242                                 for cfg in self.lookuplist:
1243                                         try:
1244                                                 del cfg[blacklisted]
1245                                         except KeyError:
1246                                                 pass
1247                         del blacklisted, cfg
1248
1249                         if target_root is None:
1250                                 target_root = "/"
1251
1252                         target_root = \
1253                                 normalize_path(target_root).rstrip(os.path.sep) + os.path.sep
1254
1255                         check_var_directory("ROOT", target_root)
1256
1257                         env_d = getconfig(
1258                                 os.path.join(target_root, "etc", "profile.env"), expand=False)
1259                         # env_d will be None if profile.env doesn't exist.
1260                         if env_d:
1261                                 self.configdict["env.d"].update(env_d)
1262                                 # Remove duplicate values so they don't override updated
1263                                 # profile.env values later (profile.env is reloaded in each
1264                                 # call to self.regenerate).
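                                     # e.g. (assumed values) if profile.env and the inherited
                                     # environment both carry PATH="/usr/bin:/bin", the copy
                                     # in backupenv/env is dropped so a later profile.env
                                     # update is not shadowed by the stale environment value.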
1265                                 for cfg in (self.configdict["backupenv"],
1266                                         self.configdict["env"]):
1267                                         for k, v in env_d.iteritems():
1268                                                 try:
1269                                                         if cfg[k] == v:
1270                                                                 del cfg[k]
1271                                                 except KeyError:
1272                                                         pass
1273                                 del cfg, k, v
1274
1275                         self["PORTAGE_CONFIGROOT"] = config_root
1276                         self.backup_changes("PORTAGE_CONFIGROOT")
1277                         self["ROOT"] = target_root
1278                         self.backup_changes("ROOT")
1279
1280                         self.pusedict = {}
1281                         self.pkeywordsdict = {}
1282                         self.punmaskdict = {}
1283                         abs_user_config = os.path.join(config_root,
1284                                 USER_CONFIG_PATH.lstrip(os.path.sep))
1285
1286                         # locations for "categories" and "arch.list" files
1287                         locations = [os.path.join(self["PORTDIR"], "profiles")]
1288                         pmask_locations = [os.path.join(self["PORTDIR"], "profiles")]
1289                         pmask_locations.extend(self.profiles)
1290
1291                         """ repoman controls PORTDIR_OVERLAY via the environment, so no
1292                         special cases are needed here."""
1293                         overlay_profiles = []
1294                         for ov in self["PORTDIR_OVERLAY"].split():
1295                                 ov = normalize_path(ov)
1296                                 profiles_dir = os.path.join(ov, "profiles")
1297                                 if os.path.isdir(profiles_dir):
1298                                         overlay_profiles.append(profiles_dir)
1299                         locations += overlay_profiles
1300                         
1301                         pmask_locations.extend(overlay_profiles)
1302
1303                         if local_config:
1304                                 locations.append(abs_user_config)
1305                                 pmask_locations.append(abs_user_config)
1306                                 pusedict = grabdict_package(
1307                                         os.path.join(abs_user_config, "package.use"), recursive=1)
1308                                 for key in pusedict.keys():
1309                                         cp = dep_getkey(key)
1310                                         if not self.pusedict.has_key(cp):
1311                                                 self.pusedict[cp] = {}
1312                                         self.pusedict[cp][key] = pusedict[key]
1313
1314                                 #package.keywords
1315                                 pkgdict = grabdict_package(
1316                                         os.path.join(abs_user_config, "package.keywords"),
1317                                         recursive=1)
1318                                 for key in pkgdict.keys():
1319                                         # default to ~arch if no specific keyword is given
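                                             # e.g. (assumed values) with ACCEPT_KEYWORDS="x86", a bare
                                             # "app-misc/foo" line in package.keywords is treated as
                                             # "app-misc/foo ~x86".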
1320                                         if not pkgdict[key]:
1321                                                 mykeywordlist = []
1322                                                 if self.configdict["defaults"] and self.configdict["defaults"].has_key("ACCEPT_KEYWORDS"):
1323                                                         groups = self.configdict["defaults"]["ACCEPT_KEYWORDS"].split()
1324                                                 else:
1325                                                         groups = []
1326                                                 for keyword in groups:
1327                                                         if not keyword[0] in "~-":
1328                                                                 mykeywordlist.append("~"+keyword)
1329                                                 pkgdict[key] = mykeywordlist
1330                                         cp = dep_getkey(key)
1331                                         if not self.pkeywordsdict.has_key(cp):
1332                                                 self.pkeywordsdict[cp] = {}
1333                                         self.pkeywordsdict[cp][key] = pkgdict[key]
1334
1335                                 #package.unmask
1336                                 pkgunmasklines = grabfile_package(
1337                                         os.path.join(abs_user_config, "package.unmask"),
1338                                         recursive=1)
1339                                 for x in pkgunmasklines:
1340                                         mycatpkg=dep_getkey(x)
1341                                         if self.punmaskdict.has_key(mycatpkg):
1342                                                 self.punmaskdict[mycatpkg].append(x)
1343                                         else:
1344                                                 self.punmaskdict[mycatpkg]=[x]
1345
1346                         #getting categories from an external file now
1347                         categories = [grabfile(os.path.join(x, "categories")) for x in locations]
1348                         self.categories = stack_lists(categories, incremental=1)
1349                         del categories
1350
1351                         archlist = [grabfile(os.path.join(x, "arch.list")) for x in locations]
1352                         archlist = stack_lists(archlist, incremental=1)
1353                         self.configdict["conf"]["PORTAGE_ARCHLIST"] = " ".join(archlist)
1354
1355                         #package.mask
1356                         pkgmasklines = []
1357                         for x in pmask_locations:
1358                                 pkgmasklines.append(grabfile_package(
1359                                         os.path.join(x, "package.mask"), recursive=1))
1360                         pkgmasklines = stack_lists(pkgmasklines, incremental=1)
1361
1362                         self.pmaskdict = {}
1363                         for x in pkgmasklines:
1364                                 mycatpkg=dep_getkey(x)
1365                                 if self.pmaskdict.has_key(mycatpkg):
1366                                         self.pmaskdict[mycatpkg].append(x)
1367                                 else:
1368                                         self.pmaskdict[mycatpkg]=[x]
1369
1370                         pkgprovidedlines = [grabfile(os.path.join(x, "package.provided")) for x in self.profiles]
1371                         pkgprovidedlines = stack_lists(pkgprovidedlines, incremental=1)
1372                         has_invalid_data = False
1373                         for x in range(len(pkgprovidedlines)-1, -1, -1):
1374                                 myline = pkgprovidedlines[x]
1375                                 if not isvalidatom("=" + myline):
1376                                         writemsg("Invalid package name in package.provided:" + \
1377                                                 " %s\n" % myline, noiselevel=-1)
1378                                         has_invalid_data = True
1379                                         del pkgprovidedlines[x]
1380                                         continue
1381                                 cpvr = catpkgsplit(pkgprovidedlines[x])
1382                                 if not cpvr or cpvr[0] == "null":
1383                                         writemsg("Invalid package name in package.provided: "+pkgprovidedlines[x]+"\n",
1384                                                 noiselevel=-1)
1385                                         has_invalid_data = True
1386                                         del pkgprovidedlines[x]
1387                                         continue
1388                                 if cpvr[0] == "virtual":
1389                                         writemsg("Virtual package in package.provided: %s\n" % \
1390                                                 myline, noiselevel=-1)
1391                                         has_invalid_data = True
1392                                         del pkgprovidedlines[x]
1393                                         continue
1394                         if has_invalid_data:
1395                                 writemsg("See portage(5) for correct package.provided usage.\n",
1396                                         noiselevel=-1)
1397                         self.pprovideddict = {}
1398                         for x in pkgprovidedlines:
1399                                 cpv=catpkgsplit(x)
1400                                 if not cpv:
1401                                         continue
1402                                 mycatpkg=dep_getkey(x)
1403                                 if self.pprovideddict.has_key(mycatpkg):
1404                                         self.pprovideddict[mycatpkg].append(x)
1405                                 else:
1406                                         self.pprovideddict[mycatpkg]=[x]
1407
1408                         # reasonable defaults; this is important as without USE_ORDER,
1409                         # USE will always be "" (nothing set)!
1410                         if "USE_ORDER" not in self:
1411                                 self.backupenv["USE_ORDER"] = "env:pkg:conf:defaults:pkginternal"
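                             # USE_ORDER is a colon-separated list of configdict layer names;
                             # earlier entries win when USE is stacked in regenerate(), so the
                             # default above roughly means: env beats pkg, which beats
                             # make.conf ("conf"), which beats profile defaults (a sketch of
                             # the assumed intent, not a formal specification).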
1412
1413                         self["PORTAGE_GID"] = str(portage_gid)
1414                         self.backup_changes("PORTAGE_GID")
1415
1416                         if self.get("PORTAGE_DEPCACHEDIR", None):
1417                                 self.depcachedir = self["PORTAGE_DEPCACHEDIR"]
1418                         self["PORTAGE_DEPCACHEDIR"] = self.depcachedir
1419                         self.backup_changes("PORTAGE_DEPCACHEDIR")
1420
1421                         overlays = self.get("PORTDIR_OVERLAY","").split()
1422                         if overlays:
1423                                 new_ov = []
1424                                 for ov in overlays:
1425                                         ov = normalize_path(ov)
1426                                         if os.path.isdir(ov):
1427                                                 new_ov.append(ov)
1428                                         else:
1429                                                 writemsg("!!! Invalid PORTDIR_OVERLAY" + \
1430                                                         " (not a dir): '%s'\n" % ov, noiselevel=-1)
1431                                 self["PORTDIR_OVERLAY"] = " ".join(new_ov)
1432                                 self.backup_changes("PORTDIR_OVERLAY")
1433
1434                         if "CBUILD" not in self and "CHOST" in self:
1435                                 self["CBUILD"] = self["CHOST"]
1436                                 self.backup_changes("CBUILD")
1437
1438                         self["PORTAGE_BIN_PATH"] = PORTAGE_BIN_PATH
1439                         self.backup_changes("PORTAGE_BIN_PATH")
1440                         self["PORTAGE_PYM_PATH"] = PORTAGE_PYM_PATH
1441                         self.backup_changes("PORTAGE_PYM_PATH")
1442
1443                         for var in ("PORTAGE_INST_UID", "PORTAGE_INST_GID"):
1444                                 try:
1445                                         self[var] = str(int(self.get(var, "0")))
1446                                 except ValueError:
1447                                         writemsg(("!!! %s='%s' is not a valid integer.  " + \
1448                                                 "Falling back to '0'.\n") % (var, self[var]),
1449                                                 noiselevel=-1)
1450                                         self[var] = "0"
1451                                 self.backup_changes(var)
1452
1453                         self.regenerate()
1454                         self.features = portage_util.unique_array(self["FEATURES"].split())
1455
1456                         if "gpg" in self.features:
1457                                 if not os.path.exists(self["PORTAGE_GPG_DIR"]) or \
1458                                         not os.path.isdir(self["PORTAGE_GPG_DIR"]):
1459                                         writemsg(colorize("BAD", "PORTAGE_GPG_DIR is invalid." + \
1460                                                 " Removing gpg from FEATURES.\n"), noiselevel=-1)
1461                                         self.features.remove("gpg")
1462
1463                         if not portage_exec.sandbox_capable and \
1464                                 ("sandbox" in self.features or "usersandbox" in self.features):
1465                                 if self.profile_path is not None and \
1466                                         os.path.realpath(self.profile_path) == \
1467                                         os.path.realpath(PROFILE_PATH):
1468                                         """ Don't show this warning when running repoman and the
1469                                         sandbox feature came from a profile that doesn't belong to
1470                                         the user."""
1471                                         writemsg(colorize("BAD", "!!! Problem with sandbox" + \
1472                                                 " binary. Disabling...\n\n"), noiselevel=-1)
1473                                 if "sandbox" in self.features:
1474                                         self.features.remove("sandbox")
1475                                 if "usersandbox" in self.features:
1476                                         self.features.remove("usersandbox")
1477
1478                         self.features.sort()
1479                         self["FEATURES"] = " ".join(self.features)
1480                         self.backup_changes("FEATURES")
1481
1482                         self._init_dirs()
1483
1484                 if mycpv:
1485                         self.setcpv(mycpv)
1486
1487         def _init_dirs(self):
1488                 """
1489                 Create a few directories that are critical to portage operation
1490                 """
1491                 if not os.access(self["ROOT"], os.W_OK):
1492                         return
1493
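                     # Maps a path relative to ROOT to (gid, mode, mode mask) as handed to
                     # portage_util.ensure_dirs() below; gid -1 is assumed to follow the
                     # os.chown() convention of leaving group ownership unchanged.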
1494                 dir_mode_map = {
1495                         "tmp"             :(-1,          01777, 0),
1496                         "var/tmp"         :(-1,          01777, 0),
1497                         "var/lib/portage" :(portage_gid, 02750, 02),
1498                         "var/cache/edb"   :(portage_gid,  0755, 02)
1499                 }
1500
1501                 for mypath, (gid, mode, modemask) in dir_mode_map.iteritems():
1502                         try:
1503                                 mydir = os.path.join(self["ROOT"], mypath)
1504                                 portage_util.ensure_dirs(mydir, gid=gid, mode=mode, mask=modemask)
1505                         except portage_exception.PortageException, e:
1506                                 writemsg("!!! Directory initialization failed: '%s'\n" % mydir,
1507                                         noiselevel=-1)
1508                                 writemsg("!!! %s\n" % str(e),
1509                                         noiselevel=-1)
1510
1511         def validate(self):
1512                 """Validate miscellaneous settings and display warnings if necessary.
1513                 (This code was previously in the global scope of portage.py)"""
1514
1515                 groups = self["ACCEPT_KEYWORDS"].split()
1516                 archlist = self.archlist()
1517                 if not archlist:
1518                         writemsg("--- 'profiles/arch.list' is empty or not available. Empty portage tree?\n")
1519                 else:
1520                         for group in groups:
1521                                 if group not in archlist and group[0] != '-':
1522                                         writemsg("!!! INVALID ACCEPT_KEYWORDS: %s\n" % str(group),
1523                                                 noiselevel=-1)
1524
1525                 abs_profile_path = os.path.join(self["PORTAGE_CONFIGROOT"],
1526                         PROFILE_PATH.lstrip(os.path.sep))
1527                 if not os.path.islink(abs_profile_path) and \
1528                         not os.path.exists(os.path.join(abs_profile_path, "parent")) and \
1529                         os.path.exists(os.path.join(self["PORTDIR"], "profiles")):
1530                         writemsg("\a\n\n!!! %s is not a symlink and will probably prevent most merges.\n" % abs_profile_path,
1531                                 noiselevel=-1)
1532                         writemsg("!!! It should point into a profile within %s/profiles/\n" % self["PORTDIR"])
1533                         writemsg("!!! (You can safely ignore this message when syncing. It's harmless.)\n\n\n")
1534
1535                 abs_user_virtuals = os.path.join(self["PORTAGE_CONFIGROOT"],
1536                         USER_VIRTUALS_FILE.lstrip(os.path.sep))
1537                 if os.path.exists(abs_user_virtuals):
1538                         writemsg("\n!!! /etc/portage/virtuals is deprecated in favor of\n")
1539                         writemsg("!!! /etc/portage/profile/virtuals. Please move it to\n")
1540                         writemsg("!!! this new location.\n\n")
1541
1542         def loadVirtuals(self,root):
1543                 """Not currently used by portage."""
1544                 writemsg("DEPRECATED: portage.config.loadVirtuals\n")
1545                 self.getvirtuals(root)
1546
1547         def load_best_module(self,property_string):
1548                 best_mod = best_from_dict(property_string,self.modules,self.module_priority)
1549                 try:
1550                         mod = load_mod(best_mod)
1551                 except ImportError:
1552                         dump_traceback(red("Error: Failed to import module '%s'") % best_mod, noiselevel=0)
1553                         sys.exit(1)
1554                 return mod
1555
1556         def lock(self):
1557                 self.locked = 1
1558
1559         def unlock(self):
1560                 self.locked = 0
1561
1562         def modifying(self):
1563                 if self.locked:
1564                         raise Exception, "Configuration is locked."
1565
1566         def backup_changes(self,key=None):
1567                 self.modifying()
1568                 if key and self.configdict["env"].has_key(key):
1569                         self.backupenv[key] = copy.deepcopy(self.configdict["env"][key])
1570                 else:
1571                         raise KeyError, "No such key defined in environment: %s" % key
1572
1573         def reset(self,keeping_pkg=0,use_cache=1):
1574                 """
1575                 Restore environment from self.backupenv, call self.regenerate()
1576                 @param keeping_pkg: Should we keep the setcpv() data or delete it?
1577                 @type keeping_pkg: Boolean
1578                 @param use_cache: Should self.regenerate use the cache or not
1579                 @type use_cache: Boolean
1580                 @rtype: None
1581                 """
1582                 self.modifying()
1583                 self.configdict["env"].clear()
1584                 self.configdict["env"].update(self.backupenv)
1585
1586                 self.modifiedkeys = []
1587                 if not keeping_pkg:
1588                         self.mycpv = None
1589                         self.puse = ""
1590                         self.configdict["pkg"].clear()
1591                         self.configdict["pkginternal"].clear()
1592                         self.configdict["defaults"]["USE"] = \
1593                                 " ".join(self.make_defaults_use)
1594                         self.usemask  = set(stack_lists(
1595                                 self.usemask_list, incremental=True))
1596                         self.useforce  = set(stack_lists(
1597                                 self.useforce_list, incremental=True))
1598                 self.regenerate(use_cache=use_cache)
1599
1600         def load_infodir(self,infodir):
1601                 self.modifying()
1602                 if self.configdict.has_key("pkg"):
1603                         for x in self.configdict["pkg"].keys():
1604                                 del self.configdict["pkg"][x]
1605                 else:
1606                         writemsg("No pkg setup for settings instance?\n",
1607                                 noiselevel=-1)
1608                         sys.exit(17)
1609
1610                 if os.path.exists(infodir):
1611                         if os.path.exists(infodir+"/environment"):
1612                                 self.configdict["pkg"]["PORT_ENV_FILE"] = infodir+"/environment"
1613
1614                         myre = re.compile('^[A-Z]+$')
1615                         null_byte = "\0"
1616                         for filename in listdir(infodir,filesonly=1,EmptyOnError=1):
1617                                 if myre.match(filename):
1618                                         try:
1619                                                 file_path = os.path.join(infodir, filename)
1620                                                 mydata = open(file_path).read().strip()
1621                                                 if len(mydata) < 2048 or filename == "USE":
1622                                                         if null_byte in mydata:
1623                                                                 writemsg("!!! Null byte found in metadata " + \
1624                                                                         "file: '%s'\n" % file_path, noiselevel=-1)
1625                                                                 continue
1626                                                         if filename == "USE":
1627                                                                 binpkg_flags = "-* " + mydata
1628                                                                 self.configdict["pkg"][filename] = binpkg_flags
1629                                                                 self.configdict["env"][filename] = mydata
1630                                                         else:
1631                                                                 self.configdict["pkg"][filename] = mydata
1632                                                                 self.configdict["env"][filename] = mydata
1633                                                 # CATEGORY is important because it's used in doebuild
1634                                                 # to infer the cpv.  If it's corrupted, it leads to
1635                                                 # strange errors later on, so we'll validate it and
1636                                                 # print a warning if necessary.
1637                                                 if filename == "CATEGORY":
1638                                                         matchobj = re.match("[-a-zA-Z0-9_.+]+", mydata)
1639                                                         if not matchobj or matchobj.start() != 0 or \
1640                                                                 matchobj.end() != len(mydata):
1641                                                                 writemsg("!!! CATEGORY file is corrupt: %s\n" % \
1642                                                                         os.path.join(infodir, filename), noiselevel=-1)
1643                                         except (OSError, IOError):
1644                                                 writemsg("!!! Unable to read file: %s\n" % infodir+"/"+filename,
1645                                                         noiselevel=-1)
1646                                                 pass
1647                         return 1
1648                 return 0
1649
1650         def setcpv(self, mycpv, use_cache=1, mydb=None):
1651                 """
1652                 Load a particular CPV into the config; this lets us see the
1653                 default USE flags for a particular ebuild as well as the USE
1654                 flags from package.use.
1655
1656                 @param mycpv: A cpv to load
1657                 @type mycpv: string
1658                 @param use_cache: Enables caching
1659                 @type use_cache: Boolean
1660                 @param mydb: a dbapi instance that supports aux_get with the IUSE key.
1661                 @type mydb: dbapi or derivative.
1662                 @rtype: None
1663                 """
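                     # Minimal illustrative call (cpv and dbapi instance are assumed):
                     #   settings.setcpv("app-misc/foo-1.0", mydb=portdb)
                     # after which the "pkg" configdict holds the matching PKGUSE/USE and
                     # CATEGORY values and, if anything changed, reset(keeping_pkg=1) has
                     # re-run regenerate().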
1664
1665                 self.modifying()
1666                 if self.mycpv == mycpv:
1667                         return
1668                 has_changed = False
1669                 self.mycpv = mycpv
1670                 cp = dep_getkey(mycpv)
1671                 pkginternaluse = ""
1672                 if mydb:
1673                         pkginternaluse = " ".join([x[1:] \
1674                                 for x in mydb.aux_get(mycpv, ["IUSE"])[0].split() \
1675                                 if x.startswith("+")])
1676                 if pkginternaluse != self.configdict["pkginternal"].get("USE", ""):
1677                         self.configdict["pkginternal"]["USE"] = pkginternaluse
1678                         has_changed = True
1679                 defaults = []
1680                 for i in xrange(len(self.profiles)):
1681                         defaults.append(self.make_defaults_use[i])
1682                         cpdict = self.pkgprofileuse[i].get(cp, None)
1683                         if cpdict:
1684                                 best_match = best_match_to_list(self.mycpv, cpdict.keys())
1685                                 if best_match:
1686                                         defaults.append(cpdict[best_match])
1687                 defaults = " ".join(defaults)
1688                 if defaults != self.configdict["defaults"].get("USE",""):
1689                         self.configdict["defaults"]["USE"] = defaults
1690                         has_changed = True
1691                 useforce = []
1692                 for i in xrange(len(self.profiles)):
1693                         useforce.append(self.useforce_list[i])
1694                         cpdict = self.puseforce_list[i].get(cp, None)
1695                         if cpdict:
1696                                 best_match = best_match_to_list(self.mycpv, cpdict.keys())
1697                                 if best_match:
1698                                         useforce.append(cpdict[best_match])
1699                 useforce = set(stack_lists(useforce, incremental=True))
1700                 if useforce != self.useforce:
1701                         self.useforce = useforce
1702                         has_changed = True
1703                 usemask = []
1704                 for i in xrange(len(self.profiles)):
1705                         usemask.append(self.usemask_list[i])
1706                         cpdict = self.pusemask_list[i].get(cp, None)
1707                         if cpdict:
1708                                 best_match = best_match_to_list(self.mycpv, cpdict.keys())
1709                                 if best_match:
1710                                         usemask.append(cpdict[best_match])
1711                 usemask = set(stack_lists(usemask, incremental=True))
1712                 if usemask != self.usemask:
1713                         self.usemask = usemask
1714                         has_changed = True
1715                 oldpuse = self.puse
1716                 self.puse = ""
1717                 if self.pusedict.has_key(cp):
1718                         self.pusekey = best_match_to_list(self.mycpv, self.pusedict[cp].keys())
1719                         if self.pusekey:
1720                                 self.puse = " ".join(self.pusedict[cp][self.pusekey])
1721                 if oldpuse != self.puse:
1722                         has_changed = True
1723                 self.configdict["pkg"]["PKGUSE"] = self.puse[:] # For saving to PUSE file
1724                 self.configdict["pkg"]["USE"]    = self.puse[:] # this gets appended to USE
1725                 # CATEGORY is essential for doebuild calls
1726                 self.configdict["pkg"]["CATEGORY"] = mycpv.split("/")[0]
1727                 if has_changed:
1728                         self.reset(keeping_pkg=1,use_cache=use_cache)
1729
1730         def setinst(self,mycpv,mydbapi):
1731                 self.modifying()
1732                 if len(self.virtuals) == 0:
1733                         self.getvirtuals()
1734                 # Grab the virtuals this package provides and add them into the tree virtuals.
1735                 provides = mydbapi.aux_get(mycpv, ["PROVIDE"])[0]
1736                 if isinstance(mydbapi, portdbapi):
1737                         myuse = self["USE"]
1738                 else:
1739                         myuse = mydbapi.aux_get(mycpv, ["USE"])[0]
1740                 virts = flatten(portage_dep.use_reduce(portage_dep.paren_reduce(provides), uselist=myuse.split()))
1741
1742                 cp = dep_getkey(mycpv)
1743                 for virt in virts:
1744                         virt = dep_getkey(virt)
1745                         if not self.treeVirtuals.has_key(virt):
1746                                 self.treeVirtuals[virt] = []
1747                         # XXX: Is this bad? -- It's a permanent modification
1748                         if cp not in self.treeVirtuals[virt]:
1749                                 self.treeVirtuals[virt].append(cp)
1750
1751                 self.virtuals = self.__getvirtuals_compile()
1752
1753
1754         def regenerate(self,useonly=0,use_cache=1):
1755                 """
1756                 Regenerate settings
1757                 This involves regenerating valid USE flags, re-expanding USE_EXPAND flags,
1758                 re-stacking USE flags (-flag and -*), as well as any other INCREMENTAL
1759                 variables.  This also updates the env.d configdict; useful in case an ebuild
1760                 changes the environment.
1761
1762                 If FEATURES has already been stacked, it is not stacked twice.
1763
1764                 @param useonly: Only regenerate USE flags (not any other incrementals)
1765                 @type useonly: Boolean
1766                 @param use_cache: Enable Caching (only for autouse)
1767                 @type use_cache: Boolean
1768                 @rtype: None
1769                 """
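                     # Rough sketch of the incremental stacking (values assumed purely for
                     # illustration):
                     #   profile make.defaults  USE="alsa x11"
                     #   /etc/make.conf         USE="-alsa gtk"
                     #   environment            USE="qt"
                     # would stack to roughly "gtk qt x11" before use.force/use.mask and
                     # ARCH handling are applied.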
1770
1771                 self.modifying()
1772                 if self.already_in_regenerate:
1773                         # XXX: THIS REALLY NEEDS TO GET FIXED. autouse() loops.
1774                         writemsg("!!! Looping in regenerate.\n",1)
1775                         return
1776                 else:
1777                         self.already_in_regenerate = 1
1778
1779                 # We grab the latest profile.env here since it changes frequently.
1780                 self.configdict["env.d"].clear()
1781                 env_d = getconfig(
1782                         os.path.join(self["ROOT"], "etc", "profile.env"), expand=False)
1783                 if env_d:
1784                         # env_d will be None if profile.env doesn't exist.
1785                         self.configdict["env.d"].update(env_d)
1786
1787                 if useonly:
1788                         myincrementals=["USE"]
1789                 else:
1790                         myincrementals = self.incrementals
1791                 myincrementals = set(myincrementals)
1792                 # If self.features exists, it has already been stacked and may have
1793                 # been mutated, so don't stack it again or else any mutations will be
1794                 # reverted.
1795                 if "FEATURES" in myincrementals and hasattr(self, "features"):
1796                         myincrementals.remove("FEATURES")
1797
1798                 if "USE" in myincrementals:
1799                         # Process USE last because it depends on USE_EXPAND which is also
1800                         # an incremental!
1801                         myincrementals.remove("USE")
1802
1803                 for mykey in myincrementals:
1804
1805                         mydbs=self.configlist[:-1]
1806
1807                         myflags=[]
1808                         for curdb in mydbs:
1809                                 if mykey not in curdb:
1810                                         continue
1811                                 #variables are already expanded
1812                                 mysplit = curdb[mykey].split()
1813
1814                                 for x in mysplit:
1815                                         if x=="-*":
1816                                                 # "-*" is a special "minus" var that means "unset all settings".
1817                                                 # so USE="-* gnome" will have *just* gnome enabled.
1818                                                 myflags = []
1819                                                 continue
1820
1821                                         if x[0]=="+":
1822                                                 # Not legal. People assume too much. Complain.
1823                                                 writemsg(red("USE flags should not start with a '+': %s\n" % x),
1824                                                         noiselevel=-1)
1825                                                 x=x[1:]
1826                                                 if not x:
1827                                                         continue
1828
1829                                         if (x[0]=="-"):
1830                                                 if (x[1:] in myflags):
1831                                                         # Unset/Remove it.
1832                                                         del myflags[myflags.index(x[1:])]
1833                                                 continue
1834
1835                                         # We got here, so add it now.
1836                                         if x not in myflags:
1837                                                 myflags.append(x)
1838
1839                         myflags.sort()
1840                         #store setting in last element of configlist, the original environment:
1841                         if myflags or mykey in self:
1842                                 self.configlist[-1][mykey] = " ".join(myflags)
1843                         del myflags
1844
1845                 # Do the USE calculation last because it depends on USE_EXPAND.
1846                 if "auto" in self["USE_ORDER"].split(":"):
1847                         self.configdict["auto"]["USE"] = autouse(
1848                                 vartree(root=self["ROOT"], categories=self.categories,
1849                                         settings=self),
1850                                 use_cache=use_cache, mysettings=self)
1851                 else:
1852                         self.configdict["auto"]["USE"] = ""
1853
1854                 use_expand_protected = []
1855                 use_expand = self.get("USE_EXPAND", "").split()
1856                 for var in use_expand:
1857                         var_lower = var.lower()
1858                         for x in self.get(var, "").split():
1859                                 # Any incremental USE_EXPAND variables have already been
1860                                 # processed, so leading +/- operators are invalid here.
1861                                 if x[0] == "+":
1862                                         writemsg(colorize("BAD", "Invalid '+' operator in " + \
1863                                                 "non-incremental variable '%s': '%s'\n" % (var, x)),
1864                                                 noiselevel=-1)
1865                                         x = x[1:]
1866                                 if x[0] == "-":
1867                                         writemsg(colorize("BAD", "Invalid '-' operator in " + \
1868                                                 "non-incremental variable '%s': '%s'\n" % (var, x)),
1869                                                 noiselevel=-1)
1870                                         continue
1871                                 mystr = var_lower + "_" + x
1872                                 if mystr not in use_expand_protected:
1873                                         use_expand_protected.append(mystr)
1874
1875                 if not self.uvlist:
1876                         for x in self["USE_ORDER"].split(":"):
1877                                 if x in self.configdict:
1878                                         self.uvlist.append(self.configdict[x])
1879                         self.uvlist.reverse()
1880
1881                 myflags = use_expand_protected[:]
1882                 for curdb in self.uvlist:
1883                         if "USE" not in curdb:
1884                                 continue
1885                         mysplit = curdb["USE"].split()
1886                         for x in mysplit:
1887                                 if x == "-*":
1888                                         myflags = use_expand_protected[:]
1889                                         continue
1890
1891                                 if x[0] == "+":
1892                                         writemsg(colorize("BAD", "USE flags should not start " + \
1893                                                 "with a '+': %s\n" % x), noiselevel=-1)
1894                                         x = x[1:]
1895                                         if not x:
1896                                                 continue
1897
1898                                 if x[0] == "-":
1899                                         try:
1900                                                 myflags.remove(x[1:])
1901                                         except ValueError:
1902                                                 pass
1903                                         continue
1904
1905                                 if x not in myflags:
1906                                         myflags.append(x)
1907
1908                 myflags = set(myflags)
1909                 myflags.update(self.useforce)
1910
1911                 # FEATURES=test should imply USE=test
1912                 if "test" in self.configlist[-1].get("FEATURES","").split():
1913                         myflags.add("test")
1914
1915                 usesplit = [ x for x in myflags if \
1916                         x not in self.usemask]
1917
1918                 usesplit.sort()
1919
1920                 # Use the calculated USE flags to regenerate the USE_EXPAND flags so
1921                 # that they are consistent.
1922                 for var in use_expand:
1923                         prefix = var.lower() + "_"
1924                         prefix_len = len(prefix)
1925                         expand_flags = set([ x[prefix_len:] for x in usesplit \
1926                                 if x.startswith(prefix) ])
1927                         var_split = self.get(var, "").split()
1928                         # Preserve the order of var_split because it can matter for things
1929                         # like LINGUAS.
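                             # e.g. (assumed) LINGUAS="de en fr" in make.conf should survive
                             # as "de en fr" rather than being re-sorted, since consumers may
                             # rely on the user-specified ordering.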
1930                         var_split = [ x for x in var_split if x in expand_flags ]
1931                         var_split.extend(expand_flags.difference(var_split))
1932                         if var_split or var in self:
1933                                 # Don't export empty USE_EXPAND vars unless the user config
1934                                 # exports them as empty.  This is required for vars such as
1935                                 # LINGUAS, where unset and empty have different meanings.
1936                                 self[var] = " ".join(var_split)
1937
1938                 # Pre-Pend ARCH variable to USE settings so '-*' in env doesn't kill arch.
1939                 if self.configdict["defaults"].has_key("ARCH"):
1940                         if self.configdict["defaults"]["ARCH"]:
1941                                 if self.configdict["defaults"]["ARCH"] not in usesplit:
1942                                         usesplit.insert(0,self.configdict["defaults"]["ARCH"])
1943
1944                 self.configlist[-1]["USE"]= " ".join(usesplit)
1945
1946                 self.already_in_regenerate = 0
1947
1948         def get_virts_p(self, myroot):
1949                 if self.virts_p:
1950                         return self.virts_p
1951                 virts = self.getvirtuals(myroot)
1952                 if virts:
1953                         myvkeys = virts.keys()
1954                         for x in myvkeys:
1955                                 vkeysplit = x.split("/")
1956                                 if not self.virts_p.has_key(vkeysplit[1]):
1957                                         self.virts_p[vkeysplit[1]] = virts[x]
1958                 return self.virts_p
1959
1960         def getvirtuals(self, myroot=None):
1961                 """myroot is now ignored because, due to caching, it has always been
1962                 broken for all but the first call."""
1963                 myroot = self["ROOT"]
1964                 if self.virtuals:
1965                         return self.virtuals
1966
1967                 virtuals_list = []
1968                 for x in self.profiles:
1969                         virtuals_file = os.path.join(x, "virtuals")
1970                         virtuals_dict = grabdict(virtuals_file)
1971                         for k in virtuals_dict.keys():
1972                                 if not isvalidatom(k) or dep_getkey(k) != k:
1973                                         writemsg("--- Invalid virtuals atom in %s: %s\n" % \
1974                                                 (virtuals_file, k), noiselevel=-1)
1975                                         del virtuals_dict[k]
1976                                         continue
1977                                 myvalues = virtuals_dict[k]
1978                                 for x in myvalues:
1979                                         myatom = x
1980                                         if x.startswith("-"):
1981                                                 # allow incrementals
1982                                                 myatom = x[1:]
1983                                         if not isvalidatom(myatom):
1984                                                 writemsg("--- Invalid atom in %s: %s\n" % \
1985                                                         (virtuals_file, x), noiselevel=-1)
1986                                                 myvalues.remove(x)
1987                                 if not myvalues:
1988                                         del virtuals_dict[k]
1989                         if virtuals_dict:
1990                                 virtuals_list.append(virtuals_dict)
1991
1992                 self.dirVirtuals = stack_dictlist(virtuals_list, incremental=True)
1993                 del virtuals_list
1994
1995                 for virt in self.dirVirtuals:
1996                         # Preference for virtuals decreases from left to right.
1997                         self.dirVirtuals[virt].reverse()
1998
1999                 # Repoman does not use user or tree virtuals.
2000                 if self.local_config and not self.treeVirtuals:
2001                         temp_vartree = vartree(myroot, None,
2002                                 categories=self.categories, settings=self)
2003                         # Reduce the provides into a list by CP.
2004                         self.treeVirtuals = map_dictlist_vals(getCPFromCPV,temp_vartree.get_all_provides())
2005
2006                 self.virtuals = self.__getvirtuals_compile()
2007                 return self.virtuals
2008
2009         def __getvirtuals_compile(self):
2010                 """Stack installed and profile virtuals.  Preference for virtuals
2011                 decreases from left to right.
2012                 Order of preference:
2013                 1. installed and in profile
2014                 2. installed only
2015                 3. profile only
2016                 """
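                     # Sketch with assumed data: if the profiles list dev-libs/libfoo and
                     # dev-libs/libbar under some virtual and only dev-libs/libbar is
                     # installed, the stacked result lists dev-libs/libbar first (installed
                     # and in profile) ahead of the profile-only dev-libs/libfoo.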
2017
2018                 # Virtuals by profile+tree preferences.
2019                 ptVirtuals   = {}
2020
2021                 for virt, installed_list in self.treeVirtuals.iteritems():
2022                         profile_list = self.dirVirtuals.get(virt, None)
2023                         if not profile_list:
2024                                 continue
2025                         for cp in installed_list:
2026                                 if cp in profile_list:
2027                                         ptVirtuals.setdefault(virt, [])
2028                                         ptVirtuals[virt].append(cp)
2029
2030                 virtuals = stack_dictlist([ptVirtuals, self.treeVirtuals,
2031                         self.dirVirtuals])
2032                 return virtuals
2033
2034         def __delitem__(self,mykey):
2035                 self.modifying()
2036                 for x in self.lookuplist:
2037                         if x != None:
2038                                 if mykey in x:
2039                                         del x[mykey]
2040
2041         def __getitem__(self,mykey):
2042                 match = ''
2043                 for x in self.lookuplist:
2044                         if x is None:
2045                                 writemsg("!!! lookuplist is null.\n")
2046                         elif x.has_key(mykey):
2047                                 match = x[mykey]
2048                                 break
2049                 return match
2050
2051         def has_key(self,mykey):
2052                 for x in self.lookuplist:
2053                         if x.has_key(mykey):
2054                                 return 1
2055                 return 0
2056
2057         def __contains__(self, mykey):
2058                 """Called to implement membership test operators (in and not in)."""
2059                 return bool(self.has_key(mykey))
2060
2061         def setdefault(self, k, x=None):
2062                 if k in self:
2063                         return self[k]
2064                 else:
2065                         self[k] = x
2066                         return x
2067
2068         def get(self, k, x=None):
2069                 if k in self:
2070                         return self[k]
2071                 else:
2072                         return x
2073
2074         def keys(self):
2075                 return unique_array(flatten([x.keys() for x in self.lookuplist]))
2076
2077         def __setitem__(self,mykey,myvalue):
2078                 "set a value; will be thrown away at reset() time"
2079                 if type(myvalue) != types.StringType:
2080                         raise ValueError("Invalid type being used as a value: '%s': '%s'" % (str(mykey),str(myvalue)))
2081                 self.modifying()
2082                 self.modifiedkeys += [mykey]
2083                 self.configdict["env"][mykey]=myvalue
2084
2085         def environ(self):
2086                 "return our locally-maintained environment"
2087                 mydict={}
2088                 for x in self.keys():
2089                         myvalue = self[x]
2090                         if not isinstance(myvalue, basestring):
2091                                 writemsg("!!! Non-string value in config: %s=%s\n" % \
2092                                         (x, myvalue), noiselevel=-1)
2093                                 continue
2094                         mydict[x] = myvalue
2095                 if not mydict.has_key("HOME") and mydict.has_key("BUILD_PREFIX"):
2096                         writemsg("*** HOME not set. Setting to "+mydict["BUILD_PREFIX"]+"\n")
2097                         mydict["HOME"]=mydict["BUILD_PREFIX"][:]
2098
2099                 return mydict
2100
2101         def thirdpartymirrors(self):
2102                 if getattr(self, "_thirdpartymirrors", None) is None:
2103                         profileroots = [os.path.join(self["PORTDIR"], "profiles")]
2104                         for x in self["PORTDIR_OVERLAY"].split():
2105                                 profileroots.insert(0, os.path.join(x, "profiles"))
2106                         thirdparty_lists = [grabdict(os.path.join(x, "thirdpartymirrors")) for x in profileroots]
2107                         self._thirdpartymirrors = stack_dictlist(thirdparty_lists, incremental=True)
2108                 return self._thirdpartymirrors
2109
2110         def archlist(self):
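                # e.g. (hypothetical value) PORTAGE_ARCHLIST="x86 amd64" yields
                # ["x86", "~x86", "amd64", "~amd64"].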
2111                 return flatten([[myarch, "~" + myarch] \
2112                         for myarch in self["PORTAGE_ARCHLIST"].split()])
2113
2114         def selinux_enabled(self):
2115                 if getattr(self, "_selinux_enabled", None) is None:
2116                         self._selinux_enabled = 0
2117                         if "selinux" in self["USE"].split():
2118                                 if "selinux" in globals():
2119                                         if selinux.is_selinux_enabled() == 1:
2120                                                 self._selinux_enabled = 1
2121                                         else:
2122                                                 self._selinux_enabled = 0
2123                                 else:
2124                                         writemsg("!!! SELinux module not found. Please verify that it was installed.\n",
2125                                                 noiselevel=-1)
2126                                         self._selinux_enabled = 0
2127                         if self._selinux_enabled == 0:
2128                                 try:    
2129                                         del sys.modules["selinux"]
2130                                 except KeyError:
2131                                         pass
2132                 return self._selinux_enabled
2133
2134 # XXX This would be to replace getstatusoutput completely.
2135 # XXX Issue: cannot block execution. Deadlock condition.
2136 def spawn(mystring, mysettings, debug=0, free=0, droppriv=0, sesandbox=0, **keywords):
2137         """
2138         Spawn a subprocess with extra portage-specific options.
2139         Options include:
2140
2141         Sandbox: Sandbox means the spawned process will be limited in its ability to
2142         read and write files (normally this means it is restricted to ${IMAGE}/)
2143         SELinux Sandbox: Enables sandboxing on SELinux
2144         Reduced Privileges: Drops privileges such that the process runs as portage:portage
2145         instead of as root.
2146
2147         Notes: os.system cannot be used because it messes with signal handling.  Instead we
2148         use the portage_exec spawn* family of functions.
2149
2150         This function waits for the process to terminate.
2151
2152         @param mystring: Command to run
2153         @type mystring: String
2154         @param mysettings: Either a Dict of Key,Value pairs or an instance of portage.config
2155         @type mysettings: Dictionary or config instance
2156         @param debug: Ignored
2157         @type debug: Boolean
2158         @param free: Skip sandboxing for this process (run it outside the sandbox)
2159         @type free: Boolean
2160         @param droppriv: Drop to portage:portage when running this command
2161         @type droppriv: Boolean
2162         @param sesandbox: Enable SELinux Sandboxing (toggles a context switch)
2163         @type sesandbox: Boolean
2164         @param keywords: Extra options encoded as a dict, to be passed to spawn
2165         @type keywords: Dictionary
2166         @rtype: Integer
2167         @returns: The return code of the spawned process, or a list of pids
2168         when "returnpid" is passed in keywords.
2169         """
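        # Illustrative use (a sketch, mirroring the call made in fetch() below):
        #   retval = spawn(EBUILD_SH_BINARY + " nofetch", mysettings)
        #   if retval != os.EX_OK:
        #       writemsg("!!! nofetch phase failed\n", noiselevel=-1)
        # The error message is hypothetical; the return-code check follows the
        # convention documented above.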
2170
2171         if type(mysettings) == types.DictType:
2172                 env=mysettings
2173                 keywords["opt_name"]="[ %s ]" % "portage"
2174         else:
2175                 check_config_instance(mysettings)
2176                 env=mysettings.environ()
2177                 keywords["opt_name"]="[%s]" % mysettings["PF"]
2178
2179         # The default policy for the sesandbox domain only allows entry (via exec)
2180         # from shells and from binaries that belong to portage (the number of entry
2181         # points is minimized).  The "tee" binary is not among the allowed entry
2182         # points, so it is spawned outside of the sesandbox domain and reads from a
2183         # pipe between two domains.
2184         logfile = keywords.get("logfile")
2185         mypids = []
2186         pw = None
2187         if logfile:
2188                 del keywords["logfile"]
2189                 fd_pipes = keywords.get("fd_pipes")
2190                 if fd_pipes is None:
2191                         fd_pipes = {0:0, 1:1, 2:2}
2192                 elif 1 not in fd_pipes or 2 not in fd_pipes:
2193                         raise ValueError(fd_pipes)
2194                 pr, pw = os.pipe()
2195                 mypids.extend(portage_exec.spawn(('tee', '-i', '-a', logfile),
2196                          returnpid=True, fd_pipes={0:pr, 1:fd_pipes[1], 2:fd_pipes[2]}))
2197                 os.close(pr)
2198                 fd_pipes[1] = pw
2199                 fd_pipes[2] = pw
2200                 keywords["fd_pipes"] = fd_pipes
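                # At this point fds 1 and 2 of the command being spawned both
                # point at the write end of the pipe, while "tee -i -a" copies
                # everything it reads from the read end into the logfile and
                # into the original stdout/stderr taken from fd_pipes.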
2201
2202         features = mysettings.features
2203         # XXX: Negative RESTRICT word
2204         droppriv=(droppriv and ("userpriv" in features) and not \
2205                 (("nouserpriv" in mysettings["RESTRICT"].split()) or \
2206                  ("userpriv" in mysettings["RESTRICT"].split())))
2207
2208         if droppriv and not uid and portage_gid and portage_uid:
2209                 keywords.update({"uid":portage_uid,"gid":portage_gid,"groups":userpriv_groups,"umask":002})
2210
2211         if not free:
2212                 free=((droppriv and "usersandbox" not in features) or \
2213                         (not droppriv and "sandbox" not in features and "usersandbox" not in features))
2214
2215         if free:
2216                 keywords["opt_name"] += " bash"
2217                 spawn_func = portage_exec.spawn_bash
2218         else:
2219                 keywords["opt_name"] += " sandbox"
2220                 spawn_func = portage_exec.spawn_sandbox
2221
2222         if sesandbox:
2223                 con = selinux.getcontext()
2224                 con = con.replace(mysettings["PORTAGE_T"], mysettings["PORTAGE_SANDBOX_T"])
2225                 selinux.setexec(con)
2226
2227         returnpid = keywords.get("returnpid")
2228         keywords["returnpid"] = True
2229         try:
2230                 mypids.extend(spawn_func(mystring, env=env, **keywords))
2231         finally:
2232                 if pw:
2233                         os.close(pw)
2234                 if sesandbox:
2235                         selinux.setexec(None)
2236
2237         if returnpid:
2238                 return mypids
2239
2240         while mypids:
2241                 pid = mypids.pop(0)
2242                 retval = os.waitpid(pid, 0)[1]
2243                 portage_exec.spawned_pids.remove(pid)
2244                 if retval != os.EX_OK:
2245                         for pid in mypids:
2246                                 if os.waitpid(pid, os.WNOHANG) == (0,0):
2247                                         os.kill(pid, signal.SIGTERM)
2248                                         os.waitpid(pid, 0)
2249                                 portage_exec.spawned_pids.remove(pid)
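                        # os.waitpid() packs the exit code into the high byte and
                        # the terminating signal (if any) into the low byte;
                        # normalize here so a death by signal still yields a
                        # nonzero return value.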
2250                         if retval & 0xff:
2251                                 return (retval & 0xff) << 8
2252                         return retval >> 8
2253         return os.EX_OK
2254
2255 def fetch(myuris, mysettings, listonly=0, fetchonly=0, locks_in_subdir=".locks",use_locks=1, try_mirrors=1):
2256         "fetch files.  Will use digest file if available."
2257
2258         features = mysettings.features
2259         # 'nomirror' is bad/negative logic: you restrict mirroring, you don't enable no-mirroring.
2260         if ("mirror" in mysettings["RESTRICT"].split()) or \
2261            ("nomirror" in mysettings["RESTRICT"].split()):
2262                 if ("mirror" in features) and ("lmirror" not in features):
2263                         # lmirror should allow you to bypass mirror restrictions.
2264                         # XXX: This is not a good thing, and is temporary at best.
2265                         print ">>> \"mirror\" mode desired and \"mirror\" restriction found; skipping fetch."
2266                         return 1
2267
2268         thirdpartymirrors = mysettings.thirdpartymirrors()
2269
2270         check_config_instance(mysettings)
2271
2272         custommirrors = grabdict(os.path.join(mysettings["PORTAGE_CONFIGROOT"],
2273                 CUSTOM_MIRRORS_FILE.lstrip(os.path.sep)), recursive=1)
2274
2275         mymirrors=[]
2276
2277         if listonly or ("distlocks" not in features):
2278                 use_locks = 0
2279
2280         fetch_to_ro = 0
2281         if "skiprocheck" in features:
2282                 fetch_to_ro = 1
2283
2284         if not os.access(mysettings["DISTDIR"],os.W_OK) and fetch_to_ro:
2285                 if use_locks:
2286                         writemsg(red("!!! For fetching to a read-only filesystem, " + \
2287                                 "locking should be turned off.\n"), noiselevel=-1)
2288                         writemsg("!!! This can be done by adding -distlocks to " + \
2289                                 "FEATURES in /etc/make.conf\n", noiselevel=-1)
2290 #                       use_locks = 0
2291
2292         # local mirrors are always added
2293         if custommirrors.has_key("local"):
2294                 mymirrors += custommirrors["local"]
2295
2296         if ("nomirror" in mysettings["RESTRICT"].split()) or \
2297            ("mirror"   in mysettings["RESTRICT"].split()):
2298                 # We don't add any mirrors.
2299                 pass
2300         else:
2301                 if try_mirrors:
2302                         mymirrors += [x.rstrip("/") for x in mysettings["GENTOO_MIRRORS"].split() if x]
2303
2304         mydigests = Manifest(
2305                 mysettings["O"], mysettings["DISTDIR"]).getTypeDigests("DIST")
2306
2307         fsmirrors = []
2308         for x in range(len(mymirrors)-1,-1,-1):
2309                 if mymirrors[x] and mymirrors[x][0]=='/':
2310                         fsmirrors += [mymirrors[x]]
2311                         del mymirrors[x]
2312
2313         restrict_fetch = "fetch" in mysettings["RESTRICT"].split()
2314         custom_local_mirrors = custommirrors.get("local", [])
2315         if restrict_fetch:
2316                 # With fetch restriction, a normal uri may only be fetched from
2317                 # custom local mirrors (if available).  A mirror:// uri may also
2318                 # be fetched from specific mirrors (effectively overriding fetch
2319                 # restriction, but only for specific mirrors).
2320                 locations = custom_local_mirrors
2321         else:
2322                 locations = mymirrors
2323
2324         filedict={}
2325         primaryuri_indexes={}
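        # filedict maps each distfile name to the ordered list of URIs it may
        # be fetched from, e.g. (hypothetical values):
        #   filedict["foo-1.0.tar.gz"] = [
        #       "http://distfiles.gentoo.org/distfiles/foo-1.0.tar.gz",
        #       "http://example.com/foo-1.0.tar.gz"]
        # Mirror locations are tried before the original SRC_URI unless
        # RESTRICT=primaryuri reorders them below.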
2326         for myuri in myuris:
2327                 myfile=os.path.basename(myuri)
2328                 if not filedict.has_key(myfile):
2329                         filedict[myfile]=[]
2330                         for y in range(0,len(locations)):
2331                                 filedict[myfile].append(locations[y]+"/distfiles/"+myfile)
2332                 if myuri[:9]=="mirror://":
2333                         eidx = myuri.find("/", 9)
2334                         if eidx != -1:
2335                                 mirrorname = myuri[9:eidx]
2336
2337                                 # Try user-defined mirrors first
2338                                 if custommirrors.has_key(mirrorname):
2339                                         for cmirr in custommirrors[mirrorname]:
2340                                                 filedict[myfile].append(cmirr+"/"+myuri[eidx+1:])
2341                                                 # remove the mirrors we tried from the list of official mirrors
2342                                                 if cmirr.strip() in thirdpartymirrors[mirrorname]:
2343                                                         thirdpartymirrors[mirrorname].remove(cmirr)
2344                                 # now try the official mirrors
2345                                 if thirdpartymirrors.has_key(mirrorname):
2346                                         shuffle(thirdpartymirrors[mirrorname])
2347
2348                                         for locmirr in thirdpartymirrors[mirrorname]:
2349                                                 filedict[myfile].append(locmirr+"/"+myuri[eidx+1:])
2350
2351                                 if not filedict[myfile]:
2352                                         writemsg("No known mirror by the name: %s\n" % (mirrorname))
2353                         else:
2354                                 writemsg("Invalid mirror definition in SRC_URI:\n", noiselevel=-1)
2355                                 writemsg("  %s\n" % (myuri), noiselevel=-1)
2356                 else:
2357                         if restrict_fetch:
2358                                 # Only fetching from specific mirrors is allowed.
2359                                 continue
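                        # With RESTRICT=primaryuri the upstream URIs are inserted
                        # ahead of the mirror entries added above, in their
                        # original order (tracked per file by primaryuri_indexes),
                        # so the source site is tried first.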
2360                         if "primaryuri" in mysettings["RESTRICT"].split():
2361                                 # Use the source site first.
2362                                 if primaryuri_indexes.has_key(myfile):
2363                                         primaryuri_indexes[myfile] += 1
2364                                 else:
2365                                         primaryuri_indexes[myfile] = 0
2366                                 filedict[myfile].insert(primaryuri_indexes[myfile], myuri)
2367                         else:
2368                                 filedict[myfile].append(myuri)
2369
2370         can_fetch=True
2371
2372         if listonly:
2373                 can_fetch = False
2374
2375         for var_name in ("FETCHCOMMAND", "RESUMECOMMAND"):
2376                 if not mysettings.get(var_name, None):
2377                         can_fetch = False
2378
2379         if can_fetch:
2380                 dirmode  = 02070
2381                 filemode =   060
2382                 modemask =    02
2383                 distdir_dirs = [""]
2384                 if "distlocks" in features:
2385                         distdir_dirs.append(".locks")
2386                 try:
2387                         
2388                         for x in distdir_dirs:
2389                                 mydir = os.path.join(mysettings["DISTDIR"], x)
2390                                 if portage_util.ensure_dirs(mydir, gid=portage_gid, mode=dirmode, mask=modemask):
2391                                         writemsg("Adjusting permissions recursively: '%s'\n" % mydir,
2392                                                 noiselevel=-1)
2393                                         def onerror(e):
2394                                                 raise # bail out on the first error that occurs during recursion
2395                                         if not apply_recursive_permissions(mydir,
2396                                                 gid=portage_gid, dirmode=dirmode, dirmask=modemask,
2397                                                 filemode=filemode, filemask=modemask, onerror=onerror):
2398                                                 raise portage_exception.OperationNotPermitted(
2399                                                         "Failed to apply recursive permissions for the portage group.")
2400                 except portage_exception.PortageException, e:
2401                         if not os.path.isdir(mysettings["DISTDIR"]):
2402                                 writemsg("!!! %s\n" % str(e), noiselevel=-1)
2403                                 writemsg("!!! Directory Not Found: DISTDIR='%s'\n" % mysettings["DISTDIR"], noiselevel=-1)
2404                                 writemsg("!!! Fetching will fail!\n", noiselevel=-1)
2405
2406         if can_fetch and \
2407                 not fetch_to_ro and \
2408                 not os.access(mysettings["DISTDIR"], os.W_OK):
2409                 writemsg("!!! No write access to '%s'\n" % mysettings["DISTDIR"],
2410                         noiselevel=-1)
2411                 can_fetch = False
2412
2413         if can_fetch and use_locks and locks_in_subdir:
2414                         distlocks_subdir = os.path.join(mysettings["DISTDIR"], locks_in_subdir)
2415                         if not os.access(distlocks_subdir, os.W_OK):
2416                                 writemsg("!!! No write access to %s.  Aborting.\n" % distlocks_subdir,
2417                                         noiselevel=-1)
2418                                 return 0
2419                         del distlocks_subdir
2420         for myfile in filedict.keys():
2421                 """
2422                 fetched  status
2423                 0        nonexistent
2424                 1        partially downloaded
2425                 2        completely downloaded
2426                 """
2427                 myfile_path = os.path.join(mysettings["DISTDIR"], myfile)
2428                 fetched=0
2429                 file_lock = None
2430                 if listonly:
2431                         writemsg_stdout("\n", noiselevel=-1)
2432                 else:
2433                         if use_locks and can_fetch:
2434                                 if locks_in_subdir:
2435                                         file_lock = portage_locks.lockfile(mysettings["DISTDIR"]+"/"+locks_in_subdir+"/"+myfile,wantnewlockfile=1)
2436                                 else:
2437                                         file_lock = portage_locks.lockfile(mysettings["DISTDIR"]+"/"+myfile,wantnewlockfile=1)
2438                 try:
2439                         if not listonly:
2440                                 if fsmirrors and not os.path.exists(myfile_path):
2441                                         for mydir in fsmirrors:
2442                                                 mirror_file = os.path.join(mydir, myfile)
2443                                                 try:
2444                                                         shutil.copyfile(mirror_file, myfile_path)
2445                                                         writemsg(_("Local mirror has file:" + \
2446                                                                 " %(file)s\n" % {"file":myfile}))
2447                                                         break
2448                                                 except (IOError, OSError), e:
2449                                                         if e.errno != errno.ENOENT:
2450                                                                 raise
2451                                                         del e
2452
2453                                 try:
2454                                         mystat = os.stat(myfile_path)
2455                                 except OSError, e:
2456                                         if e.errno != errno.ENOENT:
2457                                                 raise
2458                                         del e
2459                                 else:
2460                                         try:
2461                                                 apply_secpass_permissions(
2462                                                         myfile_path, gid=portage_gid, mode=0664, mask=02,
2463                                                         stat_cached=mystat)
2464                                         except portage_exception.PortageException, e:
2465                                                 if not os.access(myfile_path, os.R_OK):
2466                                                         writemsg("!!! Failed to adjust permissions:" + \
2467                                                                 " %s\n" % str(e), noiselevel=-1)
2468                                         if myfile not in mydigests:
2469                                                 # We don't have a digest, but the file exists.  We must
2470                                                 # assume that it is fully downloaded.
2471                                                 continue
2472                                         else:
2473                                                 if mystat.st_size < mydigests[myfile]["size"] and \
2474                                                         not restrict_fetch:
2475                                                         fetched = 1 # Try to resume this download.
2476                                                 else:
2477                                                         verified_ok, reason = portage_checksum.verify_all(
2478                                                                 myfile_path, mydigests[myfile])
2479                                                         if not verified_ok:
2480                                                                 writemsg("!!! Previously fetched" + \
2481                                                                         " file: '%s'\n" % myfile, noiselevel=-1)
2482                                                                 writemsg("!!! Reason: %s\n" % reason[0],
2483                                                                         noiselevel=-1)
2484                                                                 writemsg(("!!! Got:      %s\n" + \
2485                                                                         "!!! Expected: %s\n") % \
2486                                                                         (reason[1], reason[2]), noiselevel=-1)
2487                                                                 if can_fetch and not restrict_fetch:
2488                                                                         writemsg("Refetching...\n\n",
2489                                                                                 noiselevel=-1)
2490                                                                         os.unlink(myfile_path)
2491                                                         else:
2492                                                                 eout = output.EOutput()
2493                                                                 eout.quiet = \
2494                                                                         mysettings.get("PORTAGE_QUIET", None) == "1"
2495                                                                 for digest_name in mydigests[myfile]:
2496                                                                         eout.ebegin(
2497                                                                                 "%s %s ;-)" % (myfile, digest_name))
2498                                                                         eout.eend(0)
2499                                                                 continue # fetch any remaining files
2500
2501                         for loc in filedict[myfile]:
2502                                 if listonly:
2503                                         writemsg_stdout(loc+" ", noiselevel=-1)
2504                                         continue
2505                                 # allow different fetchcommands per protocol
2506                                 protocol = loc[0:loc.find("://")]
2507                                 if mysettings.has_key("FETCHCOMMAND_"+protocol.upper()):
2508                                         fetchcommand=mysettings["FETCHCOMMAND_"+protocol.upper()]
2509                                 else:
2510                                         fetchcommand=mysettings["FETCHCOMMAND"]
2511                                 if mysettings.has_key("RESUMECOMMAND_"+protocol.upper()):
2512                                         resumecommand=mysettings["RESUMECOMMAND_"+protocol.upper()]
2513                                 else:
2514                                         resumecommand=mysettings["RESUMECOMMAND"]
2515
2516                                 fetchcommand=fetchcommand.replace("${DISTDIR}",mysettings["DISTDIR"])
2517                                 resumecommand=resumecommand.replace("${DISTDIR}",mysettings["DISTDIR"])
2518
2519                                 if not can_fetch:
2520                                         if fetched != 2:
2521                                                 if fetched == 0:
2522                                                         writemsg("!!! File %s isn't fetched, and we are unable to get it.\n" % myfile,
2523                                                                 noiselevel=-1)
2524                                                 else:
2525                                                         writemsg("!!! File %s isn't fully fetched, and we are unable to complete it.\n" % myfile,
2526                                                                 noiselevel=-1)
2527                                                 for var_name in ("FETCHCOMMAND", "RESUMECOMMAND"):
2528                                                         if not mysettings.get(var_name, None):
2529                                                                 writemsg(("!!! %s is unset.  It should " + \
2530                                                                 "have been defined in /etc/make.globals.\n") \
2531                                                                  % var_name, noiselevel=-1)
2532                                                 return 0
2533                                         else:
2534                                                 continue
2535
2536                                 if fetched != 2:
2537                                         #we either need to resume or start the download
2538                                         #you can't use "continue" when you're inside a "try" block
2539                                         if fetched==1:
2540                                                 #resume mode:
2541                                                 writemsg(">>> Resuming download...\n")
2542                                                 locfetch=resumecommand
2543                                         else:
2544                                                 #normal mode:
2545                                                 locfetch=fetchcommand
2546                                         writemsg_stdout(">>> Downloading '%s'\n" % \
2547                                                 re.sub(r'//(.+):.+@(.+)/',r'//\1:*password*@\2/', loc))
2548                                         myfetch=locfetch.replace("${URI}",loc)
2549                                         myfetch=myfetch.replace("${FILE}",myfile)
2550
2551                                         spawn_keywords = {}
2552                                         if "userfetch" in mysettings.features and \
2553                                                 os.getuid() == 0 and portage_gid and portage_uid:
2554                                                 spawn_keywords.update({
2555                                                         "uid"    : portage_uid,
2556                                                         "gid"    : portage_gid,
2557                                                         "groups" : userpriv_groups,
2558                                                         "umask"  : 002})
2559
2560                                         try:
2561
2562                                                 if mysettings.selinux_enabled():
2563                                                         con = selinux.getcontext()
2564                                                         con = con.replace(mysettings["PORTAGE_T"], mysettings["PORTAGE_FETCH_T"])
2565                                                         selinux.setexec(con)
2566
2567                                                 myret = portage_exec.spawn_bash(myfetch,
2568                                                         env=mysettings.environ(), **spawn_keywords)
2569
2570                                                 if mysettings.selinux_enabled():
2571                                                         selinux.setexec(None)
2572
2573                                         finally:
2574                                                 try:
2575                                                         apply_secpass_permissions(myfile_path,
2576                                                                 gid=portage_gid, mode=0664, mask=02)
2577                                                 except portage_exception.FileNotFound, e:
2578                                                         pass
2579                                                 except portage_exception.PortageException, e:
2580                                                         if not os.access(myfile_path, os.R_OK):
2581                                                                 writemsg("!!! Failed to adjust permissions:" + \
2582                                                                         " %s\n" % str(e), noiselevel=-1)
2583
2584                                         if mydigests!=None and mydigests.has_key(myfile):
2585                                                 try:
2586                                                         mystat = os.stat(myfile_path)
2587                                                 except OSError, e:
2588                                                         if e.errno != errno.ENOENT:
2589                                                                 raise
2590                                                         del e
2591                                                         fetched = 0
2592                                                 else:
2593                                                         # no exception?  file exists. let digestcheck() report
2594                                                         # appropriately for size or checksum errors
2595                                                         if (mystat[stat.ST_SIZE]<mydigests[myfile]["size"]):
2596                                                                 # Fetch failed... Try the next one... Kill 404 files though.
2597                                                                 if (mystat[stat.ST_SIZE]<100000) and (len(myfile)>4) and not ((myfile[-5:]==".html") or (myfile[-4:]==".htm")):
2598                                                                         html404=re.compile("<title>.*(not found|404).*</title>",re.I|re.M)
2599                                                                         if html404.search(open(mysettings["DISTDIR"]+"/"+myfile).read()):
2600                                                                                 try:
2601                                                                                         os.unlink(mysettings["DISTDIR"]+"/"+myfile)
2602                                                                                         writemsg(">>> Deleting invalid distfile. (Improper 404 redirect from server.)\n")
2603                                                                                         fetched = 0
2604                                                                                         continue
2605                                                                                 except (IOError, OSError):
2606                                                                                         pass
2607                                                                 fetched = 1
2608                                                                 continue
2609                                                         if not fetchonly:
2610                                                                 fetched=2
2611                                                                 break
2612                                                         else:
2613                                                                 # File is the correct size--check the checksums for the fetched
2614                                                                 # file NOW, for those users who don't have a stable/continuous
2615                                                                 # net connection. This way we have a chance to try to download
2616                                                                 # from another mirror...
2617                                                                 verified_ok,reason = portage_checksum.verify_all(mysettings["DISTDIR"]+"/"+myfile, mydigests[myfile])
2618                                                                 if not verified_ok:
2619                                                                         print reason
2620                                                                         writemsg("!!! Fetched file: "+str(myfile)+" VERIFY FAILED!\n",
2621                                                                                 noiselevel=-1)
2622                                                                         writemsg("!!! Reason: "+reason[0]+"\n",
2623                                                                                 noiselevel=-1)
2624                                                                         writemsg("!!! Got:      %s\n!!! Expected: %s\n" % \
2625                                                                                 (reason[1], reason[2]), noiselevel=-1)
2626                                                                         writemsg("Removing corrupt distfile...\n", noiselevel=-1)
2627                                                                         os.unlink(mysettings["DISTDIR"]+"/"+myfile)
2628                                                                         fetched=0
2629                                                                 else:
2630                                                                         eout = output.EOutput()
2631                                                                         eout.quiet = mysettings.get("PORTAGE_QUIET", None) == "1"
2632                                                                         for x_key in mydigests[myfile].keys():
2633                                                                                 eout.ebegin("%s %s ;-)" % (myfile, x_key))
2634                                                                                 eout.eend(0)
2635                                                                         fetched=2
2636                                                                         break
2637                                         else:
2638                                                 if not myret:
2639                                                         fetched=2
2640                                                         break
2641                                                 elif mydigests!=None:
2642                                                         writemsg("No digest file available and download failed.\n\n",
2643                                                                 noiselevel=-1)
2644                 finally:
2645                         if use_locks and file_lock:
2646                                 portage_locks.unlockfile(file_lock)
2647
2648                 if listonly:
2649                         writemsg_stdout("\n", noiselevel=-1)
2650                 if fetched != 2:
2651                         if restrict_fetch:
2652                                 print "\n!!!", mysettings["CATEGORY"] + "/" + \
2653                                         mysettings["PF"], "has fetch restriction turned on."
2654                                 print "!!! This probably means that this " + \
2655                                         "ebuild's files must be downloaded"
2656                                 print "!!! manually.  See the comments in" + \
2657                                         " the ebuild for more information.\n"
2658                                 spawn(EBUILD_SH_BINARY + " nofetch", mysettings)
2659                         elif listonly:
2660                                 continue
2661                         elif not filedict[myfile]:
2662                                 writemsg("Warning: No mirrors available for file" + \
2663                                         " '%s'\n" % (myfile), noiselevel=-1)
2664                         else:
2665                                 writemsg("!!! Couldn't download '%s'. Aborting.\n" % myfile,
2666                                         noiselevel=-1)
2667                         return 0
2668         return 1
2669
2670 def digestgen(myarchives, mysettings, overwrite=1, manifestonly=0, myportdb=None):
2671         """
2672         Generates a digest file if missing.  Assumes all files are available.
2673         DEPRECATED: this is now only a compatibility wrapper for
2674                     portage_manifest.Manifest()
2675         NOTE: manifestonly and overwrite are useless with manifest2 and
2676               are therefore ignored."""
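        # Illustrative call (a sketch): with mysettings["O"] pointing at an
        # ebuild directory, something like
        #   digestgen(myarchives, mysettings, myportdb=portdb)
        # returns 1 on success and 0 on failure, regenerating the Manifest as
        # a side effect.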
2677         if myportdb is None:
2678                 writemsg("Warning: myportdb not specified to digestgen\n")
2679                 global portdb
2680                 myportdb = portdb
2681         global _doebuild_manifest_exempt_depend
2682         try:
2683                 _doebuild_manifest_exempt_depend += 1
2684                 distfiles_map = {}
2685                 fetchlist_dict = FetchlistDict(mysettings["O"], mysettings, myportdb)
2686                 for cpv in fetchlist_dict:
2687                         try:
2688                                 for myfile in fetchlist_dict[cpv]:
2689                                         distfiles_map.setdefault(myfile, []).append(cpv)
2690                         except portage_exception.InvalidDependString, e:
2691                                 writemsg("!!! %s\n" % str(e), noiselevel=-1)
2692                                 writemsg("!!! Invalid SRC_URI for '%s'.\n" % cpv, noiselevel=-1)
2693                                 del e
2694                                 return 0
2695                 mf = Manifest(mysettings["O"], mysettings["DISTDIR"],
2696                         fetchlist_dict=fetchlist_dict)
2697                 # Don't require all hashes since that can trigger excessive
2698                 # fetches when sufficient digests already exist.  To ease transition
2699                 # while Manifest 1 is being removed, only require hashes that will
2700                 # exist before and after the transition.
2701                 required_hash_types = set()
2702                 required_hash_types.add("size")
2703                 required_hash_types.add(portage_const.MANIFEST2_REQUIRED_HASH)
2704                 dist_hashes = mf.fhashdict.get("DIST", {})
2705                 missing_hashes = set()
2706                 for myfile in distfiles_map:
2707                         myhashes = dist_hashes.get(myfile)
2708                         if not myhashes:
2709                                 missing_hashes.add(myfile)
2710                                 continue
2711                         if required_hash_types.difference(myhashes):
2712                                 missing_hashes.add(myfile)
2713                 if missing_hashes:
2714                         missing_files = []
2715                         for myfile in missing_hashes:
2716                                 try:
2717                                         os.stat(os.path.join(mysettings["DISTDIR"], myfile))
2718                                 except OSError, e:
2719                                         if e.errno != errno.ENOENT:
2720                                                 raise
2721                                         del e
2722                                         missing_files.append(myfile)
2723                         if missing_files:
2724                                 mytree = os.path.realpath(os.path.dirname(
2725                                         os.path.dirname(mysettings["O"])))
2726                                 fetch_settings = config(clone=mysettings)
2727                                 debug = mysettings.get("PORTAGE_DEBUG") == "1"
2728                                 for myfile in missing_files:
2729                                         success = False
2730                                         for cpv in distfiles_map[myfile]:
2731                                                 myebuild = os.path.join(mysettings["O"],
2732                                                         catsplit(cpv)[1] + ".ebuild")
2733                                                 # for RESTRICT=fetch, mirror, etc...
2734                                                 doebuild_environment(myebuild, "fetch",
2735                                                         mysettings["ROOT"], fetch_settings,
2736                                                         debug, 1, myportdb)
2737                                                 alluris, aalist = myportdb.getfetchlist(
2738                                                         cpv, mytree=mytree, all=True,
2739                                                         mysettings=fetch_settings)
2740                                                 myuris = [uri for uri in alluris \
2741                                                         if os.path.basename(uri) == myfile]
2742                                                 if fetch(myuris, fetch_settings):
2743                                                         success = True
2744                                                         break
2745                                         if not success:
2746                                                 writemsg(("!!! File %s doesn't exist, can't update " + \
2747                                                         "Manifest\n") % myfile, noiselevel=-1)
2748                                                 return 0
2749                 writemsg_stdout(">>> Creating Manifest for %s\n" % mysettings["O"])
2750                 try:
2751                         mf.create(requiredDistfiles=myarchives,
2752                                 assumeDistHashesSometimes=True,
2753                                 assumeDistHashesAlways=(
2754                                 "assume-digests" in mysettings.features))
2755                 except portage_exception.FileNotFound, e:
2756                         writemsg(("!!! File %s doesn't exist, can't update " + \
2757                                 "Manifest\n") % e, noiselevel=-1)
2758                         return 0
2759                 mf.write(sign=False)
2760                 if "assume-digests" not in mysettings.features:
2761                         distlist = mf.fhashdict.get("DIST", {}).keys()
2762                         distlist.sort()
2763                         auto_assumed = []
2764                         for filename in distlist:
2765                                 if not os.path.exists(
2766                                         os.path.join(mysettings["DISTDIR"], filename)):
2767                                         auto_assumed.append(filename)
2768                         if auto_assumed:
2769                                 mytree = os.path.realpath(
2770                                         os.path.dirname(os.path.dirname(mysettings["O"])))
2771                                 cp = os.path.sep.join(mysettings["O"].split(os.path.sep)[-2:])
2772                                 pkgs = myportdb.cp_list(cp, mytree=mytree)
2773                                 pkgs.sort()
2774                                 writemsg_stdout("  digest.assumed" + output.colorize("WARN",
2775                                         str(len(auto_assumed)).rjust(18)) + "\n")
2776                                 for pkg_key in pkgs:
2777                                         fetchlist = myportdb.getfetchlist(pkg_key,
2778                                                 mysettings=mysettings, all=True, mytree=mytree)[1]
2779                                         pv = pkg_key.split("/")[1]
2780                                         for filename in auto_assumed:
2781                                                 if filename in fetchlist:
2782                                                         writemsg_stdout(
2783                                                                 "   digest-%s::%s\n" % (pv, filename))
2784                 return 1
2785         finally:
2786                 _doebuild_manifest_exempt_depend -= 1
2787
2788 def digestParseFile(myfilename, mysettings=None):
2789         """(filename) -- Parses a given file for entries matching:
2790         <checksumkey> <checksum_hex_string> <filename> <filesize>
2791         Ignores lines that don't start with a valid checksum identifier
2792         and returns a dict with the filenames as keys and {checksumkey:checksum}
2793         as the values.
2794         DEPRECATED: this function is now only a compatibility wrapper for
2795                     portage_manifest.Manifest()."""
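        # A digest file line has the form (hypothetical values):
        #   MD5 0123456789abcdef0123456789abcdef foo-1.0.tar.gz 123456
        # i.e. <checksumkey> <checksum_hex_string> <filename> <filesize>.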
2796
2797         mysplit = myfilename.split(os.sep)
2798         if mysplit[-2] == "files" and mysplit[-1].startswith("digest-"):
2799                 pkgdir = os.sep + os.sep.join(mysplit[:-2]).strip(os.sep)
2800         elif mysplit[-1] == "Manifest":
2801                 pkgdir = os.sep + os.sep.join(mysplit[:-1]).strip(os.sep)
2802
2803         if mysettings is None:
2804                 global settings
2805                 mysettings = config(clone=settings)
2806
2807         return Manifest(pkgdir, mysettings["DISTDIR"]).getDigests()
2808
2809 def digestcheck(myfiles, mysettings, strict=0, justmanifest=0):
2810         """Verifies checksums.  Assumes all files have been downloaded.
2811         DEPRECATED: this is now only a compatibility wrapper for
2812                     portage_manifest.Manifest()."""
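        # Illustrative call: digestcheck(myfiles, mysettings, strict=1) returns
        # 1 when everything verifies against the Manifest and 0 on any failure;
        # with strict left at 0 the check is skipped and 1 is returned.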
2813         if not strict:
2814                 return 1
2815         pkgdir = mysettings["O"]
2816         manifest_path = os.path.join(pkgdir, "Manifest")
2817         if not os.path.exists(manifest_path):
2818                 writemsg("!!! Manifest file not found: '%s'\n" % manifest_path,
2819                         noiselevel=-1)
2820                 if strict:
2821                         return 0
2822         mf = Manifest(pkgdir, mysettings["DISTDIR"])
2823         eout = output.EOutput()
2824         eout.quiet = mysettings.get("PORTAGE_QUIET", None) == "1"
2825         try:
2826                 eout.ebegin("checking ebuild checksums ;-)")
2827                 mf.checkTypeHashes("EBUILD")
2828                 eout.eend(0)
2829                 eout.ebegin("checking auxfile checksums ;-)")
2830                 mf.checkTypeHashes("AUX")
2831                 eout.eend(0)
2832                 eout.ebegin("checking miscfile checksums ;-)")
2833                 mf.checkTypeHashes("MISC", ignoreMissingFiles=True)
2834                 eout.eend(0)
2835                 for f in myfiles:
2836                         eout.ebegin("checking %s ;-)" % f)
2837                         mf.checkFileHashes(mf.findFile(f), f)
2838                         eout.eend(0)
2839         except KeyError, e:
2840                 eout.eend(1)
2841                 writemsg("\n!!! Missing digest for %s\n" % str(e), noiselevel=-1)
2842                 return 0
2843         except portage_exception.FileNotFound, e:
2844                 eout.eend(1)
2845                 writemsg("\n!!! A file listed in the Manifest could not be found: %s\n" % str(e),
2846                         noiselevel=-1)
2847                 return 0
2848         except portage_exception.DigestException, e:
2849                 eout.eend(1)
2850                 writemsg("\n!!! Digest verification failed:\n", noiselevel=-1)
2851                 writemsg("!!! %s\n" % e.value[0], noiselevel=-1)
2852                 writemsg("!!! Reason: %s\n" % e.value[1], noiselevel=-1)
2853                 writemsg("!!! Got: %s\n" % e.value[2], noiselevel=-1)
2854                 writemsg("!!! Expected: %s\n" % e.value[3], noiselevel=-1)
2855                 return 0
2856         # Make sure that all of the ebuilds are actually listed in the Manifest.
2857         for f in os.listdir(pkgdir):
2858                 if f.endswith(".ebuild") and not mf.hasFile("EBUILD", f):
2859                         writemsg("!!! A file is not listed in the Manifest: '%s'\n" % \
2860                                 os.path.join(pkgdir, f), noiselevel=-1)
2861                         return 0
2862         """ epatch will just grab all the patches out of a directory, so we have to
2863         make sure there aren't any foreign files that it might grab."""
2864         filesdir = os.path.join(pkgdir, "files")
2865         for parent, dirs, files in os.walk(filesdir):
2866                 for d in dirs:
2867                         if d.startswith(".") or d == "CVS":
2868                                 dirs.remove(d)
2869                 for f in files:
2870                         if f.startswith("."):
2871                                 continue
2872                         f = os.path.join(parent, f)[len(filesdir) + 1:]
2873                         file_type = mf.findFile(f)
2874                         if file_type != "AUX" and not f.startswith("digest-"):
2875                                 writemsg("!!! A file is not listed in the Manifest: '%s'\n" % \
2876                                         os.path.join(filesdir, f), noiselevel=-1)
2877                                 return 0
2878         return 1
2879
2880 # parse actionmap to spawn ebuild with the appropriate args
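# actionmap maps each phase name to a dict describing how to run it; a
# hypothetical entry looks roughly like:
#   actionmap["compile"] = {"cmd": EBUILD_SH_BINARY + " %s",
#       "args": {"droppriv": 1, "free": 0, "sesandbox": 0},
#       "dep": "setup"}
# "dep" names the phase that must be spawned first, "cmd" is formatted with
# the phase name, and "args" is passed through to spawn() as keywords.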
2881 def spawnebuild(mydo,actionmap,mysettings,debug,alwaysdep=0,logfile=None):
2882         if alwaysdep or "noauto" not in mysettings.features:
2883                 # process dependency first
2884                 if "dep" in actionmap[mydo].keys():
2885                         retval=spawnebuild(actionmap[mydo]["dep"],actionmap,mysettings,debug,alwaysdep=alwaysdep,logfile=logfile)
2886                         if retval:
2887                                 return retval
2888         kwargs = actionmap[mydo]["args"]
2889         mysettings["EBUILD_PHASE"] = mydo
2890         phase_retval = spawn(actionmap[mydo]["cmd"] % mydo, mysettings, debug=debug, logfile=logfile, **kwargs)
2891         mysettings["EBUILD_PHASE"] = ""
2892
2893         if not kwargs["droppriv"] and secpass >= 2:
2894                 """ Privileged phases may have left files that need to be made
2895                 writable to a less privileged user."""
2896                 apply_recursive_permissions(mysettings["T"],
2897                         uid=portage_uid, gid=portage_gid, dirmode=070, dirmask=0,
2898                         filemode=060, filemask=0)
2899
2900         if phase_retval == os.EX_OK:
2901                 if mydo == "install":
2902                         # User and group bits that match the "portage" user or group are
2903                         # automatically mapped to PORTAGE_INST_UID and PORTAGE_INST_GID if
2904                         # necessary.  The chown system call may clear S_ISUID and S_ISGID
2905                         # bits, so those bits are restored if necessary.
2906                         inst_uid = int(mysettings["PORTAGE_INST_UID"])
2907                         inst_gid = int(mysettings["PORTAGE_INST_GID"])
2908                         for parent, dirs, files in os.walk(mysettings["D"]):
2909                                 for fname in chain(dirs, files):
2910                                         fpath = os.path.join(parent, fname)
2911                                         mystat = os.lstat(fpath)
2912                                         if mystat.st_uid != portage_uid and \
2913                                                 mystat.st_gid != portage_gid:
2914                                                 continue
2915                                         myuid = -1
2916                                         mygid = -1
2917                                         if mystat.st_uid == portage_uid:
2918                                                 myuid = inst_uid
2919                                         if mystat.st_gid == portage_gid:
2920                                                 mygid = inst_gid
2921                                         apply_secpass_permissions(fpath, uid=myuid, gid=mygid,
2922                                                 mode=mystat.st_mode, stat_cached=mystat,
2923                                                 follow_links=False)
2924                         mycommand = " ".join([MISC_SH_BINARY, "install_qa_check", "install_symlink_html_docs"])
2925                         qa_retval = spawn(mycommand, mysettings, debug=debug, logfile=logfile, **kwargs)
2926                         if qa_retval:
2927                                 writemsg("!!! install_qa_check failed; exiting.\n",
2928                                         noiselevel=-1)
2929                         return qa_retval
2930         return phase_retval
2931
2932
2933 def eapi_is_supported(eapi):
2934         return str(eapi).strip() == str(portage_const.EAPI).strip()
2935
2936 def doebuild_environment(myebuild, mydo, myroot, mysettings, debug, use_cache, mydbapi):
2937
2938         ebuild_path = os.path.abspath(myebuild)
2939         pkg_dir     = os.path.dirname(ebuild_path)
2940
2941         if mysettings.configdict["pkg"].has_key("CATEGORY"):
2942                 cat = mysettings.configdict["pkg"]["CATEGORY"]
2943         else:
2944                 cat = os.path.basename(normalize_path(os.path.join(pkg_dir, "..")))
2945         mypv = os.path.basename(ebuild_path)[:-7]       
2946         mycpv = cat+"/"+mypv
2947         mysplit=pkgsplit(mypv,silent=0)
2948         if mysplit is None:
2949                 raise portage_exception.IncorrectParameter(
2950                         "Invalid ebuild path: '%s'" % myebuild)
2951
2952         if mydo != "depend":
2953                 """For performance reasons, setcpv only triggers reset when it
2954                 detects a package-specific change in config.  For the ebuild
2955                 environment, a reset call is forced in order to ensure that the
2956                 latest env.d variables are used."""
2957                 mysettings.reset(use_cache=use_cache)
2958                 mysettings.setcpv(mycpv, use_cache=use_cache, mydb=mydbapi)
2959
2960         mysettings["EBUILD_PHASE"] = mydo
2961
2962         mysettings["PORTAGE_MASTER_PID"] = str(os.getpid())
2963
2964         # We are disabling user-specific bashrc files.
2965         mysettings["BASH_ENV"] = INVALID_ENV_FILE
2966
2967         if debug: # Otherwise it overrides emerge's settings.
2968                 # We have no other way to set debug... debug can't be passed in
2969                 # due to how it's coded... Don't overwrite this so we can use it.
2970                 mysettings["PORTAGE_DEBUG"] = "1"
2971
2972         mysettings["ROOT"]     = myroot
2973         mysettings["STARTDIR"] = getcwd()
2974
2975         mysettings["EBUILD"]   = ebuild_path
2976         mysettings["O"]        = pkg_dir
2977         mysettings.configdict["pkg"]["CATEGORY"] = cat
2978         mysettings["FILESDIR"] = pkg_dir+"/files"
2979         mysettings["PF"]       = mypv
2980
2981         mysettings["ECLASSDIR"]   = mysettings["PORTDIR"]+"/eclass"
2982         mysettings["SANDBOX_LOG"] = mycpv.replace("/", "_-_")
2983
2984         mysettings["PROFILE_PATHS"] = "\n".join(mysettings.profiles)+"\n"+CUSTOM_PROFILE_PATH
2985         mysettings["P"]  = mysplit[0]+"-"+mysplit[1]
2986         mysettings["PN"] = mysplit[0]
2987         mysettings["PV"] = mysplit[1]
2988         mysettings["PR"] = mysplit[2]
2989
2990         if portage_util.noiselimit < 0:
2991                 mysettings["PORTAGE_QUIET"] = "1"
2992
2993         if mydo != "depend":
2994                 eapi, mysettings["INHERITED"], mysettings["SLOT"], mysettings["RESTRICT"]  = \
2995                         mydbapi.aux_get(mycpv, ["EAPI", "INHERITED", "SLOT", "RESTRICT"])
2996                 if not eapi_is_supported(eapi):
2997                         # can't do anything with this.
2998                         raise portage_exception.UnsupportedAPIException(mycpv, eapi)
2999                 mysettings["PORTAGE_RESTRICT"] = " ".join(flatten(
3000                         portage_dep.use_reduce(portage_dep.paren_reduce(
3001                         mysettings["RESTRICT"]), uselist=mysettings["USE"].split())))
3002
3003         if mysplit[2] == "r0":
3004                 mysettings["PVR"]=mysplit[1]
3005         else:
3006                 mysettings["PVR"]=mysplit[1]+"-"+mysplit[2]
3007
3008         if mysettings.has_key("PATH"):
3009                 mysplit=mysettings["PATH"].split(":")
3010         else:
3011                 mysplit=[]
3012         if PORTAGE_BIN_PATH not in mysplit:
3013                 mysettings["PATH"]=PORTAGE_BIN_PATH+":"+mysettings["PATH"]
3014
3015         # Sandbox needs canonical paths.
3016         mysettings["PORTAGE_TMPDIR"] = os.path.realpath(
3017                 mysettings["PORTAGE_TMPDIR"])
3018         mysettings["BUILD_PREFIX"] = mysettings["PORTAGE_TMPDIR"]+"/portage"
3019         mysettings["PKG_TMPDIR"]   = mysettings["PORTAGE_TMPDIR"]+"/binpkgs"
3020         
3021         # Package {pre,post}inst and {pre,post}rm may overlap, so they must have separate
3022         # locations in order to prevent interference.
3023         if mydo in ("unmerge", "prerm", "postrm", "cleanrm"):
3024                 mysettings["PORTAGE_BUILDDIR"] = os.path.join(
3025                         mysettings["PKG_TMPDIR"],
3026                         mysettings["CATEGORY"], mysettings["PF"])
3027         else:
3028                 mysettings["PORTAGE_BUILDDIR"] = os.path.join(
3029                         mysettings["BUILD_PREFIX"],
3030                         mysettings["CATEGORY"], mysettings["PF"])
3031
3032         mysettings["HOME"] = os.path.join(mysettings["PORTAGE_BUILDDIR"], "homedir")
3033         mysettings["WORKDIR"] = os.path.join(mysettings["PORTAGE_BUILDDIR"], "work")
3034         mysettings["D"] = os.path.join(mysettings["PORTAGE_BUILDDIR"], "image") + os.sep
3035         mysettings["T"] = os.path.join(mysettings["PORTAGE_BUILDDIR"], "temp")
3036
3037         mysettings["PORTAGE_BASHRC"] = os.path.join(
3038                 mysettings["PORTAGE_CONFIGROOT"], EBUILD_SH_ENV_FILE.lstrip(os.path.sep))
3039
3040         #set up KV variable -- DEP SPEEDUP :: Don't waste time. Keep var persistent.
3041         if (mydo!="depend") or not mysettings.has_key("KV"):
3042                 mykv,err1=ExtractKernelVersion(os.path.join(myroot, "usr/src/linux"))
3043                 if mykv:
3044                         # Regular source tree
3045                         mysettings["KV"]=mykv
3046                 else:
3047                         mysettings["KV"]=""
3048
3049         if (mydo!="depend") or not mysettings.has_key("KVERS"):
3050                 myso=os.uname()[2]
3051                 mysettings["KVERS"]=myso
3052
3053         # Allow color.map to control colors associated with einfo, ewarn, etc...
3054         mycolors = []
3055         for c in ("GOOD", "WARN", "BAD", "HILITE", "BRACKET"):
3056                 mycolors.append("%s=$'%s'" % (c, output.codes[c]))
3057         mysettings["PORTAGE_COLORMAP"] = "\n".join(mycolors)
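             # The value built above is a newline-separated list of shell-style
             # assignments, one per name, for the bash side to pick up color.map
             # settings.  Illustrative only (actual codes come from output.codes):
             #   GOOD=$'\x1b[32;01m'
             #   WARN=$'\x1b[33;01m'
             #   BAD=$'\x1b[31;01m'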
3058
3059 def prepare_build_dirs(myroot, mysettings, cleanup):
3060
3061         clean_dirs = [mysettings["HOME"]]
3062
3063         # We enable cleanup when we want to make sure old cruft (such as the old
3064         # environment) doesn't interfere with the current phase.
3065         if cleanup:
3066                 clean_dirs.append(mysettings["T"])
3067
3068         for clean_dir in clean_dirs:
3069                 try:
3070                         shutil.rmtree(clean_dir)
3071                 except OSError, oe:
3072                         if errno.ENOENT == oe.errno:
3073                                 pass
3074                         elif errno.EPERM == oe.errno:
3075                                 writemsg("%s\n" % oe, noiselevel=-1)
3076                                 writemsg("Operation Not Permitted: rmtree('%s')\n" % \
3077                                         clean_dir, noiselevel=-1)
3078                                 return 1
3079                         else:
3080                                 raise
3081
3082         def makedirs(dir_path):
3083                 try:
3084                         os.makedirs(dir_path)
3085                 except OSError, oe:
3086                         if errno.EEXIST == oe.errno:
3087                                 pass
3088                         elif errno.EPERM == oe.errno:
3089                                 writemsg("%s\n" % oe, noiselevel=-1)
3090                                 writemsg("Operation Not Permitted: makedirs('%s')\n" % \
3091                                         dir_path, noiselevel=-1)
3092                                 return False
3093                         else:
3094                                 raise
3095                 return True
3096
3097         mysettings["PKG_LOGDIR"] = os.path.join(mysettings["T"], "logging")
3098
3099         mydirs = [os.path.dirname(mysettings["PORTAGE_BUILDDIR"])]
3100         mydirs.append(os.path.dirname(mydirs[-1]))
3101
3102         try:
3103                 for mydir in mydirs:
3104                         portage_util.ensure_dirs(mydir)
3105                         portage_util.apply_secpass_permissions(mydir,
3106                                 gid=portage_gid, uid=portage_uid, mode=070, mask=0)
3107                 for dir_key in ("PORTAGE_BUILDDIR", "HOME", "PKG_LOGDIR", "T"):
3108                         """These directories don't necessarily need to be group writable.
3109                         However, the setup phase is commonly run as a privileged user prior
3110                         to the other phases being run by an unprivileged user.  Currently,
3111                         we use the portage group to ensure that the unprivileged user still
3112                         has write access to these directories in any case."""
3113                         portage_util.ensure_dirs(mysettings[dir_key], mode=0775)
3114                         portage_util.apply_secpass_permissions(mysettings[dir_key],
3115                                 uid=portage_uid, gid=portage_gid)
3116         except portage_exception.PermissionDenied, e:
3117                 writemsg("Permission Denied: %s\n" % str(e), noiselevel=-1)
3118                 return 1
3119         except portage_exception.OperationNotPermitted, e:
3120                 writemsg("Operation Not Permitted: %s\n" % str(e), noiselevel=-1)
3121                 return 1
3122         except portage_exception.FileNotFound, e:
3123                 writemsg("File Not Found: '%s'\n" % str(e), noiselevel=-1)
3124                 return 1
3125
3126         features_dirs = {
3127                 "ccache":{
3128                         "basedir_var":"CCACHE_DIR",
3129                         "default_dir":os.path.join(mysettings["PORTAGE_TMPDIR"], "ccache"),
3130                         "always_recurse":False},
3131                 "confcache":{
3132                         "basedir_var":"CONFCACHE_DIR",
3133                         "default_dir":os.path.join(mysettings["PORTAGE_TMPDIR"], "confcache"),
3134                         "always_recurse":True},
3135                 "distcc":{
3136                         "basedir_var":"DISTCC_DIR",
3137                         "default_dir":os.path.join(mysettings["BUILD_PREFIX"], ".distcc"),
3138                         "subdirs":("lock", "state"),
3139                         "always_recurse":True}
3140         }
3141         dirmode  = 02070
3142         filemode =   060
3143         modemask =    02
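             # Octal reference for the values above (descriptive note): 02070 is
             # the setgid bit plus rwx for the group, 060 is read/write for the
             # group, and 02 is passed as the mask argument to portage_util's
             # permission helpers below.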
3144         for myfeature, kwargs in features_dirs.iteritems():
3145                 if myfeature in mysettings.features:
3146                         basedir = mysettings[kwargs["basedir_var"]]
3147                         if basedir == "":
3148                                 basedir = kwargs["default_dir"]
3149                                 mysettings[kwargs["basedir_var"]] = basedir
3150                         try:
3151                                 mydirs = [mysettings[kwargs["basedir_var"]]]
3152                                 if "subdirs" in kwargs:
3153                                         for subdir in kwargs["subdirs"]:
3154                                                 mydirs.append(os.path.join(basedir, subdir))
3155                                 for mydir in mydirs:
3156                                         modified = portage_util.ensure_dirs(mydir,
3157                                                 gid=portage_gid, mode=dirmode, mask=modemask)
3158                                         # To avoid excessive recursive stat calls, we trigger
3159                                         # recursion when the top level directory does not initially
3160                                         # match our permission requirements.
3161                                         if modified or kwargs["always_recurse"]:
3162                                                 if modified:
3163                                                         writemsg("Adjusting permissions recursively: '%s'\n" % mydir,
3164                                                                 noiselevel=-1)
3165                                                 def onerror(e):
3166                                                         raise   # The feature is disabled if a single error
3167                                                                         # occurs during permissions adjustment.
3168                                                 if not apply_recursive_permissions(mydir,
3169                                                 gid=portage_gid, dirmode=dirmode, dirmask=modemask,
3170                                                 filemode=filemode, filemask=modemask, onerror=onerror):
3171                                                         raise portage_exception.OperationNotPermitted(
3172                                                                 "Failed to apply recursive permissions for the portage group.")
3173                         except portage_exception.PortageException, e:
3174                                 mysettings.features.remove(myfeature)
3175                                 mysettings["FEATURES"] = " ".join(mysettings.features)
3176                                 writemsg("!!! %s\n" % str(e), noiselevel=-1)
3177                                 writemsg("!!! Failed resetting perms on %s='%s'\n" % \
3178                                         (kwargs["basedir_var"], basedir), noiselevel=-1)
3179                                 writemsg("!!! Disabled FEATURES='%s'\n" % myfeature,
3180                                         noiselevel=-1)
3181                                 time.sleep(5)
3182
3183         workdir_mode = 0700
3184         try:
3185                 mode = mysettings["PORTAGE_WORKDIR_MODE"]
3186                 if mode.isdigit():
3187                         parsed_mode = int(mode, 8)
3188                 elif mode == "":
3189                         raise KeyError()
3190                 else:
3191                         raise ValueError()
3192                 if parsed_mode & 07777 != parsed_mode:
3193                         raise ValueError("Invalid file mode: %s" % mode)
3194                 else:
3195                         workdir_mode = parsed_mode
3196         except KeyError, e:
3197                 writemsg("!!! PORTAGE_WORKDIR_MODE is unset, using %s.\n" % oct(workdir_mode))
3198         except ValueError, e:
3199                 if len(str(e)) > 0:
3200                         writemsg("%s\n" % e)
3201                 writemsg("!!! Unable to parse PORTAGE_WORKDIR_MODE='%s', using %s.\n" % \
3202                 (mysettings["PORTAGE_WORKDIR_MODE"], oct(workdir_mode)))
3203         mysettings["PORTAGE_WORKDIR_MODE"] = oct(workdir_mode)
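             # Illustrative parsing results for the logic above (values assumed,
             # not taken from any particular make.conf):
             #   PORTAGE_WORKDIR_MODE="0700" -> workdir_mode 0700
             #   PORTAGE_WORKDIR_MODE="2770" -> workdir_mode 02770
             #   PORTAGE_WORKDIR_MODE="foo"  -> ValueError, falls back to 0700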
3204         try:
3205                 apply_secpass_permissions(mysettings["WORKDIR"],
3206                 uid=portage_uid, gid=portage_gid, mode=workdir_mode)
3207         except portage_exception.FileNotFound:
3208                 pass # ebuild.sh will create it
3209
3210         if mysettings.get("PORT_LOGDIR", "") == "":
3211                 while "PORT_LOGDIR" in mysettings:
3212                         del mysettings["PORT_LOGDIR"]
3213         if "PORT_LOGDIR" in mysettings:
3214                 try:
3215                         portage_util.ensure_dirs(mysettings["PORT_LOGDIR"],
3216                                 uid=portage_uid, gid=portage_gid, mode=02770)
3217                 except portage_exception.PortageException, e:
3218                         writemsg("!!! %s\n" % str(e), noiselevel=-1)
3219                         writemsg("!!! Permission issues with PORT_LOGDIR='%s'\n" % \
3220                                 mysettings["PORT_LOGDIR"], noiselevel=-1)
3221                         writemsg("!!! Disabling logging.\n", noiselevel=-1)
3222                         while "PORT_LOGDIR" in mysettings:
3223                                 del mysettings["PORT_LOGDIR"]
3224         if "PORT_LOGDIR" in mysettings:
3225                 logid_path = os.path.join(mysettings["PORTAGE_BUILDDIR"], ".logid")
3226                 if not os.path.exists(logid_path):
3227                         f = open(logid_path, "w")
3228                         f.close()
3229                         del f
3230                 logid_time = time.strftime("%Y%m%d-%H%M%S",
3231                         time.gmtime(os.stat(logid_path).st_mtime))
3232                 mysettings["PORTAGE_LOG_FILE"] = os.path.join(
3233                         mysettings["PORT_LOGDIR"], "%s:%s:%s.log" % \
3234                         (mysettings["CATEGORY"], mysettings["PF"], logid_time))
3235                 del logid_path, logid_time
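                     # The resulting log path has the form (hypothetical values):
                     #   ${PORT_LOGDIR}/sys-apps:portage-2.1.1:20060101-120000.log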
3236         else:
3237                 # When sesandbox is enabled, only log if PORT_LOGDIR is explicitly
3238                 # enabled since it is possible that local SELinux security policies
3239                         # do not allow output to be piped out of the sesandbox domain.
3240                 if not (mysettings.selinux_enabled() and \
3241                         "sesandbox" in mysettings.features):
3242                         mysettings["PORTAGE_LOG_FILE"] = os.path.join(
3243                                 mysettings["T"], "build.log")
3244
3245 _doebuild_manifest_exempt_depend = 0
3246 _doebuild_manifest_checked = None
3247
3248 def doebuild(myebuild, mydo, myroot, mysettings, debug=0, listonly=0,
3249         fetchonly=0, cleanup=0, dbkey=None, use_cache=1, fetchall=0, tree=None,
3250         mydbapi=None, vartree=None, prev_mtimes=None):
3251         
3252         """
3253         Wrapper function that invokes specific ebuild phases through the spawning
3254         of ebuild.sh
3255         
3256         @param myebuild: name of the ebuild to invoke the phase on (CPV)
3257         @type myebuild: String
3258         @param mydo: Phase to run
3259         @type mydo: String
3260         @param myroot: $ROOT (usually '/', see man make.conf)
3261         @type myroot: String
3262         @param mysettings: Portage Configuration
3263         @type mysettings: instance of portage.config
3264         @param debug: Turns on various debug information (eg, debug for spawn)
3265         @type debug: Boolean
3266         @param listonly: Used to wrap fetch(); passed such that fetch only lists files required.
3267         @type listonly: Boolean
3268         @param fetchonly: Used to wrap fetch(); passed such that files are only fetched (no other actions)
3269         @type fetchonly: Boolean
3270         @param cleanup: Passed to prepare_build_dirs; when true, the previous build's ${T} directory is removed so stale environment data cannot interfere with the current phase
3271         @type cleanup: Boolean
3272         @param dbkey: A dict (usually keys and values from the depend phase, such as KEYWORDS, USE, etc..)
3273         @type dbkey: Dict or String
3274         @param use_cache: Enables the cache
3275         @type use_cache: Boolean
3276         @param fetchall: Used to wrap fetch(), fetches all URI's (even ones invalid due to USE conditionals)
3277         @type fetchall: Boolean
3278         @param tree: Which tree to use ('vartree','porttree','bintree', etc..), defaults to 'porttree'
3279         @type tree: String
3280         @param mydbapi: a dbapi instance to pass to various functions; this should be a portdbapi instance.
3281         @type mydbapi: portdbapi instance
3282         @param vartree: A instance of vartree; used for aux_get calls, defaults to db[myroot]['vartree']
3283         @type vartree: vartree instance
3284         @param prev_mtimes: A dict of { filename: mtime } entries used by merge() for config protection
3285         @type prev_mtimes: dictionary
3286         @rtype: Integer
3287         @returns:
3288         1. 0 for success
3289         2. 1 for error
3290         
3291         Most errors have an accompanying error message.
3292         
3293         listonly and fetchonly are only really necessary for operations involving 'fetch'
3294         prev_mtimes are only necessary for merge operations.
3295         Other variables may not be strictly required, many have defaults that are set inside of doebuild.
3296         
3297         """
3298         
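             # A minimal illustrative call (package, path and ROOT are hypothetical):
             #
             #   import portage
             #   rval = portage.doebuild(
             #           "/usr/portage/app-misc/foo/foo-1.0.ebuild", "fetch", "/",
             #           portage.config(clone=portage.settings),
             #           tree="porttree", mydbapi=portage.portdb)
             #   # rval == 0 indicates success, as documented above.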
3299         if not tree:
3300                 writemsg("Warning: tree not specified to doebuild\n")
3301                 tree = "porttree"
3302         global db
3303         
3304         # chunked out deps for each phase, so that the ebuild binary can use them
3305         # to collapse targets down.
3306         actionmap_deps={
3307         "depend": [],
3308         "setup":  [],
3309         "unpack": ["setup"],
3310         "compile":["unpack"],
3311         "test":   ["compile"],
3312         "install":["test"],
3313         "rpm":    ["install"],
3314         "package":["install"],
3315         }
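             # e.g. spawning "install" with alwaysdep set walks this chain first:
             # setup -> unpack -> compile -> test -> install.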
3316         
3317         if mydbapi is None:
3318                 mydbapi = db[myroot][tree].dbapi
3319
3320         if vartree is None and mydo in ("merge", "qmerge", "unmerge"):
3321                 vartree = db[myroot]["vartree"]
3322
3323         features = mysettings.features
3324
3325         validcommands = ["help","clean","prerm","postrm","cleanrm","preinst","postinst",
3326                         "config","setup","depend","fetch","digest",
3327                         "unpack","compile","test","install","rpm","qmerge","merge",
3328                         "package","unmerge", "manifest"]
3329
3330         if mydo not in validcommands:
3331                 validcommands.sort()
3332                 writemsg("!!! doebuild: '%s' is not one of the following valid commands:" % mydo,
3333                         noiselevel=-1)
3334                 for vcount in range(len(validcommands)):
3335                         if vcount%6 == 0:
3336                                 writemsg("\n!!! ", noiselevel=-1)
3337                         writemsg(validcommands[vcount].ljust(11), noiselevel=-1)
3338                 writemsg("\n", noiselevel=-1)
3339                 return 1
3340
3341         if not os.path.exists(myebuild):
3342                 writemsg("!!! doebuild: %s not found for %s\n" % (myebuild, mydo),
3343                         noiselevel=-1)
3344                 return 1
3345
3346         global _doebuild_manifest_exempt_depend
3347
3348         if "strict" in features and \
3349                 "digest" not in features and \
3350                 tree == "porttree" and \
3351                 mydo not in ("digest", "manifest", "help") and \
3352                 not _doebuild_manifest_exempt_depend:
3353                 # Always verify the ebuild checksums before executing it.
3354                 pkgdir = os.path.dirname(myebuild)
3355                 manifest_path = os.path.join(pkgdir, "Manifest")
3356                 global _doebuild_manifest_checked
3357                 # Avoid checking the same Manifest several times in a row during a
3358                 # regen with an empty cache.
3359                 if _doebuild_manifest_checked != manifest_path:
3360                         if not os.path.exists(manifest_path):
3361                                 writemsg("!!! Manifest file not found: '%s'\n" % manifest_path,
3362                                         noiselevel=-1)
3363                                 return 1
3364                         mf = Manifest(pkgdir, mysettings["DISTDIR"])
3365                         try:
3366                                 mf.checkTypeHashes("EBUILD")
3367                         except portage_exception.FileNotFound, e:
3368                                 writemsg("!!! A file listed in the Manifest " + \
3369                                         "could not be found: %s\n" % str(e), noiselevel=-1)
3370                                 return 1
3371                         except portage_exception.DigestException, e:
3372                                 writemsg("!!! Digest verification failed:\n", noiselevel=-1)
3373                                 writemsg("!!! %s\n" % e.value[0], noiselevel=-1)
3374                                 writemsg("!!! Reason: %s\n" % e.value[1], noiselevel=-1)
3375                                 writemsg("!!! Got: %s\n" % e.value[2], noiselevel=-1)
3376                                 writemsg("!!! Expected: %s\n" % e.value[3], noiselevel=-1)
3377                                 return 1
3378                         # Make sure that all of the ebuilds are actually listed in the
3379                         # Manifest.
3380                         for f in os.listdir(pkgdir):
3381                                 if f.endswith(".ebuild") and not mf.hasFile("EBUILD", f):
3382                                         writemsg("!!! A file is not listed in the " + \
3383                                         "Manifest: '%s'\n" % os.path.join(pkgdir, f),
3384                                         noiselevel=-1)
3385                                         return 1
3386                         _doebuild_manifest_checked = manifest_path
3387
3388         logfile=None
3389         builddir_lock = None
3390         try:
3391                 if mydo in ("digest", "manifest", "help"):
3392                         # Temporarily exempt the depend phase from manifest checks, in case
3393                         # aux_get calls trigger cache generation.
3394                         _doebuild_manifest_exempt_depend += 1
3395
3396                 doebuild_environment(myebuild, mydo, myroot, mysettings, debug,
3397                         use_cache, mydbapi)
3398
3399                 # get possible slot information from the deps file
3400                 if mydo == "depend":
3401                         writemsg("!!! DEBUG: dbkey: %s\n" % str(dbkey), 2)
3402                         if isinstance(dbkey, dict):
3403                                 mysettings["dbkey"] = ""
3404                                 pr, pw = os.pipe()
3405                                 fd_pipes = {0:0, 1:1, 2:2, 9:pw}
3406                                 mypids = spawn(EBUILD_SH_BINARY + " depend", mysettings,
3407                                         fd_pipes=fd_pipes, returnpid=True)
3408                                 os.close(pw) # belongs exclusively to the child process now
3409                                 maxbytes = 1024
3410                                 mybytes = []
3411                                 while True:
3412                                         mybytes.append(os.read(pr, maxbytes))
3413                                         if not mybytes[-1]:
3414                                                 break
3415                                 os.close(pr)
3416                                 mybytes = "".join(mybytes)
3417                                 global auxdbkeys
3418                                 for k, v in izip(auxdbkeys, mybytes.splitlines()):
3419                                         dbkey[k] = v
3420                                 retval = os.waitpid(mypids[0], 0)[1]
3421                                 portage_exec.spawned_pids.remove(mypids[0])
3422                                 # If it got a signal, return the signal that was sent, but
3423                                 # shift in order to distinguish it from a return value. (just
3424                                 # like portage_exec.spawn() would do).
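                                     # (Illustrative arithmetic: a child killed by
                                     # SIGKILL yields a waitpid status of 9, returned
                                     # here as 9 << 8 == 2304, while a normal exit(1)
                                     # yields status 0x0100, returned below as 1.)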
3425                                 if retval & 0xff:
3426                                         return (retval & 0xff) << 8
3427                                 # Otherwise, return its exit code.
3428                                 return retval >> 8
3429                         elif dbkey:
3430                                 mysettings["dbkey"] = dbkey
3431                         else:
3432                                 mysettings["dbkey"] = \
3433                                         os.path.join(mysettings.depcachedir, "aux_db_key_temp")
3434
3435                         return spawn(EBUILD_SH_BINARY + " depend", mysettings)
3436
3437                 # Validate dependency metadata here to ensure that ebuilds with invalid
3438                 # data are never installed (even via the ebuild command).
3439                 invalid_dep_exempt_phases = \
3440                         set(["clean", "cleanrm", "help", "prerm", "postrm"])
3441                 mycpv = mysettings["CATEGORY"] + "/" + mysettings["PF"]
3442                 dep_keys = ["DEPEND", "RDEPEND", "PDEPEND"]
3443                 metadata = dict(izip(dep_keys, mydbapi.aux_get(mycpv, dep_keys)))
3444                 class FakeTree(object):
3445                         def __init__(self, mydb):
3446                                 self.dbapi = mydb
3447                 dep_check_trees = {myroot:{}}
3448                 dep_check_trees[myroot]["porttree"] = \
3449                         FakeTree(fakedbapi(settings=mysettings))
3450                 for dep_type in dep_keys:
3451                         mycheck = dep_check(metadata[dep_type], None, mysettings,
3452                                 myuse="all", myroot=myroot, trees=dep_check_trees)
3453                         if not mycheck[0]:
3454                                 writemsg("%s: %s\n%s\n" % (
3455                                         dep_type, metadata[dep_type], mycheck[1]), noiselevel=-1)
3456                                 if mydo not in invalid_dep_exempt_phases:
3457                                         return 1
3458                         del dep_type, mycheck
3459                 del mycpv, dep_keys, metadata, FakeTree, dep_check_trees
3460
3461                 if "PORTAGE_TMPDIR" not in mysettings or \
3462                         not os.path.isdir(mysettings["PORTAGE_TMPDIR"]):
3463                         writemsg("The directory specified in your " + \
3464                                 "PORTAGE_TMPDIR variable, '%s',\n" % \
3465                                 mysettings.get("PORTAGE_TMPDIR", ""), noiselevel=-1)
3466                         writemsg("does not exist.  Please create this directory or " + \
3467                                 "correct your PORTAGE_TMPDIR setting.\n", noiselevel=-1)
3468                         return 1
3469
3470                 # Build directory creation isn't required for any of these.
3471                 if mydo not in ("digest", "fetch", "help", "manifest"):
3472                         mystatus = prepare_build_dirs(myroot, mysettings, cleanup)
3473                         if mystatus:
3474                                 return mystatus
3475                         # PORTAGE_LOG_FILE is set above by the prepare_build_dirs() call.
3476                         logfile = mysettings.get("PORTAGE_LOG_FILE", None)
3477                 if mydo == "unmerge":
3478                         return unmerge(mysettings["CATEGORY"],
3479                                 mysettings["PF"], myroot, mysettings, vartree=vartree)
3480
3481                 # if any of these are being called, handle them -- running them out of
3482                 # the sandbox -- and stop now.
3483                 if mydo in ["clean","cleanrm"]:
3484                         return spawn(EBUILD_SH_BINARY + " clean", mysettings,
3485                                 debug=debug, free=1, logfile=None)
3486                 elif mydo == "help":
3487                         return spawn(EBUILD_SH_BINARY + " " + mydo, mysettings,
3488                                 debug=debug, free=1, logfile=logfile)
3489                 elif mydo == "setup":
3490                         infodir = os.path.join(
3491                                 mysettings["PORTAGE_BUILDDIR"], "build-info")
3492                         if os.path.isdir(infodir):
3493                                 """Load USE flags for setup phase of a binary package.
3494                                 Ideally, the environment.bz2 would be used instead."""
3495                                 mysettings.load_infodir(infodir)
3496                         retval = spawn(EBUILD_SH_BINARY + " " + mydo, mysettings,
3497                                 debug=debug, free=1, logfile=logfile)
3498                         if secpass >= 2:
3499                                 """ Privileged phases may have left files that need to be made
3500                                 writable to a less privileged user."""
3501                                 apply_recursive_permissions(mysettings["T"],
3502                                         uid=portage_uid, gid=portage_gid, dirmode=070, dirmask=0,
3503                                         filemode=060, filemask=0)
3504                         return retval
3505                 elif mydo == "preinst":
3506                         mysettings["IMAGE"] = mysettings["D"]
3507                         phase_retval = spawn(" ".join((EBUILD_SH_BINARY, mydo)),
3508                                 mysettings, debug=debug, free=1, logfile=logfile)
3509                         if phase_retval == os.EX_OK:
3510                                 # Post phase logic and tasks that have been factored out of
3511                                 # ebuild.sh.
3512                                 myargs = [MISC_SH_BINARY, "preinst_bsdflags", "preinst_mask",
3513                                         "preinst_sfperms", "preinst_selinux_labels",
3514                                         "preinst_suid_scan"]
3515                                 mysettings["EBUILD_PHASE"] = ""
3516                                 phase_retval = spawn(" ".join(myargs),
3517                                         mysettings, debug=debug, free=1, logfile=logfile)
3518                                 if phase_retval != os.EX_OK:
3519                                         writemsg("!!! post preinst failed; exiting.\n",
3520                                                 noiselevel=-1)
3521                         del mysettings["IMAGE"]
3522                         return phase_retval
3523                 elif mydo == "postinst":
3524                         mysettings.load_infodir(mysettings["O"])
3525                         phase_retval = spawn(" ".join((EBUILD_SH_BINARY, mydo)),
3526                                 mysettings, debug=debug, free=1, logfile=logfile)
3527                         if phase_retval == os.EX_OK:
3528                                 # Post phase logic and tasks that have been factored out of
3529                                 # ebuild.sh.
3530                                 myargs = [MISC_SH_BINARY, "postinst_bsdflags"]
3531                                 mysettings["EBUILD_PHASE"] = ""
3532                                 phase_retval = spawn(" ".join(myargs),
3533                                         mysettings, debug=debug, free=1, logfile=logfile)
3534                                 if phase_retval != os.EX_OK:
3535                                         writemsg("!!! post postinst failed; exiting.\n",
3536                                                 noiselevel=-1)
3537                         return phase_retval
3538                 elif mydo in ["prerm","postrm","config"]:
3539                         mysettings.load_infodir(mysettings["O"])
3540                         return spawn(EBUILD_SH_BINARY + " " + mydo,
3541                                 mysettings, debug=debug, free=1, logfile=logfile)
3542
3543                 mycpv = "/".join((mysettings["CATEGORY"], mysettings["PF"]))
3544
3545                 # Make sure we get the correct tree in case there are overlays.
3546                 mytree = os.path.realpath(
3547                         os.path.dirname(os.path.dirname(mysettings["O"])))
3548                 try:
3549                         newuris, alist = mydbapi.getfetchlist(
3550                                 mycpv, mytree=mytree, mysettings=mysettings)
3551                         alluris, aalist = mydbapi.getfetchlist(
3552                                 mycpv, mytree=mytree, all=True, mysettings=mysettings)
3553                 except portage_exception.InvalidDependString, e:
3554                         writemsg("!!! %s\n" % str(e), noiselevel=-1)
3555                         writemsg("!!! Invalid SRC_URI for '%s'.\n" % mycpv, noiselevel=-1)
3556                         del e
3557                         return 1
3558                 mysettings["A"] = " ".join(alist)
3559                 mysettings["AA"] = " ".join(aalist)
3560                 if ("mirror" in features) or fetchall:
3561                         fetchme = alluris[:]
3562                         checkme = aalist[:]
3563                 elif mydo == "digest":
3564                         fetchme = alluris[:]
3565                         checkme = aalist[:]
3566                         # Skip files that we already have digests for.
3567                         mf = Manifest(mysettings["O"], mysettings["DISTDIR"])
3568                         mydigests = mf.getTypeDigests("DIST")
3569                         required_hash_types = set()
3570                         required_hash_types.add("size")
3571                         required_hash_types.add(portage_const.MANIFEST2_REQUIRED_HASH)
3572                         for filename, hashes in mydigests.iteritems():
3573                                 if not required_hash_types.difference(hashes):
3574                                         checkme = [i for i in checkme if i != filename]
3575                                         fetchme = [i for i in fetchme \
3576                                                 if os.path.basename(i) != filename]
3577                                 del filename, hashes
3578                 else:
3579                         fetchme = newuris[:]
3580                         checkme = alist[:]
3581
3582                 # Only try and fetch the files if we are going to need them ...
3583                 # otherwise, if user has FEATURES=noauto and they run `ebuild clean
3584                 # unpack compile install`, we will try and fetch 4 times :/
3585                 need_distfiles = (mydo in ("fetch", "unpack") or \
3586                         mydo not in ("digest", "manifest") and "noauto" not in features)
3587                 if need_distfiles and not fetch(
3588                         fetchme, mysettings, listonly=listonly, fetchonly=fetchonly):
3589                         return 1
3590
3591                 if mydo == "fetch" and listonly:
3592                         return 0
3593
3594                 try:
3595                         if mydo == "manifest":
3596                                 return not digestgen(aalist, mysettings, overwrite=1,
3597                                         manifestonly=1, myportdb=mydbapi)
3598                         elif mydo == "digest":
3599                                 return not digestgen(aalist, mysettings, overwrite=1,
3600                                         myportdb=mydbapi)
3601                         elif "digest" in mysettings.features:
3602                                 digestgen(aalist, mysettings, overwrite=0, myportdb=mydbapi)
3603                 except portage_exception.PermissionDenied, e:
3604                         writemsg("!!! %s\n" % str(e), noiselevel=-1)
3605                         if mydo in ("digest", "manifest"):
3606                                 return 1
3607
3608                 # See above comment about fetching only when needed
3609                 if not digestcheck(checkme, mysettings, ("strict" in features),
3610                         (mydo not in ["digest","fetch","unpack"] and \
3611                         mysettings.get("PORTAGE_CALLER", None) == "ebuild" and \
3612                         "noauto" in features)):
3613                         return 1
3614
3615                 if mydo == "fetch":
3616                         return 0
3617
3618                 # remove PORTAGE_ACTUAL_DISTDIR once cvs/svn is supported via SRC_URI
3619                 if (mydo != "setup" and "noauto" not in features) or mydo == "unpack":
3620                         orig_distdir = mysettings["DISTDIR"]
3621                         mysettings["PORTAGE_ACTUAL_DISTDIR"] = orig_distdir
3622                         edpath = mysettings["DISTDIR"] = \
3623                                 os.path.join(mysettings["PORTAGE_BUILDDIR"], "distdir")
3624                         if os.path.exists(edpath):
3625                                 try:
3626                                         if os.path.isdir(edpath) and not os.path.islink(edpath):
3627                                                 shutil.rmtree(edpath)
3628                                         else:
3629                                                 os.unlink(edpath)
3630                                 except OSError:
3631                                         print "!!! Failed resetting ebuild distdir path, " + edpath
3632                                         raise
3633                         os.mkdir(edpath)
3634                         apply_secpass_permissions(edpath, uid=portage_uid, mode=0755)
3635                         try:
3636                                 for file in aalist:
3637                                         os.symlink(os.path.join(orig_distdir, file),
3638                                                 os.path.join(edpath, file))
3639                         except OSError:
3640                                 print "!!! Failed symlinking in '%s' to ebuild distdir" % file
3641                                 raise
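                             # From this point on the DISTDIR seen by ebuild.sh is the
                             # private ${PORTAGE_BUILDDIR}/distdir populated above with
                             # symlinks back to the real distfiles in PORTAGE_ACTUAL_DISTDIR.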
3642
3643                 #initial dep checks complete; time to process main commands
3644
3645                 nosandbox = (("userpriv" in features) and \
3646                         ("usersandbox" not in features) and \
3647                         ("userpriv" not in mysettings["RESTRICT"]) and \
3648                         ("nouserpriv" not in mysettings["RESTRICT"]))
3649                 if nosandbox and ("userpriv" not in features or \
3650                         "userpriv" in mysettings["RESTRICT"] or \
3651                         "nouserpriv" in mysettings["RESTRICT"]):
3652                         nosandbox = ("sandbox" not in features and \
3653                                 "usersandbox" not in features)
3654
3655                 sesandbox = mysettings.selinux_enabled() and \
3656                         "sesandbox" in mysettings.features
3657                 ebuild_sh = EBUILD_SH_BINARY + " %s"
3658                 misc_sh = MISC_SH_BINARY + " dyn_%s"
3659
3660                 # args are passed to the spawn function
3661                 actionmap = {
3662 "depend": {"cmd":ebuild_sh, "args":{"droppriv":1, "free":0,         "sesandbox":0}},
3663 "setup":  {"cmd":ebuild_sh, "args":{"droppriv":0, "free":1,         "sesandbox":0}},
3664 "unpack": {"cmd":ebuild_sh, "args":{"droppriv":1, "free":0,         "sesandbox":sesandbox}},
3665 "compile":{"cmd":ebuild_sh, "args":{"droppriv":1, "free":nosandbox, "sesandbox":sesandbox}},
3666 "test":   {"cmd":ebuild_sh, "args":{"droppriv":1, "free":nosandbox, "sesandbox":sesandbox}},
3667 "install":{"cmd":ebuild_sh, "args":{"droppriv":0, "free":0,         "sesandbox":sesandbox}},
3668 "rpm":    {"cmd":misc_sh,   "args":{"droppriv":0, "free":0,         "sesandbox":0}},
3669 "package":{"cmd":misc_sh,   "args":{"droppriv":0, "free":0,         "sesandbox":0}},
3670                 }
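                     # Illustrative expansion of one entry above: for mydo == "unpack"
                     # with sesandbox disabled, spawnebuild() ends up doing roughly
                     #   spawn(EBUILD_SH_BINARY + " unpack", mysettings, debug=debug,
                     #         logfile=logfile, droppriv=1, free=0, sesandbox=0)
                     # (a sketch of the resulting call, not a verbatim quote).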
3671
3672                 # merge the deps in so we have again a 'full' actionmap
3673                 # be glad when this can die.
3674                 for x in actionmap.keys():
3675                         if len(actionmap_deps.get(x, [])):
3676                                 actionmap[x]["dep"] = ' '.join(actionmap_deps[x])
3677
3678                 if mydo in actionmap.keys():
3679                         if mydo=="package":
3680                                 portage_util.ensure_dirs(
3681                                         os.path.join(mysettings["PKGDIR"], mysettings["CATEGORY"]))
3682                                 portage_util.ensure_dirs(
3683                                         os.path.join(mysettings["PKGDIR"], "All"))
3684                         retval = spawnebuild(mydo,
3685                                 actionmap, mysettings, debug, logfile=logfile)
3686                 elif mydo=="qmerge":
3687                         # check to ensure install was run.  this *only* pops up when users
3688                         # forget it and are using ebuild
3689                         if not os.path.exists(
3690                                 os.path.join(mysettings["PORTAGE_BUILDDIR"], ".installed")):
3691                                 writemsg("!!! mydo=qmerge, but install phase hasn't been run\n",
3692                                         noiselevel=-1)
3693                                 return 1
3694                         # qmerge is a special phase that implies noclean.
3695                         if "noclean" not in mysettings.features:
3696                                 mysettings.features.append("noclean")
3697                         #qmerge is specifically not supposed to do a runtime dep check
3698                         retval = merge(
3699                                 mysettings["CATEGORY"], mysettings["PF"], mysettings["D"],
3700                                 os.path.join(mysettings["PORTAGE_BUILDDIR"], "build-info"),
3701                                 myroot, mysettings, myebuild=mysettings["EBUILD"], mytree=tree,
3702                                 mydbapi=mydbapi, vartree=vartree, prev_mtimes=prev_mtimes)
3703                 elif mydo=="merge":
3704                         retval = spawnebuild("install", actionmap, mysettings, debug,
3705                                 alwaysdep=1, logfile=logfile)
3706                         if retval == os.EX_OK:
3707                                 retval = merge(mysettings["CATEGORY"], mysettings["PF"],
3708                                         mysettings["D"], os.path.join(mysettings["PORTAGE_BUILDDIR"],
3709                                         "build-info"), myroot, mysettings,
3710                                         myebuild=mysettings["EBUILD"], mytree=tree, mydbapi=mydbapi,
3711                                         vartree=vartree, prev_mtimes=prev_mtimes)
3712                 else:
3713                         print "!!! Unknown mydo:",mydo
3714                         return 1
3715
3716                 if retval != os.EX_OK and tree == "porttree":
3717                         for i in xrange(len(mydbapi.porttrees)-1):
3718                                 t = mydbapi.porttrees[i+1]
3719                                 if myebuild.startswith(t):
3720                                         # Display the non-canonical path, in case it's different, to
3721                                         # prevent confusion.
3722                                         overlays = mysettings["PORTDIR_OVERLAY"].split()
3723                                         try:
3724                                                 writemsg("!!! This ebuild is from an overlay: '%s'\n" % \
3725                                                         overlays[i], noiselevel=-1)
3726                                         except IndexError:
3727                                                 pass
3728                                         break
3729                 return retval
3730
3731         finally:
3732                 if builddir_lock:
3733                         portage_locks.unlockdir(builddir_lock)
3734
3735                 # Make sure that DISTDIR is restored to its normal value before we return!
3736                 if "PORTAGE_ACTUAL_DISTDIR" in mysettings:
3737                         mysettings["DISTDIR"] = mysettings["PORTAGE_ACTUAL_DISTDIR"]
3738                         del mysettings["PORTAGE_ACTUAL_DISTDIR"]
3739
3740                 if logfile:
3741                         try:
3742                                 if os.stat(logfile).st_size == 0:
3743                                         os.unlink(logfile)
3744                         except OSError:
3745                                 pass
3746
3747                 if mydo in ("digest", "manifest", "help"):
3748                         # If necessary, depend phase has been triggered by aux_get calls
3749                         # and the exemption is no longer needed.
3750                         _doebuild_manifest_exempt_depend -= 1
3751
3752 expandcache={}
3753
3754 def movefile(src,dest,newmtime=None,sstat=None,mysettings=None):
3755         """moves a file from src to dest, preserving all permissions and attributes; mtime will
3756         be preserved even when moving across filesystems.  Returns true on success and false on
3757         failure.  Move is atomic."""
3758         #print "movefile("+str(src)+","+str(dest)+","+str(newmtime)+","+str(sstat)+")"
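             # Minimal illustrative usage (paths hypothetical):
             #   mtime = movefile("/var/tmp/portage/foo-1.0/image/usr/bin/foo",
             #           "/usr/bin/foo", mysettings=settings)
             #   # mtime is the merged file's mtime on success, or None on failure.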
3759         global lchown
3760         if mysettings is None:
3761                 global settings
3762                 mysettings = settings
3763         selinux_enabled = mysettings.selinux_enabled()
3764         try:
3765                 if not sstat:
3766                         sstat=os.lstat(src)
3767
3768         except SystemExit, e:
3769                 raise
3770         except Exception, e:
3771                 print "!!! Stating source file failed... movefile()"
3772                 print "!!!",e
3773                 return None
3774
3775         destexists=1
3776         try:
3777                 dstat=os.lstat(dest)
3778         except (OSError, IOError):
3779                 dstat=os.lstat(os.path.dirname(dest))
3780                 destexists=0
3781
3782         if bsd_chflags:
3783                 # Check that we can actually unset schg etc flags...
3784                 # Clear the flags on source and destination; we'll reinstate them after merging
3785                 if destexists and dstat.st_flags != 0:
3786                         if bsd_chflags.lchflags(dest, 0) < 0:
3787                                 writemsg("!!! Couldn't clear flags on file being merged: \n ",
3788                                         noiselevel=-1)
3789                 # We might have an immutable flag on the parent dir; save and clear.
3790                 pflags=bsd_chflags.lgetflags(os.path.dirname(dest))
3791                 if pflags != 0:
3792                         bsd_chflags.lchflags(os.path.dirname(dest), 0)
3793
3794                 if (destexists and bsd_chflags.lhasproblems(dest) > 0) or \
3795                         bsd_chflags.lhasproblems(os.path.dirname(dest)) > 0:
3796                         # This is bad: we can't merge the file with these flags set.
3797                         writemsg("!!! Can't merge file "+dest+" because of flags set\n",
3798                                 noiselevel=-1)
3799                         return None
3800
3801         if destexists:
3802                 if stat.S_ISLNK(dstat[stat.ST_MODE]):
3803                         try:
3804                                 os.unlink(dest)
3805                                 destexists=0
3806                         except SystemExit, e:
3807                                 raise
3808                         except Exception, e:
3809                                 pass
3810
3811         if stat.S_ISLNK(sstat[stat.ST_MODE]):
3812                 try:
3813                         target=os.readlink(src)
3814                         if mysettings and mysettings["D"]:
3815                                 if target.find(mysettings["D"])==0:
3816                                         target=target[len(mysettings["D"]):]
3817                         if destexists and not stat.S_ISDIR(dstat[stat.ST_MODE]):
3818                                 os.unlink(dest)
3819                         if selinux_enabled:
3820                                 sid = selinux.get_lsid(src)
3821                                 selinux.secure_symlink(target,dest,sid)
3822                         else:
3823                                 os.symlink(target,dest)
3824                         lchown(dest,sstat[stat.ST_UID],sstat[stat.ST_GID])
3825                         return os.lstat(dest)[stat.ST_MTIME]
3826                 except SystemExit, e:
3827                         raise
3828                 except Exception, e:
3829                         print "!!! failed to properly create symlink:"
3830                         print "!!!",dest,"->",target
3831                         print "!!!",e
3832                         return None
3833
3834         renamefailed=1
3835         if sstat[stat.ST_DEV]==dstat[stat.ST_DEV] or selinux_enabled:
3836                 try:
3837                         if selinux_enabled:
3838                                 ret=selinux.secure_rename(src,dest)
3839                         else:
3840                                 ret=os.rename(src,dest)
3841                         renamefailed=0
3842                 except SystemExit, e:
3843                         raise
3844                 except Exception, e:
3845                         if e[0]!=errno.EXDEV:
3846                                 # Some random error.
3847                                 print "!!! Failed to move",src,"to",dest
3848                                 print "!!!",e
3849                                 return None
3850                         # EXDEV: the rename crossed devices (either a 'bind' mount or a real cross-device move), so fall back to the copy logic below.
3851         if renamefailed:
3852                 didcopy=0
3853                 if stat.S_ISREG(sstat[stat.ST_MODE]):
3854                         try: # For safety copy then move it over.
3855                                 if selinux_enabled:
3856                                         selinux.secure_copy(src,dest+"#new")
3857                                         selinux.secure_rename(dest+"#new",dest)
3858                                 else:
3859                                         shutil.copyfile(src,dest+"#new")
3860                                         os.rename(dest+"#new",dest)
3861                                 didcopy=1
3862                         except SystemExit, e:
3863                                 raise
3864                         except Exception, e:
3865                                 print '!!! copy',src,'->',dest,'failed.'
3866                                 print "!!!",e
3867                                 return None
3868                 else:
3869                         #we don't yet handle special, so we need to fall back to /bin/mv
3870                         if selinux_enabled:
3871                                 a=commands.getstatusoutput(MOVE_BINARY+" -c -f "+"'"+src+"' '"+dest+"'")
3872                         else:
3873                                 a=commands.getstatusoutput(MOVE_BINARY+" -f "+"'"+src+"' '"+dest+"'")
3874                         if a[0]!=0:
3875                                 print "!!! Failed to move special file:"
3876                                 print "!!! '"+src+"' to '"+dest+"'"
3877                                 print "!!!",a
3878                                 return None # failure
3879                 try:
3880                         if didcopy:
3881                                 if stat.S_ISLNK(sstat[stat.ST_MODE]):
3882                                         lchown(dest,sstat[stat.ST_UID],sstat[stat.ST_GID])
3883                                 else:
3884                                         os.chown(dest,sstat[stat.ST_UID],sstat[stat.ST_GID])
3885                                 os.chmod(dest, stat.S_IMODE(sstat[stat.ST_MODE])) # Sticky is reset on chown
3886                                 os.unlink(src)
3887                 except SystemExit, e:
3888                         raise
3889                 except Exception, e:
3890                         print "!!! Failed to chown/chmod/unlink in movefile()"
3891                         print "!!!",dest
3892                         print "!!!",e
3893                         return None
3894
3895         if newmtime:
3896                 os.utime(dest,(newmtime,newmtime))
3897         else:
3898                 os.utime(dest, (sstat[stat.ST_ATIME], sstat[stat.ST_MTIME]))
3899                 newmtime=sstat[stat.ST_MTIME]
3900
3901         if bsd_chflags:
3902                 # Restore the flags we saved before moving
3903                 if pflags and bsd_chflags.lchflags(os.path.dirname(dest), pflags) < 0:
3904                         writemsg("!!! Couldn't restore flags (%s) on '%s'\n" % \
3905                                 (str(pflags), os.path.dirname(dest)), noiselevel=-1)
3906                         return None
3907
3908         return newmtime
3909
3910 def merge(mycat, mypkg, pkgloc, infloc, myroot, mysettings, myebuild=None,
3911         mytree=None, mydbapi=None, vartree=None, prev_mtimes=None):
3912         if not os.access(myroot, os.W_OK):
3913                 writemsg("Permission denied: access('%s', W_OK)\n" % myroot,
3914                         noiselevel=-1)
3915                 return errno.EACCES
3916         mylink = dblink(mycat, mypkg, myroot, mysettings, treetype=mytree,
3917                 vartree=vartree)
3918         return mylink.merge(pkgloc, infloc, myroot, myebuild,
3919                 mydbapi=mydbapi, prev_mtimes=prev_mtimes)
3920
3921 def unmerge(cat, pkg, myroot, mysettings, mytrimworld=1, vartree=None, ldpath_mtimes=None):
3922         mylink = dblink(
3923                 cat, pkg, myroot, mysettings, treetype="vartree", vartree=vartree)
3924         try:
3925                 mylink.lockdb()
3926                 if mylink.exists():
3927                         retval = mylink.unmerge(trimworld=mytrimworld, cleanup=1,
3928                                 ldpath_mtimes=ldpath_mtimes)
3929                         if retval == os.EX_OK:
3930                                 mylink.delete()
3931                         return retval
3932                 return os.EX_OK
3933         finally:
3934                 mylink.unlockdb()
3935
3936 def getCPFromCPV(mycpv):
3937         """Calls pkgsplit on a cpv and returns only the cp."""
3938         return pkgsplit(mycpv)[0]
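     # e.g. getCPFromCPV("sys-apps/portage-2.1.1-r1") returns "sys-apps/portage".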
3939
3940 def dep_virtual(mysplit, mysettings):
3941         "Does virtual dependency conversion"
3942         newsplit=[]
3943         myvirtuals = mysettings.getvirtuals()
3944         for x in mysplit:
3945                 if type(x)==types.ListType:
3946                         newsplit.append(dep_virtual(x, mysettings))
3947                 else:
3948                         mykey=dep_getkey(x)
3949                         mychoices = myvirtuals.get(mykey, None)
3950                         if mychoices:
3951                                 if len(mychoices) == 1:
3952                                         a = x.replace(mykey, mychoices[0])
3953                                 else:
3954                                         if x[0]=="!":
3955                                                 # blocker needs "and" not "or(||)".
3956                                                 a=[]
3957                                         else:
3958                                                 a=['||']
3959                                         for y in mychoices:
3960                                                 a.append(x.replace(mykey, y))
3961                                 newsplit.append(a)
3962                         else:
3963                                 newsplit.append(x)
3964         return newsplit
3965
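# Illustrative sketch (not part of the original source): how dep_virtual()
# rewrites old-style virtual atoms.  The "virtual/editor" key and the provider
# names below are hypothetical; the real choices come from
# mysettings.getvirtuals().
def _dep_virtual_example(mysettings):
        # With providers ["app-editors/nano", "app-editors/vim"] registered for
        # virtual/editor, a plain atom becomes an "or" group of providers:
        #     [">=virtual/editor-0"] -> [['||', '>=app-editors/nano-0',
        #                                 '>=app-editors/vim-0']]
        # while a blocker expands to a plain list ("and"), since blocking only
        # one provider would not be enough.
        return dep_virtual([">=virtual/editor-0"], mysettings)
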
3966 def _expand_new_virtuals(mysplit, edebug, mydbapi, mysettings, myroot="/",
3967         trees=None, **kwargs):
3968         """Recursively expand new-style virtuals so as to collapse one or more
3969         levels of indirection.  In dep_zapdeps, new-style virtuals will be assigned
3970         zero cost regardless of whether or not they are currently installed. Virtual
3971         blockers are supported but only when the virtual expands to a single
3972         atom because it wouldn't necessarily make sense to block all the components
3973         of a compound virtual.  When more than one new-style virtual is matched,
3974         the matches are sorted from highest to lowest versions and the atom is
3975         expanded to || ( highest match ... lowest match )."""
3976         newsplit = []
3977         # According to GLEP 37, RDEPEND is the only dependency type that is valid
3978         # for new-style virtuals.  Repoman should enforce this.
3979         dep_keys = ["RDEPEND", "DEPEND", "PDEPEND"]
3980         def compare_pkgs(a, b):
3981                 return pkgcmp(b[1], a[1])
3982         portdb = trees[myroot]["porttree"].dbapi
3983         if kwargs["use_binaries"]:
3984                 portdb = trees[myroot]["bintree"].dbapi
3985         myvirtuals = mysettings.getvirtuals()
3986         for x in mysplit:
3987                 if x == "||":
3988                         newsplit.append(x)
3989                         continue
3990                 elif isinstance(x, list):
3991                         newsplit.append(_expand_new_virtuals(x, edebug, mydbapi,
3992                                 mysettings, myroot=myroot, trees=trees, **kwargs))
3993                         continue
3994                 if portage_dep._dep_check_strict and \
3995                         not isvalidatom(x, allow_blockers=True):
3996                         raise portage_exception.ParseError(
3997                                 "invalid atom: '%s'" % x)
3998                 mykey = dep_getkey(x)
3999                 if not mykey.startswith("virtual/"):
4000                         newsplit.append(x)
4001                         continue
4002                 mychoices = myvirtuals.get(mykey, [])
4003                 isblocker = x.startswith("!")
4004                 match_atom = x
4005                 if isblocker:
4006                         match_atom = x[1:]
4007                 pkgs = {}
4008                 for cpv in portdb.match(match_atom):
4009                         # only use new-style matches
4010                         if cpv.startswith("virtual/"):
4011                                 pkgs[cpv] = (cpv, pkgsplit(cpv), portdb)
4012                 if kwargs["use_binaries"] and "vartree" in trees[myroot]:
4013                         vardb = trees[myroot]["vartree"].dbapi
4014                         for cpv in vardb.match(match_atom):
4015                                 # only use new-style matches
4016                                 if cpv.startswith("virtual/"):
4017                                         if cpv in pkgs:
4018                                                 continue
4019                                         pkgs[cpv] = (cpv, pkgsplit(cpv), vardb)
4020                 if not (pkgs or mychoices):
4021                         # This one couldn't be expanded as a new-style virtual.  Old-style
4022                         # virtuals have already been expanded by dep_virtual, so this one
4023                         # is unavailable and dep_zapdeps will identify it as such.  The
4024                         # atom is not eliminated here since it may still represent a
4025                         # dependency that needs to be satisfied.
4026                         newsplit.append(x)
4027                         continue
4028                 if not pkgs and len(mychoices) == 1:
4029                         newsplit.append(x.replace(mykey, mychoices[0]))
4030                         continue
4031                 pkgs = pkgs.values()
4032                 pkgs.sort(compare_pkgs) # Prefer higher versions.
4033                 if isblocker:
4034                         a = []
4035                 else:
4036                         a = ['||']
4037                 for y in pkgs:
4038                         depstring = " ".join(y[2].aux_get(y[0], dep_keys))
4039                         if edebug:
4040                                 print "Virtual Parent:   ", y[0]
4041                                 print "Virtual Depstring:", depstring
4042                         mycheck = dep_check(depstring, mydbapi, mysettings, myroot=myroot,
4043                                 trees=trees, **kwargs)
4044                         if not mycheck[0]:
4045                                 raise portage_exception.ParseError(
4046                                         "%s: %s '%s'" % (y[0], mycheck[1], depstring))
4047                         if isblocker:
4048                                 virtual_atoms = [atom for atom in mycheck[1] \
4049                                         if not atom.startswith("!")]
4050                                 if len(virtual_atoms) == 1:
4051                                         # It wouldn't make sense to block all the components of a
4052                                         # compound virtual, so only a single atom block is allowed.
4053                                         a.append("!" + virtual_atoms[0])
4054                         else:
4055                                 mycheck[1].append("="+y[0]) # pull in the new-style virtual
4056                                 a.append(mycheck[1])
4057                 # Plain old-style virtuals.  New-style virtuals are preferred.
4058                 for y in mychoices:
4059                         a.append(x.replace(mykey, y))
4060                 if isblocker and not a:
4061                         # Probably a compound virtual.  Pass the atom through unprocessed.
4062                         newsplit.append(x)
4063                         continue
4064                 newsplit.append(a)
4065         return newsplit
4066
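# Illustrative sketch (not part of the original source): a new-style virtual
# atom such as "virtual/libc" is replaced by an "or" group with one entry per
# matching virtual package, highest version first.  Each entry contains the
# atoms selected from that virtual's dependency string plus an "=<cpv>" atom
# that pulls the virtual itself in, roughly (names and versions hypothetical):
#     "virtual/libc" -> ['||', ['=virtual/libc-2.2'],
#                              ['sys-libs/uclibc', '=virtual/libc-2.1']]
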
4067 def dep_eval(deplist):
4068         if not deplist:
4069                 return 1
4070         if deplist[0]=="||":
4071                 #or list; we just need one "1"
4072                 for x in deplist[1:]:
4073                         if type(x)==types.ListType:
4074                                 if dep_eval(x)==1:
4075                                         return 1
4076                         elif x==1:
4077                                 return 1
4078                 #XXX: unless there are no available atoms in the list,
4079                 #in which case we need to assume that everything is
4080                 #okay, as some ebuilds rely on an old bug.
4081                 if len(deplist) == 1:
4082                         return 1
4083                 return 0
4084         else:
4085                 for x in deplist:
4086                         if type(x)==types.ListType:
4087                                 if dep_eval(x)==0:
4088                                         return 0
4089                         elif x==0 or x==2:
4090                                 return 0
4091                 return 1
4092
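# Illustrative sketch (not part of the original source): dep_eval() treats a
# reduced deplist (the ones and zeros produced by dep_wordreduce) as a boolean
# expression, so its behavior can be shown with literal lists.
def _dep_eval_examples():
        assert dep_eval(["||", 0, 1]) == 1   # an "or" list needs one satisfied member
        assert dep_eval(["||", 0, 0]) == 0   # ...and fails when none is satisfied
        assert dep_eval(["||"]) == 1         # empty "or" passes (the old bug noted above)
        assert dep_eval([1, 0]) == 0         # a plain list requires every member
        assert dep_eval([1, ["||", 0, 1]]) == 1  # nested lists are evaluated recursively
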
4093 def dep_zapdeps(unreduced, reduced, myroot, use_binaries=0, trees=None):
4094         """Takes an unreduced and reduced deplist and removes satisfied dependencies.
4095         Returned deplist contains steps that must be taken to satisfy dependencies."""
4096         if trees is None:
4097                 global db
4098                 trees = db
4099         writemsg("ZapDeps -- %s\n" % (use_binaries), 2)
4100         if not reduced or unreduced == ["||"] or dep_eval(reduced):
4101                 return []
4102
4103         if unreduced[0] != "||":
4104                 unresolved = []
4105                 for dep, satisfied in izip(unreduced, reduced):
4106                         if isinstance(dep, list):
4107                                 unresolved += dep_zapdeps(dep, satisfied, myroot,
4108                                         use_binaries=use_binaries, trees=trees)
4109                         elif not satisfied:
4110                                 unresolved.append(dep)
4111                 return unresolved
4112
4113         # We're at a ( || atom ... ) type level and need to make a choice
4114         deps = unreduced[1:]
4115         satisfieds = reduced[1:]
4116
4117         # Our preference order is for the first item that:
4118         # a) contains all unmasked packages with the same key as installed packages
4119         # b) contains all unmasked packages
4120         # c) contains masked installed packages
4121         # d) is the first item
4122
4123         preferred = []
4124         possible_upgrades = []
4125         other = []
4126
4127         # Alias the trees we'll be checking availability against
4128         vardb = None
4129         if "vartree" in trees[myroot]:
4130                 vardb = trees[myroot]["vartree"].dbapi
4131         if use_binaries:
4132                 mydbapi = trees[myroot]["bintree"].dbapi
4133         else:
4134                 mydbapi = trees[myroot]["porttree"].dbapi
4135
4136         # Sort the deps into preferred (installed) and other
4137         # with values of [[required_atom], availability]
4138         for dep, satisfied in izip(deps, satisfieds):
4139                 if isinstance(dep, list):
4140                         atoms = dep_zapdeps(dep, satisfied, myroot,
4141                                 use_binaries=use_binaries, trees=trees)
4142                 else:
4143                         atoms = [dep]
4144
4145                 all_available = True
4146                 for atom in atoms:
4147                         if not mydbapi.match(atom):
4148                                 # With --usepkgonly, count installed packages as "available".
4149                                 # Note that --usepkgonly currently has no package.mask support.
4150                                 # See bug #149816.
4151                                 if use_binaries and vardb and vardb.match(atom):
4152                                         continue
4153                                 all_available = False
4154                                 break
4155
4156                 if not vardb:
4157                         # called by repoman
4158                         preferred.append((atoms, None, all_available))
4159                         continue
4160
4161                 """ The package names rather than the exact atoms are used for an
4162                 initial rough match against installed packages.  More specific
4163                 preference selection is handled later via slot and version comparison."""
4164                 all_installed = True
4165                 for atom in set([dep_getkey(atom) for atom in atoms]):
4166                         # New-style virtuals have zero cost to install.
4167                         if not vardb.match(atom) and not atom.startswith("virtual/"):
4168                                 all_installed = False
4169                                 break
4170
4171                 # Check if the set of atoms will result in a downgrade of
4172                 # an installed package. If they will then don't prefer them
4173                 # over other atoms.
4174                 has_downgrade = False
4175                 versions = {}
4176                 if all_installed or all_available:
4177                         for atom in atoms:
4178                                 mykey = dep_getkey(atom)
4179                                 avail_pkg = best(mydbapi.match(atom))
4180                                 if not avail_pkg:
4181                                         continue
4182                                 avail_slot = "%s:%s" % (mykey,
4183                                         mydbapi.aux_get(avail_pkg, ["SLOT"])[0])
4184                                 versions[avail_slot] = avail_pkg
4185                                 inst_pkg = vardb.match(avail_slot)
4186                                 if not inst_pkg:
4187                                         continue
4188                                 # emerge guarantees 1 package per slot here (highest counter)
4189                                 inst_pkg = inst_pkg[0]
4190                                 if avail_pkg != inst_pkg and \
4191                                         avail_pkg != best([avail_pkg, inst_pkg]):
4192                                         has_downgrade = True
4193                                         break
4194
4195                 this_choice = (atoms, versions, all_available)
4196                 if not has_downgrade:
4197                         if all_installed:
4198                                 preferred.append(this_choice)
4199                                 continue
4200                         elif all_available:
4201                                 possible_upgrades.append(this_choice)
4202                                 continue
4203                 other.append(this_choice)
4204
4205         # Compare the "all_installed" choices against the "all_available" choices
4206         # for possible missed upgrades.  The main purpose of this code is to find
4207         # upgrades of new-style virtuals since _expand_new_virtuals() expands them
4208         # into || ( highest version ... lowest version ).  We want to prefer the
4209         # highest all_available version of the new-style virtual when there is a
4210         # lower all_installed version.
4211         for possible_upgrade in list(possible_upgrades):
4212                 atoms, versions, all_available = possible_upgrade
4213                 myslots = set(versions)
4214                 for other_choice in preferred:
4215                         o_atoms, o_versions, o_all_available = other_choice
4216                         intersecting_slots = myslots.intersection(o_versions)
4217                         if not intersecting_slots:
4218                                 continue
4219                         has_upgrade = False
4220                         has_downgrade = False
4221                         for myslot in intersecting_slots:
4222                                 myversion = versions[myslot]
4223                                 o_version = o_versions[myslot]
4224                                 if myversion != o_version:
4225                                         if myversion == best([myversion, o_version]):
4226                                                 has_upgrade = True
4227                                         else:
4228                                                 has_downgrade = True
4229                                                 break
4230                         if has_upgrade and not has_downgrade:
4231                                 o_index = preferred.index(other_choice)
4232                                 preferred.insert(o_index, possible_upgrade)
4233                                 possible_upgrades.remove(possible_upgrade)
4234                                 break
4235         preferred.extend(possible_upgrades)
4236
4237         # preferred now contains a) and c) from the order above with
4238         # the masked flag differentiating the two. other contains b)
4239         # and d) so adding other to preferred will give us a suitable
4240         # list to iterate over.
4241         preferred.extend(other)
4242
4243         for allow_masked in (False, True):
4244                 for atoms, versions, all_available in preferred:
4245                         if all_available or allow_masked:
4246                                 return atoms
4247
4248         assert(False) # This point should not be reachable
4249
4250
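# Illustrative sketch (not part of the original source): when choosing among
# "or" alternatives, dep_zapdeps() follows the preference order documented
# above.  With hypothetical atoms
#     unreduced = ["||", "dev-libs/installed-lib", "dev-libs/other-lib"]
#     reduced   = ["||", False, False]
# it returns ["dev-libs/installed-lib"] when that package is installed and
# still available, and only falls back to the other alternative (or to a
# masked, installed choice) when the earlier preferences cannot be met.
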
4251 def dep_expand(mydep, mydb=None, use_cache=1, settings=None):
4252         if not len(mydep):
4253                 return mydep
4254         if mydep[0]=="*":
4255                 mydep=mydep[1:]
4256         orig_dep = mydep
4257         mydep = dep_getcpv(orig_dep)
4258         myindex = orig_dep.index(mydep)
4259         prefix = orig_dep[:myindex]
4260         postfix = orig_dep[myindex+len(mydep):]
4261         return prefix + cpv_expand(
4262                 mydep, mydb=mydb, use_cache=use_cache, settings=settings) + postfix
4263
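# Illustrative sketch (not part of the original source): dep_expand() keeps
# the operator prefix and any suffix intact while cpv_expand() fills in the
# missing category, so ">=python-2.4" typically becomes
# ">=dev-lang/python-2.4" (the category depends on the given dbapi).
def _dep_expand_example(mydb):
        return dep_expand(">=python-2.4", mydb=mydb)
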
4264 def dep_check(depstring, mydbapi, mysettings, use="yes", mode=None, myuse=None,
4265         use_cache=1, use_binaries=0, myroot="/", trees=None):
4266         """Takes a depend string and parses the condition."""
4267         edebug = mysettings.get("PORTAGE_DEBUG", None) == "1"
4268         #check_config_instance(mysettings)
4269         if trees is None:
4270                 trees = globals()["db"]
4271         if use=="yes":
4272                 if myuse is None:
4273                         #default behavior
4274                         myusesplit = mysettings["USE"].split()
4275                 else:
4276                         myusesplit = myuse
4277                         # We've been given useflags to use.
4278                         #print "USE FLAGS PASSED IN."
4279                         #print myuse
4280                         #if "bindist" in myusesplit:
4281                         #       print "BINDIST is set!"
4282                         #else:
4283                         #       print "BINDIST NOT set."
4284         else:
4285                 #we are being run by autouse(), don't consult USE vars yet.
4286                 # WE ALSO CANNOT USE SETTINGS
4287                 myusesplit=[]
4288
4289         #convert parenthesis to sublists
4290         mysplit = portage_dep.paren_reduce(depstring)
4291
4292         mymasks = set()
4293         useforce = set()
4294         useforce.add(mysettings["ARCH"])
4295         if use == "all":
4296                 # This masking/forcing is only for repoman.  In other cases, relevant
4297                 # masking/forcing should have already been applied via
4298                 # config.regenerate().  Also, binary or installed packages may have
4299                 # been built with flags that are now masked, and it would be
4300                 # inconsistent to mask them now.  Additionally, myuse may consist of
4301                 # flags from a parent package that is being merged to a $ROOT that is
4302                 # different from the one that mysettings represents.
4303                 mymasks.update(mysettings.usemask)
4304                 mymasks.update(mysettings.archlist())
4305                 mymasks.discard(mysettings["ARCH"])
4306                 useforce.update(mysettings.useforce)
4307                 useforce.difference_update(mymasks)
4308         try:
4309                 mysplit = portage_dep.use_reduce(mysplit, uselist=myusesplit,
4310                         masklist=mymasks, matchall=(use=="all"), excludeall=useforce)
4311         except portage_exception.InvalidDependString, e:
4312                 return [0, str(e)]
4313
4314         # Do the || conversions
4315         mysplit=portage_dep.dep_opconvert(mysplit)
4316
4317         if mysplit == []:
4318                 #dependencies were reduced to nothing
4319                 return [1,[]]
4320
4321         # Recursively expand new-style virtuals so as to
4322         # collapse one or more levels of indirection.
4323         try:
4324                 mysplit = _expand_new_virtuals(mysplit, edebug, mydbapi, mysettings,
4325                         use=use, mode=mode, myuse=myuse, use_cache=use_cache,
4326                         use_binaries=use_binaries, myroot=myroot, trees=trees)
4327         except portage_exception.ParseError, e:
4328                 return [0, str(e)]
4329
4330         mysplit2=mysplit[:]
4331         mysplit2=dep_wordreduce(mysplit2,mysettings,mydbapi,mode,use_cache=use_cache)
4332         if mysplit2 is None:
4333                 return [0,"Invalid token"]
4334
4335         writemsg("\n\n\n", 1)
4336         writemsg("mysplit:  %s\n" % (mysplit), 1)
4337         writemsg("mysplit2: %s\n" % (mysplit2), 1)
4338
4339         myzaps = dep_zapdeps(mysplit, mysplit2, myroot,
4340                 use_binaries=use_binaries, trees=trees)
4341         mylist = flatten(myzaps)
4342         writemsg("myzaps:   %s\n" % (myzaps), 1)
4343         writemsg("mylist:   %s\n" % (mylist), 1)
4344         #remove duplicates
4345         mydict={}
4346         for x in mylist:
4347                 mydict[x]=1
4348         writemsg("mydict:   %s\n" % (mydict), 1)
4349         return [1,mydict.keys()]
4350
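# Illustrative sketch (not part of the original source): a minimal caller of
# dep_check().  On success the second element of the return value is the list
# of atoms that still need to be merged; on failure it is an error string.
def _dep_check_usage(depstring, mydbapi, mysettings):
        success, result = dep_check(depstring, mydbapi, mysettings)
        if not success:
                writemsg("!!! Invalid depstring: %s\n" % result, noiselevel=-1)
                return []
        return result
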
4351 def dep_wordreduce(mydeplist,mysettings,mydbapi,mode,use_cache=1):
4352         "Reduces the deplist to ones and zeros"
4353         deplist=mydeplist[:]
4354         for mypos in xrange(len(deplist)):
4355                 if type(deplist[mypos])==types.ListType:
4356                         #recurse
4357                         deplist[mypos]=dep_wordreduce(deplist[mypos],mysettings,mydbapi,mode,use_cache=use_cache)
4358                 elif deplist[mypos]=="||":
4359                         pass
4360                 else:
4361                         mykey = dep_getkey(deplist[mypos])
4362                         if mysettings and mysettings.pprovideddict.has_key(mykey) and \
4363                                 match_from_list(deplist[mypos], mysettings.pprovideddict[mykey]):
4364                                 deplist[mypos]=True
4365                         elif mydbapi is None:
4366                                 # Assume nothing is satisfied.  This forces dep_zapdeps to
4367                                 # return all of the deps that have been selected
4368                                 # (excluding those satisfied by package.provided).
4369                                 deplist[mypos] = False
4370                         else:
4371                                 if mode:
4372                                         mydep=mydbapi.xmatch(mode,deplist[mypos])
4373                                 else:
4374                                         mydep=mydbapi.match(deplist[mypos],use_cache=use_cache)
4375                                 if mydep!=None:
4376                                         tmp=(len(mydep)>=1)
4377                                         if deplist[mypos][0]=="!":
4378                                                 tmp=False
4379                                         deplist[mypos]=tmp
4380                                 else:
4381                                         #encountered invalid string
4382                                         return None
4383         return deplist
4384
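# Illustrative sketch (not part of the original source): dep_wordreduce()
# keeps the nesting and "||" markers of a deplist but replaces each atom with
# True or False depending on whether package.provided or mydbapi can satisfy
# it, e.g. (truth values depend on what is actually installed):
#     ["||", "a/b", ["c/d"]]  ->  ["||", True, [False]]
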
4385 def cpv_getkey(mycpv):
4386         myslash=mycpv.split("/")
4387         mysplit=pkgsplit(myslash[-1])
4388         mylen=len(myslash)
4389         if mylen==2:
4390                 return myslash[0]+"/"+mysplit[0]
4391         elif mylen==1:
4392                 return mysplit[0]
4393         else:
4394                 return mysplit
4395
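# Illustrative sketch (not part of the original source): cpv_getkey() simply
# strips the version from a cpv string.
def _cpv_getkey_examples():
        assert cpv_getkey("sys-apps/portage-2.1") == "sys-apps/portage"
        assert cpv_getkey("portage-2.1") == "portage"
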
4396 def key_expand(mykey, mydb=None, use_cache=1, settings=None):
4397         mysplit=mykey.split("/")
4398         if settings is None:
4399                 settings = globals()["settings"]
4400         virts = settings.getvirtuals("/")
4401         virts_p = settings.get_virts_p("/")
4402         if len(mysplit)==1:
4403                 if mydb and type(mydb)==types.InstanceType:
4404                         for x in settings.categories:
4405                                 if mydb.cp_list(x+"/"+mykey,use_cache=use_cache):
4406                                         return x+"/"+mykey
4407                         if virts_p.has_key(mykey):
4408                                 return(virts_p[mykey][0])
4409                 return "null/"+mykey
4410         elif mydb:
4411                 if type(mydb)==types.InstanceType:
4412                         if (not mydb.cp_list(mykey,use_cache=use_cache)) and virts and virts.has_key(mykey):
4413                                 return virts[mykey][0]
4414                 return mykey
4415
4416 def cpv_expand(mycpv, mydb=None, use_cache=1, settings=None):
4417         """Given a string (packagename or virtual) expand it into a valid
4418         cat/package string. Virtuals use the mydb to determine which provided
4419         virtual is a valid choice and defaults to the first element when there
4420         are no installed/available candidates."""
4421         myslash=mycpv.split("/")
4422         mysplit=pkgsplit(myslash[-1])
4423         if settings is None:
4424                 settings = globals()["settings"]
4425         virts = settings.getvirtuals("/")
4426         virts_p = settings.get_virts_p("/")
4427         if len(myslash)>2:
4428                 # this is illegal case.
4429                 mysplit=[]
4430                 mykey=mycpv
4431         elif len(myslash)==2:
4432                 if mysplit:
4433                         mykey=myslash[0]+"/"+mysplit[0]
4434                 else:
4435                         mykey=mycpv
4436                 if mydb and virts and mykey in virts:
4437                         writemsg("mydb.__class__: %s\n" % (mydb.__class__), 1)
4438                         if type(mydb)==types.InstanceType:
4439                                 if not mydb.cp_list(mykey, use_cache=use_cache):
4440                                         writemsg("virts[%s]: %s\n" % (str(mykey),virts[mykey]), 1)
4441                                         mykey_orig = mykey[:]
4442                                         for vkey in virts[mykey]:
4443                                                 if mydb.cp_list(vkey,use_cache=use_cache):
4444                                                         mykey = vkey
4445                                                         writemsg("virts chosen: %s\n" % (mykey), 1)
4446                                                         break
4447                                         if mykey == mykey_orig:
4448                                                 mykey=virts[mykey][0]
4449                                                 writemsg("virts defaulted: %s\n" % (mykey), 1)
4450                         #we only perform virtual expansion if we are passed a dbapi
4451         else:
4452                 #specific cpv, no category, ie. "foo-1.0"
4453                 if mysplit:
4454                         myp=mysplit[0]
4455                 else:
4456                         # "foo" ?
4457                         myp=mycpv
4458                 mykey=None
4459                 matches=[]
4460                 if mydb:
4461                         for x in settings.categories:
4462                                 if mydb.cp_list(x+"/"+myp,use_cache=use_cache):
4463                                         matches.append(x+"/"+myp)
4464                 if len(matches) > 1:
4465                         virtual_name_collision = False
4466                         if len(matches) == 2:
4467                                 for x in matches:
4468                                         if not x.startswith("virtual/"):
4469                                                 # Assume that the non-virtual is desired.  This helps
4470                                                 # avoid the ValueError for invalid deps that come from
4471                                                 # installed packages (during reverse blocker detection,
4472                                                 # for example).
4473                                                 mykey = x
4474                                         else:
4475                                                 virtual_name_collision = True
4476                         if not virtual_name_collision:
4477                                 raise ValueError, matches
4478                 elif matches:
4479                         mykey=matches[0]
4480
4481                 if not mykey and type(mydb)!=types.ListType:
4482                         if virts_p.has_key(myp):
4483                                 mykey=virts_p[myp][0]
4484                         #again, we only perform virtual expansion if we have a dbapi (not a list)
4485                 if not mykey:
4486                         mykey="null/"+myp
4487         if mysplit:
4488                 if mysplit[2]=="r0":
4489                         return mykey+"-"+mysplit[1]
4490                 else:
4491                         return mykey+"-"+mysplit[1]+"-"+mysplit[2]
4492         else:
4493                 return mykey
4494
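# Illustrative sketch (not part of the original source): cpv_expand() turns a
# bare name into a full cat/package-version string, consulting the given dbapi
# to pick the category (or a virtual provider); unknown names fall back to the
# "null/" category.  "app-shells/bash-3.1" below is the typical result, but it
# depends on the dbapi contents.
def _cpv_expand_example(mydb):
        return cpv_expand("bash-3.1", mydb=mydb)  # e.g. "app-shells/bash-3.1"
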
4495 def getmaskingreason(mycpv, settings=None, portdb=None):
4496         from portage_util import grablines
4497         if settings is None:
4498                 settings = globals()["settings"]
4499         if portdb is None:
4500                 portdb = globals()["portdb"]
4501         mysplit = catpkgsplit(mycpv)
4502         if not mysplit:
4503                 raise ValueError("invalid CPV: %s" % mycpv)
4504         if not portdb.cpv_exists(mycpv):
4505                 raise KeyError("CPV %s does not exist" % mycpv)
4506         mycp=mysplit[0]+"/"+mysplit[1]
4507
4508         # XXX- This is a temporary duplicate of code from the config constructor.
4509         locations = [os.path.join(settings["PORTDIR"], "profiles")]
4510         locations.extend(settings.profiles)
4511         for ov in settings["PORTDIR_OVERLAY"].split():
4512                 profdir = os.path.join(normalize_path(ov), "profiles")
4513                 if os.path.isdir(profdir):
4514                         locations.append(profdir)
4515         locations.append(os.path.join(settings["PORTAGE_CONFIGROOT"],
4516                 USER_CONFIG_PATH.lstrip(os.path.sep)))
4517         locations.reverse()
4518         pmasklists = [grablines(os.path.join(x, "package.mask"), recursive=1) for x in locations]
4519         pmasklines = []
4520         while pmasklists: # stack_lists doesn't preserve order so it can't be used
4521                 pmasklines.extend(pmasklists.pop(0))
4522         del pmasklists
4523
4524         if settings.pmaskdict.has_key(mycp):
4525                 for x in settings.pmaskdict[mycp]:
4526                         if mycpv in portdb.xmatch("match-all", x):
4527                                 comment = ""
4528                                 l = "\n"
4529                                 comment_valid = -1
4530                                 for i in xrange(len(pmasklines)):
4531                                         l = pmasklines[i].strip()
4532                                         if l == "":
4533                                                 comment = ""
4534                                                 comment_valid = -1
4535                                         elif l[0] == "#":
4536                                                 comment += (l+"\n")
4537                                                 comment_valid = i + 1
4538                                         elif l == x:
4539                                                 if comment_valid != i:
4540                                                         comment = ""
4541                                                 return comment
4542                                         elif comment_valid != -1:
4543                                                 # Apparently this comment applies to multiple masks, so
4544                                                 # it remains valid until a blank line is encountered.
4545                                                 comment_valid += 1
4546         return None
4547
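# Illustrative sketch (not part of the original source): getmaskingreason()
# returns the "#" comment block that precedes the matching package.mask entry,
# or None when no entry matches, so a caller can simply display it.
def _print_mask_reason(mycpv):
        reason = getmaskingreason(mycpv)
        if reason:
                writemsg_stdout(reason)
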
4548 def getmaskingstatus(mycpv, settings=None, portdb=None):
4549         if settings is None:
4550                 settings = globals()["settings"]
4551         if portdb is None:
4552                 portdb = globals()["portdb"]
4553         mysplit = catpkgsplit(mycpv)
4554         if not mysplit:
4555                 raise ValueError("invalid CPV: %s" % mycpv)
4556         if not portdb.cpv_exists(mycpv):
4557                 raise KeyError("CPV %s does not exist" % mycpv)
4558         mycp=mysplit[0]+"/"+mysplit[1]
4559
4560         rValue = []
4561
4562         # profile checking
4563         revmaskdict=settings.prevmaskdict
4564         if revmaskdict.has_key(mycp):
4565                 for x in revmaskdict[mycp]:
4566                         if x[0]=="*":
4567                                 myatom = x[1:]
4568                         else:
4569                                 myatom = x
4570                         if not match_to_list(mycpv, [myatom]):
4571                                 rValue.append("profile")
4572                                 break
4573
4574         # package.mask checking
4575         maskdict=settings.pmaskdict
4576         unmaskdict=settings.punmaskdict
4577         if maskdict.has_key(mycp):
4578                 for x in maskdict[mycp]:
4579                         if mycpv in portdb.xmatch("match-all", x):
4580                                 unmask=0
4581                                 if unmaskdict.has_key(mycp):
4582                                         for z in unmaskdict[mycp]:
4583                                                 if mycpv in portdb.xmatch("match-all",z):
4584                                                         unmask=1
4585                                                         break
4586                                 if unmask==0:
4587                                         rValue.append("package.mask")
4588
4589         # keywords checking
4590         try:
4591                 mygroups, eapi = portdb.aux_get(mycpv, ["KEYWORDS", "EAPI"])
4592         except KeyError:
4593                 # The "depend" phase apparently failed for some reason.  An associated
4594                 # error message will have already been printed to stderr.
4595                 return ["corruption"]
4596         if not eapi_is_supported(eapi):
4597                 return ["required EAPI %s, supported EAPI %s" % (eapi, portage_const.EAPI)]
4598         mygroups = mygroups.split()
4599         pgroups = settings["ACCEPT_KEYWORDS"].split()
4600         myarch = settings["ARCH"]
4601         if pgroups and myarch not in pgroups:
4602                 """For operating systems other than Linux, ARCH is not necessarily a
4603                 valid keyword."""
4604                 myarch = pgroups[0].lstrip("~")
4605         pkgdict = settings.pkeywordsdict
4606
4607         cp = dep_getkey(mycpv)
4608         if pkgdict.has_key(cp):
4609                 matches = match_to_list(mycpv, pkgdict[cp].keys())
4610                 for match in matches:
4611                         pgroups.extend(pkgdict[cp][match])
4612                 if matches:
4613                         inc_pgroups = []
4614                         for x in pgroups:
4615                                 if x != "-*" and x.startswith("-"):
4616                                         try:
4617                                                 inc_pgroups.remove(x[1:])
4618                                         except ValueError:
4619                                                 pass
4620                                 if x not in inc_pgroups:
4621                                         inc_pgroups.append(x)
4622                         pgroups = inc_pgroups
4623                         del inc_pgroups
4624
4625         kmask = "missing"
4626
4627         for keyword in pgroups:
4628                 if keyword in mygroups:
4629                         kmask=None
4630
4631         if kmask:
4632                 fallback = None
4633                 for gp in mygroups:
4634                         if gp=="*":
4635                                 kmask=None
4636                                 break
4637                         elif gp=="-"+myarch:
4638                                 kmask="-"+myarch
4639                                 break
4640                         elif gp=="~"+myarch:
4641                                 kmask="~"+myarch
4642                                 break
4643
4644         if kmask:
4645                 rValue.append(kmask+" keyword")
4646         return rValue
4647
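# Illustrative sketch (not part of the original source): getmaskingstatus()
# returns a list of human readable reasons such as ["package.mask"] or
# ["~x86 keyword"]; an empty list means the package is visible.
def _is_visible_example(mycpv):
        return not getmaskingstatus(mycpv)
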
4648 class portagetree:
4649         def __init__(self, root="/", virtual=None, clone=None, settings=None):
4650
4651                 if clone:
4652                         self.root=clone.root
4653                         self.portroot=clone.portroot
4654                         self.pkglines=clone.pkglines
4655                 else:
4656                         self.root=root
4657                         if settings is None:
4658                                 settings = globals()["settings"]
4659                         self.settings = settings
4660                         self.portroot=settings["PORTDIR"]
4661                         self.virtual=virtual
4662                         self.dbapi = portdbapi(
4663                                 settings["PORTDIR"], mysettings=settings)
4664
4665         def dep_bestmatch(self,mydep):
4666                 "compatibility method"
4667                 mymatch=self.dbapi.xmatch("bestmatch-visible",mydep)
4668                 if mymatch is None:
4669                         return ""
4670                 return mymatch
4671
4672         def dep_match(self,mydep):
4673                 "compatibility method"
4674                 mymatch=self.dbapi.xmatch("match-visible",mydep)
4675                 if mymatch is None:
4676                         return []
4677                 return mymatch
4678
4679         def exists_specific(self,cpv):
4680                 return self.dbapi.cpv_exists(cpv)
4681
4682         def getallnodes(self):
4683                 """new behavior: these are all *unmasked* nodes.  There may or may not be
4684                 masked packages available for the nodes in this list."""
4685                 return self.dbapi.cp_all()
4686
4687         def getname(self,pkgname):
4688                 "returns file location for this particular package (DEPRECATED)"
4689                 if not pkgname:
4690                         return ""
4691                 mysplit=pkgname.split("/")
4692                 psplit=pkgsplit(mysplit[1])
4693                 return self.portroot+"/"+mysplit[0]+"/"+psplit[0]+"/"+mysplit[1]+".ebuild"
4694
4695         def resolve_specific(self,myspec):
4696                 cps=catpkgsplit(myspec)
4697                 if not cps:
4698                         return None
4699                 mykey = key_expand(cps[0]+"/"+cps[1], mydb=self.dbapi,
4700                         settings=self.settings)
4701                 mykey=mykey+"-"+cps[2]
4702                 if cps[3]!="r0":
4703                         mykey=mykey+"-"+cps[3]
4704                 return mykey
4705
4706         def depcheck(self,mycheck,use="yes",myusesplit=None):
4707                 return dep_check(mycheck,self.dbapi,use=use,myuse=myusesplit)
4708
4709         def getslot(self,mycatpkg):
4710                 "Get a slot for a catpkg; assume it exists."
4711                 myslot = ""
4712                 try:
4713                         myslot=self.dbapi.aux_get(mycatpkg,["SLOT"])[0]
4714                 except SystemExit, e:
4715                         raise
4716                 except Exception, e:
4717                         pass
4718                 return myslot
4719
4720
4721 class dbapi:
4722         def __init__(self):
4723                 pass
4724
4725         def close_caches(self):
4726                 pass
4727
4728         def cp_list(self,cp,use_cache=1):
4729                 return
4730
4731         def cpv_all(self):
4732                 cpv_list = []
4733                 for cp in self.cp_all():
4734                         cpv_list.extend(self.cp_list(cp))
4735                 return cpv_list
4736
4737         def aux_get(self,mycpv,mylist):
4738                 "stub code for returning auxiliary db information, such as SLOT, DEPEND, etc."
4739                 'input: "sys-apps/foo-1.0",["SLOT","DEPEND","HOMEPAGE"]'
4740                 'return: ["0",">=sys-libs/bar-1.0","http://www.foo.com"] or [] if mycpv not found'
4741                 raise NotImplementedError
4742
4743         def match(self,origdep,use_cache=1):
4744                 mydep = dep_expand(origdep, mydb=self, settings=self.settings)
4745                 mykey=dep_getkey(mydep)
4746                 mylist = match_from_list(mydep,self.cp_list(mykey,use_cache=use_cache))
4747                 myslot = portage_dep.dep_getslot(mydep)
4748                 if myslot is not None:
4749                         mylist = [cpv for cpv in mylist \
4750                                 if self.aux_get(cpv, ["SLOT"])[0] == myslot]
4751                 return mylist
4752
4753         def match2(self,mydep,mykey,mylist):
4754                 writemsg("DEPRECATED: dbapi.match2\n")
4755                 return match_from_list(mydep,mylist)
4756
4757         def invalidentry(self, mypath):
4758                 if re.search("portage_lockfile$",mypath):
4759                         if not os.environ.has_key("PORTAGE_MASTER_PID"):
4760                                 writemsg("Lockfile removed: %s\n" % mypath, 1)
4761                                 portage_locks.unlockfile((mypath,None,None))
4762                         else:
4763                                 # Nothing we can do about it. We're probably sandboxed.
4764                                 pass
4765                 elif re.search(".*/-MERGING-(.*)",mypath):
4766                         if os.path.exists(mypath):
4767                                 writemsg(red("INCOMPLETE MERGE:")+" "+mypath+"\n", noiselevel=-1)
4768                 else:
4769                         writemsg("!!! Invalid db entry: %s\n" % mypath, noiselevel=-1)
4770
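# Illustrative sketch (not part of the original source): dbapi.match() expands
# the given atom, matches it against cp_list(), and filters by SLOT when the
# atom names one, returning the matching cpvs.
def _match_example(mydb):
        return mydb.match(">=sys-apps/portage-2.1")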
4771
4772
4773 class fakedbapi(dbapi):
4774         "This is a dbapi to use for the emptytree function.  It's empty, but things can be added to it."
4775         def __init__(self, settings=None):
4776                 self.cpvdict={}
4777                 self.cpdict={}
4778                 if settings is None:
4779                         settings = globals()["settings"]
4780                 self.settings = settings
4781                 self._match_cache = {}
4782
4783         def _clear_cache(self):
4784                 if self._match_cache:
4785                         self._match_cache = {}
4786
4787         def match(self, origdep, use_cache=1):
4788                 result = self._match_cache.get(origdep, None)
4789                 if result is not None:
4790                         return result[:]
4791                 result = dbapi.match(self, origdep, use_cache=use_cache)
4792                 self._match_cache[origdep] = result
4793                 return result[:]
4794
4795         def cpv_exists(self,mycpv):
4796                 return self.cpvdict.has_key(mycpv)
4797
4798         def cp_list(self,mycp,use_cache=1):
4799                 if not self.cpdict.has_key(mycp):
4800                         return []
4801                 else:
4802                         return self.cpdict[mycp]
4803
4804         def cp_all(self):
4805                 returnme=[]
4806                 for x in self.cpdict.keys():
4807                         returnme.extend(self.cpdict[x])
4808                 return returnme
4809
4810         def cpv_all(self):
4811                 return self.cpvdict.keys()
4812
4813         def cpv_inject(self, mycpv, metadata=None):
4814                 """Adds a cpv to the list of available packages."""
4815                 self._clear_cache()
4816                 mycp=cpv_getkey(mycpv)
4817                 self.cpvdict[mycpv] = metadata
4818                 myslot = None
4819                 if metadata:
4820                         myslot = metadata.get("SLOT", None)
4821                 if myslot and mycp in self.cpdict:
4822                         # If necessary, remove another package in the same SLOT.
4823                         for cpv in self.cpdict[mycp]:
4824                                 if mycpv != cpv:
4825                                         other_metadata = self.cpvdict[cpv]
4826                                         if other_metadata:
4827                                                 if myslot == other_metadata.get("SLOT", None):
4828                                                         self.cpv_remove(cpv)
4829                                                         break
4830                 if mycp not in self.cpdict:
4831                         self.cpdict[mycp] = []
4832                 if not mycpv in self.cpdict[mycp]:
4833                         self.cpdict[mycp].append(mycpv)
4834
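        # Illustrative sketch (not part of the original source): injecting two
        # versions that share a SLOT keeps only the most recently injected one
        # (metadata values hypothetical):
        #     fake = fakedbapi(settings=mysettings)
        #     fake.cpv_inject("sys-apps/foo-1.0", metadata={"SLOT": "0"})
        #     fake.cpv_inject("sys-apps/foo-1.1", metadata={"SLOT": "0"})
        #     fake.cp_list("sys-apps/foo")  ->  ["sys-apps/foo-1.1"]
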
4835         def cpv_remove(self,mycpv):
4836                 """Removes a cpv from the list of available packages."""
4837                 self._clear_cache()
4838                 mycp=cpv_getkey(mycpv)
4839                 if self.cpvdict.has_key(mycpv):
4840                         del     self.cpvdict[mycpv]
4841                 if not self.cpdict.has_key(mycp):
4842                         return
4843                 while mycpv in self.cpdict[mycp]:
4844                         del self.cpdict[mycp][self.cpdict[mycp].index(mycpv)]
4845                 if not len(self.cpdict[mycp]):
4846                         del self.cpdict[mycp]
4847
4848         def aux_get(self, mycpv, wants):
4849                 if not self.cpv_exists(mycpv):
4850                         raise KeyError(mycpv)
4851                 metadata = self.cpvdict[mycpv]
4852                 if not metadata:
4853                         return ["" for x in wants]
4854                 return [metadata.get(x, "") for x in wants]
4855
4856         def aux_update(self, cpv, values):
4857                 self._clear_cache()
4858                 self.cpvdict[cpv].update(values)
4859
4860 class bindbapi(fakedbapi):
4861         def __init__(self, mybintree=None, settings=None):
4862                 self.bintree = mybintree
4863                 self.cpvdict={}
4864                 self.cpdict={}
4865                 if settings is None:
4866                         settings = globals()["settings"]
4867                 self.settings = settings
4868                 self._match_cache = {}
4869                 # Selectively cache metadata in order to optimize dep matching.
4870                 self._aux_cache_keys = set(["SLOT"])
4871                 self._aux_cache = {}
4872
4873         def match(self, *pargs, **kwargs):
4874                 if self.bintree and not self.bintree.populated:
4875                         self.bintree.populate()
4876                 return fakedbapi.match(self, *pargs, **kwargs)
4877
4878         def aux_get(self,mycpv,wants):
4879                 if self.bintree and not self.bintree.populated:
4880                         self.bintree.populate()
4881                 cache_me = False
4882                 if not set(wants).difference(self._aux_cache_keys):
4883                         aux_cache = self._aux_cache.get(mycpv)
4884                         if aux_cache is not None:
4885                                 return [aux_cache[x] for x in wants]
4886                         cache_me = True
4887                 mysplit = mycpv.split("/")
4888                 mylist  = []
4889                 tbz2name = mysplit[1]+".tbz2"
4890                 if self.bintree and not self.bintree.isremote(mycpv):
4891                         tbz2 = xpak.tbz2(self.bintree.getname(mycpv))
4892                         getitem = tbz2.getfile
4893                 else:
4894                         getitem = self.bintree.remotepkgs[tbz2name].get
4895                 mydata = {}
4896                 mykeys = wants
4897                 if cache_me:
4898                         mykeys = self._aux_cache_keys.union(wants)
4899                 for x in mykeys:
4900                         myval = getitem(x)
4901                         # myval is None if the key doesn't exist
4902                         # or the tbz2 is corrupt.
4903                         if myval:
4904                                 mydata[x] = " ".join(myval.split())
4905                 if "EAPI" in mykeys:
4906                         if not mydata.setdefault("EAPI", "0"):
4907                                 mydata["EAPI"] = "0"
4908                 if cache_me:
4909                         aux_cache = {}
4910                         for x in self._aux_cache_keys:
4911                                 aux_cache[x] = mydata.get(x, "")
4912                         self._aux_cache[mycpv] = aux_cache
4913                 return [mydata.get(x, "") for x in wants]
4914
4915         def aux_update(self, cpv, values):
4916                 if not self.bintree.populated:
4917                         self.bintree.populate()
4918                 tbz2path = self.bintree.getname(cpv)
4919                 if not os.path.exists(tbz2path):
4920                         raise KeyError(cpv)
4921                 mytbz2 = xpak.tbz2(tbz2path)
4922                 mydata = mytbz2.get_data()
4923                 mydata.update(values)
4924                 mytbz2.recompose_mem(xpak.xpak_mem(mydata))
4925
4926         def cp_list(self, *pargs, **kwargs):
4927                 if not self.bintree.populated:
4928                         self.bintree.populate()
4929                 return fakedbapi.cp_list(self, *pargs, **kwargs)
4930
4931         def cpv_all(self):
4932                 if not self.bintree.populated:
4933                         self.bintree.populate()
4934                 return fakedbapi.cpv_all(self)
4935
4936 class vardbapi(dbapi):
4937         def __init__(self, root, categories=None, settings=None, vartree=None):
4938                 self.root       = root[:]
4939                 #cache for category directory mtimes
4940                 self.mtdircache = {}
4941                 #cache for dependency checks
4942                 self.matchcache = {}
4943                 #cache for cp_list results
4944                 self.cpcache    = {}
4945                 self.blockers   = None
4946                 if settings is None:
4947                         settings = globals()["settings"]
4948                 self.settings = settings
4949                 if categories is None:
4950                         categories = settings.categories
4951                 self.categories = categories[:]
4952                 if vartree is None:
4953                         vartree = globals()["db"][root]["vartree"]
4954                 self.vartree = vartree
4955                 self._aux_cache_keys = set(["SLOT", "COUNTER", "PROVIDE", "USE",
4956                         "IUSE", "DEPEND", "RDEPEND", "PDEPEND"])
4957                 self._aux_cache = None
4958                 self._aux_cache_version = "1"
4959                 self._aux_cache_filename = os.path.join(self.root,
4960                         CACHE_PATH.lstrip(os.path.sep), "vdb_metadata.pickle")
4961
4962         def cpv_exists(self,mykey):
4963                 "Tells us whether an actual ebuild exists on disk (no masking)"
4964                 return os.path.exists(self.root+VDB_PATH+"/"+mykey)
4965
4966         def cpv_counter(self,mycpv):
4967                 "This method will grab the COUNTER. Returns a counter value."
4968                 try:
4969                         return long(self.aux_get(mycpv, ["COUNTER"])[0])
4970                 except (KeyError, ValueError):
4971                         pass
4972                 cdir=self.root+VDB_PATH+"/"+mycpv
4973                 cpath=self.root+VDB_PATH+"/"+mycpv+"/COUNTER"
4974
4975                 # We write our new counter value to a new file that gets moved into
4976                 # place to avoid filesystem corruption on XFS (unexpected reboot).
4977                 corrupted=0
4978                 if os.path.exists(cpath):
4979                         cfile=open(cpath, "r")
4980                         try:
4981                                 counter=long(cfile.readline())
4982                         except ValueError:
4983                                 print "portage: COUNTER for",mycpv,"was corrupted; resetting to value of 0"
4984                                 counter=long(0)
4985                                 corrupted=1
4986                         cfile.close()
4987                 elif os.path.exists(cdir):
4988                         mys = pkgsplit(mycpv)
4989                         myl = self.match(mys[0],use_cache=0)
4990                         print mys,myl
4991                         if len(myl) == 1:
4992                                 try:
4993                                         # Only one package... Counter doesn't matter.
4994                                         write_atomic(cpath, "1")
4995                                         counter = 1
4996                                 except SystemExit, e:
4997                                         raise
4998                                 except Exception, e:
4999                                         writemsg("!!! COUNTER file is missing for "+str(mycpv)+" in /var/db.\n",
5000                                                 noiselevel=-1)
5001                                         writemsg("!!! Please run /usr/lib/portage/bin/fix-db.py or\n",
5002                                                 noiselevel=-1)
5003                                         writemsg("!!! unmerge this exact version.\n", noiselevel=-1)
5004                                         writemsg("!!! %s\n" % e, noiselevel=-1)
5005                                         sys.exit(1)
5006                         else:
5007                                 writemsg("!!! COUNTER file is missing for "+str(mycpv)+" in /var/db.\n",
5008                                         noiselevel=-1)
5009                                 writemsg("!!! Please run /usr/lib/portage/bin/fix-db.py or\n",
5010                                         noiselevel=-1)
5011                                 writemsg("!!! remerge the package.\n", noiselevel=-1)
5012                                 sys.exit(1)
5013                 else:
5014                         counter=long(0)
5015                 if corrupted:
5016                         # update new global counter file
5017                         write_atomic(cpath, str(counter))
5018                 return counter
5019
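        # Illustrative sketch (not part of the original source): in the normal
        # case cpv_counter() just returns long(COUNTER) as read via aux_get(),
        # e.g. vardb.cpv_counter("sys-apps/portage-2.1") -> 1234L (value
        # hypothetical); the fallback code above only runs when the COUNTER
        # file is missing or unreadable.
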
5020         def cpv_inject(self,mycpv):
5021                 "injects a real package into our on-disk database; assumes mycpv is valid and doesn't already exist"
5022                 os.makedirs(self.root+VDB_PATH+"/"+mycpv)
5023                 counter = self.counter_tick(self.root, mycpv=mycpv)
5024                 # write local package counter so that emerge clean does the right thing
5025                 write_atomic(os.path.join(self.root, VDB_PATH, mycpv, "COUNTER"), str(counter))
5026
5027         def isInjected(self,mycpv):
5028                 if self.cpv_exists(mycpv):
5029                         if os.path.exists(self.root+VDB_PATH+"/"+mycpv+"/INJECTED"):
5030                                 return True
5031                         if not os.path.exists(self.root+VDB_PATH+"/"+mycpv+"/CONTENTS"):
5032                                 return True
5033                 return False
5034
5035         def move_ent(self,mylist):
5036                 origcp=mylist[1]
5037                 newcp=mylist[2]
5038
5039                 # sanity check
5040                 for cp in [origcp,newcp]:
5041                         if not (isvalidatom(cp) and isjustname(cp)):
5042                                 raise portage_exception.InvalidPackageName(cp)
5043                 origmatches=self.match(origcp,use_cache=0)
5044                 if not origmatches:
5045                         return
5046                 for mycpv in origmatches:
5047                         mycpsplit=catpkgsplit(mycpv)
5048                         mynewcpv=newcp+"-"+mycpsplit[2]
5049                         mynewcat=newcp.split("/")[0]
5050                         if mycpsplit[3]!="r0":
5051                                 mynewcpv += "-"+mycpsplit[3]
5052                         mycpsplit_new = catpkgsplit(mynewcpv)
5053                         origpath=self.root+VDB_PATH+"/"+mycpv
5054                         if not os.path.exists(origpath):
5055                                 continue
5056                         writemsg_stdout("@")
5057                         if not os.path.exists(self.root+VDB_PATH+"/"+mynewcat):
5058                                 #create the directory
5059                                 os.makedirs(self.root+VDB_PATH+"/"+mynewcat)
5060                         newpath=self.root+VDB_PATH+"/"+mynewcpv
5061                         if os.path.exists(newpath):
5062                                 #dest already exists; keep this puppy where it is.
5063                                 continue
5064                         os.rename(origpath, newpath)
5065
5066                         # We need to rename the ebuild now.
5067                         old_pf = catsplit(mycpv)[1]
5068                         new_pf = catsplit(mynewcpv)[1]
5069                         if new_pf != old_pf:
5070                                 try:
5071                                         os.rename(os.path.join(newpath, old_pf + ".ebuild"),
5072                                                 os.path.join(newpath, new_pf + ".ebuild"))
5073                                 except OSError, e:
5074                                         if e.errno != errno.ENOENT:
5075                                                 raise
5076                                         del e
5077                                 write_atomic(os.path.join(newpath, "PF"), new_pf+"\n")
5078
5079                         write_atomic(os.path.join(newpath, "CATEGORY"), mynewcat+"\n")
5080                         fixdbentries([mylist], newpath)
5081
5082         def update_ents(self, update_iter):
5083                 """Run fixdbentries on all installed packages (time consuming).  Like
5084                 fixpackages, this should be run from a helper script and display
5085                 a progress indicator."""
5086                 dbdir = os.path.join(self.root, VDB_PATH)
5087                 for catdir in listdir(dbdir):
5088                         catdir = dbdir+"/"+catdir
5089                         if os.path.isdir(catdir):
5090                                 for pkgdir in listdir(catdir):
5091                                         pkgdir = catdir+"/"+pkgdir
5092                                         if os.path.isdir(pkgdir):
5093                                                 fixdbentries(update_iter, pkgdir)
5094
5095         def move_slot_ent(self,mylist):
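                      # mylist is expected to be a slotmove command of the form
                      # ["slotmove", atom, orig_slot, new_slot].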
5096                 pkg=mylist[1]
5097                 origslot=mylist[2]
5098                 newslot=mylist[3]
5099
5100                 if not isvalidatom(pkg):
5101                         raise portage_exception.InvalidAtom(pkg)
5102
5103                 origmatches=self.match(pkg,use_cache=0)
5104                 
5105                 if not origmatches:
5106                         return
5107                 for mycpv in origmatches:
5108                         origpath=self.root+VDB_PATH+"/"+mycpv
5109                         if not os.path.exists(origpath):
5110                                 continue
5111
5112                         slot=grabfile(origpath+"/SLOT")
5113                         if (not slot):
5114                                 continue
5115
5116                         if (slot[0]!=origslot):
5117                                 continue
5118
5119                         writemsg_stdout("s")
5120                         write_atomic(os.path.join(origpath, "SLOT"), newslot+"\n")
5121
5122         def cp_list(self,mycp,use_cache=1):
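                      # Results are cached per cp in self.cpcache and validated against
                      # the mtime of the category directory.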
5123                 mysplit=mycp.split("/")
5124                 if mysplit[0] == '*':
5125                         mysplit[0] = mysplit[0][1:]
5126                 try:
5127                         mystat=os.stat(self.root+VDB_PATH+"/"+mysplit[0])[stat.ST_MTIME]
5128                 except OSError:
5129                         mystat=0
5130                 if use_cache and self.cpcache.has_key(mycp):
5131                         cpc=self.cpcache[mycp]
5132                         if cpc[0]==mystat:
5133                                 return cpc[1]
5134                 list=listdir(self.root+VDB_PATH+"/"+mysplit[0],EmptyOnError=1)
5135
5136                 if (list is None):
5137                         return []
5138                 returnme=[]
5139                 for x in list:
5140                         if x.startswith("."):
5141                                 continue
5142                         if x[0] == '-':
5143                                 #writemsg(red("INCOMPLETE MERGE:")+str(x[len("-MERGING-"):])+"\n")
5144                                 continue
5145                         ps=pkgsplit(x)
5146                         if not ps:
5147                                 self.invalidentry(self.root+VDB_PATH+"/"+mysplit[0]+"/"+x)
5148                                 continue
5149                         if len(mysplit) > 1:
5150                                 if ps[0]==mysplit[1]:
5151                                         returnme.append(mysplit[0]+"/"+x)
5152                 if use_cache:
5153                         self.cpcache[mycp]=[mystat,returnme]
5154                 elif self.cpcache.has_key(mycp):
5155                         del self.cpcache[mycp]
5156                 return returnme
5157
5158         def cpv_all(self,use_cache=1):
5159                 returnme=[]
5160                 basepath = self.root+VDB_PATH+"/"
5161
5162                 for x in self.categories:
5163                         for y in listdir(basepath+x,EmptyOnError=1):
5164                                 if y.startswith("."):
5165                                         continue
5166                                 subpath = x+"/"+y
5167                                 # -MERGING- should never be a cpv, nor should files.
5168                                 if os.path.isdir(basepath+subpath) and (pkgsplit(y) is not None):
5169                                         returnme += [subpath]
5170                 return returnme
5171
5172         def cp_all(self,use_cache=1):
5173                 mylist = self.cpv_all(use_cache=use_cache)
5174                 d={}
5175                 for y in mylist:
5176                         if y[0] == '*':
5177                                 y = y[1:]
5178                         mysplit=catpkgsplit(y)
5179                         if not mysplit:
5180                                 self.invalidentry(self.root+VDB_PATH+"/"+y)
5181                                 continue
5182                         d[mysplit[0]+"/"+mysplit[1]] = None
5183                 return d.keys()
5184
5185         def checkblockers(self,origdep):
5186                 pass
5187
5188         def match(self,origdep,use_cache=1):
5189                 "caching match function"
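                      # Matches are cached per category in self.matchcache; self.mtdircache
                      # records the category directory's mtime so stale entries are dropped.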
5190                 mydep = dep_expand(
5191                         origdep, mydb=self, use_cache=use_cache, settings=self.settings)
5192                 mykey=dep_getkey(mydep)
5193                 mycat=mykey.split("/")[0]
5194                 if not use_cache:
5195                         if self.matchcache.has_key(mycat):
5196                                 del self.mtdircache[mycat]
5197                                 del self.matchcache[mycat]
5198                         mymatch = match_from_list(mydep,
5199                                 self.cp_list(mykey, use_cache=use_cache))
5200                         myslot = portage_dep.dep_getslot(mydep)
5201                         if myslot is not None:
5202                                 mymatch = [cpv for cpv in mymatch \
5203                                         if self.aux_get(cpv, ["SLOT"])[0] == myslot]
5204                         return mymatch
5205                 try:
5206                         curmtime=os.stat(self.root+VDB_PATH+"/"+mycat)[stat.ST_MTIME]
5207                 except (IOError, OSError):
5208                         curmtime=0
5209
5210                 if not self.matchcache.has_key(mycat) or not self.mtdircache[mycat]==curmtime:
5211                         # clear cache entry
5212                         self.mtdircache[mycat]=curmtime
5213                         self.matchcache[mycat]={}
5214                 if not self.matchcache[mycat].has_key(mydep):
5215                         mymatch=match_from_list(mydep,self.cp_list(mykey,use_cache=use_cache))
5216                         myslot = portage_dep.dep_getslot(mydep)
5217                         if myslot is not None:
5218                                 mymatch = [cpv for cpv in mymatch \
5219                                         if self.aux_get(cpv, ["SLOT"])[0] == myslot]
5220                         self.matchcache[mycat][mydep]=mymatch
5221                 return self.matchcache[mycat][mydep][:]
5222
5223         def findname(self, mycpv):
5224                 return self.root+VDB_PATH+"/"+str(mycpv)+"/"+mycpv.split("/")[1]+".ebuild"
5225
5226         def flush_cache(self):
5227                 """If the current user has permission and the internal aux_get cache has
5228                 been updated, save it to disk and mark it unmodified.  This is called
5229                 by emerge after it has loaded the full vdb for use in dependency
5230                 calculations.  Currently, the cache is only written if the user has
5231                 superuser privileges (since that's required to obtain a lock), but all
5232                 users have read access and benefit from faster metadata lookups (as
5233                 long as at least part of the cache is still valid)."""
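                      # Note: secpass >= 2 corresponds to superuser privileges.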
5234                 if self._aux_cache is not None and \
5235                         self._aux_cache["modified"] and \
5236                         secpass >= 2:
5237                         valid_nodes = set(self.cpv_all())
5238                         for cpv in self._aux_cache["packages"].keys():
5239                                 if cpv not in valid_nodes:
5240                                         del self._aux_cache["packages"][cpv]
5241                         del self._aux_cache["modified"]
5242                         try:
5243                                 f = atomic_ofstream(self._aux_cache_filename)
5244                                 cPickle.dump(self._aux_cache, f, -1)
5245                                 f.close()
5246                                 portage_util.apply_secpass_permissions(
5247                                         self._aux_cache_filename, gid=portage_gid, mode=0644)
5248                         except (IOError, OSError), e:
5249                                 pass
5250                         self._aux_cache["modified"] = False
5251
5252         def aux_get(self, mycpv, wants):
5253                 """This automatically caches selected keys that are frequently needed
5254                 by emerge for dependency calculations.  The cached metadata is
5255                 considered valid if the mtime of the package directory has not changed
5256                 since the data was cached.  The cache is stored in a pickled dict
5257                 object with the following format:
5258
5259                 {"version":"1", "packages":{cpv1:(mtime,{k1:v1, k2:v2, ...}), cpv2:...}}
5260
5261                 If an error occurs while loading the cache pickle or the version is
5262                 unrecognized, the cache will simply be recreated from scratch (it is
5263                 completely disposable).
5264                 """
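                      # Hypothetical example of a single "packages" entry:
                      #   "sys-apps/foo-1.0": (1136420000L, {"SLOT": "0", ...})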
5265                 if not self._aux_cache_keys.intersection(wants):
5266                         return self._aux_get(mycpv, wants)
5267                 if self._aux_cache is None:
5268                         try:
5269                                 f = open(self._aux_cache_filename)
5270                                 mypickle = cPickle.Unpickler(f)
5271                                 mypickle.find_global = None
5272                                 self._aux_cache = mypickle.load()
5273                                 f.close()
5274                                 del f
5275                         except (IOError, OSError, EOFError, cPickle.UnpicklingError):
5276                                 pass
5277                         if not self._aux_cache or \
5278                                 not isinstance(self._aux_cache, dict) or \
5279                                 self._aux_cache.get("version") != self._aux_cache_version or \
5280                                 not self._aux_cache.get("packages"):
5281                                 self._aux_cache = {"version":self._aux_cache_version}
5282                                 self._aux_cache["packages"] = {}
5283                         self._aux_cache["modified"] = False
5284                 mydir = os.path.join(self.root, VDB_PATH, mycpv)
5285                 mydir_stat = None
5286                 try:
5287                         mydir_stat = os.stat(mydir)
5288                 except OSError, e:
5289                         if e.errno != errno.ENOENT:
5290                                 raise
5291                         raise KeyError(mycpv)
5292                 mydir_mtime = long(mydir_stat.st_mtime)
5293                 pkg_data = self._aux_cache["packages"].get(mycpv)
5294                 mydata = {}
5295                 cache_valid = False
5296                 if pkg_data:
5297                         cache_mtime, metadata = pkg_data
5298                         cache_valid = cache_mtime == mydir_mtime
5299                         if cache_valid and set(metadata) != self._aux_cache_keys:
5300                                 # Allow self._aux_cache_keys to change without a cache version
5301                                 # bump.
5302                                 cache_valid = False
5303                 if cache_valid:
5304                         mydata.update(metadata)
5305                         pull_me = set(wants).difference(self._aux_cache_keys)
5306                 else:
5307                         pull_me = self._aux_cache_keys.union(wants)
5308                 if pull_me:
5309                         # pull any needed data and cache it
5310                         aux_keys = list(pull_me)
5311                         for k, v in izip(aux_keys, self._aux_get(mycpv, aux_keys)):
5312                                 mydata[k] = v
5313                         if not cache_valid:
5314                                 cache_data = {}
5315                                 for aux_key in self._aux_cache_keys:
5316                                         cache_data[aux_key] = mydata[aux_key]
5317                                 self._aux_cache["packages"][mycpv] = (mydir_mtime, cache_data)
5318                                 self._aux_cache["modified"] = True
5319                 return [mydata[x] for x in wants]
5320
5321         def _aux_get(self, mycpv, wants):
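                      # Uncached backend: each metadata key is stored as a flat file of the
                      # same name in the package's vdb directory; unreadable files yield ""
                      # and a missing EAPI defaults to "0".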
5322                 mydir = os.path.join(self.root, VDB_PATH, mycpv)
5323                 try:
5324                         if not stat.S_ISDIR(os.stat(mydir).st_mode):
5325                                 raise KeyError(mycpv)
5326                 except OSError, e:
5327                         if e.errno == errno.ENOENT:
5328                                 raise KeyError(mycpv)
5329                         del e
5330                         raise
5331                 results = []
5332                 for x in wants:
5333                         try:
5334                                 myf = open(os.path.join(mydir, x), "r")
5335                                 try:
5336                                         myd = myf.read()
5337                                 finally:
5338                                         myf.close()
5339                                 myd = " ".join(myd.split())
5340                         except IOError:
5341                                 myd = ""
5342                         if x == "EAPI" and not myd:
5343                                 results.append("0")
5344                         else:
5345                                 results.append(myd)
5346                 return results
5347
5348         def aux_update(self, cpv, values):
5349                 cat, pkg = cpv.split("/")
5350                 mylink = dblink(cat, pkg, self.root, self.settings,
5351                 treetype="vartree", vartree=self.vartree)
5352                 if not mylink.exists():
5353                         raise KeyError(cpv)
5354                 for k, v in values.iteritems():
5355                         mylink.setfile(k, v)
5356
5357         def counter_tick(self,myroot,mycpv=None):
5358                 return self.counter_tick_core(myroot,incrementing=1,mycpv=mycpv)
5359
5360         def get_counter_tick_core(self,myroot,mycpv=None):
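                      # Peek at the value the next counter_tick() call would return; normally
                      # this does not modify the stored counter (unless it has to be repaired).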
5361                 return self.counter_tick_core(myroot,incrementing=0,mycpv=mycpv)+1
5362
5363         def counter_tick_core(self,myroot,incrementing=1,mycpv=None):
5364                 "This method will grab the next COUNTER value and record it back to the global file.  Returns new counter value."
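                      # The global counter lives at ${ROOT}var/cache/edb/counter.  If it is
                      # missing or corrupt, it is regenerated from the largest per-package
                      # COUNTER file found in the vdb (see find_counter below).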
5365                 cpath=myroot+"var/cache/edb/counter"
5366                 changed=0
5367                 min_counter = 0
5368                 if mycpv:
5369                         mysplit = pkgsplit(mycpv)
5370                         for x in self.match(mysplit[0],use_cache=0):
5371                                 if x==mycpv:
5372                                         continue
5373                                 try:
5374                                         old_counter = long(self.aux_get(x,["COUNTER"])[0])
5375                                         writemsg("COUNTER '%d' '%s'\n" % (old_counter, x),1)
5376                                 except (ValueError, KeyError): # valueError from long(), KeyError from aux_get
5377                                         old_counter = 0
5378                                         writemsg("!!! BAD COUNTER in '%s'\n" % (x), noiselevel=-1)
5379                                 if old_counter > min_counter:
5380                                         min_counter = old_counter
5381
5382                 # We write our new counter value to a new file that gets moved into
5383                 # place to avoid filesystem corruption.
5384                 find_counter = ("find '%s' -type f -name COUNTER | " + \
5385                         "while read f; do echo $(<\"${f}\"); done | " + \
5386                         "sort -n | tail -n1") % os.path.join(self.root, VDB_PATH)
5387                 if os.path.exists(cpath):
5388                         cfile=open(cpath, "r")
5389                         try:
5390                                 counter=long(cfile.readline())
5391                         except (ValueError,OverflowError):
5392                                 try:
5393                                         counter = long(commands.getoutput(find_counter).strip())
5394                                         writemsg("!!! COUNTER was corrupted; resetting to value of %d\n" % counter,
5395                                                 noiselevel=-1)
5396                                         changed=1
5397                                 except (ValueError,OverflowError):
5398                                         writemsg("!!! COUNTER data is corrupt in pkg db. The values need to be\n",
5399                                                 noiselevel=-1)
5400                                         writemsg("!!! corrected/normalized so that portage can operate properly.\n",
5401                                                 noiselevel=-1)
5402                                         writemsg("!!! A simple solution is not yet available so try #gentoo on IRC.\n", noiselevel=-1)
5403                                         sys.exit(2)
5404                         cfile.close()
5405                 else:
5406                         try:
5407                                 counter = long(commands.getoutput(find_counter).strip())
5408                                 writemsg("!!! Global counter missing. Regenerated from counter files to: %s\n" % counter,
5409                                         noiselevel=-1)
5410                         except ValueError: # Value Error for long(), probably others for commands.getoutput
5411                                 writemsg("!!! Initializing global counter.\n", noiselevel=-1)
5412                                 counter=long(0)
5413                         changed=1
5414
5415                 if counter < min_counter:
5416                         counter = min_counter+1000
5417                         changed = 1
5418
5419                 if incrementing or changed:
5420
5421                         #increment counter
5422                         counter += 1
5423                         # update new global counter file
5424                         write_atomic(cpath, str(counter))
5425                 return counter
5426
5427 class vartree(object):
5428         "this tree will scan a var/db/pkg database located at root (passed to init)"
5429         def __init__(self, root="/", virtual=None, clone=None, categories=None,
5430                 settings=None):
5431                 if clone:
5432                         self.root       = clone.root[:]
5433                         self.dbapi      = copy.deepcopy(clone.dbapi)
5434                         self.populated  = 1
5435                         self.settings   = config(clone=clone.settings)
5436                 else:
5437                         self.root       = root[:]
5438                         if settings is None:
5439                                 settings = globals()["settings"]
5440                         self.settings = settings # for key_expand calls
5441                         if categories is None:
5442                                 categories = settings.categories
5443                         self.dbapi = vardbapi(self.root, categories=categories,
5444                                 settings=settings, vartree=self)
5445                         self.populated  = 1
5446
5447         def zap(self,mycpv):
5448                 return
5449
5450         def inject(self,mycpv):
5451                 return
5452
5453         def get_provide(self,mycpv):
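                      # Expand the installed package's PROVIDE string, conditional on its
                      # recorded USE flags, into a plain list of virtual cat/pkg keys.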
5454                 myprovides=[]
5455                 mylines = None
5456                 try:
5457                         mylines, myuse = self.dbapi.aux_get(mycpv, ["PROVIDE","USE"])
5458                         if mylines:
5459                                 myuse = myuse.split()
5460                                 mylines = flatten(portage_dep.use_reduce(portage_dep.paren_reduce(mylines), uselist=myuse))
5461                                 for myprovide in mylines:
5462                                         mys = catpkgsplit(myprovide)
5463                                         if not mys:
5464                                                 mys = myprovide.split("/")
5465                                         myprovides += [mys[0] + "/" + mys[1]]
5466                         return myprovides
5467                 except SystemExit, e:
5468                         raise
5469                 except Exception, e:
5470                         mydir = os.path.join(self.root, VDB_PATH, mycpv)
5471                         writemsg("\nParse Error reading PROVIDE and USE in '%s'\n" % mydir,
5472                                 noiselevel=-1)
5473                         if mylines:
5474                                 writemsg("Possibly Invalid: '%s'\n" % str(mylines),
5475                                         noiselevel=-1)
5476                         writemsg("Exception: %s\n\n" % str(e), noiselevel=-1)
5477                         return []
5478
5479         def get_all_provides(self):
5480                 myprovides = {}
5481                 for node in self.getallcpv():
5482                         for mykey in self.get_provide(node):
5483                                 if myprovides.has_key(mykey):
5484                                         myprovides[mykey] += [node]
5485                                 else:
5486                                         myprovides[mykey]  = [node]
5487                 return myprovides
5488
5489         def dep_bestmatch(self,mydep,use_cache=1):
5490                 "compatibility method -- all matches, not just visible ones"
5491                 #mymatch=best(match(dep_expand(mydep,self.dbapi),self.dbapi))
5492                 mymatch = best(self.dbapi.match(
5493                         dep_expand(mydep, mydb=self.dbapi, settings=self.settings),
5494                         use_cache=use_cache))
5495                 if mymatch is None:
5496                         return ""
5497                 else:
5498                         return mymatch
5499
5500         def dep_match(self,mydep,use_cache=1):
5501                 "compatibility method -- we want to see all matches, not just visible ones"
5502                 #mymatch=match(mydep,self.dbapi)
5503                 mymatch=self.dbapi.match(mydep,use_cache=use_cache)
5504                 if mymatch is None:
5505                         return []
5506                 else:
5507                         return mymatch
5508
5509         def exists_specific(self,cpv):
5510                 return self.dbapi.cpv_exists(cpv)
5511
5512         def getallcpv(self):
5513                 """temporary function, probably to be renamed --- Gets a list of all
5514                 category/package-versions installed on the system."""
5515                 return self.dbapi.cpv_all()
5516
5517         def getallnodes(self):
5518                 """new behavior: these are all *unmasked* nodes.  There may or may not be
5519                 masked packages available for the nodes in this list."""
5520                 return self.dbapi.cp_all()
5521
5522         def exists_specific_cat(self,cpv,use_cache=1):
5523                 cpv = key_expand(cpv, mydb=self.dbapi, use_cache=use_cache,
5524                         settings=self.settings)
5525                 a=catpkgsplit(cpv)
5526                 if not a:
5527                         return 0
5528                 mylist=listdir(self.root+VDB_PATH+"/"+a[0],EmptyOnError=1)
5529                 for x in mylist:
5530                         b=pkgsplit(x)
5531                         if not b:
5532                                 self.dbapi.invalidentry(self.root+VDB_PATH+"/"+a[0]+"/"+x)
5533                                 continue
5534                         if a[1]==b[0]:
5535                                 return 1
5536                 return 0
5537
5538         def getebuildpath(self,fullpackage):
5539                 cat,package=fullpackage.split("/")
5540                 return self.root+VDB_PATH+"/"+fullpackage+"/"+package+".ebuild"
5541
5542         def getnode(self,mykey,use_cache=1):
5543                 mykey = key_expand(mykey, mydb=self.dbapi, use_cache=use_cache,
5544                         settings=self.settings)
5545                 if not mykey:
5546                         return []
5547                 mysplit=mykey.split("/")
5548                 mydirlist=listdir(self.root+VDB_PATH+"/"+mysplit[0],EmptyOnError=1)
5549                 returnme=[]
5550                 for x in mydirlist:
5551                         mypsplit=pkgsplit(x)
5552                         if not mypsplit:
5553                                 self.dbapi.invalidentry(self.root+VDB_PATH+"/"+mysplit[0]+"/"+x)
5554                                 continue
5555                         if mypsplit[0]==mysplit[1]:
5556                                 appendme=[mysplit[0]+"/"+x,[mysplit[0],mypsplit[0],mypsplit[1],mypsplit[2]]]
5557                                 returnme.append(appendme)
5558                 return returnme
5559
5560
5561         def getslot(self,mycatpkg):
5562                 "Get a slot for a catpkg; assume it exists."
5563                 try:
5564                         return self.dbapi.aux_get(mycatpkg, ["SLOT"])[0]
5565                 except KeyError:
5566                         return ""
5567
5568         def hasnode(self,mykey,use_cache):
5569                 """Does the particular node (cat/pkg key) exist?"""
5570                 mykey = key_expand(mykey, mydb=self.dbapi, use_cache=use_cache,
5571                         settings=self.settings)
5572                 mysplit=mykey.split("/")
5573                 mydirlist=listdir(self.root+VDB_PATH+"/"+mysplit[0],EmptyOnError=1)
5574                 for x in mydirlist:
5575                         mypsplit=pkgsplit(x)
5576                         if not mypsplit:
5577                                 self.dbapi.invalidentry(self.root+VDB_PATH+"/"+mysplit[0]+"/"+x)
5578                                 continue
5579                         if mypsplit[0]==mysplit[1]:
5580                                 return 1
5581                 return 0
5582
5583         def populate(self):
5584                 self.populated=1
5585
5586 auxdbkeys=[
5587         'DEPEND',    'RDEPEND',   'SLOT',      'SRC_URI',
5588         'RESTRICT',  'HOMEPAGE',  'LICENSE',   'DESCRIPTION',
5589         'KEYWORDS',  'INHERITED', 'IUSE',      'CDEPEND',
5590         'PDEPEND',   'PROVIDE', 'EAPI',
5591         'UNUSED_01', 'UNUSED_02', 'UNUSED_03', 'UNUSED_04',
5592         'UNUSED_05', 'UNUSED_06', 'UNUSED_07',
5593         ]
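     # The order of auxdbkeys matters for positional (flat-list) cache formats,
     # which is why the UNUSED_* placeholders are retained.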
5594 auxdbkeylen=len(auxdbkeys)
5595
5596 def close_portdbapi_caches():
5597         for i in portdbapi.portdbapi_instances:
5598                 i.close_caches()
5599
5600
5601 class portdbapi(dbapi):
5602         """this tree will scan a portage directory located at root (passed to init)"""
5603         portdbapi_instances = []
5604
5605         def __init__(self,porttree_root,mysettings=None):
5606                 portdbapi.portdbapi_instances.append(self)
5607
5608                 if mysettings:
5609                         self.mysettings = mysettings
5610                 else:
5611                         global settings
5612                         self.mysettings = config(clone=settings)
5613
5614                 # This is strictly for use in aux_get() doebuild calls when metadata
5615                 # is generated by the depend phase.  It's safest to use a clone for
5616                 # this purpose because doebuild makes many changes to the config
5617                 # instance that is passed in.
5618                 self.doebuild_settings = config(clone=self.mysettings)
5619
5620                 self.manifestVerifyLevel  = None
5621                 self.manifestVerifier     = None
5622                 self.manifestCache        = {}    # {location: [stat, md5]}
5623                 self.manifestMissingCache = []
5624
5625                 if "gpg" in self.mysettings.features:
5626                         self.manifestVerifyLevel   = portage_gpg.EXISTS
5627                         if "strict" in self.mysettings.features:
5628                                 self.manifestVerifyLevel = portage_gpg.MARGINAL
5629                                 self.manifestVerifier = portage_gpg.FileChecker(self.mysettings["PORTAGE_GPG_DIR"], "gentoo.gpg", minimumTrust=self.manifestVerifyLevel)
5630                         elif "severe" in self.mysettings.features:
5631                                 self.manifestVerifyLevel = portage_gpg.TRUSTED
5632                                 self.manifestVerifier = portage_gpg.FileChecker(self.mysettings["PORTAGE_GPG_DIR"], "gentoo.gpg", requireSignedRing=True, minimumTrust=self.manifestVerifyLevel)
5633                         else:
5634                                 self.manifestVerifier = portage_gpg.FileChecker(self.mysettings["PORTAGE_GPG_DIR"], "gentoo.gpg", minimumTrust=self.manifestVerifyLevel)
5635
5636                 #self.root=settings["PORTDIR"]
5637                 self.porttree_root = os.path.realpath(porttree_root)
5638
5639                 self.depcachedir = self.mysettings.depcachedir[:]
5640
5641                 self.tmpfs = self.mysettings["PORTAGE_TMPFS"]
5642                 if self.tmpfs and not os.path.exists(self.tmpfs):
5643                         self.tmpfs = None
5644                 if self.tmpfs and not os.access(self.tmpfs, os.W_OK):
5645                         self.tmpfs = None
5646                 if self.tmpfs and not os.access(self.tmpfs, os.R_OK):
5647                         self.tmpfs = None
5648
5649                 self.eclassdb = eclass_cache.cache(self.porttree_root,
5650                         overlays=self.mysettings["PORTDIR_OVERLAY"].split())
5651
5652                 self.metadbmodule = self.mysettings.load_best_module("portdbapi.metadbmodule")
5653
5654                 #if the portdbapi is "frozen", then we assume that we can cache everything (that no updates to it are happening)
5655                 self.xcache={}
5656                 self.frozen=0
5657
5658                 self.porttrees = [self.porttree_root] + \
5659                         [os.path.realpath(t) for t in self.mysettings["PORTDIR_OVERLAY"].split()]
5660                 self.auxdbmodule  = self.mysettings.load_best_module("portdbapi.auxdbmodule")
5661                 self.auxdb        = {}
5662                 self._init_cache_dirs()
5663                 # XXX: REMOVE THIS ONCE UNUSED_0 IS YANKED FROM auxdbkeys
5664                 # ~harring
5665                 filtered_auxdbkeys = filter(lambda x: not x.startswith("UNUSED_0"), auxdbkeys)
5666                 if secpass < 1:
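                              # Unprivileged users cannot update the on-disk cache, so overlay
                              # a read-only view of it with a volatile in-memory cache.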
5667                         from cache import metadata_overlay, volatile
5668                         for x in self.porttrees:
5669                                 db_ro = self.auxdbmodule(self.depcachedir, x,
5670                                         filtered_auxdbkeys, gid=portage_gid, readonly=True)
5671                                 self.auxdb[x] = metadata_overlay.database(
5672                                         self.depcachedir, x, filtered_auxdbkeys,
5673                                         gid=portage_gid, db_rw=volatile.database,
5674                                         db_ro=db_ro)
5675                 else:
5676                         for x in self.porttrees:
5677                                 # location, label, auxdbkeys
5678                                 self.auxdb[x] = self.auxdbmodule(
5679                                         self.depcachedir, x, filtered_auxdbkeys, gid=portage_gid)
5680                 # Selectively cache metadata in order to optimize dep matching.
5681                 self._aux_cache_keys = set(["EAPI", "KEYWORDS", "SLOT"])
5682                 self._aux_cache = {}
5683
5684         def _init_cache_dirs(self):
5685                 """Create /var/cache/edb/dep and adjust permissions for the portage
5686                 group."""
5687
5688                 dirmode  = 02070
5689                 filemode =   060
5690                 modemask =    02
5691
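                      # 02070 = setgid + group rwx for directories, 060 = group rw for files,
                      # so members of the portage group can share the depcache.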
5692                 try:
5693                         for mydir in (self.depcachedir,):
5694                                 if portage_util.ensure_dirs(mydir, gid=portage_gid, mode=dirmode, mask=modemask):
5695                                         writemsg("Adjusting permissions recursively: '%s'\n" % mydir,
5696                                                 noiselevel=-1)
5697                                         def onerror(e):
5698                                                 raise # bail out on the first error that occurs during recursion
5699                                         if not apply_recursive_permissions(mydir,
5700                                                 gid=portage_gid, dirmode=dirmode, dirmask=modemask,
5701                                                 filemode=filemode, filemask=modemask, onerror=onerror):
5702                                                 raise portage_exception.OperationNotPermitted(
5703                                                         "Failed to apply recursive permissions for the portage group.")
5704                 except portage_exception.PortageException, e:
5705                         pass
5706
5707         def close_caches(self):
5708                 for x in self.auxdb.keys():
5709                         self.auxdb[x].sync()
5710                 self.auxdb.clear()
5711
5712         def flush_cache(self):
5713                 for x in self.auxdb.values():
5714                         x.sync()
5715
5716         def finddigest(self,mycpv):
5717                 try:
5718                         mydig   = self.findname2(mycpv)[0]
5719                         if not mydig:
5720                                 return ""
5721                         mydigs  = mydig.split("/")[:-1]
5722                         mydig   = "/".join(mydigs)
5723                         mysplit = mycpv.split("/")
5724                 except OSError:
5725                         return ""
5726                 return mydig+"/files/digest-"+mysplit[-1]
5727
5728         def findname(self,mycpv):
5729                 return self.findname2(mycpv)[0]
5730
5731         def findname2(self, mycpv, mytree=None):
5732                 """
5733                 Returns the location of the CPV, and what overlay it was in.
5734                 Searches overlays first, then PORTDIR, so the first matching file can be
5735                 returned immediately.  If we searched PORTDIR first, we would still have
5736                 to exhaustively search every overlay to see whether one of them overrides
5737                 the file.
5738                 """
5739                 if not mycpv:
5740                         return "",0
5741                 mysplit=mycpv.split("/")
5742                 psplit=pkgsplit(mysplit[1])
5743
5744                 if mytree:
5745                         mytrees = [mytree]
5746                 else:
5747                         mytrees = self.porttrees[:]
5748                         mytrees.reverse()
5749                 if psplit:
5750                         for x in mytrees:
5751                                 file=x+"/"+mysplit[0]+"/"+psplit[0]+"/"+mysplit[1]+".ebuild"
5752                                 if os.access(file, os.R_OK):
5753                                         return [file, x]
5754                 return None, 0
5755
5756         def aux_get(self, mycpv, mylist, mytree=None):
5757                 "stub code for returning auxiliary db information, such as SLOT, DEPEND, etc."
5758                 'input: "sys-apps/foo-1.0",["SLOT","DEPEND","HOMEPAGE"]'
5759                 'return: ["0",">=sys-libs/bar-1.0","http://www.foo.com"] or raise KeyError if error'
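                      # Flow: consult the small in-memory cache for frequently requested keys,
                      # then the on-disk auxdb (validated against ebuild and eclass mtimes),
                      # and finally regenerate metadata by running the ebuild's "depend" phase.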
5760                 cache_me = False
5761                 if not mytree and not set(mylist).difference(self._aux_cache_keys):
5762                         aux_cache = self._aux_cache.get(mycpv)
5763                         if aux_cache is not None:
5764                                 return [aux_cache[x] for x in mylist]
5765                         cache_me = True
5766                 global auxdbkeys,auxdbkeylen
5767                 cat,pkg = mycpv.split("/", 1)
5768
5769                 myebuild, mylocation = self.findname2(mycpv, mytree)
5770
5771                 if not myebuild:
5772                         writemsg("!!! aux_get(): ebuild path for '%(cpv)s' not specified:\n" % {"cpv":mycpv},
5773                                 noiselevel=1)
5774                         writemsg("!!!            %s\n" % myebuild, noiselevel=1)
5775                         raise KeyError(mycpv)
5776
5777                 myManifestPath = "/".join(myebuild.split("/")[:-1])+"/Manifest"
5778                 if "gpg" in self.mysettings.features:
5779                         try:
5780                                 mys = portage_gpg.fileStats(myManifestPath)
5781                                 if (myManifestPath in self.manifestCache) and \
5782                                    (self.manifestCache[myManifestPath] == mys):
5783                                         pass
5784                                 elif self.manifestVerifier:
5785                                         if not self.manifestVerifier.verify(myManifestPath):
5786                                                 # Verification failed to meet the desired trust level.
5787                                                 raise portage_exception.UntrustedSignature, "Untrusted Manifest: %(manifest)s" % {"manifest":myManifestPath}
5788
5789                                 if ("severe" in self.mysettings.features) and \
5790                                    (mys != portage_gpg.fileStats(myManifestPath)):
5791                                         raise portage_exception.SecurityViolation, "Manifest changed: %(manifest)s" % {"manifest":myManifestPath}
5792
5793                         except portage_exception.InvalidSignature, e:
5794                                 if ("strict" in self.mysettings.features) or \
5795                                    ("severe" in self.mysettings.features):
5796                                         raise
5797                                 writemsg("!!! INVALID MANIFEST SIGNATURE DETECTED: %(manifest)s\n" % {"manifest":myManifestPath})
5798                         except portage_exception.MissingSignature, e:
5799                                 if ("severe" in self.mysettings.features):
5800                                         raise
5801                                 if ("strict" in self.mysettings.features):
5802                                         if myManifestPath not in self.manifestMissingCache:
5803                                                 writemsg("!!! WARNING: Missing signature in: %(manifest)s\n" % {"manifest":myManifestPath})
5804                                                 self.manifestMissingCache.insert(0,myManifestPath)
5805                         except (OSError,portage_exception.FileNotFound), e:
5806                                 if ("strict" in self.mysettings.features) or \
5807                                    ("severe" in self.mysettings.features):
5808                                         raise portage_exception.SecurityViolation, "Error in verification of signatures: %(errormsg)s" % {"errormsg":str(e)}
5809                                 writemsg("!!! Manifest is missing or inaccessible: %(manifest)s\n" % {"manifest":myManifestPath},
5810                                         noiselevel=-1)
5811
5812
5813                 if os.access(myebuild, os.R_OK):
5814                         emtime=os.stat(myebuild)[stat.ST_MTIME]
5815                 else:
5816                         writemsg("!!! aux_get(): ebuild for '%(cpv)s' does not exist at:\n" % {"cpv":mycpv},
5817                                 noiselevel=-1)
5818                         writemsg("!!!            %s\n" % myebuild,
5819                                 noiselevel=-1)
5820                         raise KeyError(mycpv)
5821
5822                 try:
5823                         mydata = self.auxdb[mylocation][mycpv]
5824                         if emtime != long(mydata.get("_mtime_", 0)):
5825                                 doregen = True
5826                         elif len(mydata.get("_eclasses_", [])) > 0:
5827                                 doregen = not self.eclassdb.is_eclass_data_valid(mydata["_eclasses_"])
5828                         else:
5829                                 doregen = False
5830                                 
5831                 except KeyError:
5832                         doregen = True
5833                 except CacheError:
5834                         doregen = True
5835                         try:                            del self.auxdb[mylocation][mycpv]
5836                         except KeyError:        pass
5837
5838                 writemsg("auxdb is valid: "+str(not doregen)+" "+str(pkg)+"\n", 2)
5839
5840                 if doregen:
5841                         writemsg("doregen: %s %s\n" % (doregen,mycpv), 2)
5842                         writemsg("Generating cache entry(0) for: "+str(myebuild)+"\n",1)
5843
5844                         self.doebuild_settings.reset()
5845                         mydata = {}
5846                         myret = doebuild(myebuild, "depend",
5847                                 self.doebuild_settings["ROOT"], self.doebuild_settings,
5848                                 dbkey=mydata, tree="porttree", mydbapi=self)
5849                         if myret != os.EX_OK:
5850                                 raise KeyError(mycpv)
5851
5852                         if "EAPI" not in mydata or not mydata["EAPI"].strip():
5853                                 mydata["EAPI"] = "0"
5854
5855                         if not eapi_is_supported(mydata["EAPI"]):
5856                                 # if newer version, wipe everything and negate eapi
5857                                 eapi = mydata["EAPI"]
5858                                 mydata = {}
5859                                 map(lambda x:mydata.setdefault(x, ""), auxdbkeys)
5860                                 mydata["EAPI"] = "-"+eapi
5861
5862                         if mydata.get("INHERITED", False):
5863                                 mydata["_eclasses_"] = self.eclassdb.get_eclass_data(mydata["INHERITED"].split())
5864                         else:
5865                                 mydata["_eclasses_"] = {}
5866                         
5867                         del mydata["INHERITED"]
5868
5869                         mydata["_mtime_"] = emtime
5870
5871                         self.auxdb[mylocation][mycpv] = mydata
5872
5873                 if not mydata.setdefault("EAPI", "0"):
5874                         mydata["EAPI"] = "0"
5875
5876                 #finally, we look at our internal cache entry and return the requested data.
5877                 returnme = []
5878                 for x in mylist:
5879                         if x == "INHERITED":
5880                                 returnme.append(' '.join(mydata.get("_eclasses_", {}).keys()))
5881                         else:
5882                                 returnme.append(mydata.get(x,""))
5883
5884                 if cache_me:
5885                         aux_cache = {}
5886                         for x in self._aux_cache_keys:
5887                                 aux_cache[x] = mydata.get(x, "")
5888                         self._aux_cache[mycpv] = aux_cache
5889
5890                 return returnme
5891
5892         def getfetchlist(self, mypkg, useflags=None, mysettings=None, all=0, mytree=None):
5893                 if mysettings is None:
5894                         mysettings = self.mysettings
5895                 try:
5896                         myuris = self.aux_get(mypkg, ["SRC_URI"], mytree=mytree)[0]
5897                 except KeyError:
5898                         print red("getfetchlist():")+" aux_get() error reading "+mypkg+"; aborting."
5899                         sys.exit(1)
5900
5901                 if useflags is None:
5902                         useflags = mysettings["USE"].split()
5903
5904                 myurilist = portage_dep.paren_reduce(myuris)
5905                 myurilist = portage_dep.use_reduce(myurilist,uselist=useflags,matchall=all)
5906                 newuris = flatten(myurilist)
5907
5908                 myfiles = []
5909                 for x in newuris:
5910                         mya = os.path.basename(x)
5911                         if not mya in myfiles:
5912                                 myfiles.append(mya)
5913                 return [newuris, myfiles]
5914
5915         def getfetchsizes(self,mypkg,useflags=None,debug=0):
5916                 # returns a filename:size dictionary of remaining downloads
5917                 myebuild = self.findname(mypkg)
5918                 pkgdir = os.path.dirname(myebuild)
5919                 mf = Manifest(pkgdir, self.mysettings["DISTDIR"])
5920                 checksums = mf.getDigests()
5921                 if not checksums:
5922                         if debug: print "[empty/missing/bad digest]: "+mypkg
5923                         return None
5924                 filesdict={}
5925                 if useflags is None:
5926                         myuris, myfiles = self.getfetchlist(mypkg,all=1)
5927                 else:
5928                         myuris, myfiles = self.getfetchlist(mypkg,useflags=useflags)
5929                 #XXX: maybe this should be improved: take partial downloads
5930                 # into account? check checksums?
5931                 for myfile in myfiles:
5932                         if myfile not in checksums:
5933                                 if debug:
5934                                         writemsg("[bad digest]: missing %s for %s\n" % (myfile, mypkg))
5935                                 continue
5936                         file_path = os.path.join(self.mysettings["DISTDIR"], myfile)
5937                         mystat = None
5938                         try:
5939                                 mystat = os.stat(file_path)
5940                         except OSError, e:
5941                                 pass
5942                         if mystat is None:
5943                                 existing_size = 0
5944                         else:
5945                                 existing_size = mystat.st_size
5946                         remaining_size = int(checksums[myfile]["size"]) - existing_size
5947                         if remaining_size > 0:
5948                                 # Assume the download is resumable.
5949                                 filesdict[myfile] = remaining_size
5950                         elif remaining_size < 0:
5951                                 # The existing file is too large and therefore corrupt.
5952                                 filesdict[myfile] = int(checksums[myfile]["size"])
5953                 return filesdict
5954
5955         def fetch_check(self, mypkg, useflags=None, mysettings=None, all=False):
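                      # Returns True only if every distfile required by mypkg (for the given
                      # USE flags) is present in DISTDIR and passes Manifest verification.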
5956                 if not useflags:
5957                         if mysettings:
5958                                 useflags = mysettings["USE"].split()
5959                 myuri, myfiles = self.getfetchlist(mypkg, useflags=useflags, mysettings=mysettings, all=all)
5960                 myebuild = self.findname(mypkg)
5961                 pkgdir = os.path.dirname(myebuild)
5962                 mf = Manifest(pkgdir, self.mysettings["DISTDIR"])
5963                 mysums = mf.getDigests()
5964
5965                 failures = {}
5966                 for x in myfiles:
5967                         if not mysums or x not in mysums:
5968                                 ok     = False
5969                                 reason = "digest missing"
5970                         else:
5971                                 try:
5972                                         ok, reason = portage_checksum.verify_all(
5973                                                 os.path.join(self.mysettings["DISTDIR"], x), mysums[x])
5974                                 except portage_exception.FileNotFound, e:
5975                                         ok = False
5976                                         reason = "File Not Found: '%s'" % str(e)
5977                         if not ok:
5978                                 failures[x] = reason
5979                 if failures:
5980                         return False
5981                 return True
5982
5983         def getsize(self,mypkg,useflags=None,debug=0):
5984                 # returns the total size of remaining downloads
5985                 #
5986                 # we use getfetchsizes() now, so this function would be obsoleted
5987                 #
5988                 filesdict=self.getfetchsizes(mypkg,useflags=useflags,debug=debug)
5989                 if filesdict is None:
5990                         return "[empty/missing/bad digest]"
5991                 mysize=0
5992                 for myfile in filesdict.keys():
5993                         mysize+=filesdict[myfile]
5994                 return mysize
5995
5996         def cpv_exists(self,mykey):
5997                 "Tells us whether an actual ebuild exists on disk (no masking)"
5998                 cps2=mykey.split("/")
5999                 cps=catpkgsplit(mykey,silent=0)
6000                 if not cps:
6001                         #invalid cat/pkg-v
6002                         return 0
6003                 if self.findname(cps[0]+"/"+cps2[1]):
6004                         return 1
6005                 else:
6006                         return 0
6007
6008         def cp_all(self):
6009                 "returns a list of all keys in our tree"
6010                 d={}
6011                 for x in self.mysettings.categories:
6012                         for oroot in self.porttrees:
6013                                 for y in listdir(oroot+"/"+x,EmptyOnError=1,ignorecvs=1,dirsonly=1):
6014                                         d[x+"/"+y] = None
6015                 l = d.keys()
6016                 l.sort()
6017                 return l
6018
6019         def p_list(self,mycp):
6020                 d={}
6021                 for oroot in self.porttrees:
6022                         for x in listdir(oroot+"/"+mycp,EmptyOnError=1,ignorecvs=1):
6023                                 if x[-7:]==".ebuild":
6024                                         d[x[:-7]] = None
6025                 return d.keys()
6026
6027         def cp_list(self, mycp, use_cache=1, mytree=None):
6028                 mysplit=mycp.split("/")
6029                 d={}
6030                 if mytree:
6031                         mytrees = [mytree]
6032                 else:
6033                         mytrees = self.porttrees
6034                 for oroot in mytrees:
6035                         for x in listdir(oroot+"/"+mycp,EmptyOnError=1,ignorecvs=1):
6036                                 if x.endswith(".ebuild"):
6037                                         pf = x[:-7]
6038                                         ps = pkgsplit(pf)
6039                                         if not ps:
6040                                                 writemsg("\nInvalid ebuild name: %s\n" % \
6041                                                         os.path.join(oroot, mycp, x), noiselevel=-1)
6042                                                 continue
6043                                         d[mysplit[0]+"/"+pf] = None
6044                 return d.keys()
6045
6046         def freeze(self):
6047                 for x in ["list-visible","bestmatch-visible","match-visible","match-all"]:
6048                         self.xcache[x]={}
6049                 self.frozen=1
6050
6051         def melt(self):
6052                 self.xcache={}
6053                 self.frozen=0
6054
6055         def xmatch(self,level,origdep,mydep=None,mykey=None,mylist=None):
6056                 "caching match function; very tricky stuff"
6057                 #if no updates are being made to the tree, we can consult our xcache...
6058                 if self.frozen:
6059                         try:
6060                                 return self.xcache[level][origdep][:]
6061                         except KeyError:
6062                                 pass
6063
6064                 if not mydep:
6065                         #this stuff only runs on first call of xmatch()
6066                         #create mydep, mykey from origdep
6067                         mydep = dep_expand(origdep, mydb=self, settings=self.mysettings)
6068                         mykey=dep_getkey(mydep)
6069
6070                 if level=="list-visible":
6071                         #a list of all visible packages, not called directly (just by xmatch())
6072                         #myval=self.visible(self.cp_list(mykey))
6073                         myval=self.gvisible(self.visible(self.cp_list(mykey)))
6074                 elif level=="bestmatch-visible":
6075                         #dep match -- best match of all visible packages
6076                         myval=best(self.xmatch("match-visible",None,mydep=mydep,mykey=mykey))
6077                         #get all visible matches (from xmatch()), then choose the best one
6078                 elif level=="bestmatch-list":
6079                         #dep match -- find best match but restrict search to sublist
6080                         myval=best(match_from_list(mydep,mylist))
6081                         #no point in calling xmatch again since we're not caching list deps
6082                 elif level=="match-list":
6083                         #dep match -- find all matches but restrict search to sublist (used in 2nd half of visible())
6084                         myval=match_from_list(mydep,mylist)
6085                 elif level=="match-visible":
6086                         #dep match -- find all visible matches
6087                         myval = match_from_list(mydep,
6088                                 self.xmatch("list-visible", mykey, mydep=mykey, mykey=mykey))
6089                         #get all visible packages, then get the matching ones
6090                 elif level=="match-all":
6091                         #match *all* visible *and* masked packages
6092                         myval=match_from_list(mydep,self.cp_list(mykey))
6093                 else:
6094                         print "ERROR: xmatch doesn't handle",level,"query!"
6095                         raise KeyError
6096                 myslot = portage_dep.dep_getslot(mydep)
6097                 if myslot is not None:
6098                         slotmatches = []
6099                         for cpv in myval:
6100                                 try:
6101                                         if self.aux_get(cpv, ["SLOT"])[0] == myslot:
6102                                                 slotmatches.append(cpv)
6103                                 except KeyError:
6104                                         pass # ebuild masked by corruption
6105                         myval = slotmatches
6106                 if self.frozen and (level not in ["match-list","bestmatch-list"]):
6107                         self.xcache[level][mydep]=myval
6108                         if origdep and origdep != mydep:
6109                                 self.xcache[level][origdep] = myval
6110                 return myval[:]
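        # Rough usage sketch of the level argument (hypothetical instance name
        # "portdb" and atoms; return shapes follow the branches above):
        #   portdb.xmatch("match-all", ">=dev-lang/python-2.3")      # masked + unmasked cpvs
        #   portdb.xmatch("match-visible", ">=dev-lang/python-2.3")  # mask/keyword-filtered cpvs
        #   portdb.xmatch("bestmatch-visible", "dev-lang/python")    # single best cpv, or ""
        # While frozen, results for the non-"-list" levels are served from self.xcache.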
6111
6112         def match(self,mydep,use_cache=1):
6113                 return self.xmatch("match-visible",mydep)
6114
6115         def visible(self,mylist):
6116                 """two functions in one.  Accepts a list of cpv values and uses the package.mask *and*
6117                 packages file to remove invisible entries, returning remaining items.  This function assumes
6118                 that all entries in mylist have the same category and package name."""
6119                 if (mylist is None) or (len(mylist)==0):
6120                         return []
6121                 newlist=mylist[:]
6122                 #first, we mask out packages in the package.mask file
6123                 mykey=newlist[0]
6124                 cpv=catpkgsplit(mykey)
6125                 if not cpv:
6126                         #invalid cat/pkg-v
6127                         print "visible(): invalid cat/pkg-v:",mykey
6128                         return []
6129                 mycp=cpv[0]+"/"+cpv[1]
6130                 maskdict=self.mysettings.pmaskdict
6131                 unmaskdict=self.mysettings.punmaskdict
6132                 if maskdict.has_key(mycp):
6133                         for x in maskdict[mycp]:
6134                                 mymatches=self.xmatch("match-all",x)
6135                                 if mymatches is None:
6136                                         #error in package.mask file; print warning and continue:
6137                                         print "visible(): package.mask entry \""+x+"\" is invalid, ignoring..."
6138                                         continue
6139                                 for y in mymatches:
6140                                         unmask=0
6141                                         if unmaskdict.has_key(mycp):
6142                                                 for z in unmaskdict[mycp]:
6143                                                         mymatches_unmask=self.xmatch("match-all",z)
6144                                                         if y in mymatches_unmask:
6145                                                                 unmask=1
6146                                                                 break
6147                                         if unmask==0:
6148                                                 try:
6149                                                         newlist.remove(y)
6150                                                 except ValueError:
6151                                                         pass
6152
6153                 revmaskdict=self.mysettings.prevmaskdict
6154                 if revmaskdict.has_key(mycp):
6155                         for x in revmaskdict[mycp]:
6156                                 #important: only match against the still-unmasked entries...
6157                                 #notice how we pass "newlist" to the xmatch() call below....
6158                                 #Without this, ~ deps in the packages files are broken.
6159                                 mymatches=self.xmatch("match-list",x,mylist=newlist)
6160                                 if mymatches is None:
6161                                         #error in packages file; print warning and continue:
6162                                         print "emerge: visible(): profile packages entry \""+x+"\" is invalid, ignoring..."
6163                                         continue
6164                                 pos=0
6165                                 while pos<len(newlist):
6166                                         if newlist[pos] not in mymatches:
6167                                                 del newlist[pos]
6168                                         else:
6169                                                 pos += 1
6170                 return newlist
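        # Sketch of the masking flow above (hypothetical entries): a package.mask
        # atom such as ">=sys-apps/foo-2" removes matching cpvs from newlist unless
        # a package.unmask atom also matches them; afterwards any profile "packages"
        # entries for the same cp act as a whitelist over what remains.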
6171
6172         def gvisible(self,mylist):
6173                 "strip out group-masked (not in current group) entries"
6174
6175                 if mylist is None:
6176                         return []
6177                 newlist=[]
6178
6179                 accept_keywords = self.mysettings["ACCEPT_KEYWORDS"].split()
6180                 pkgdict = self.mysettings.pkeywordsdict
6181                 for mycpv in mylist:
6182                         try:
6183                                 keys, eapi = self.aux_get(mycpv, ["KEYWORDS", "EAPI"])
6184                         except KeyError:
6185                                 continue
6186                         except portage_exception.PortageException, e:
6187                                 writemsg("!!! Error: aux_get('%s', ['KEYWORDS', 'EAPI'])\n" % \
6188                                         mycpv, noiselevel=-1)
6189                                 writemsg("!!! %s\n" % str(e), noiselevel=-1)
6190                                 del e
6191                                 continue
6192                         mygroups=keys.split()
6193                         # Repoman may modify this attribute as necessary.
6194                         pgroups = accept_keywords[:]
6195                         match=0
6196                         cp = dep_getkey(mycpv)
6197                         if pkgdict.has_key(cp):
6198                                 matches = match_to_list(mycpv, pkgdict[cp].keys())
6199                                 for atom in matches:
6200                                         pgroups.extend(pkgdict[cp][atom])
6201                                 if matches:
6202                                         inc_pgroups = []
6203                                         for x in pgroups:
6204                                                 # The -* special case should be removed once the tree 
6205                                                 # is clean of KEYWORDS=-* crap
6206                                                 if x != "-*" and x.startswith("-"):
6207                                                         try:
6208                                                                 inc_pgroups.remove(x[1:])
6209                                                         except ValueError:
6210                                                                 pass
6211                                                 if x not in inc_pgroups:
6212                                                         inc_pgroups.append(x)
6213                                         pgroups = inc_pgroups
6214                                         del inc_pgroups
6215                         hasstable = False
6216                         hastesting = False
6217                         for gp in mygroups:
6218                                 if gp=="*":
6219                                         writemsg("--- WARNING: Package '%s' uses '*' keyword.\n" % mycpv,
6220                                                 noiselevel=-1)
6221                                         match=1
6222                                         break
6223                                 elif gp in pgroups:
6224                                         match=1
6225                                         break
6226                                 elif gp[0] == "~":
6227                                         hastesting = True
6228                                 elif gp[0] != "-":
6229                                         hasstable = True
6230                         if not match and ((hastesting and "~*" in pgroups) or (hasstable and "*" in pgroups) or "**" in pgroups):
6231                                 match=1
6232                         if match and eapi_is_supported(eapi):
6233                                 newlist.append(mycpv)
6234                 return newlist
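        # Sketch of the keyword acceptance above (hypothetical values):
        #   ACCEPT_KEYWORDS="x86",      KEYWORDS="~x86 ppc" -> filtered out
        #   ACCEPT_KEYWORDS="x86 ~x86", KEYWORDS="~x86"     -> kept
        # A package.keywords line like "dev-foo/bar ~x86" extends pgroups for that
        # cp only; "**" accepts anything, "~*" any testing and "*" any stable keyword.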
6235
6236 class binarytree(object):
6237         "this tree scans for a list of all packages available in PKGDIR"
6238         def __init__(self, root, pkgdir, virtual=None, settings=None, clone=None):
6239                 if clone:
6240                         # XXX This isn't cloning. It's an instance of the same thing.
6241                         self.root=clone.root
6242                         self.pkgdir=clone.pkgdir
6243                         self.dbapi=clone.dbapi
6244                         self.populated=clone.populated
6245                         self.tree=clone.tree
6246                         self.remotepkgs=clone.remotepkgs
6247                         self.invalids=clone.invalids
6248                         self.settings = clone.settings
6249                 else:
6250                         self.root=root
6251                         #self.pkgdir=settings["PKGDIR"]
6252                         self.pkgdir = normalize_path(pkgdir)
6253                         self.dbapi = bindbapi(self, settings=settings)
6254                         self.populated=0
6255                         self.tree={}
6256                         self.remotepkgs={}
6257                         self.invalids=[]
6258                         self.settings = settings
6259                         self._pkg_paths = {}
6260
6261         def move_ent(self,mylist):
6262                 if not self.populated:
6263                         self.populate()
6264                 origcp=mylist[1]
6265                 newcp=mylist[2]
6266                 # sanity check
6267                 for cp in [origcp,newcp]:
6268                         if not (isvalidatom(cp) and isjustname(cp)):
6269                                 raise portage_exception.InvalidPackageName(cp)
6270                 origcat = origcp.split("/")[0]
6271                 mynewcat=newcp.split("/")[0]
6272                 origmatches=self.dbapi.cp_list(origcp)
6273                 if not origmatches:
6274                         return
6275                 for mycpv in origmatches:
6276
6277                         mycpsplit=catpkgsplit(mycpv)
6278                         mynewcpv=newcp+"-"+mycpsplit[2]
6279                         if mycpsplit[3]!="r0":
6280                                 mynewcpv += "-"+mycpsplit[3]
6281                         myoldpkg=mycpv.split("/")[1]
6282                         mynewpkg=mynewcpv.split("/")[1]
6283
6284                         if (mynewpkg != myoldpkg) and os.path.exists(self.getname(mynewcpv)):
6285                                 writemsg("!!! Cannot update binary: Destination exists.\n",
6286                                         noiselevel=-1)
6287                                 writemsg("!!! "+mycpv+" -> "+mynewcpv+"\n", noiselevel=-1)
6288                                 continue
6289
6290                         tbz2path=self.getname(mycpv)
6291                         if os.path.exists(tbz2path) and not os.access(tbz2path,os.W_OK):
6292                                 writemsg("!!! Cannot update readonly binary: "+mycpv+"\n",
6293                                         noiselevel=-1)
6294                                 continue
6295
6296                         #print ">>> Updating data in:",mycpv
6297                         writemsg_stdout("%")
6298                         mytbz2 = xpak.tbz2(tbz2path)
6299                         mydata = mytbz2.get_data()
6300                         updated_items = update_dbentries([mylist], mydata)
6301                         mydata.update(updated_items)
6302                         mydata["CATEGORY"] = mynewcat+"\n"
6303                         if mynewpkg != myoldpkg:
6304                                 mydata[mynewpkg+".ebuild"] = mydata[myoldpkg+".ebuild"]
6305                                 del mydata[myoldpkg+".ebuild"]
6306                                 mydata["PF"] = mynewpkg + "\n"
6307                         mytbz2.recompose_mem(xpak.xpak_mem(mydata))
6308
6309                         self.dbapi.cpv_remove(mycpv)
6310                         del self._pkg_paths[mycpv]
6311                         new_path = self.getname(mynewcpv)
6312                         self._pkg_paths[mynewcpv] = os.path.join(
6313                                 *new_path.split(os.path.sep)[-2:])
6314                         if new_path != tbz2path:
6315                                 try:
6316                                         os.makedirs(os.path.dirname(new_path))
6317                                 except OSError, e:
6318                                         if e.errno != errno.EEXIST:
6319                                                 raise
6320                                         del e
6321                                 os.rename(tbz2path, new_path)
6322                                 self._remove_symlink(mycpv)
6323                                 if new_path.split(os.path.sep)[-2] == "All":
6324                                         self._create_symlink(mynewcpv)
6325                         self.dbapi.cpv_inject(mynewcpv)
6326
6327                 return 1
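        # Sketch of a "move" update entry as handled above (hypothetical names):
        #   bintree.move_ent(["move", "app-misc/oldname", "app-misc/newname"])
        # rewrites CATEGORY (and PF plus the stored ebuild name when the package
        # name changes) inside each matching tbz2 and moves the file into place.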
6328
6329         def _remove_symlink(self, cpv):
6330                 """Remove a ${PKGDIR}/${CATEGORY}/${PF}.tbz2 symlink and also remove
6331                 the ${PKGDIR}/${CATEGORY} directory if empty.  The file will not be
6332                 removed if os.path.islink() returns False."""
6333                 mycat, mypkg = catsplit(cpv)
6334                 mylink = os.path.join(self.pkgdir, mycat, mypkg + ".tbz2")
6335                 if os.path.islink(mylink):
6336                         """Only remove it if it's really a link so that this method never
6337                         removes a real package that was placed here to avoid a collision."""
6338                         os.unlink(mylink)
6339                 try:
6340                         os.rmdir(os.path.join(self.pkgdir, mycat))
6341                 except OSError, e:
6342                         if e.errno not in (errno.ENOENT,
6343                                 errno.ENOTEMPTY, errno.EEXIST):
6344                                 raise
6345                         del e
6346
6347         def _create_symlink(self, cpv):
6348                 """Create a ${PKGDIR}/${CATEGORY}/${PF}.tbz2 symlink (and
6349                 ${PKGDIR}/${CATEGORY} directory, if necessary).  Any file that may
6350                 exist in the location of the symlink will first be removed."""
6351                 mycat, mypkg = catsplit(cpv)
6352                 full_path = os.path.join(self.pkgdir, mycat, mypkg + ".tbz2")
6353                 try:
6354                         os.makedirs(os.path.dirname(full_path))
6355                 except OSError, e:
6356                         if e.errno != errno.EEXIST:
6357                                 raise
6358                         del e
6359                 try:
6360                         os.unlink(full_path)
6361                 except OSError, e:
6362                         if e.errno != errno.ENOENT:
6363                                 raise
6364                         del e
6365                 os.symlink(os.path.join("..", "All", mypkg + ".tbz2"), full_path)
6366
6367         def move_slot_ent(self, mylist):
6368                 if not self.populated:
6369                         self.populate()
6370                 pkg=mylist[1]
6371                 origslot=mylist[2]
6372                 newslot=mylist[3]
6373                 
6374                 if not isvalidatom(pkg):
6375                         raise portage_exception.InvalidAtom(pkg)
6376                 
6377                 origmatches=self.dbapi.match(pkg)
6378                 if not origmatches:
6379                         return
6380                 for mycpv in origmatches:
6381                         mycpsplit=catpkgsplit(mycpv)
6382                         myoldpkg=mycpv.split("/")[1]
6383                         tbz2path=self.getname(mycpv)
6384                         if os.path.exists(tbz2path) and not os.access(tbz2path,os.W_OK):
6385                                 writemsg("!!! Cannot update readonly binary: "+mycpv+"\n",
6386                                         noiselevel=-1)
6387                                 continue
6388
6389                         #print ">>> Updating data in:",mycpv
6390                         mytbz2 = xpak.tbz2(tbz2path)
6391                         mydata = mytbz2.get_data()
6392
6393                         slot = mydata["SLOT"]
6394                         if (not slot):
6395                                 continue
6396
6397                         if (slot.strip() != origslot):
6398                                 continue
6399
6400                         writemsg_stdout("S")
6401                         mydata["SLOT"] = newslot+"\n"
6402                         mytbz2.recompose_mem(xpak.xpak_mem(mydata))
6403                 return 1
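        # Sketch of a "slotmove" update entry as handled above (hypothetical atom):
        #   bintree.move_slot_ent(["slotmove", "<dev-libs/foo-2", "0", "2"])
        # rewrites SLOT from "0" to "2" inside every matching binary package.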
6404
6405         def update_ents(self, update_iter):
6406                 if len(update_iter) == 0:
6407                         return
6408                 if not self.populated:
6409                         self.populate()
6410
6411                 for mycpv in self.dbapi.cp_all():
6412                         tbz2path=self.getname(mycpv)
6413                         if os.path.exists(tbz2path) and not os.access(tbz2path,os.W_OK):
6414                                 writemsg("!!! Cannot update readonly binary: "+mycpv+"\n",
6415                                         noiselevel=-1)
6416                                 continue
6417                         #print ">>> Updating binary data:",mycpv
6418                         writemsg_stdout("*")
6419                         mytbz2 = xpak.tbz2(tbz2path)
6420                         mydata = mytbz2.get_data()
6421                         updated_items = update_dbentries(update_iter, mydata)
6422                         if len(updated_items) > 0:
6423                                 mydata.update(updated_items)
6424                                 mytbz2.recompose_mem(xpak.xpak_mem(mydata))
6425                 return 1
6426
6427         def prevent_collision(self, cpv):
6428                 """Make sure that the file location ${PKGDIR}/All/${PF}.tbz2 is safe to
6429                 use for a given cpv.  If a collision will occur with an existing
6430                 package from another category, the existing package will be bumped to
6431                 ${PKGDIR}/${CATEGORY}/${PF}.tbz2 so that both can coexist."""
6432                 full_path = self.getname(cpv)
6433                 if "All" == full_path.split(os.path.sep)[-2]:
6434                         return
6435                 """Move a colliding package if it exists.  Code below this point only
6436                 executes in rare cases."""
6437                 mycat, mypkg = catsplit(cpv)
6438                 myfile = mypkg + ".tbz2"
6439                 mypath = os.path.join("All", myfile)
6440                 dest_path = os.path.join(self.pkgdir, mypath)
6441                 if os.path.exists(dest_path):
6442                         # For invalid packages, other_cat could be None.
6443                         other_cat = xpak.tbz2(dest_path).getfile("CATEGORY")
6444                         if other_cat:
6445                                 other_cat = other_cat.strip()
6446                                 self._move_from_all(other_cat + "/" + mypkg)
6447                 """The file may or may not exist. Move it if necessary and update
6448                 internal state for future calls to getname()."""
6449                 self._move_to_all(cpv)
6450
6451         def _move_to_all(self, cpv):
6452                 """If the file exists, move it.  Whether or not it exists, update state
6453                 for future getname() calls."""
6454                 mycat, mypkg = catsplit(cpv)
6455                 myfile = mypkg + ".tbz2"
6456                 src_path = os.path.join(self.pkgdir, mycat, myfile)
6457                 try:
6458                         mystat = os.lstat(src_path)
6459                 except OSError, e:
6460                         mystat = None
6461                 if mystat and stat.S_ISREG(mystat.st_mode):
6462                         try:
6463                                 os.makedirs(os.path.join(self.pkgdir, "All"))
6464                         except OSError, e:
6465                                 if e.errno != errno.EEXIST:
6466                                         raise
6467                                 del e
6468                         os.rename(src_path, os.path.join(self.pkgdir, "All", myfile))
6469                         self._create_symlink(cpv)
6470                 self._pkg_paths[cpv] = os.path.join("All", myfile)
6471
6472         def _move_from_all(self, cpv):
6473                 """Move a package from ${PKGDIR}/All/${PF}.tbz2 to
6474                 ${PKGDIR}/${CATEGORY}/${PF}.tbz2 and update state for future getname() calls."""
6475                 self._remove_symlink(cpv)
6476                 mycat, mypkg = catsplit(cpv)
6477                 myfile = mypkg + ".tbz2"
6478                 mypath = os.path.join(mycat, myfile)
6479                 dest_path = os.path.join(self.pkgdir, mypath)
6480                 try:
6481                         os.makedirs(os.path.dirname(dest_path))
6482                 except OSError, e:
6483                         if e.errno != errno.EEXIST:
6484                                 raise
6485                         del e
6486                 os.rename(os.path.join(self.pkgdir, "All", myfile), dest_path)
6487                 self._pkg_paths[cpv] = mypath
6488
6489         def populate(self, getbinpkgs=0,getbinpkgsonly=0):
6490                 "populates the binarytree"
6491                 if (not os.path.isdir(self.pkgdir) and not getbinpkgs):
6492                         return 0
6493                 if (not os.path.isdir(self.pkgdir+"/All") and not getbinpkgs):
6494                         return 0
6495
6496                 if not getbinpkgsonly:
6497                         pkg_paths = {}
6498                         dirs = listdir(self.pkgdir, dirsonly=True, EmptyOnError=True)
6499                         if "All" in dirs:
6500                                 dirs.remove("All")
6501                         dirs.sort()
6502                         dirs.insert(0, "All")
6503                         for mydir in dirs:
6504                                 for myfile in listdir(os.path.join(self.pkgdir, mydir)):
6505                                         if not myfile.endswith(".tbz2"):
6506                                                 continue
6507                                         mypath = os.path.join(mydir, myfile)
6508                                         full_path = os.path.join(self.pkgdir, mypath)
6509                                         if os.path.islink(full_path):
6510                                                 continue
6511                                         mytbz2 = xpak.tbz2(full_path)
6512                                         # For invalid packages, mycat could be None.
6513                                         mycat = mytbz2.getfile("CATEGORY")
6514                                         mypf = mytbz2.getfile("PF")
6515                                         mypkg = myfile[:-5]
6516                                         if not mycat or not mypf:
6517                                                 #old-style or corrupt package
6518                                                 writemsg("!!! Invalid binary package: '%s'\n" % full_path,
6519                                                         noiselevel=-1)
6520                                                 writemsg("!!! This binary package is not " + \
6521                                                         "recoverable and should be deleted.\n",
6522                                                         noiselevel=-1)
6523                                                 self.invalids.append(mypkg)
6524                                                 continue
6525                                         mycat = mycat.strip()
6526                                         if mycat != mydir and mydir != "All":
6527                                                 continue
6528                                         if mypkg != mypf.strip():
6529                                                 continue
6530                                         mycpv = mycat + "/" + mypkg
6531                                         if mycpv in pkg_paths:
6532                                                 # All is first, so it's preferred.
6533                                                 continue
6534                                         pkg_paths[mycpv] = mypath
6535                                         self.dbapi.cpv_inject(mycpv)
6536                         self._pkg_paths = pkg_paths
6537
6538                 if getbinpkgs and not self.settings["PORTAGE_BINHOST"]:
6539                         writemsg(red("!!! PORTAGE_BINHOST unset, but use is requested.\n"),
6540                                 noiselevel=-1)
6541
6542                 if getbinpkgs and \
6543                         self.settings["PORTAGE_BINHOST"] and not self.remotepkgs:
6544                         try:
6545                                 chunk_size = long(self.settings["PORTAGE_BINHOST_CHUNKSIZE"])
6546                                 if chunk_size < 8:
6547                                         chunk_size = 8
6548                         except (ValueError, KeyError):
6549                                 chunk_size = 3000
6550
6551                         writemsg(green("Fetching binary packages info...\n"))
6552                         self.remotepkgs = getbinpkg.dir_get_metadata(
6553                                 self.settings["PORTAGE_BINHOST"], chunk_size=chunk_size)
6554                         writemsg(green("  -- DONE!\n\n"))
6555
6556                         for mypkg in self.remotepkgs.keys():
6557                                 if not self.remotepkgs[mypkg].has_key("CATEGORY"):
6558                                         #old-style or corrupt package
6559                                         writemsg("!!! Invalid remote binary package: "+mypkg+"\n",
6560                                                 noiselevel=-1)
6561                                         del self.remotepkgs[mypkg]
6562                                         continue
6563                                 mycat=self.remotepkgs[mypkg]["CATEGORY"].strip()
6564                                 fullpkg=mycat+"/"+mypkg[:-5]
6565                                 mykey=dep_getkey(fullpkg)
6566                                 try:
6567                                         # invalid tbz2's can hurt things.
6568                                         #print "cpv_inject("+str(fullpkg)+")"
6569                                         self.dbapi.cpv_inject(fullpkg)
6570                                         #print "  -- Injected"
6571                                 except SystemExit, e:
6572                                         raise
6573                                 except:
6574                                         writemsg("!!! Failed to inject remote binary package:"+str(fullpkg)+"\n",
6575                                                 noiselevel=-1)
6576                                         del self.remotepkgs[mypkg]
6577                                         continue
6578                 self.populated=1
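        # Sketch of how populate() is typically reached (values hypothetical):
        # the other binarytree methods call populate() lazily with no arguments;
        # passing getbinpkgs=1 additionally fetches remote metadata from
        # PORTAGE_BINHOST (e.g. "http://example.org/packages") into
        # self.remotepkgs, keyed by "${PF}.tbz2".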
6579
6580         def inject(self,cpv):
6581                 return self.dbapi.cpv_inject(cpv)
6582
6583         def exists_specific(self,cpv):
6584                 if not self.populated:
6585                         self.populate()
6586                 return self.dbapi.match(
6587                         dep_expand("="+cpv, mydb=self.dbapi, settings=self.settings))
6588
6589         def dep_bestmatch(self,mydep):
6590                 "compatibility method -- all matches, not just visible ones"
6591                 if not self.populated:
6592                         self.populate()
6593                 writemsg("\n\n", 1)
6594                 writemsg("mydep: %s\n" % mydep, 1)
6595                 mydep = dep_expand(mydep, mydb=self.dbapi, settings=self.settings)
6596                 writemsg("mydep: %s\n" % mydep, 1)
6597                 mykey=dep_getkey(mydep)
6598                 writemsg("mykey: %s\n" % mykey, 1)
6599                 mymatch=best(match_from_list(mydep,self.dbapi.cp_list(mykey)))
6600                 writemsg("mymatch: %s\n" % mymatch, 1)
6601                 if mymatch is None:
6602                         return ""
6603                 return mymatch
6604
6605         def getname(self,pkgname):
6606                 """Returns a file location for this package.  The default location is
6607                 ${PKGDIR}/All/${PF}.tbz2, but will be ${PKGDIR}/${CATEGORY}/${PF}.tbz2
6608                 in the rare event of a collision.  The prevent_collision() method can
6609                 be called to ensure that ${PKGDIR}/All/${PF}.tbz2 is available for a
6610                 specific cpv."""
6611                 if not self.populated:
6612                         self.populate()
6613                 mycpv = pkgname
6614                 mypath = self._pkg_paths.get(mycpv, None)
6615                 if mypath:
6616                         return os.path.join(self.pkgdir, mypath)
6617                 mycat, mypkg = catsplit(mycpv)
6618                 mypath = os.path.join("All", mypkg + ".tbz2")
6619                 if mypath in self._pkg_paths.values():
6620                         mypath = os.path.join(mycat, mypkg + ".tbz2")
6621                 self._pkg_paths[mycpv] = mypath # cache for future lookups
6622                 return os.path.join(self.pkgdir, mypath)
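        # Sketch (hypothetical cpv): with the default layout,
        #   bintree.getname("app-editors/nano-2.0.2")
        # yields "${PKGDIR}/All/nano-2.0.2.tbz2" unless another category already
        # claims that All/ file name, in which case the category subdirectory is used.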
6623
6624         def isremote(self,pkgname):
6625                 "Returns true if the package is kept remotely."
6626                 mysplit=pkgname.split("/")
6627                 remote = (not os.path.exists(self.getname(pkgname))) and self.remotepkgs.has_key(mysplit[1]+".tbz2")
6628                 return remote
6629
6630         def get_use(self,pkgname):
6631                 mysplit=pkgname.split("/")
6632                 if self.isremote(pkgname):
6633                         return self.remotepkgs[mysplit[1]+".tbz2"]["USE"][:].split()
6634                 tbz2=xpak.tbz2(self.getname(pkgname))
6635                 return tbz2.getfile("USE").split()
6636
6637         def gettbz2(self,pkgname):
6638                 "fetches the package from a remote site, if necessary."
6639                 print "Fetching '"+str(pkgname)+"'"
6640                 mysplit  = pkgname.split("/")
6641                 tbz2name = mysplit[1]+".tbz2"
6642                 if not self.isremote(pkgname):
6643                         if (tbz2name not in self.invalids):
6644                                 return
6645                         else:
6646                                 writemsg("Resuming download of this tbz2, but it is possible that it is corrupt.\n",
6647                                         noiselevel=-1)
6648                 mydest = self.pkgdir+"/All/"
6649                 try:
6650                         os.makedirs(mydest, 0775)
6651                 except (OSError, IOError):
6652                         pass
6653                 return getbinpkg.file_get(
6654                         self.settings["PORTAGE_BINHOST"] + "/" + tbz2name,
6655                         mydest, fcmd=self.settings["RESUMECOMMAND"])
6656
6657         def getslot(self,mycatpkg):
6658                 "Get a slot for a catpkg; assume it exists."
6659                 myslot = ""
6660                 try:
6661                         myslot=self.dbapi.aux_get(mycatpkg,["SLOT"])[0]
6662                 except SystemExit, e:
6663                         raise
6664                 except Exception, e:
6665                         pass
6666                 return myslot
6667
6668 class dblink:
6669         """
6670         This class provides an interface to the installed package database
6671         At present this is implemented as a text backend in /var/db/pkg.
6672         """
6673         def __init__(self, cat, pkg, myroot, mysettings, treetype=None,
6674                 vartree=None):
6675                 """
6676                 Creates a DBlink object for a given CPV.
6677                 The given CPV may not be present in the database already.
6678                 
6679                 @param cat: Category
6680                 @type cat: String
6681                 @param pkg: Package (PV)
6682                 @type pkg: String
6683                 @param myroot: Typically ${ROOT}
6684                 @type myroot: String (Path)
6685                 @param mysettings: Typically portage.config
6686                 @type mysettings: An instance of portage.config
6687                 @param treetype: one of ['porttree','bintree','vartree']
6688                 @type treetype: String
6689                 @param vartree: an instance of vartree corresponding to myroot.
6690                 @type vartree: vartree
6691                 """
6692                 
6693                 self.cat     = cat
6694                 self.pkg     = pkg
6695                 self.mycpv   = self.cat+"/"+self.pkg
6696                 self.mysplit = pkgsplit(self.mycpv)
6697                 self.treetype = treetype
6698                 if vartree is None:
6699                         global db
6700                         vartree = db[myroot]["vartree"]
6701                 self.vartree = vartree
6702
6703                 self.dbroot   = normalize_path(os.path.join(myroot, VDB_PATH))
6704                 self.dbcatdir = self.dbroot+"/"+cat
6705                 self.dbpkgdir = self.dbcatdir+"/"+pkg
6706                 self.dbtmpdir = self.dbcatdir+"/-MERGING-"+pkg
6707                 self.dbdir    = self.dbpkgdir
6708
6709                 self._lock_vdb = None
6710
6711                 self.settings = mysettings
6712                 if self.settings==1:
6713                         raise ValueError
6714
6715                 self.myroot=myroot
6716                 protect_obj = portage_util.ConfigProtect(myroot,
6717                         mysettings.get("CONFIG_PROTECT","").split(),
6718                         mysettings.get("CONFIG_PROTECT_MASK","").split())
6719                 self.updateprotect = protect_obj.updateprotect
6720                 self._config_protect = protect_obj
6721                 self._installed_instance = None
6722                 self.contentscache=[]
6723                 self._contents_inodes = None
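        # Sketch of constructing a dblink for an installed package (hypothetical
        # cpv; "settings" and db[myroot]["vartree"] as set up elsewhere in this module):
        #   mylink = dblink("app-editors", "nano-2.0.2", "/", settings,
        #           treetype="vartree", vartree=db["/"]["vartree"])
        #   if mylink.exists():
        #           print mylink.getpath()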
6724
6725         def lockdb(self):
6726                 if self._lock_vdb:
6727                         raise AssertionError("Lock already held.")
6728                 # At least the parent needs to exist for the lock file.
6729                 portage_util.ensure_dirs(self.dbroot)
6730                 self._lock_vdb = portage_locks.lockdir(self.dbroot)
6731
6732         def unlockdb(self):
6733                 if self._lock_vdb:
6734                         portage_locks.unlockdir(self._lock_vdb)
6735                         self._lock_vdb = None
6736
6737         def getpath(self):
6738                 "return path to location of db information (for >>> informational display)"
6739                 return self.dbdir
6740
6741         def exists(self):
6742                 "does the db entry exist?  boolean."
6743                 return os.path.exists(self.dbdir)
6744
6745         def create(self):
6746                 "create the skeleton db directory structure.  No contents, virtuals, provides or anything.  Also will create /var/db/pkg if necessary."
6747                 """
6748                 This function should never get called (there is no reason to use it).
6749                 """
6750                 # XXXXX Delete this eventually
6751                 raise Exception, "This is bad. Don't use it."
6752                 if not os.path.exists(self.dbdir):
6753                         os.makedirs(self.dbdir)
6754
6755         def delete(self):
6756                 """
6757                 Remove this entry from the database
6758                 """
6759                 if not os.path.exists(self.dbdir):
6760                         return
6761                 try:
6762                         for x in listdir(self.dbdir):
6763                                 os.unlink(self.dbdir+"/"+x)
6764                         os.rmdir(self.dbdir)
6765                 except OSError, e:
6766                         print "!!! Unable to remove db entry for this package."
6767                         print "!!! It is possible that a directory is in this one. Portage will still"
6768                         print "!!! register this package as installed as long as this directory exists."
6769                         print "!!! You may delete this directory with 'rm -Rf "+self.dbdir+"'"
6770                         print "!!! "+str(e)
6771                         print
6772                         sys.exit(1)
6773
6774         def clearcontents(self):
6775                 """
6776                 For a given db entry (self), erase the CONTENTS values.
6777                 """
6778                 if os.path.exists(self.dbdir+"/CONTENTS"):
6779                         os.unlink(self.dbdir+"/CONTENTS")
6780
6781         def getcontents(self):
6782                 """
6783                 Get the installed files of a given package (aka what that package installed)
6784                 """
6785                 if not os.path.exists(self.dbdir+"/CONTENTS"):
6786                         return None
6787                 if self.contentscache != []:
6788                         return self.contentscache
6789                 pkgfiles={}
6790                 myc=open(self.dbdir+"/CONTENTS","r")
6791                 mylines=myc.readlines()
6792                 myc.close()
6793                 null_byte = "\0"
6794                 contents_file = os.path.join(self.dbdir, "CONTENTS")
6795                 pos = 0
6796                 for line in mylines:
6797                         pos += 1
6798                         if null_byte in line:
6799                                 # Null bytes are a common indication of corruption.
6800                                 writemsg("!!! Null byte found in contents " + \
6801                                         "file, line %d: '%s'\n" % (pos, contents_file),
6802                                         noiselevel=-1)
6803                                 continue
6804                         mydat = line.split()
6805                         # we do this so we can remove from non-root filesystems
6806                         # (use the ROOT var to allow maintenance on other partitions)
6807                         try:
6808                                 mydat[1] = normalize_path(os.path.join(
6809                                         self.myroot, mydat[1].lstrip(os.path.sep)))
6810                                 if mydat[0]=="obj":
6811                                         #format: type, mtime, md5sum
6812                                         pkgfiles[" ".join(mydat[1:-2])]=[mydat[0], mydat[-1], mydat[-2]]
6813                                 elif mydat[0]=="dir":
6814                                         #format: type
6815                                         pkgfiles[" ".join(mydat[1:])]=[mydat[0] ]
6816                                 elif mydat[0]=="sym":
6817                                         #format: type, mtime, dest
6818                                         x=len(mydat)-1
6819                                         if (x >= 13) and (mydat[-1][-1]==')'): # Old/Broken symlink entry
6820                                                 mydat = mydat[:-10]+[mydat[-10:][stat.ST_MTIME][:-1]]
6821                                                 writemsg("FIXED SYMLINK LINE: %s\n" % mydat, 1)
6822                                                 x=len(mydat)-1
6823                                         splitter=-1
6824                                         while(x>=0):
6825                                                 if mydat[x]=="->":
6826                                                         splitter=x
6827                                                         break
6828                                                 x=x-1
6829                                         if splitter==-1:
6830                                                 return None
6831                                         pkgfiles[" ".join(mydat[1:splitter])]=[mydat[0], mydat[-1], " ".join(mydat[(splitter+1):-1])]
6832                                 elif mydat[0]=="dev":
6833                                         #format: type
6834                                         pkgfiles[" ".join(mydat[1:])]=[mydat[0] ]
6835                                 elif mydat[0]=="fif":
6836                                         #format: type
6837                                         pkgfiles[" ".join(mydat[1:])]=[mydat[0]]
6838                                 else:
6839                                         return None
6840                         except (KeyError,IndexError):
6841                                 print "portage: CONTENTS line",pos,"corrupt!"
6842                 self.contentscache=pkgfiles
6843                 return pkgfiles
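        # Sketch of the CONTENTS line formats parsed above (hypothetical
        # paths, checksum and mtime):
        #   dir /usr/share/foo
        #   obj /usr/bin/foo d41d8cd98f00b204e9800998ecf8427e 1159095148
        #   sym /usr/lib/libfoo.so -> libfoo.so.1 1159095148
        # producing entries such as
        #   pkgfiles["/usr/bin/foo"] == ["obj", "1159095148", "d41d8cd98f00b204e9800998ecf8427e"]
        # (paths are re-rooted under self.myroot before being used as keys).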
6844
6845         def unmerge(self, pkgfiles=None, trimworld=1, cleanup=1,
6846                 ldpath_mtimes=None):
6847                 """
6848                 Calls prerm
6849                 Unmerges a given package (CPV)
6850                 calls postrm
6851                 calls cleanrm
6852                 calls env_update
6853                 
6854                 @param pkgfiles: files to unmerge (generally self.getcontents() )
6855                 @type pkgfiles: Dictionary
6856                 @param trimworld: Remove CPV from world file if True, not if False
6857                 @type trimworld: Boolean
6858                 @param cleanup: cleanup to pass to doebuild (see doebuild)
6859                 @type cleanup: Boolean
6860                 @param ldpath_mtimes: mtimes to pass to env_update (see env_update)
6861                 @type ldpath_mtimes: Dictionary
6862                 @rtype: Integer
6863                 @returns:
6864                 1. os.EX_OK if everything went well.
6865                 2. return code of the failed phase (for prerm, postrm, cleanrm)
6866                 
6867                 Notes:
6868                 The caller must ensure that lockdb() and unlockdb() are called
6869                 before and after this method.
6870                 """
6871
6872                 contents = self.getcontents()
6873                 # Now, don't assume that the name of the ebuild is the same as the
6874                 # name of the dir; the package may have been moved.
6875                 myebuildpath = None
6876                 mystuff = listdir(self.dbdir, EmptyOnError=1)
6877                 for x in mystuff:
6878                         if x.endswith(".ebuild"):
6879                                 myebuildpath = os.path.join(self.dbdir, self.pkg + ".ebuild")
6880                                 if x[:-7] != self.pkg:
6881                                         # Clean up after vardbapi.move_ent() breakage in
6882                                         # portage versions before 2.1.2
6883                                         os.rename(os.path.join(self.dbdir, x), myebuildpath)
6884                                         write_atomic(os.path.join(self.dbdir, "PF"), self.pkg+"\n")
6885                                 break
6886
6887                 self.settings.load_infodir(self.dbdir)
6888                 if myebuildpath:
6889                         try:
6890                                 doebuild_environment(myebuildpath, "prerm", self.myroot,
6891                                         self.settings, 0, 0, self.vartree.dbapi)
6892                         except portage_exception.UnsupportedAPIException, e:
6893                                 # Sometimes this happens due to corruption of the EAPI file.
6894                                 writemsg("!!! FAILED prerm: %s\n" % \
6895                                         os.path.join(self.dbdir, "EAPI"), noiselevel=-1)
6896                                 writemsg("%s\n" % str(e), noiselevel=-1)
6897                                 return 1
6898                         catdir = os.path.dirname(self.settings["PORTAGE_BUILDDIR"])
6899                         portage_util.ensure_dirs(os.path.dirname(catdir),
6900                                 uid=portage_uid, gid=portage_gid, mode=070, mask=0)
6901                 builddir_lock = None
6902                 catdir_lock = None
6903                 try:
6904                         if myebuildpath:
6905                                 catdir_lock = portage_locks.lockdir(catdir)
6906                                 portage_util.ensure_dirs(catdir,
6907                                         uid=portage_uid, gid=portage_gid,
6908                                         mode=070, mask=0)
6909                                 builddir_lock = portage_locks.lockdir(
6910                                         self.settings["PORTAGE_BUILDDIR"])
6911                                 try:
6912                                         portage_locks.unlockdir(catdir_lock)
6913                                 finally:
6914                                         catdir_lock = None
6915                                 # Eventually, we'd like to pass in the saved ebuild env here...
6916                                 retval = doebuild(myebuildpath, "prerm", self.myroot,
6917                                         self.settings, cleanup=cleanup, use_cache=0,
6918                                         mydbapi=self.vartree.dbapi, tree="vartree",
6919                                         vartree=self.vartree)
6920                                 # XXX: Decide how to handle failures here.
6921                                 if retval != os.EX_OK:
6922                                         writemsg("!!! FAILED prerm: %s\n" % retval, noiselevel=-1)
6923                                         return retval
6924
6925                         self._unmerge_pkgfiles(pkgfiles)
6926
6927                         if myebuildpath:
6928                                 retval = doebuild(myebuildpath, "postrm", self.myroot,
6929                                          self.settings, use_cache=0, tree="vartree",
6930                                          mydbapi=self.vartree.dbapi, vartree=self.vartree)
6931
6932                                 # process logs created during pre/postrm
6933                                 elog_process(self.mycpv, self.settings)
6934
6935                                 # XXX: Decide how to handle failures here.
6936                                 if retval != os.EX_OK:
6937                                         writemsg("!!! FAILED postrm: %s\n" % retval, noiselevel=-1)
6938                                         return retval
6939                                 doebuild(myebuildpath, "cleanrm", self.myroot, self.settings,
6940                                         tree="vartree", mydbapi=self.vartree.dbapi,
6941                                         vartree=self.vartree)
6942
6943                 finally:
6944                         if builddir_lock:
6945                                 portage_locks.unlockdir(builddir_lock)
6946                         try:
6947                                 if myebuildpath and not catdir_lock:
6948                                         # Lock catdir for removal if empty.
6949                                         catdir_lock = portage_locks.lockdir(catdir)
6950                         finally:
6951                                 if catdir_lock:
6952                                         try:
6953                                                 os.rmdir(catdir)
6954                                         except OSError, e:
6955                                                 if e.errno not in (errno.ENOENT,
6956                                                         errno.ENOTEMPTY, errno.EEXIST):
6957                                                         raise
6958                                                 del e
6959                                         portage_locks.unlockdir(catdir_lock)
6960                 env_update(target_root=self.myroot, prev_mtimes=ldpath_mtimes,
6961                         contents=contents)
6962                 return os.EX_OK
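        # Sketch of the expected calling sequence (names hypothetical; see the
        # locking note in the docstring above):
        #   mylink.lockdb()
        #   try:
        #           retval = mylink.unmerge(ldpath_mtimes=prev_mtimes)
        #           if retval == os.EX_OK:
        #                   mylink.delete()
        #   finally:
        #           mylink.unlockdb()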
6963
6964         def _unmerge_pkgfiles(self, pkgfiles):
6965                 """
6966                 
6967                 Unmerges the contents of a package from the liveFS
6968                 Removes the VDB entry for self
6969                 
6970                 @param pkgfiles: typically self.getcontents()
6971                 @type pkgfiles: Dictionary { filename: [ 'type', 'mtime', 'md5sum' ] }
6972                 @rtype: None
6973                 """
6974                 global dircache
6975                 dircache={}
6976
6977                 if not pkgfiles:
6978                         writemsg_stdout("No package files given... Grabbing a set.\n")
6979                         pkgfiles=self.getcontents()
6980
6981                 if pkgfiles:
6982                         mykeys=pkgfiles.keys()
6983                         mykeys.sort()
6984                         mykeys.reverse()
6985
6986                         #process symlinks second-to-last, directories last.
6987                         mydirs=[]
6988                         modprotect="/lib/modules/"
6989                         for objkey in mykeys:
6990                                 obj = normalize_path(objkey)
6991                                 if obj[:2]=="//":
6992                                         obj=obj[1:]
6993                                 statobj = None
6994                                 try:
6995                                         statobj = os.stat(obj)
6996                                 except OSError:
6997                                         pass
6998                                 lstatobj = None
6999                                 try:
7000                                         lstatobj = os.lstat(obj)
7001                                 except (OSError, AttributeError):
7002                                         pass
7003                                 islink = lstatobj is not None and stat.S_ISLNK(lstatobj.st_mode)
7004                                 if statobj is None:
7005                                         if not islink:
7006                                                 #we skip this if we're dealing with a symlink
7007                                                 #because os.stat() will operate on the
7008                                                 #link target rather than the link itself.
7009                                                 writemsg_stdout("--- !found "+str(pkgfiles[objkey][0])+ " %s\n" % obj)
7010                                                 continue
7011                                 # next line includes a tweak to protect modules from being unmerged,
7012                                 # but we don't protect modules from being overwritten if they are
7013                                 # upgraded. We effectively only want one half of the config protection
7014                                 # functionality for /lib/modules. For portage-ng both capabilities
7015                                 # should be able to be independently specified.
7016                                 if obj.startswith(modprotect):
7017                                         writemsg_stdout("--- cfgpro %s %s\n" % (pkgfiles[objkey][0], obj))
7018                                         continue
7019
7020                                 lmtime=str(lstatobj[stat.ST_MTIME])
7021                                 if (pkgfiles[objkey][0] not in ("dir","fif","dev")) and (lmtime != pkgfiles[objkey][1]):
7022                                         writemsg_stdout("--- !mtime %s %s\n" % (pkgfiles[objkey][0], obj))
7023                                         continue
7024
7025                                 if pkgfiles[objkey][0]=="dir":
7026                                         if statobj is None or not stat.S_ISDIR(statobj.st_mode):
7027                                                 writemsg_stdout("--- !dir   %s %s\n" % ("dir", obj))
7028                                                 continue
7029                                         mydirs.append(obj)
7030                                 elif pkgfiles[objkey][0]=="sym":
7031                                         if not islink:
7032                                                 writemsg_stdout("--- !sym   %s %s\n" % ("sym", obj))
7033                                                 continue
7034                                         try:
7035                                                 os.unlink(obj)
7036                                                 writemsg_stdout("<<<        %s %s\n" % ("sym",obj))
7037                                         except (OSError,IOError),e:
7038                                                 writemsg_stdout("!!!        %s %s\n" % ("sym",obj))
7039                                 elif pkgfiles[objkey][0]=="obj":
7040                                         if statobj is None or not stat.S_ISREG(statobj.st_mode):
7041                                                 writemsg_stdout("--- !obj   %s %s\n" % ("obj", obj))
7042                                                 continue
7043                                         mymd5 = None
7044                                         try:
7045                                                 mymd5 = portage_checksum.perform_md5(obj, calc_prelink=1)
7046                                         except portage_exception.FileNotFound, e:
7047                                                 # the file has disappeared between now and our stat call
7048                                                 writemsg_stdout("--- !obj   %s %s\n" % ("obj", obj))
7049                                                 continue
7050
7051                                         # The lower() call is needed because db entries used to store
7052                                         # MD5 sums in upper-case; lowering them preserves backwards compatibility.
7053                                         if mymd5 != pkgfiles[objkey][2].lower():
7054                                                 writemsg_stdout("--- !md5   %s %s\n" % ("obj", obj))
7055                                                 continue
7056                                         try:
7057                                                 os.unlink(obj)
7058                                         except (OSError,IOError),e:
7059                                                 pass
7060                                         writemsg_stdout("<<<        %s %s\n" % ("obj",obj))
7061                                 elif pkgfiles[objkey][0]=="fif":
7062                                         if not stat.S_ISFIFO(lstatobj[stat.ST_MODE]):
7063                                                 writemsg_stdout("--- !fif   %s %s\n" % ("fif", obj))
7064                                                 continue
7065                                         writemsg_stdout("---        %s %s\n" % ("fif",obj))
7066                                 elif pkgfiles[objkey][0]=="dev":
7067                                         writemsg_stdout("---        %s %s\n" % ("dev",obj))
7068
7069                         mydirs.sort()
7070                         mydirs.reverse()
7071
7072                         for obj in mydirs:
7073                                 try:
7074                                         os.rmdir(obj)
7075                                         writemsg_stdout("<<<        %s %s\n" % ("dir",obj))
7076                                 except (OSError, IOError):
7077                                         writemsg_stdout("--- !empty dir %s\n" % obj)
7078
7079                 #remove self from vartree database so that our own virtual gets zapped if we're the last node
7080                 self.vartree.zap(self.mycpv)
7081
7082         def isowner(self,filename,destroot):
7083                 """ 
7084                 Check if filename is a new file or belongs to this package
7085                 (for this or a previous version)
7086                 
7087                 @param filename: Path of the file to check (joined to destroot)
7088                 @type filename: String (Path)
7089                 @param destroot: Typically ${ROOT}
7090                 @type destroot: String (Path)
7091                 @rtype: Boolean
7092                 @returns:
7093                 1. True if this package owns the file.
7094                 2. False if this package does not own the file.
7095                 """
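                # Illustrative sketch (added comment, not part of the original code):
                # a hypothetical caller holding a constructed dblink instance might do
                #
                #     if not mylink.isowner("/usr/bin/foo", "/"):
                #         print "collision:", "/usr/bin/foo"
                #
                # The checks below first treat a missing file as "owned" (nothing to
                # collide with), then match by path against CONTENTS, and finally fall
                # back to a (st_dev, st_ino) comparison to cope with symlinked paths.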
7096                 destfile = normalize_path(
7097                         os.path.join(destroot, filename.lstrip(os.path.sep)))
7098                 try:
7099                         mylstat = os.lstat(destfile)
7100                 except (OSError, IOError):
7101                         return True
7102
7103                 pkgfiles = self.getcontents()
7104                 if pkgfiles and filename in pkgfiles:
7105                         return True
7106                 if pkgfiles:
7107                         if self._contents_inodes is None:
7108                                 self._contents_inodes = set()
7109                                 for x in pkgfiles:
7110                                         try:
7111                                                 lstat = os.lstat(x)
7112                                                 self._contents_inodes.add((lstat.st_dev, lstat.st_ino))
7113                                         except OSError:
7114                                                 pass
7115                         if (mylstat.st_dev, mylstat.st_ino) in self._contents_inodes:
7116                                  return True
7117
7118                 return False
7119
7120         def isprotected(self, filename):
7121                 """In cases where an installed package in the same slot owns a
7122                 protected file that will be merged, bump the mtime on the installed
7123                 file in order to ensure that it isn't unmerged."""
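                # Descriptive note (added): unmerge() skips files whose on-disk mtime no
                # longer matches the recorded CONTENTS mtime (the "!mtime" case above),
                # so bumping the mtime below is what actually keeps the old config file
                # from being removed when the previous same-slot instance is unmerged.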
7124                 if not self._config_protect.isprotected(filename):
7125                         return False
7126                 if self._installed_instance is None:
7127                         return True
7128                 mydata = self._installed_instance.getcontents().get(filename, None)
7129                 if mydata is None:
7130                         return True
7131
7132                 # Bump the mtime in order to ensure that the old config file doesn't
7133                 # get unmerged.  The user will have an opportunity to merge the new
7134                 # config with the old one.
7135                 try:
7136                         os.utime(filename, None)
7137                 except OSError, e:
7138                         if e.errno != errno.ENOENT:
7139                                 raise
7140                         del e
7141                         # The file has disappeared, so it's not protected.
7142                         return False
7143                 return True
7144
7145         def treewalk(self, srcroot, destroot, inforoot, myebuild, cleanup=0,
7146                 mydbapi=None, prev_mtimes=None):
7147                 """
7148                 
7149                 This function does the following:
7150                 
7151                 Performs collision protection checks
7152                 Calls doebuild(mydo=pkg_preinst)
7153                 Merges the package to the livefs
7154                 Unmerges the old version (if required)
7155                 Calls doebuild(mydo=pkg_postinst)
7156                 Calls env_update
7157                 
7158                 @param srcroot: Typically this is ${D}
7159                 @type srcroot: String (Path)
7160                 @param destroot: Path to merge to (usually ${ROOT})
7161                 @type destroot: String (Path)
7162                 @param inforoot: root of the vardb entry (the build-info directory)
7163                 @type inforoot: String (Path)
7164                 @param myebuild: path to the ebuild that we are processing
7165                 @type myebuild: String (Path)
7166                 @param mydbapi: dbapi which is handed to doebuild.
7167                 @type mydbapi: portdbapi instance
7168                 @param prev_mtimes: { Filename:mtime } mapping for env_update
7169                 @type prev_mtimes: Dictionary
7170                 @rtype: Integer
7171                 @returns:
7172                 1. os.EX_OK (0) on success
7173                 2. A nonzero value on failure
7174                 
7175                 secondhand is a list of symlinks that have been skipped due to their target
7176                 not existing; we will merge these symlinks at a later time.
7177                 """
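                # Illustrative sketch (added comment, not original): treewalk() is
                # normally reached through dblink.merge(), mirroring the call made in
                # pkgmerge() further below; the variable names here are hypothetical:
                #
                #     mylink = dblink(mycat, mypkg, myroot, mysettings,
                #         vartree=vartree, treetype="bintree")
                #     retval = mylink.merge(pkgloc, infloc, myroot, myebuild,
                #         mydbapi=mydbapi, prev_mtimes=prev_mtimes)
                #     if retval != os.EX_OK:
                #         writemsg("!!! merge failed: %s\n" % retval, noiselevel=-1)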
7178                 if not os.path.isdir(srcroot):
7179                         writemsg("!!! Directory Not Found: D='%s'\n" % srcroot,
7180                         noiselevel=-1)
7181                         return 1
7182
7183                 if not os.path.exists(self.dbcatdir):
7184                         os.makedirs(self.dbcatdir)
7185
7186                 otherversions=[]
7187                 for v in self.vartree.dbapi.cp_list(self.mysplit[0]):
7188                         otherversions.append(v.split("/")[1])
7189
7190                 slot_matches = self.vartree.dbapi.match(
7191                         "%s:%s" % (self.mysplit[0], self.settings["SLOT"]))
7192                 if slot_matches:
7193                         # Used by self.isprotected().
7194                         self._installed_instance = dblink(self.cat,
7195                                 catsplit(slot_matches[0])[1], destroot, self.settings,
7196                                 vartree=self.vartree)
7197
7198                 # check for package collisions
7199                 if "collision-protect" in self.settings.features:
7200                         collision_ignore = set([normalize_path(myignore) for myignore in \
7201                                 self.settings.get("COLLISION_IGNORE", "").split()])
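                        # Descriptive note (added): COLLISION_IGNORE is a space separated
                        # list of files and/or directory prefixes (typically set in
                        # make.conf), e.g. a hypothetical value of
                        #
                        #     COLLISION_IGNORE="/lib/modules /usr/share/fonts"
                        #
                        # suppresses the collision check for those paths and anything
                        # beneath them, as tested near the end of the loop below.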
7202                         myfilelist = listdir(srcroot, recursive=1, filesonly=1, followSymlinks=False)
7203
7204                         # the linkcheck only works if we are in srcroot
7205                         mycwd = getcwd()
7206                         os.chdir(srcroot)
7207                         mysymlinks = filter(os.path.islink, listdir(srcroot, recursive=1, filesonly=0, followSymlinks=False))
7208                         myfilelist.extend(mysymlinks)
7209                         mysymlinked_directories = [s + os.path.sep for s in mysymlinks]
7210                         del mysymlinks
7211
7212
7213                         stopmerge=False
7214                         starttime=time.time()
7215                         i=0
7216
7217                         otherpkg=[]
7218                         mypkglist=[]
7219
7220                         if self.pkg in otherversions:
7221                                 otherversions.remove(self.pkg)  # we already checked this package
7222
7223                         myslot = self.settings["SLOT"]
7224                         for v in otherversions:
7225                                 # only allow versions with same slot to overwrite files
7226                                 if myslot == self.vartree.dbapi.aux_get("/".join((self.cat, v)), ["SLOT"])[0]:
7227                                         mypkglist.append(
7228                                                 dblink(self.cat, v, destroot, self.settings,
7229                                                         vartree=self.vartree))
7230
7231                         collisions = []
7232
7233                         print green("*")+" checking "+str(len(myfilelist))+" files for package collisions"
7234                         for f in myfilelist:
7235                                 nocheck = False
7236                                 # listdir isn't intelligent enough to exclude symlinked dirs,
7237                                 # so we have to do it ourselves
7238                                 for s in mysymlinked_directories:
7239                                         if f.startswith(s):
7240                                                 nocheck = True
7241                                                 break
7242                                 if nocheck:
7243                                         continue
7244                                 i=i+1
7245                                 if i % 1000 == 0:
7246                                         print str(i)+" files checked ..."
7247                                 if f[0] != "/":
7248                                         f="/"+f
7249                                 isowned = False
7250                                 for ver in [self]+mypkglist:
7251                                         if (ver.isowner(f, destroot) or ver.isprotected(f)):
7252                                                 isowned = True
7253                                                 break
7254                                 if not isowned:
7255                                         collisions.append(f)
7256                                         print "existing file "+f+" is not owned by this package"
7257                                         stopmerge=True
7258                                         if collision_ignore:
7259                                                 if f in collision_ignore:
7260                                                         stopmerge = False
7261                                                 else:
7262                                                         for myignore in collision_ignore:
7263                                                                 if f.startswith(myignore + os.path.sep):
7264                                                                         stopmerge = False
7265                                                                         break
7266                         #print green("*")+" spent "+str(time.time()-starttime)+" seconds checking for file collisions"
7267                         if stopmerge:
7268                                 print red("*")+" This package is blocked because it wants to overwrite"
7269                                 print red("*")+" files belonging to other packages (see messages above)."
7270                                 print red("*")+" If you have no clue what this is all about, report it "
7271                                 print red("*")+" as a bug for this package on http://bugs.gentoo.org"
7272                                 print
7273                                 print red("package "+self.cat+"/"+self.pkg+" NOT merged")
7274                                 print
7275                                 print
7276                                 print "Searching all installed packages for file collisions..."
7277                                 print "Press Ctrl-C to Stop"
7278                                 print
7279                                 """ Note: The isowner calls result in a stat call for *every*
7280                                 single installed file, since the inode numbers are used to work
7281                                 around the problem of ambiguous paths caused by symlinked files
7282                                 and/or directories.  Though it is slow, it is as accurate as
7283                                 possible."""
7284                                 found_owner = False
7285                                 for cpv in self.vartree.dbapi.cpv_all():
7286                                         cat, pkg = catsplit(cpv)
7287                                         mylink = dblink(cat, pkg, destroot, self.settings,
7288                                                 vartree=self.vartree)
7289                                         mycollisions = []
7290                                         for f in collisions:
7291                                                 if mylink.isowner(f, destroot):
7292                                                         mycollisions.append(f)
7293                                         if mycollisions:
7294                                                 found_owner = True
7295                                                 print " * %s:" % cpv
7296                                                 print
7297                                                 for f in mycollisions:
7298                                                         print "     '%s'" % \
7299                                                                 os.path.join(destroot, f.lstrip(os.path.sep))
7300                                                 print
7301                                 if not found_owner:
7302                                         print "None of the installed packages claim the above file(s)."
7303                                         print
7304                                 sys.exit(1)
7305                         try:
7306                                 os.chdir(mycwd)
7307                         except OSError:
7308                                 pass
7309
7310                 if os.stat(srcroot).st_dev == os.stat(destroot).st_dev:
7311                         """ The merge process may move files out of the image directory,
7312                         which causes invalidation of the .installed flag."""
7313                         try:
7314                                 os.unlink(os.path.join(
7315                                         os.path.dirname(normalize_path(srcroot)), ".installed"))
7316                         except OSError, e:
7317                                 if e.errno != errno.ENOENT:
7318                                         raise
7319                                 del e
7320
7321                 # get old contents info for later unmerging
7322                 oldcontents = self.getcontents()
7323
7324                 self.dbdir = self.dbtmpdir
7325                 self.delete()
7326                 if not os.path.exists(self.dbtmpdir):
7327                         os.makedirs(self.dbtmpdir)
7328
7329                 writemsg_stdout(">>> Merging %s %s %s\n" % (self.mycpv,"to",destroot))
7330
7331                 # run preinst script
7332                 if myebuild is None:
7333                         myebuild = os.path.join(inforoot, self.pkg + ".ebuild")
7334                 a = doebuild(myebuild, "preinst", destroot, self.settings, cleanup=cleanup,
7335                         use_cache=0, tree=self.treetype, mydbapi=mydbapi,
7336                         vartree=self.vartree)
7337
7338                 # XXX: Decide how to handle failures here.
7339                 if a != os.EX_OK:
7340                         writemsg("!!! FAILED preinst: "+str(a)+"\n", noiselevel=-1)
7341                         return a
7342
7343                 # copy "info" files (like SLOT, CFLAGS, etc.) into the database
7344                 for x in listdir(inforoot):
7345                         self.copyfile(inforoot+"/"+x)
7346
7347                 # get current counter value (counter_tick also takes care of incrementing it)
7348                 # XXX Need to make this destroot, but it needs to be initialized first. XXX
7349                 # XXX bis: leads to some invalidentry() call through cp_all().
7350                 counter = self.vartree.dbapi.counter_tick(self.myroot, mycpv=self.mycpv)
7351                 # write local package counter for recording
7352                 lcfile = open(self.dbtmpdir+"/COUNTER","w")
7353                 lcfile.write(str(counter))
7354                 lcfile.close()
7355
7356                 # open CONTENTS file (possibly overwriting old one) for recording
7357                 outfile=open(self.dbtmpdir+"/CONTENTS","w")
7358
7359                 self.updateprotect()
7360
7361                 #if we have a file containing previously-merged config file md5sums, grab it.
7362                 conf_mem_file = os.path.join(destroot, CONFIG_MEMORY_FILE)
7363                 cfgfiledict = grabdict(conf_mem_file)
7364                 if self.settings.has_key("NOCONFMEM"):
7365                         cfgfiledict["IGNORE"]=1
7366                 else:
7367                         cfgfiledict["IGNORE"]=0
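                # Note on the confmem format (added comment): grabdict() returns a
                # mapping of protected file paths to single-element lists holding the
                # md5 of the last version merged, roughly (hypothetical example entry)
                #
                #     {"/etc/foo.conf": ["d41d8cd98f00b204e9800998ecf8427e"]}
                #
                # plus the transient "IGNORE" flag set above, which is deleted again
                # before writedict() saves the file back to CONFIG_MEMORY_FILE.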
7368
7369                 # set umask to 0 for merging; save the old umask in prevmask (since this is a global change)
7370                 mymtime    = long(time.time())
7371                 prevmask   = os.umask(0)
7372                 secondhand = []
7373
7374                 # we do a first merge; this will recurse through all files in our srcroot but also build up a
7375                 # "second hand" of symlinks to merge later
7376                 if self.mergeme(srcroot,destroot,outfile,secondhand,"",cfgfiledict,mymtime):
7377                         return 1
7378
7379                 # now, it's time for dealing our second hand; we'll loop until we can't merge anymore.  The rest are
7380                 # broken symlinks.  We'll merge them too.
7381                 lastlen=0
7382                 while len(secondhand) and len(secondhand)!=lastlen:
7383                         # clear the thirdhand.  Anything from our second hand that
7384                         # couldn't get merged will be added to thirdhand.
7385
7386                         thirdhand=[]
7387                         self.mergeme(srcroot,destroot,outfile,thirdhand,secondhand,cfgfiledict,mymtime)
7388
7389                         #swap hands
7390                         lastlen=len(secondhand)
7391
7392                         # our thirdhand now becomes our secondhand.  It's ok to throw
7393                         # away secondhand since thirdhand contains all the stuff that
7394                         # couldn't be merged.
7395                         secondhand = thirdhand
7396
7397                 if len(secondhand):
7398                         # force merge of remaining symlinks (broken or circular; oh well)
7399                         self.mergeme(srcroot,destroot,outfile,None,secondhand,cfgfiledict,mymtime)
7400
7401                 #restore umask
7402                 os.umask(prevmask)
7403
7404                 #if we opened it, close it
7405                 outfile.flush()
7406                 outfile.close()
7407
7408                 if os.path.exists(self.dbpkgdir):
7409                         writemsg_stdout(">>> Safely unmerging already-installed instance...\n")
7410                         self.dbdir = self.dbpkgdir
7411                         self.unmerge(oldcontents, trimworld=0, ldpath_mtimes=prev_mtimes)
7412                         self.dbdir = self.dbtmpdir
7413                         writemsg_stdout(">>> Original instance of package unmerged safely.\n")
7414
7415                 # We hold both directory locks.
7416                 self.dbdir = self.dbpkgdir
7417                 self.delete()
7418                 movefile(self.dbtmpdir, self.dbpkgdir, mysettings=self.settings)
7419                 contents = self.getcontents()
7420
7421                 #write out our collection of md5sums
7422                 if cfgfiledict.has_key("IGNORE"):
7423                         del cfgfiledict["IGNORE"]
7424
7425                 my_private_path = os.path.join(destroot, PRIVATE_PATH)
7426                 if not os.path.exists(my_private_path):
7427                         os.makedirs(my_private_path)
7428                         os.chown(my_private_path, os.getuid(), portage_gid)
7429                         os.chmod(my_private_path, 02770)
7430
7431                 writedict(cfgfiledict, conf_mem_file)
7432                 del conf_mem_file
7433
7434                 #do postinst script
7435                 a = doebuild(myebuild, "postinst", destroot, self.settings, use_cache=0,
7436                         tree=self.treetype, mydbapi=mydbapi, vartree=self.vartree)
7437
7438                 # XXX: Decide how to handle failures here.
7439                 if a != os.EX_OK:
7440                         writemsg("!!! FAILED postinst: "+str(a)+"\n", noiselevel=-1)
7441                         return a
7442
7443                 downgrade = False
7444                 for v in otherversions:
7445                         if pkgcmp(catpkgsplit(self.pkg)[1:], catpkgsplit(v)[1:]) < 0:
7446                                 downgrade = True
7447
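                # Descriptive note (added): pkgcmp() compares [pkgname, version, revision]
                # lists as produced by catpkgsplit()[1:], returning a negative value when
                # the first argument is older; so any installed version newer than the one
                # being merged marks this as a downgrade, and env_update() is then called
                # with makelinks=False.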
7448                 #update environment settings, library paths. DO NOT change symlinks.
7449                 env_update(makelinks=(not downgrade),
7450                         target_root=self.settings["ROOT"], prev_mtimes=prev_mtimes,
7451                         contents=contents)
7452                 #dircache may break autoclean because it remembers the -MERGING-pkg file
7453                 global dircache
7454                 if dircache.has_key(self.dbcatdir):
7455                         del dircache[self.dbcatdir]
7456                 writemsg_stdout(">>> %s %s\n" % (self.mycpv,"merged."))
7457
7458                 # Process ebuild logfiles
7459                 elog_process(self.mycpv, self.settings)
7460                 if "noclean" not in self.settings.features:
7461                         doebuild(myebuild, "clean", destroot, self.settings,
7462                                 tree=self.treetype, mydbapi=mydbapi, vartree=self.vartree)
7463                 return os.EX_OK
7464
7465         def mergeme(self,srcroot,destroot,outfile,secondhand,stufftomerge,cfgfiledict,thismtime):
7466                 """
7467                 
7468                 This function handles actual merging of the package contents to the livefs.
7469                 It also handles config protection.
7470                 
7471                 @param srcroot: Where are we copying files from (usually ${D})
7472                 @type srcroot: String (Path)
7473                 @param destroot: Typically ${ROOT}
7474                 @type destroot: String (Path)
7475                 @param outfile: File to log operations to
7476                 @type outfile: File Object
7477                 @param secondhand: A set of items to merge in pass two (usually
7478                 symlinks that point to non-existing files that may get merged later)
7479                 @type secondhand: List
7480                 @param stufftomerge: Either a directory to merge, or a list of items.
7481                 @type stufftomerge: String or List
7482                 @param cfgfiledict: { File:mtime } mapping for config_protected files
7483                 @type cfgfiledict: Dictionary
7484                 @param thismtime: The current time (typically long(time.time()))
7485                 @type thismtime: Long
7486                 @rtype: None or Integer
7487                 @returns:
7488                 1. 1 on failure
7489                 2. None otherwise
7490                 
7491                 """
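                # Descriptive note (added): the lines written to outfile below become
                # the package's CONTENTS entries, one of
                #
                #     dir <path>
                #     obj <path> <md5> <mtime>
                #     sym <path> -> <target> <mtime>
                #     fif <path>
                #     dev <path>
                #
                # where <path> is myrealdest, i.e. the path without the ${ROOT} prefix.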
7492                 from os.path import sep, join
7493                 srcroot = normalize_path(srcroot).rstrip(sep) + sep
7494                 destroot = normalize_path(destroot).rstrip(sep) + sep
7495                 # this is supposed to merge a list of files.  There will be 2 forms of argument passing.
7496                 if type(stufftomerge)==types.StringType:
7497                         #A directory is specified.  Figure out protection paths, listdir() it and process it.
7498                         mergelist = listdir(join(srcroot, stufftomerge))
7499                         offset=stufftomerge
7500                 else:
7501                         mergelist=stufftomerge
7502                         offset=""
7503                 for x in mergelist:
7504                         mysrc = join(srcroot, offset, x)
7505                         mydest = join(destroot, offset, x)
7506                         # myrealdest is mydest without the $ROOT prefix (makes a difference if ROOT!="/")
7507                         myrealdest = join(sep, offset, x)
7508                         # stat file once, test using S_* macros many times (faster that way)
7509                         try:
7510                                 mystat=os.lstat(mysrc)
7511                         except OSError, e:
7512                                 writemsg("\n")
7513                                 writemsg(red("!!! ERROR: There appears to be ")+bold("FILE SYSTEM CORRUPTION.")+red(" A file that is listed\n"))
7514                                 writemsg(red("!!!        as existing is not capable of being stat'd. If you are using an\n"))
7515                                 writemsg(red("!!!        experimental kernel, please boot into a stable one, force an fsck,\n"))
7516                                 writemsg(red("!!!        and ensure your filesystem is in a sane state. ")+bold("'shutdown -Fr now'\n"))
7517                                 writemsg(red("!!!        File:  ")+str(mysrc)+"\n", noiselevel=-1)
7518                                 writemsg(red("!!!        Error: ")+str(e)+"\n", noiselevel=-1)
7519                                 sys.exit(1)
7520                         except Exception, e:
7521                                 writemsg("\n")
7522                                 writemsg(red("!!! ERROR: An unknown error has occurred during the merge process.\n"))
7523                                 writemsg(red("!!!        A stat call returned the following error for the following file:\n"))
7524                                 writemsg(    "!!!        Please ensure that your filesystem is intact, otherwise report\n")
7525                                 writemsg(    "!!!        this as a portage bug at bugs.gentoo.org. Append 'emerge info'.\n")
7526                                 writemsg(    "!!!        File:  "+str(mysrc)+"\n", noiselevel=-1)
7527                                 writemsg(    "!!!        Error: "+str(e)+"\n", noiselevel=-1)
7528                                 sys.exit(1)
7529
7530
7531                         mymode=mystat[stat.ST_MODE]
7532                         # handy variables; mydest is the target object on the live filesystems;
7533                         # mysrc is the source object in the temporary install dir
7534                         try:
7535                                 mydmode = os.lstat(mydest).st_mode
7536                         except OSError, e:
7537                                 if e.errno != errno.ENOENT:
7538                                         raise
7539                                 del e
7540                                 #dest file doesn't exist
7541                                 mydmode=None
7542
7543                         if stat.S_ISLNK(mymode):
7544                                 # we are merging a symbolic link
7545                                 myabsto=abssymlink(mysrc)
7546                                 if myabsto.startswith(srcroot):
7547                                         myabsto=myabsto[len(srcroot):]
7548                                 myabsto = myabsto.lstrip(sep)
7549                                 myto=os.readlink(mysrc)
7550                                 if self.settings and self.settings["D"]:
7551                                         if myto.startswith(self.settings["D"]):
7552                                                 myto=myto[len(self.settings["D"]):]
7553                                 # myrealto contains the path of the real file to which this symlink points.
7554                                 # we can simply test for existence of this file to see if the target has been merged yet
7555                                 myrealto = normalize_path(os.path.join(destroot, myabsto))
7556                                 if mydmode!=None:
7557                                         #destination exists
7558                                         if not stat.S_ISLNK(mydmode):
7559                                                 if stat.S_ISDIR(mydmode):
7560                                                         # directory in the way: we can't merge a symlink over a directory
7561                                                         # we won't merge this, continue with next file...
7562                                                         continue
7563
7564                                                 if os.path.exists(mysrc) and stat.S_ISDIR(os.stat(mysrc)[stat.ST_MODE]):
7565                                                         # Kill file blocking installation of symlink to dir #71787
7566                                                         pass
7567                                                 elif self.isprotected(mydest):
7568                                                         # Use md5 of the target in ${D} if it exists...
7569                                                         try:
7570                                                                 newmd5 = portage_checksum.perform_md5(
7571                                                                         join(srcroot, myabsto))
7572                                                         except portage_exception.FileNotFound:
7573                                                                 # Maybe the target is merged already.
7574                                                                 try:
7575                                                                         newmd5 = portage_checksum.perform_md5(
7576                                                                                 myrealto)
7577                                                                 except portage_exception.FileNotFound:
7578                                                                         newmd5 = None
7579                                                         mydest = new_protect_filename(mydest,newmd5=newmd5)
7580
7581                                 # if secondhand is None it means we're operating in "force" mode and should not create a second hand.
7582                                 if (secondhand!=None) and (not os.path.exists(myrealto)):
7583                                         # either the target directory doesn't exist yet or the target file doesn't exist -- or
7584                                         # the target is a broken symlink.  We will add this file to our "second hand" and merge
7585                                         # it later.
7586                                         secondhand.append(mysrc[len(srcroot):])
7587                                         continue
7588                                 # unlinking no longer necessary; "movefile" will overwrite symlinks atomically and correctly
7589                                 mymtime=movefile(mysrc,mydest,newmtime=thismtime,sstat=mystat, mysettings=self.settings)
7590                                 if mymtime!=None:
7591                                         writemsg_stdout(">>> %s -> %s\n" % (mydest, myto))
7592                                         outfile.write("sym "+myrealdest+" -> "+myto+" "+str(mymtime)+"\n")
7593                                 else:
7594                                         print "!!! Failed to move file."
7595                                         print "!!!",mydest,"->",myto
7596                                         sys.exit(1)
7597                         elif stat.S_ISDIR(mymode):
7598                                 # we are merging a directory
7599                                 if mydmode!=None:
7600                                         # destination exists
7601
7602                                         if bsd_chflags:
7603                                                 # Save then clear flags on dest.
7604                                                 dflags=bsd_chflags.lgetflags(mydest)
7605                                                 if dflags != 0 and bsd_chflags.lchflags(mydest, 0) < 0:
7606                                                         writemsg("!!! Couldn't clear flags on '"+mydest+"'.\n",
7607                                                                 noiselevel=-1)
7608
7609                                         if not os.access(mydest, os.W_OK):
7610                                                 pkgstuff = pkgsplit(self.pkg)
7611                                                 writemsg("\n!!! Cannot write to '"+mydest+"'.\n", noiselevel=-1)
7612                                                 writemsg("!!! Please check permissions and directories for broken symlinks.\n")
7613                                                 writemsg("!!! You may start the merge process again by using ebuild:\n")
7614                                                 writemsg("!!! ebuild "+self.settings["PORTDIR"]+"/"+self.cat+"/"+pkgstuff[0]+"/"+self.pkg+".ebuild merge\n")
7615                                                 writemsg("!!! And finish by running this: env-update\n\n")
7616                                                 return 1
7617
7618                                         if stat.S_ISLNK(mydmode) or stat.S_ISDIR(mydmode):
7619                                                 # a symlink to an existing directory will work for us; keep it:
7620                                                 writemsg_stdout("--- %s/\n" % mydest)
7621                                                 if bsd_chflags:
7622                                                         bsd_chflags.lchflags(mydest, dflags)
7623                                         else:
7624                                                 # a non-directory and non-symlink-to-directory.  Won't work for us.  Move out of the way.
7625                                                 if movefile(mydest,mydest+".backup", mysettings=self.settings) is None:
7626                                                         sys.exit(1)
7627                                                 print "bak",mydest,mydest+".backup"
7628                                                 #now create our directory
7629                                                 if self.settings.selinux_enabled():
7630                                                         sid = selinux.get_sid(mysrc)
7631                                                         selinux.secure_mkdir(mydest,sid)
7632                                                 else:
7633                                                         os.mkdir(mydest)
7634                                                 if bsd_chflags:
7635                                                         bsd_chflags.lchflags(mydest, dflags)
7636                                                 os.chmod(mydest,mystat[0])
7637                                                 os.chown(mydest,mystat[4],mystat[5])
7638                                                 writemsg_stdout(">>> %s/\n" % mydest)
7639                                 else:
7640                                         #destination doesn't exist
7641                                         if self.settings.selinux_enabled():
7642                                                 sid = selinux.get_sid(mysrc)
7643                                                 selinux.secure_mkdir(mydest,sid)
7644                                         else:
7645                                                 os.mkdir(mydest)
7646                                         os.chmod(mydest,mystat[0])
7647                                         os.chown(mydest,mystat[4],mystat[5])
7648                                         writemsg_stdout(">>> %s/\n" % mydest)
7649                                 outfile.write("dir "+myrealdest+"\n")
7650                                 # recurse and merge this directory
7651                                 if self.mergeme(srcroot, destroot, outfile, secondhand,
7652                                         join(offset, x), cfgfiledict, thismtime):
7653                                         return 1
7654                         elif stat.S_ISREG(mymode):
7655                                 # we are merging a regular file
7656                                 mymd5=portage_checksum.perform_md5(mysrc,calc_prelink=1)
7657                                 # calculate config file protection stuff
7658                                 mydestdir=os.path.dirname(mydest)
7659                                 moveme=1
7660                                 zing="!!!"
7661                                 if mydmode!=None:
7662                                         # destination file exists
7663                                         if stat.S_ISDIR(mydmode):
7664                                                 # install of destination is blocked by an existing directory with the same name
7665                                                 moveme=0
7666                                                 writemsg_stdout("!!! %s\n" % mydest)
7667                                         elif stat.S_ISREG(mydmode) or (stat.S_ISLNK(mydmode) and os.path.exists(mydest) and stat.S_ISREG(os.stat(mydest)[stat.ST_MODE])):
7668                                                 cfgprot=0
7669                                                 # install of destination is blocked by an existing regular file,
7670                                                 # or by a symlink to an existing regular file;
7671                                                 # now, config file management may come into play.
7672                                                 # we only need to tweak mydest if cfg file management is in play.
7673                                                 if self.isprotected(mydest):
7674                                                         # we have a protection path; enable config file management.
7675                                                         destmd5=portage_checksum.perform_md5(mydest,calc_prelink=1)
7676                                                         if mymd5==destmd5:
7677                                                                 #file already in place; simply update mtimes of destination
7678                                                                 os.utime(mydest,(thismtime,thismtime))
7679                                                                 zing="---"
7680                                                                 moveme=0
7681                                                         else:
7682                                                                 if mymd5 == cfgfiledict.get(myrealdest, [None])[0]:
7683                                                                         """ An identical update has previously been
7684                                                                         merged.  Skip it unless the user has chosen
7685                                                                         --noconfmem."""
7686                                                                         zing = "-o-"
7687                                                                         moveme = cfgfiledict["IGNORE"]
7688                                                                         cfgprot = cfgfiledict["IGNORE"]
7689                                                                 else:
7690                                                                         moveme = 1
7691                                                                         cfgprot = 1
7692                                                         if moveme:
7693                                                                 # Merging a new file, so update confmem.
7694                                                                 cfgfiledict[myrealdest] = [mymd5]
7695                                                         elif destmd5 == cfgfiledict.get(myrealdest, [None])[0]:
7696                                                                 """A previously remembered update has been
7697                                                                 accepted, so it is removed from confmem."""
7698                                                                 del cfgfiledict[myrealdest]
7699                                                 if cfgprot:
7700                                                         mydest = new_protect_filename(mydest, newmd5=mymd5)
7701
7702                                 # whether config protection applies or not, we merge the new file the
7703                                 # same way, unless moveme=0 (blocking directory)
7704                                 if moveme:
7705                                         mymtime=movefile(mysrc,mydest,newmtime=thismtime,sstat=mystat, mysettings=self.settings)
7706                                         if mymtime is None:
7707                                                 sys.exit(1)
7708                                         zing=">>>"
7709                                 else:
7710                                         mymtime=thismtime
7711                                         # We need to touch the destination so that on --update the
7712                                         # old package won't yank the file with it. (non-cfgprot related)
7713                                         os.utime(mydest,(thismtime,thismtime))
7714                                         zing="---"
7715                                 if self.settings["USERLAND"] == "Darwin" and myrealdest[-2:] == ".a":
7716
7717                                         # XXX kludge, can be killed when portage stops relying on
7718                                         # md5+mtime, and uses refcounts
7719                                         # alright, we've fooled w/ mtime on the file; this pisses off static archives
7720                                         # basically internal mtime != file's mtime, so the linker (falsely) thinks
7721                                         # the archive is stale, and needs to have its toc rebuilt.
7722
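                                        # Layout of each ar(1) member header as assumed
                                        # by the seek/read offsets below (added comment):
                                        # name 16, mtime 12 (rewritten here), uid 6,
                                        # gid 6, mode 8, size 10 (ASCII decimal),
                                        # trailer 2 ("`\n"), then the data padded to an
                                        # even length -- hence the seek of
                                        # x + 2 + (x % 2) after reading the size.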
7723                                         myf = open(mydest, "r+")
7724
7725                                         # ar mtime field is digits padded with spaces, 12 bytes.
7726                                         lms=str(thismtime+5).ljust(12)
7727                                         myf.seek(0)
7728                                         magic=myf.read(8)
7729                                         if magic != "!<arch>\n":
7730                                                 # not an archive (dolib.a from portage.py makes it here, for example)
7731                                                 myf.close()
7732                                         else:
7733                                                 st = os.stat(mydest)
7734                                                 while myf.tell() < st.st_size - 12:
7735                                                         # skip object name
7736                                                         myf.seek(16,1)
7737
7738                                                         # update mtime
7739                                                         myf.write(lms)
7740
7741                                                         # skip uid/gid/mperm
7742                                                         myf.seek(20,1)
7743
7744                                                         # read the archive member's size
7745                                                         x=long(myf.read(10))
7746
7747                                                         # skip the trailing newlines, and add the potential
7748                                                         # extra padding byte if it's not an even size
7749                                                         myf.seek(x + 2 + (x % 2),1)
7750
7751                                                 # and now we're at the end. yay.
7752                                                 myf.close()
7753                                                 mymd5 = portage_checksum.perform_md5(mydest, calc_prelink=1)
7754                                         os.utime(mydest,(thismtime,thismtime))
7755
7756                                 if mymtime!=None:
7757                                         zing=">>>"
7758                                         outfile.write("obj "+myrealdest+" "+mymd5+" "+str(mymtime)+"\n")
7759                                 writemsg_stdout("%s %s\n" % (zing,mydest))
7760                         else:
7761                                 # we are merging a fifo or device node
7762                                 zing="!!!"
7763                                 if mydmode is None:
7764                                         # destination doesn't exist
7765                                         if movefile(mysrc,mydest,newmtime=thismtime,sstat=mystat, mysettings=self.settings)!=None:
7766                                                 zing=">>>"
7767                                         else:
7768                                                 sys.exit(1)
7769                                 if stat.S_ISFIFO(mymode):
7770                                         outfile.write("fif %s\n" % myrealdest)
7771                                 else:
7772                                         outfile.write("dev %s\n" % myrealdest)
7773                                 writemsg_stdout(zing+" "+mydest+"\n")
7774
7775         def merge(self, mergeroot, inforoot, myroot, myebuild=None, cleanup=0,
7776                 mydbapi=None, prev_mtimes=None):
7777                 try:
7778                         self.lockdb()
7779                         return self.treewalk(mergeroot, myroot, inforoot, myebuild,
7780                                 cleanup=cleanup, mydbapi=mydbapi, prev_mtimes=prev_mtimes)
7781                 finally:
7782                         self.unlockdb()
7783
7784         def getstring(self,name):
7785                 "returns contents of a file with whitespace converted to spaces"
7786                 if not os.path.exists(self.dbdir+"/"+name):
7787                         return ""
7788                 myfile=open(self.dbdir+"/"+name,"r")
7789                 mydata=myfile.read().split()
7790                 myfile.close()
7791                 return " ".join(mydata)
7792
7793         def copyfile(self,fname):
7794                 shutil.copyfile(fname,self.dbdir+"/"+os.path.basename(fname))
7795
7796         def getfile(self,fname):
7797                 if not os.path.exists(self.dbdir+"/"+fname):
7798                         return ""
7799                 myfile=open(self.dbdir+"/"+fname,"r")
7800                 mydata=myfile.read()
7801                 myfile.close()
7802                 return mydata
7803
7804         def setfile(self,fname,data):
7805                 write_atomic(os.path.join(self.dbdir, fname), data)
7806
7807         def getelements(self,ename):
7808                 if not os.path.exists(self.dbdir+"/"+ename):
7809                         return []
7810                 myelement=open(self.dbdir+"/"+ename,"r")
7811                 mylines=myelement.readlines()
7812                 myreturn=[]
7813                 for x in mylines:
7814                         for y in x[:-1].split():
7815                                 myreturn.append(y)
7816                 myelement.close()
7817                 return myreturn
7818
7819         def setelements(self,mylist,ename):
7820                 myelement=open(self.dbdir+"/"+ename,"w")
7821                 for x in mylist:
7822                         myelement.write(x+"\n")
7823                 myelement.close()
7824
7825         def isregular(self):
7826                 "Is this a regular package (does it have a CATEGORY file?  A dblink can be virtual *and* regular)"
7827                 return os.path.exists(self.dbdir+"/CATEGORY")
7828
7829 class FetchlistDict(UserDict.DictMixin):
7830         """This provides a mapping interface to retrieve fetch lists.  It's used
7831         to allow portage_manifest.Manifest to access fetch lists via a standard
7832         mapping interface rather than use the dbapi directly."""
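        # Illustrative usage sketch (added comment; the names are hypothetical):
        #
        #     fetchlists = FetchlistDict(pkgdir, mysettings, portdb)
        #     uris = fetchlists["app-misc/foo-1.0"]    # full fetch list for that cpv
        #     fetchlists.has_key("app-misc/foo-1.0")   # True if the ebuild exists
        #
        # portage_manifest.Manifest can then consume this mapping instead of
        # talking to the dbapi directly.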
7833         def __init__(self, pkgdir, settings, mydbapi):
7834                 """pkgdir is a directory containing ebuilds; settings is passed into
7835                 portdbapi.getfetchlist for __getitem__ calls."""
7836                 self.pkgdir = pkgdir
7837                 self.cp = os.sep.join(pkgdir.split(os.sep)[-2:])
7838                 self.settings = settings
7839                 self.mytree = os.path.realpath(os.path.dirname(os.path.dirname(pkgdir)))
7840                 self.portdb = mydbapi
7841         def __getitem__(self, pkg_key):
7842                 """Returns the complete fetch list for a given package."""
7843                 return self.portdb.getfetchlist(pkg_key, mysettings=self.settings,
7844                         all=True, mytree=self.mytree)[1]
7845         def has_key(self, pkg_key):
7846                 """Returns true if the given package exists within pkgdir."""
7847                 return pkg_key in self.keys()
7848         def keys(self):
7849                 """Returns keys for all packages within pkgdir"""
7850                 return self.portdb.cp_list(self.cp, mytree=self.mytree)
7851
7852 def pkgmerge(mytbz2, myroot, mysettings, mydbapi=None, vartree=None, prev_mtimes=None):
7853         """will merge a .tbz2 file, returning os.EX_OK on success or a nonzero
7854                 value if there was a merge error.  This code assumes the
7855                 package exists."""
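        # Illustrative sketch (added comment, not original); the tbz2 path and the
        # prev_mtimes value are hypothetical:
        #
        #     retval = pkgmerge("/usr/portage/packages/All/foo-1.0.tbz2", "/",
        #         settings, prev_mtimes=mtimedb["ldpath"])
        #     if retval != os.EX_OK:
        #         writemsg("!!! binary merge failed\n", noiselevel=-1)
        #
        # When mydbapi/vartree are omitted they default to the bintree dbapi and
        # vartree registered for myroot in the global db, as done just below.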
7856         global db
7857         if mydbapi is None:
7858                 mydbapi = db[myroot]["bintree"].dbapi
7859         if vartree is None:
7860                 vartree = db[myroot]["vartree"]
7861         if mytbz2[-5:]!=".tbz2":
7862                 print "!!! Not a .tbz2 file"
7863                 return 1
7864
7865         tbz2_lock = None
7866         builddir_lock = None
7867         catdir_lock = None
7868         try:
7869                 """ Don't lock the tbz2 file because the filesystem could be readonly or
7870                 shared by a cluster."""
7871                 #tbz2_lock = portage_locks.lockfile(mytbz2, wantnewlockfile=1)
7872
7873                 mypkg = os.path.basename(mytbz2)[:-5]
7874                 xptbz2 = xpak.tbz2(mytbz2)
7875                 mycat = xptbz2.getfile("CATEGORY")
7876                 if not mycat:
7877                         writemsg("!!! CATEGORY info missing from info chunk, aborting...\n",
7878                                 noiselevel=-1)
7879                         return 1
7880                 mycat = mycat.strip()
7881
7882                 # These are the same directories that would be used at build time.
7883                 builddir = os.path.join(
7884                         mysettings["PORTAGE_TMPDIR"], "portage", mycat, mypkg)
7885                 catdir = os.path.dirname(builddir)
7886                 pkgloc = os.path.join(builddir, "image")
7887                 infloc = os.path.join(builddir, "build-info")
7888                 myebuild = os.path.join(
7889                         infloc, os.path.basename(mytbz2)[:-4] + "ebuild")
7890                 portage_util.ensure_dirs(os.path.dirname(catdir),
7891                         uid=portage_uid, gid=portage_gid, mode=070, mask=0)
7892                 catdir_lock = portage_locks.lockdir(catdir)
7893                 portage_util.ensure_dirs(catdir,
7894                         uid=portage_uid, gid=portage_gid, mode=070, mask=0)
7895                 builddir_lock = portage_locks.lockdir(builddir)
7896                 try:
7897                         portage_locks.unlockdir(catdir_lock)
7898                 finally:
7899                         catdir_lock = None
7900                 try:
7901                         shutil.rmtree(builddir)
7902                 except (IOError, OSError), e:
7903                         if e.errno != errno.ENOENT:
7904                                 raise
7905                         del e
7906                 for mydir in (builddir, pkgloc, infloc):
7907                         portage_util.ensure_dirs(mydir, uid=portage_uid,
7908                                 gid=portage_gid, mode=0755)
7909                 writemsg_stdout(">>> Extracting info\n")
7910                 xptbz2.unpackinfo(infloc)
7911                 mysettings.load_infodir(infloc)
7912                 # Store the md5sum in the vdb.
7913                 fp = open(os.path.join(infloc, "BINPKGMD5"), "w")
7914                 fp.write(str(portage_checksum.perform_md5(mytbz2))+"\n")
7915                 fp.close()
7916
7917                 debug = mysettings.get("PORTAGE_DEBUG", "") == "1"
7918
7919                 # Eventually we'd like to pass in the saved ebuild env here.
7920                 retval = doebuild(myebuild, "setup", myroot, mysettings, debug=debug,
7921                         tree="bintree", mydbapi=mydbapi, vartree=vartree)
7922                 if retval != os.EX_OK:
7923                         writemsg("!!! Setup failed: %s\n" % retval, noiselevel=-1)
7924                         return retval
7925
7926                 writemsg_stdout(">>> Extracting %s\n" % mypkg)
7927                 retval = portage_exec.spawn_bash(
7928                         "bzip2 -dqc -- '%s' | tar -xp -C '%s' -f -" % (mytbz2, pkgloc),
7929                         env=mysettings.environ())
7930                 if retval != os.EX_OK:
7931                         writemsg("!!! Error Extracting '%s'\n" % mytbz2, noiselevel=-1)
7932                         return retval
7933                 #portage_locks.unlockfile(tbz2_lock)
7934                 #tbz2_lock = None
7935
7936                 mylink = dblink(mycat, mypkg, myroot, mysettings, vartree=vartree,
7937                         treetype="bintree")
7938                 retval = mylink.merge(pkgloc, infloc, myroot, myebuild, cleanup=0,
7939                         mydbapi=mydbapi, prev_mtimes=prev_mtimes)
7940                 return retval
7941         finally:
7942                 if tbz2_lock:
7943                         portage_locks.unlockfile(tbz2_lock)
7944                 if builddir_lock:
7945                         try:
7946                                 shutil.rmtree(builddir)
7947                         except (IOError, OSError), e:
7948                                 if e.errno != errno.ENOENT:
7949                                         raise
7950                                 del e
7951                         portage_locks.unlockdir(builddir_lock)
7952                         try:
7953                                 if not catdir_lock:
7954                                         # Lock catdir for removal if empty.
7955                                         catdir_lock = portage_locks.lockdir(catdir)
7956                         finally:
7957                                 if catdir_lock:
7958                                         try:
7959                                                 os.rmdir(catdir)
7960                                         except OSError, e:
7961                                                 if e.errno not in (errno.ENOENT,
7962                                                         errno.ENOTEMPTY, errno.EEXIST):
7963                                                         raise
7964                                                 del e
7965                                         portage_locks.unlockdir(catdir_lock)
7966
7967 def deprecated_profile_check():
7968         if not os.access(DEPRECATED_PROFILE_FILE, os.R_OK):
7969                 return False
7970         deprecatedfile = open(DEPRECATED_PROFILE_FILE, "r")
7971         dcontent = deprecatedfile.readlines()
7972         deprecatedfile.close()
7973         newprofile = dcontent[0]
7974         writemsg(red("\n!!! Your current profile is deprecated and not supported anymore.\n"),
7975                 noiselevel=-1)
7976         writemsg(red("!!! Please upgrade to the following profile if possible:\n"),
7977                 noiselevel=-1)
7978         writemsg(8*" "+green(newprofile)+"\n", noiselevel=-1)
7979         if len(dcontent) > 1:
7980                 writemsg("To upgrade do the following steps:\n", noiselevel=-1)
7981                 for myline in dcontent[1:]:
7982                         writemsg(myline, noiselevel=-1)
7983                 writemsg("\n\n", noiselevel=-1)
7984         return True
7985
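# An illustrative sketch (not taken from any actual profile) of the layout
# that deprecated_profile_check() expects in DEPRECATED_PROFILE_FILE: the
# first line names the replacement profile and any remaining lines are
# upgrade instructions that are echoed verbatim, e.g.:
#
#	default-linux/x86/2006.0
#	Point the /etc/make.profile symlink at the profile shown above.
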
7986 # Gets virtual package settings (deprecated; use config.getvirtuals() instead).
7987 def getvirtuals(myroot):
7988         global settings
7989         writemsg("--- DEPRECATED call to getvirtuals\n")
7990         return settings.getvirtuals(myroot)
7991
7992 def commit_mtimedb(mydict=None, filename=None):
7993         if mydict is None:
7994                 global mtimedb
7995                 if "mtimedb" not in globals() or mtimedb is None:
7996                         return
7997                 mtimedb.commit()
7998                 return
7999         if filename is None:
8000                 global mtimedbfile
8001                 filename = mtimedbfile
8002         mydict["version"] = VERSION
8003         d = {} # for full backward compat, pickle it as a plain dict object.
8004         d.update(mydict)
8005         try:
8006                 f = atomic_ofstream(filename)
8007                 cPickle.dump(d, f, -1)
8008                 f.close()
8009                 portage_util.apply_secpass_permissions(filename, uid=uid, gid=portage_gid, mode=0664)
8010         except (IOError, OSError):
8011                 pass # best-effort commit; ignore write failures
8012
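# Usage note: calling commit_mtimedb() with no arguments commits the global
# mtimedb (when it has been initialized), while MtimeDB.commit() below passes
# an explicit dict and filename, as in
# commit_mtimedb(mydict=d, filename=self.filename).
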
8013 def portageexit():
8014         global uid, portage_gid, portdb, db
8015         if secpass and "SANDBOX_ACTIVE" not in os.environ:
8016                 close_portdbapi_caches()
8017                 commit_mtimedb()
8018
8019 atexit_register(portageexit)
8020
8021 def global_updates(mysettings, trees, prev_mtimes):
8022         """
8023         Perform new global updates if they exist in $PORTDIR/profiles/updates/.
8024
8025         @param mysettings: A config instance for ROOT="/".
8026         @type mysettings: config
8027         @param trees: A dictionary containing portage trees.
8028         @type trees: dict
8029         @param prev_mtimes: A dictionary containing mtimes of files located in
8030                 $PORTDIR/profiles/updates/.
8031         @type prev_mtimes: dict
8032         @rtype: None or List
8033         @return: None if there were no updates, otherwise a list of update commands
8034                 that have been performed.
8035         """
8036         # only do this if we're root and not running repoman/ebuild digest
8037         global secpass
8038         if secpass < 2 or "SANDBOX_ACTIVE" in os.environ:
8039                 return
8040         updpath = os.path.join(mysettings["PORTDIR"], "profiles", "updates")
8041
8042         try:
8043                 if mysettings["PORTAGE_CALLER"] == "fixpackages":
8044                         update_data = grab_updates(updpath)
8045                 else:
8046                         update_data = grab_updates(updpath, prev_mtimes)
8047         except portage_exception.DirectoryNotFound:
8048                 writemsg("--- 'profiles/updates' is empty or not available. Empty portage tree?\n")
8049                 return
8050         myupd = None
8051         if len(update_data) > 0:
8052                 do_upgrade_packagesmessage = 0
8053                 myupd = []
8054                 timestamps = {}
8055                 for mykey, mystat, mycontent in update_data:
8056                         writemsg_stdout("\n\n")
8057                         writemsg_stdout(green("Performing Global Updates: ")+bold(mykey)+"\n")
8058                         writemsg_stdout("(Could take a couple of minutes if you have a lot of binary packages.)\n")
8059                         writemsg_stdout("  "+bold(".")+"='update pass'  "+bold("*")+"='binary update'  "+bold("@")+"='/var/db move'\n"+"  "+bold("s")+"='/var/db SLOT move' "+bold("S")+"='binary SLOT move' "+bold("p")+"='update /etc/portage/package.*'\n")
8060                         valid_updates, errors = parse_updates(mycontent)
8061                         myupd.extend(valid_updates)
8062                         writemsg_stdout(len(valid_updates) * "." + "\n")
8063                         if len(errors) == 0:
8064                                 # Update our internal mtime since we
8065                                 # processed all of our directives.
8066                                 timestamps[mykey] = long(mystat.st_mtime)
8067                         else:
8068                                 for msg in errors:
8069                                         writemsg("%s\n" % msg, noiselevel=-1)
8070
8071                 update_config_files("/",
8072                         mysettings.get("CONFIG_PROTECT","").split(),
8073                         mysettings.get("CONFIG_PROTECT_MASK","").split(),
8074                         myupd)
8075
8076                 trees["/"]["bintree"] = binarytree("/", mysettings["PKGDIR"],
8077                         settings=mysettings)
8078                 for update_cmd in myupd:
8079                         if update_cmd[0] == "move":
8080                                 trees["/"]["vartree"].dbapi.move_ent(update_cmd)
8081                                 trees["/"]["bintree"].move_ent(update_cmd)
8082                         elif update_cmd[0] == "slotmove":
8083                                 trees["/"]["vartree"].dbapi.move_slot_ent(update_cmd)
8084                                 trees["/"]["bintree"].move_slot_ent(update_cmd)
8085
8086                 # The above global updates proceed quickly, so they
8087                 # are considered a single mtimedb transaction.
8088                 if len(timestamps) > 0:
8089                         # We do not update the mtime in the mtimedb
8090                         # until after _all_ of the above updates have
8091                         # been processed because the mtimedb will
8092                         # automatically commit when killed by ctrl C.
8093                         for mykey, mtime in timestamps.iteritems():
8094                                 prev_mtimes[mykey] = mtime
8095
8096                 # We have to do the brute force updates for these now.
8097                 if mysettings["PORTAGE_CALLER"] == "fixpackages" or \
8098                 "fixpackages" in mysettings.features:
8099                         trees["/"]["bintree"].update_ents(myupd)
8100                 else:
8101                         do_upgrade_packagesmessage = 1
8102
8103                 # Update progress above is indicated by characters written to stdout so
8104                 # we print a couple new lines here to separate the progress output from
8105                 # what follows.
8106                 print
8107                 print
8108
8109                 if do_upgrade_packagesmessage and \
8110                         listdir(os.path.join(mysettings["PKGDIR"], "All"), EmptyOnError=1):
8111                         writemsg_stdout(" ** Skipping packages. Run 'fixpackages' or set it in FEATURES to fix the")
8112                         writemsg_stdout("\n    tbz2's in the packages directory. "+bold("Note: This can take a very long time."))
8113                         writemsg_stdout("\n")
8114         if myupd:
8115                 return myupd
8116
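# An illustrative, never-called sketch of how global_updates() can be driven
# from the legacy globals that are initialized at the bottom of this file.
# Passing mtimedb["updates"] as prev_mtimes is an assumption based on the
# MtimeDB keys defined below.
def _global_updates_example():
	# mtimedb["updates"] is assumed to map update-file names to mtimes.
	processed = global_updates(settings, db, mtimedb["updates"])
	if processed:
		writemsg_stdout("Applied %d update command(s).\n" % len(processed))
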
8117 # Continue setting up other trees.
8118
8119 class MtimeDB(dict):
8120         def __init__(self, filename):
8121                 dict.__init__(self)
8122                 self.filename = filename
8123                 self._load(filename)
8124
8125         def _load(self, filename):
8126                 try:
8127                         f = open(filename)
8128                         mypickle = cPickle.Unpickler(f)
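                        # With find_global disabled, the unpickler refuses to
                        # load any classes or functions, so only plain builtin
                        # types can come out of the on-disk mtimedb.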
8129                         mypickle.find_global = None
8130                         d = mypickle.load()
8131                         f.close()
8132                         del f
8133                 except (IOError, OSError, EOFError, cPickle.UnpicklingError):
8134                         d = {}
8135
8136                 if "old" in d:
8137                         d["updates"] = d["old"]
8138                         del d["old"]
8139                 if "cur" in d:
8140                         del d["cur"]
8141
8142                 d.setdefault("starttime", 0)
8143                 d.setdefault("version", "")
8144                 for k in ("info", "ldpath", "updates"):
8145                         d.setdefault(k, {})
8146
8147                 mtimedbkeys = set(("info", "ldpath", "resume", "resume_backup",
8148                         "starttime", "updates", "version"))
8149
8150                 for k in d.keys():
8151                         if k not in mtimedbkeys:
8152                                 writemsg("Deleting invalid mtimedb key: %s\n" % str(k))
8153                                 del d[k]
8154                 self.update(d)
8155                 self._clean_data = copy.deepcopy(d)
8156
8157         def commit(self):
8158                 if not self.filename:
8159                         return
8160                 d = {}
8161                 d.update(self)
8162                 # Only commit if the internal state has changed.
8163                 if d != self._clean_data:
8164                         commit_mtimedb(mydict=d, filename=self.filename)
8165                         self._clean_data = copy.deepcopy(d)
8166
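# An illustrative, never-called sketch of MtimeDB usage, mirroring
# init_legacy_globals() below: load the pickled database from CACHE_PATH,
# record a value under one of the recognized keys, and commit (which only
# writes if something actually changed).  The "example-update" key name is
# purely hypothetical.
def _mtimedb_example():
	example_path = os.path.join("/", CACHE_PATH.lstrip(os.path.sep), "mtimedb")
	example_db = MtimeDB(example_path)
	example_db["updates"]["example-update"] = long(time.time()) # hypothetical
	example_db.commit()
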
8167 def create_trees(config_root=None, target_root=None, trees=None):
8168         if trees is None:
8169                 trees = {}
8170         else:
8171                 # clean up any existing portdbapi instances
8172                 for myroot in trees:
8173                         portdb = trees[myroot]["porttree"].dbapi
8174                         portdb.close_caches()
8175                         portdbapi.portdbapi_instances.remove(portdb)
8176                         del trees[myroot]["porttree"], myroot, portdb
8177
8178         settings = config(config_root=config_root, target_root=target_root,
8179                 config_incrementals=portage_const.INCREMENTALS)
8180         settings.lock()
8181         settings.validate()
8182
8183         myroots = [(settings["ROOT"], settings)]
8184         if settings["ROOT"] != "/":
8185                 settings = config(config_root=None, target_root=None,
8186                         config_incrementals=portage_const.INCREMENTALS)
8187                 settings.lock()
8188                 settings.validate()
8189                 myroots.append((settings["ROOT"], settings))
8190
8191         for myroot, mysettings in myroots:
8192                 trees[myroot] = portage_util.LazyItemsDict(trees.get(myroot, None))
8193                 trees[myroot].addLazySingleton("virtuals", mysettings.getvirtuals, myroot)
8194                 trees[myroot].addLazySingleton(
8195                         "vartree", vartree, myroot, categories=mysettings.categories,
8196                                 settings=mysettings)
8197                 trees[myroot].addLazySingleton("porttree",
8198                         portagetree, myroot, settings=mysettings)
8199                 trees[myroot].addLazySingleton("bintree",
8200                         binarytree, myroot, mysettings["PKGDIR"], settings=mysettings)
8201         return trees
8202
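# An illustrative, never-called sketch of create_trees() usage: the returned
# dict is keyed by ROOT, and each per-root LazyItemsDict instantiates
# "porttree", "vartree", "bintree" and "virtuals" on first access, just as
# init_legacy_globals() does below.
def _create_trees_example():
	example_trees = create_trees(config_root="/", target_root="/")
	# Accessing a lazy key instantiates the corresponding tree on demand.
	example_portdb = example_trees["/"]["porttree"].dbapi
	example_settings = example_trees["/"]["vartree"].settings
	return example_portdb, example_settings
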
8203 # Initialization of legacy globals.  No functions/classes below this point
8204 # please!  When the above functions and classes become independent of the
8205 # below global variables, it will be possible to make the below code
8206 # conditional on a backward compatibility flag (backward compatibility could
8207 # be disabled via an environment variable, for example).  This will enable new
8208 # code that is aware of this flag to import portage without the unnecessary
8209 # overhead (and other issues!) of initializing the legacy globals.
8210
8211 def init_legacy_globals():
8212         global db, settings, root, portdb, selinux_enabled, mtimedbfile, mtimedb, \
8213         archlist, features, groups, pkglines, thirdpartymirrors, usedefaults, \
8214         profiledir, flushmtimedb
8215
8216         # Portage needs to ensure a sane umask for the files it creates.
8217         os.umask(022)
8218
8219         kwargs = {}
8220         for k, envvar in (("config_root", "PORTAGE_CONFIGROOT"), ("target_root", "ROOT")):
8221                 kwargs[k] = os.environ.get(envvar, "/")
8222
8223         db = create_trees(**kwargs)
8224
8225         settings = db["/"]["vartree"].settings
8226         portdb = db["/"]["porttree"].dbapi
8227
8228         for myroot in db:
8229                 if myroot != "/":
8230                         settings = db[myroot]["vartree"].settings
8231                         portdb = db[myroot]["porttree"].dbapi
8232                         break
8233
8234         root = settings["ROOT"]
8235
8236         mtimedbfile = os.path.join("/", CACHE_PATH.lstrip(os.path.sep), "mtimedb")
8237         mtimedb = MtimeDB(mtimedbfile)
8238
8239         # ========================================================================
8240         # COMPATIBILITY
8241         # These attributes should not be used
8242         # within Portage under any circumstances.
8243         # ========================================================================
8244         archlist    = settings.archlist()
8245         features    = settings.features
8246         groups      = settings["ACCEPT_KEYWORDS"].split()
8247         pkglines    = settings.packages
8248         selinux_enabled   = settings.selinux_enabled()
8249         thirdpartymirrors = settings.thirdpartymirrors()
8250         usedefaults       = settings.use_defs
8251         profiledir  = None
8252         if os.path.isdir(PROFILE_PATH):
8253                 profiledir = PROFILE_PATH
8254         def flushmtimedb(record):
8255                 writemsg("portage.flushmtimedb() is DEPRECATED\n")
8256         # ========================================================================
8257         # COMPATIBILITY
8258         # These attributes should not be used
8259         # within Portage under any circumstances.
8260         # ========================================================================
8261
8262 # WARNING!
8263 # The PORTAGE_LEGACY_GLOBALS environment variable is reserved for internal
8264 # use within Portage.  External use of this variable is unsupported because
8265 # it is experimental and its behavior is likely to change.
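# An illustrative sketch (unsupported and experimental, per the warning
# above): a consumer that wanted to import portage without triggering the
# legacy-global initialization could set the reserved variable before the
# import; only the presence of the key is checked, not its value:
#
#	import os
#	os.environ["PORTAGE_LEGACY_GLOBALS"] = "false"
#	import portage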
8266 if "PORTAGE_LEGACY_GLOBALS" not in os.environ:
8267         init_legacy_globals()
8268
8269 # Clear the cache
8270 dircache={}
8271
8272 # ============================================================================
8273 # ============================================================================
8274