1 # portage.py -- core Portage functionality
2 # Copyright 1998-2004 Gentoo Foundation
3 # Distributed under the terms of the GNU General Public License v2
7 VERSION="$Rev$"[6:-2] + "-svn"
9 # ===========================================================================
10 # START OF IMPORTS -- START OF IMPORTS -- START OF IMPORTS -- START OF IMPORT
11 # ===========================================================================
16 print "Failed to import sys! Something is _VERY_ wrong with python."
20 import copy, errno, os, re, shutil, time, types
24 import pickle as cPickle
28 from time import sleep
29 from random import shuffle
31 if getattr(__builtins__, "set", None) is None:
32 from sets import Set as set
33 from itertools import chain, izip
34 except ImportError, e:
35 sys.stderr.write("\n\n")
36 sys.stderr.write("!!! Failed to complete python imports. These are internal modules for\n")
37 sys.stderr.write("!!! python and failure here indicates that you have a problem with python\n")
38 sys.stderr.write("!!! itself and thus portage is not able to continue processing.\n\n")
40 sys.stderr.write("!!! You might consider starting python with verbose flags to see what has\n")
41 sys.stderr.write("!!! gone wrong. Here is the information we got for this exception:\n")
42 sys.stderr.write(" "+str(e)+"\n\n");
46 # XXX: This should get renamed to bsd_chflags, I think.
53 from cache.cache_errors import CacheError
58 from portage_dep import dep_getcpv, dep_getkey, get_operator, \
59 isjustname, isspecific, isvalidatom, \
60 match_from_list, match_to_list, best_match_to_list
62 # XXX: This needs to get cleaned up.
64 from output import bold, colorize, green, red, yellow
67 from portage_const import VDB_PATH, PRIVATE_PATH, CACHE_PATH, DEPCACHE_PATH, \
68 USER_CONFIG_PATH, MODULES_FILE_PATH, CUSTOM_PROFILE_PATH, PORTAGE_BASE_PATH, \
69 PORTAGE_BIN_PATH, PORTAGE_PYM_PATH, PROFILE_PATH, LOCALE_DATA_PATH, \
70 EBUILD_SH_BINARY, SANDBOX_BINARY, BASH_BINARY, \
71 MOVE_BINARY, PRELINK_BINARY, WORLD_FILE, MAKE_CONF_FILE, MAKE_DEFAULTS_FILE, \
72 DEPRECATED_PROFILE_FILE, USER_VIRTUALS_FILE, EBUILD_SH_ENV_FILE, \
73 INVALID_ENV_FILE, CUSTOM_MIRRORS_FILE, CONFIG_MEMORY_FILE,\
74 INCREMENTALS, EAPI, MISC_SH_BINARY
76 from portage_data import ostype, lchown, userland, secpass, uid, wheelgid, \
77 portage_uid, portage_gid, userpriv_groups
78 from portage_manifest import Manifest
81 from portage_util import atomic_ofstream, apply_secpass_permissions, apply_recursive_permissions, \
82 dump_traceback, getconfig, grabdict, grabdict_package, grabfile, grabfile_package, \
83 map_dictlist_vals, new_protect_filename, normalize_path, \
84 pickle_read, pickle_write, stack_dictlist, stack_dicts, stack_lists, \
85 unique_array, varexpand, writedict, writemsg, writemsg_stdout, write_atomic
86 import portage_exception
90 from portage_exec import atexit_register, run_exitfuncs
91 from portage_locks import unlockfile,unlockdir,lockfile,lockdir
92 import portage_checksum
93 from portage_checksum import perform_md5,perform_checksum,prelink_capable
95 from portage_localization import _
96 from portage_update import dep_transform, fixdbentries, grab_updates, \
97 parse_updates, update_config_files, update_dbentries
99 # Need these functions directly in portage namespace to not break every external tool in existence
100 from portage_versions import best, catpkgsplit, catsplit, pkgcmp, \
101 pkgsplit, vercmp, ververify
103 # endversion and endversion_keys are for backward compatibility only.
104 from portage_versions import endversion_keys
105 from portage_versions import suffix_value as endversion
107 except ImportError, e:
108 sys.stderr.write("\n\n")
109 sys.stderr.write("!!! Failed to complete portage imports. There are internal modules for\n")
110 sys.stderr.write("!!! portage and failure here indicates that you have a problem with your\n")
111 sys.stderr.write("!!! installation of portage. Please try a rescue portage located in the\n")
112 sys.stderr.write("!!! portage tree under '/usr/portage/sys-apps/portage/files/' (default).\n")
113 sys.stderr.write("!!! There is a README.RESCUE file that details the steps required to perform\n")
114 sys.stderr.write("!!! a recovery of portage.\n")
115 sys.stderr.write(" "+str(e)+"\n\n")
120 import portage_selinux as selinux
122 writemsg("!!! SELinux not loaded: %s\n" % str(e), noiselevel=-1)
127 # ===========================================================================
128 # END OF IMPORTS -- END OF IMPORTS -- END OF IMPORTS -- END OF IMPORTS -- END
129 # ===========================================================================
# Body of a dotted-module loader (the enclosing "def" line is not visible in
# this view; presumably load_mod(name) -- TODO confirm).  Imports the package
# portion of a dotted name, then walks getattr() down the remaining
# components to reach the leaf module object.
modname = ".".join(name.split(".")[:-1])
# __import__ returns the TOP-level package, not the leaf, hence the walk below
mod = __import__(modname)
components = name.split('.')
for comp in components[1:]:
	# descend one attribute per dotted component
	mod = getattr(mod, comp)
def best_from_dict(key, top_dict, key_order, EmptyOnError=1, FullCopy=1, AllowEmpty=1):
	# Look up `key` inside a dict-of-dicts, consulting the sub-dicts in the
	# order given by `key_order` and returning the first value found.
	# NOTE(review): several original lines (the loop over key_order and the
	# FullCopy/EmptyOnError branching) are elided in this view; the two
	# return statements below are the deep-copy vs. shared-reference arms.
	if top_dict.has_key(x) and top_dict[x].has_key(key):
		# deep copy so callers cannot mutate the stored configuration value
		return copy.deepcopy(top_dict[x][key])
		return top_dict[x][key]
	raise KeyError, "Key not found in list; '%s'" % key
# Fragment of a current-working-directory helper (its def/try lines are not
# visible in this view; presumably wraps os.getcwd() -- TODO confirm).
"this fixes situations where the current directory doesn't exist"
except OSError:  # dir doesn't exist
def abssymlink(symlink):
	"""Read a symlink and return its target as a normalized absolute path.

	A relative target is resolved against the directory that contains the
	symlink itself.
	NOTE(review): the original guards the join below with a check that the
	target is relative; that conditional line is elided in this view.
	"""
	mylink=os.readlink(symlink)
	mydir=os.path.dirname(symlink)
	mylink=mydir+"/"+mylink
	return os.path.normpath(mylink)
def cacheddir(my_original_path, ignorecvs, ignorelist, EmptyOnError, followSymlinks=True):
	"""List a directory through the module-level dircache, returning a pair
	(names, types) where the types list parallels the names list.

	Entries are cached per normalized path and refreshed when the directory
	mtime changes.
	NOTE(review): several original lines are elided in this view (hit/miss
	counter updates, try/else framing, the type-code appends); comments
	below describe only the visible code.
	"""
	global cacheHit,cacheMiss,cacheStale
	mypath = normalize_path(my_original_path)
	if dircache.has_key(mypath):
		cached_mtime, list, ftype = dircache[mypath]
		# cache miss: sentinel mtime forces the refresh branch below
		cached_mtime, list, ftype = -1, [], []
	pathstat = os.stat(mypath)
	if stat.S_ISDIR(pathstat[stat.ST_MODE]):
		mtime = pathstat[stat.ST_MTIME]
		# non-directories are rejected outright
		raise portage_exception.DirectoryNotFound(mypath)
	except (IOError,OSError,portage_exception.PortageException):
	# Python returns mtime in whole seconds, so an mtime from the last few
	# seconds could still be changing; treat a very fresh mtime as stale too.
	if mtime != cached_mtime or time.time() - mtime < 4:
		if dircache.has_key(mypath):
			list = os.listdir(mypath)
			# classify each entry: stat() follows symlinks, lstat() does not
			pathstat = os.stat(mypath+"/"+x)
			pathstat = os.lstat(mypath+"/"+x)
			if stat.S_ISREG(pathstat[stat.ST_MODE]):
			elif stat.S_ISDIR(pathstat[stat.ST_MODE]):
			elif stat.S_ISLNK(pathstat[stat.ST_MODE]):
		except (IOError, OSError):
		# refresh the cache entry with the new listing
		dircache[mypath] = mtime, list, ftype
	for x in range(0, len(list)):
		# ".#"-prefixed names are CVS merge leftovers; everything else is
		# kept unless it appears in ignorelist
		if(ignorecvs and (len(list[x]) > 2) and (list[x][:2]!=".#")):
			ret_list.append(list[x])
			ret_ftype.append(ftype[x])
		elif (list[x] not in ignorelist):
			ret_list.append(list[x])
			ret_ftype.append(ftype[x])
	writemsg("cacheddirStats: H:%d/M:%d/S:%d\n" % (cacheHit, cacheMiss, cacheStale),10)
	return ret_list, ret_ftype
def listdir(mypath, recursive=False, filesonly=False, ignorecvs=False, ignorelist=[], followSymlinks=True,
	EmptyOnError=False, dirsonly=False):
	"""
	Portage-specific implementation of os.listdir

	NOTE(review): ignorelist=[] is a mutable default argument; nothing
	visible here mutates it, but this is fragile -- consider None + guard.

	@param mypath: Path whose contents you wish to list
	@param recursive: Recursively scan directories contained within mypath
	@type recursive: Boolean
	@param filesonly: Only return files, not more directories
	@type filesonly: Boolean
	@param ignorecvs: Ignore CVS directories ('CVS','.svn','SCCS')
	@type ignorecvs: Boolean
	@param ignorelist: List of filenames/directories to exclude
	@type ignorelist: List
	@param followSymlinks: Follow Symlink'd files and directories
	@type followSymlinks: Boolean
	@param EmptyOnError: Return [] if an error occurs.
	@type EmptyOnError: Boolean
	@param dirsonly: Only return directories.
	@type dirsonly: Boolean
	@returns: A list of files and directories (or just files or just directories) or an empty list.
	"""
	list, ftype = cacheddir(mypath, ignorecvs, ignorelist, EmptyOnError, followSymlinks)
	# Fast path: no filtering or recursion requested.
	if not (filesonly or dirsonly or recursive):
	# ftype[x]==1 appears to mark a directory (the type codes are assigned in
	# cacheddir, whose append lines are elided in this view -- TODO confirm);
	# recurse into non-VCS directories, prefixing child names with the parent.
	if ftype[x]==1 and not (ignorecvs and os.path.basename(list[x]) in ('CVS','.svn','SCCS')):
		l,f = cacheddir(mypath+"/"+list[x], ignorecvs, ignorelist, EmptyOnError,
		for y in range(0,len(l)):
			l[y]=list[x]+"/"+l[y]
	# Filtering loops (their surrounding filesonly/dirsonly conditionals are
	# elided in this view).
	for x in range(0,len(ftype)):
		rlist=rlist+[list[x]]
	for x in range(0, len(ftype)):
		rlist = rlist + [list[x]]
def flatten(mytokens):
	"""Recursively flatten a nested list: [1, [2, 3]] -> [1, 2, 3].

	@param mytokens: a (possibly nested) list
	@returns: a new flat list containing the same leaf items in order
	"""
	newlist = []
	for x in mytokens:
		# isinstance() replaces the dated type(x)==types.ListType check;
		# behavior is identical for list inputs.
		if isinstance(x, list):
			newlist.extend(flatten(x))
		else:
			newlist.append(x)
	return newlist
# beautiful directed graph object
# NOTE(review): the "class digraph:" and "def __init__" lines are elided in
# this view; only the __init__ docstring and data-layout comment survive.
	"""Create an empty digraph"""
	# { node : ( { child : priority } , { parent : priority } ) }
	def add(self, node, parent, priority=0):
		"""Adds the specified node with the specified parent.

		If the dep is a soft-dep and the node already has a hard
		relationship to the parent, the relationship is left as hard."""
		# Ensure both endpoints exist; self.order preserves insertion order.
		if node not in self.nodes:
			self.nodes[node] = ({}, {})
			self.order.append(node)
		if parent not in self.nodes:
			self.nodes[parent] = ({}, {})
			self.order.append(parent)
		# Record the edge in both directions, keeping the HIGHEST priority
		# seen for an existing edge.  NOTE(review): the "else:" lines pairing
		# the unconditional assignments below are elided in this view.
		if parent in self.nodes[node][1]:
			if priority > self.nodes[node][1][parent]:
				self.nodes[node][1][parent] = priority
			self.nodes[node][1][parent] = priority
		if node in self.nodes[parent][0]:
			if priority > self.nodes[parent][0][node]:
				self.nodes[parent][0][node] = priority
			self.nodes[parent][0][node] = priority
	def remove(self, node):
		"""Removes the specified node from the digraph, also removing
		any ties to other nodes in the digraph. Raises KeyError if the
		node doesn't exist."""
		# NOTE(review): the "raise KeyError" arm and the final "del
		# self.nodes[node]" are elided in this view.
		if node not in self.nodes:
		# Unlink this node from every parent's child-map and every child's
		# parent-map before dropping it from the ordering.
		for parent in self.nodes[node][1]:
			del self.nodes[parent][0][node]
		for child in self.nodes[node][0]:
			del self.nodes[child][1][node]
		self.order.remove(node)
359 def contains(self, node):
360 """Checks if the digraph contains mynode"""
361 return node in self.nodes
		"""Return a list of all nodes in the graph"""
		# NOTE(review): the def/return lines of the all_nodes() method this
		# docstring belongs to are elided in this view.

	def child_nodes(self, node, ignore_priority=None):
		"""Return all children of the specified node"""
		# Without a threshold, return every child key directly.
		if ignore_priority is None:
			return self.nodes[node][0].keys()
		# Otherwise keep only children whose edge priority exceeds the
		# threshold.  NOTE(review): the "children = []" initialization and
		# the final return are elided in this view.
		for child, priority in self.nodes[node][0].iteritems():
			if priority > ignore_priority:
				children.append(child)
377 def parent_nodes(self, node):
378 """Return all parents of the specified node"""
379 return self.nodes[node][1].keys()
	def leaf_nodes(self, ignore_priority=None):
		"""Return all nodes that have no children

		If ignore_soft_deps is True, soft deps are not counted as
		children in calculations."""
		# Walk nodes in insertion order; a node qualifies as a leaf when no
		# child edge exceeds ignore_priority.  NOTE(review): the accumulator
		# initialization, break/else framing and final return are elided.
		for node in self.order:
			for child in self.nodes[node][0]:
				if self.nodes[node][0][child] > ignore_priority:
			leaf_nodes.append(node)

	def root_nodes(self, ignore_priority=None):
		"""Return all nodes that have no parents.

		If ignore_soft_deps is True, soft deps are not counted as
		parents in calculations."""
		# Mirror image of leaf_nodes(), scanning parent edges instead.
		# NOTE(review): same elisions as leaf_nodes() in this view.
		for node in self.order:
			for parent in self.nodes[node][1]:
				if self.nodes[node][1][parent] > ignore_priority:
			root_nodes.append(node)

		"""Checks if the digraph is empty"""
		# NOTE(review): the def line of this is_empty()-style method is
		# elided in this view.
		return len(self.nodes) == 0

		# Fragment of a copy/clone method (its def line and return are
		# elided); duplicates the edge maps but shallow-copies the ordering.
		clone.nodes = copy.deepcopy(self.nodes)
		clone.order = self.order[:]

	# Backward compatibility alias for old callers.
	allzeros = leaf_nodes
	def delnode(self, node):
		# NOTE(review): delnode's body is elided in this view; the statement
		# below (original line 440) likely belongs to a different
		# backward-compat method in the elided region -- confirm.
		leaf_nodes = self.leaf_nodes()

	def hasallzeros(self, ignore_priority=None):
		# True when every node is currently a leaf.  NOTE(review): the
		# continuation line (comparison against len(self.order)) is elided.
		return len(self.leaf_nodes(ignore_priority=ignore_priority)) == \

	def debug_print(self):
		"""Dump each node and its child edges (with priorities) to stdout."""
		# NOTE(review): the lines printing the node itself and the child
		# names are elided in this view.
		for node in self.nodes:
			if self.nodes[node][0]:
				print "(no children)"
			for child in self.nodes[node][0]:
				print "(%s)" % self.nodes[node][0][child]
# Registry of module finalize() hooks already handed to atexit_register, so
# each elog backend's finalizer is registered at most once per process.
_elog_atexit_handlers = []
def elog_process(cpv, mysettings):
	"""Collect the per-phase elog messages written under ${T}/logging and
	dispatch them to every backend named in PORTAGE_ELOG_SYSTEM.

	NOTE(review): several original lines (early returns, loop headers,
	try/alarm framing, fulllog accumulation) are elided in this view.
	"""
	mylogfiles = listdir(mysettings["T"]+"/logging/")
	# shortcut for packages without any messages
	if len(mylogfiles) == 0:
	# exploit listdir() file order so we process log entries in chronological order
	my_elog_classes = set(mysettings.get("PORTAGE_ELOG_CLASSES", "").split())
		# log filenames are "<phase>.<class>", e.g. "compile.warn"
		msgfunction, msgtype = f.split(".")
		if msgtype.upper() not in my_elog_classes \
			and msgtype.lower() not in my_elog_classes:
		if msgfunction not in portage_const.EBUILD_PHASES:
			writemsg("!!! can't process invalid log file: %s\n" % f,
		if not msgfunction in mylogentries:
			mylogentries[msgfunction] = []
		msgcontent = open(mysettings["T"]+"/logging/"+f, "r").readlines()
		mylogentries[msgfunction].append((msgtype, msgcontent))
	# in case the filters matched all messages
	if len(mylogentries) == 0:
	# generate a single string with all log messages
	for phase in portage_const.EBUILD_PHASES:
		if not phase in mylogentries:
		for msgtype,msgcontent in mylogentries[phase]:
			fulllog += "%s: %s\n" % (msgtype, phase)
			for line in msgcontent:
	# pass the processing to the individual modules
	logsystems = mysettings["PORTAGE_ELOG_SYSTEM"].split()
		# - is nicer than _ for module names, so allow people to use it.
		s = s.replace("-", "_")
		# FIXME: ugly ad.hoc import code
		# TODO: implement a common portage module loader
		logmodule = __import__("elog_modules.mod_"+s)
		m = getattr(logmodule, "mod_"+s)
		# Guard against a backend hanging (e.g. a stuck mail transport):
		# SIGALRM aborts the backend via a PortageException.
		def timeout_handler(signum, frame):
			raise portage_exception.PortageException(
				"Timeout in elog_process for system '%s'" % s)
		signal.signal(signal.SIGALRM, timeout_handler)
		# Timeout after one minute (in case something like the mail
		m.process(mysettings, cpv, mylogentries, fulllog)
		# Register the backend's finalize() hook exactly once.
		if hasattr(m, "finalize") and not m.finalize in _elog_atexit_handlers:
			_elog_atexit_handlers.append(m.finalize)
			atexit_register(m.finalize, mysettings)
	except (ImportError, AttributeError), e:
		writemsg("!!! Error while importing logging modules " + \
			"while loading \"mod_%s\":\n" % str(s))
		writemsg("%s\n" % str(e), noiselevel=-1)
	except portage_exception.PortageException, e:
		writemsg("%s\n" % str(e), noiselevel=-1)
	# clean logfiles to avoid repetitions
		os.unlink(os.path.join(mysettings["T"], "logging", f))
#parse /etc/env.d and generate /etc/profile.env

def env_update(makelinks=1, target_root=None, prev_mtimes=None, contents=None):
	"""Rebuild the environment files derived from ${ROOT}/etc/env.d:
	profile.env, csh.env, ld.so.conf and (when prelink is available)
	prelink.conf, running ldconfig when library paths changed.

	NOTE(review): this view elides many original lines (loop headers,
	try/else framing, early returns); comments describe visible code only.
	"""
	if target_root is None:
	if prev_mtimes is None:
		# fall back to the persistent ldpath mtime cache
		prev_mtimes = mtimedb["ldpath"]
	envd_dir = os.path.join(target_root, "etc", "env.d")
	portage_util.ensure_dirs(envd_dir, mode=0755)
	fns = listdir(envd_dir, EmptyOnError=1)
		# env.d files must start with two digits (ordering prefix) and must
		# not look like editor backups or hidden files
		if not x[0].isdigit() or not x[1].isdigit():
		if x.startswith(".") or x.endswith("~") or x.endswith(".bak"):
	# Variables merged across env.d files, either space- or colon-joined.
	space_separated = set(["CONFIG_PROTECT", "CONFIG_PROTECT_MASK"])
	colon_separated = set(["ADA_INCLUDE_PATH", "ADA_OBJECTS_PATH",
		"CLASSPATH", "INFODIR", "INFOPATH", "KDEDIRS", "LDPATH", "MANPATH",
		"PATH", "PKG_CONFIG_PATH", "PRELINK_PATH", "PRELINK_PATH_MASK",
		"PYTHONPATH", "ROOTPATH"])
		file_path = os.path.join(envd_dir, x)
			# expand=False: keep raw values; expansion happens at use time
			myconfig = getconfig(file_path, expand=False)
		except portage_exception.ParseError, e:
			writemsg("!!! '%s'\n" % str(e), noiselevel=-1)
			# broken symlink or file removed by a concurrent process
			writemsg("!!! File Not Found: '%s'\n" % file_path, noiselevel=-1)
		config_list.append(myconfig)
		# env.d files may extend the cumulative-variable sets themselves
		if "SPACE_SEPARATED" in myconfig:
			space_separated.update(myconfig["SPACE_SEPARATED"].split())
			del myconfig["SPACE_SEPARATED"]
		if "COLON_SEPARATED" in myconfig:
			colon_separated.update(myconfig["COLON_SEPARATED"].split())
			del myconfig["COLON_SEPARATED"]
	# Merge cumulative variables across all env.d files.
	for var in space_separated:
		for myconfig in config_list:
				mylist.extend(filter(None, myconfig[var].split()))
				del myconfig[var] # prepare for env.update(myconfig)
			env[var] = " ".join(mylist)
			specials[var] = mylist
	for var in colon_separated:
		for myconfig in config_list:
				mylist.extend(filter(None, myconfig[var].split(":")))
				del myconfig[var] # prepare for env.update(myconfig)
			env[var] = ":".join(mylist)
			specials[var] = mylist
	for myconfig in config_list:
		"""Cumulative variables have already been deleted from myconfig so that
		they won't be overwritten by this dict.update call."""
	# Compare the previous ld.so.conf contents against the new LDPATH.
	ldsoconf_path = os.path.join(target_root, "etc", "ld.so.conf")
		myld = open(ldsoconf_path)
		myldlines=myld.readlines()
			#each line has at least one char (a newline)
	except (IOError, OSError), e:
		# a missing ld.so.conf is fine; anything else propagates
		if e.errno != errno.ENOENT:
	ld_cache_update=False
	newld = specials["LDPATH"]
		#ld.so.conf needs updating and ldconfig needs to be run
		myfd = atomic_ofstream(ldsoconf_path)
		myfd.write("# ld.so.conf autogenerated by env-update; make all changes to\n")
		myfd.write("# contents of /etc/env.d directory\n")
		for x in specials["LDPATH"]:
	# Update prelink.conf if we are prelink-enabled
		newprelink = atomic_ofstream(
			os.path.join(target_root, "etc", "prelink.conf"))
		newprelink.write("# prelink.conf autogenerated by env-update; make all changes to\n")
		newprelink.write("# contents of /etc/env.d directory\n")
		for x in ["/bin","/sbin","/usr/bin","/usr/sbin","/lib","/usr/lib"]:
			newprelink.write("-l "+x+"\n");
		for x in specials["LDPATH"]+specials["PATH"]+specials["PRELINK_PATH"]:
			# masked paths are excluded from prelinking
			for y in specials["PRELINK_PATH_MASK"]:
			newprelink.write("-h "+x+"\n")
		for x in specials["PRELINK_PATH_MASK"]:
			newprelink.write("-b "+x+"\n")
	# Detect library-directory mtime changes to decide whether ldconfig
	# needs to run at all.
	mtime_changed = False
	for lib_dir in portage_util.unique_array(specials["LDPATH"]+['usr/lib','usr/lib64','usr/lib32','lib','lib64','lib32']):
		x = os.path.join(target_root, lib_dir.lstrip(os.sep))
			newldpathtime = os.stat(x)[stat.ST_MTIME]
			lib_dirs.add(normalize_path(x))
			if oe.errno == errno.ENOENT:
				# ignore this path because it doesn't exist
			if prev_mtimes[x] == newldpathtime:
				prev_mtimes[x] = newldpathtime
			prev_mtimes[x] = newldpathtime
			ld_cache_update = True
	# When only package contents are available, skip ldconfig unless the
	# package actually installed something into a library directory.
		not ld_cache_update and \
		contents is not None:
		libdir_contents_changed = False
		for mypath, mydata in contents.iteritems():
			if mydata[0] not in ("obj","sym"):
			head, tail = os.path.split(mypath)
				libdir_contents_changed = True
		if not libdir_contents_changed:
	# Only run ldconfig as needed
	if (ld_cache_update or makelinks):
		# ldconfig has very different behaviour between FreeBSD and Linux
		if ostype=="Linux" or ostype.lower().endswith("gnu"):
			# We can't update links if we haven't cleaned other versions first, as
			# an older package installed ON TOP of a newer version will cause ldconfig
			# to overwrite the symlinks we just made. -X means no links. After 'clean'
			# we can safely create links.
			writemsg(">>> Regenerating %setc/ld.so.cache...\n" % target_root)
				commands.getstatusoutput("cd / ; /sbin/ldconfig -r '%s'" % target_root)
				commands.getstatusoutput("cd / ; /sbin/ldconfig -X -r '%s'" % target_root)
		elif ostype in ("FreeBSD","DragonFly"):
			writemsg(">>> Regenerating %svar/run/ld-elf.so.hints...\n" % target_root)
			commands.getstatusoutput(
				"cd / ; /sbin/ldconfig -elf -i -f '%svar/run/ld-elf.so.hints' '%setc/ld.so.conf'" % \
				(target_root, target_root))
	# LDPATH is consumed by ld.so.conf above; keep it out of profile.env.
	del specials["LDPATH"]
	penvnotice  = "# THIS FILE IS AUTOMATICALLY GENERATED BY env-update.\n"
	penvnotice += "# DO NOT EDIT THIS FILE. CHANGES TO STARTUP PROFILES\n"
	cenvnotice  = penvnotice[:]
	penvnotice += "# GO INTO /etc/profile NOT /etc/profile.env\n\n"
	cenvnotice += "# GO INTO /etc/csh.cshrc NOT /etc/csh.env\n\n"

	#create /etc/profile.env for bash support
	outfile = atomic_ofstream(os.path.join(target_root, "etc", "profile.env"))
	outfile.write(penvnotice)
	env_keys = [ x for x in env if x != "LDPATH" ]
		outfile.write("export %s='%s'\n" % (x, env[x]))

	#create /etc/csh.env for (t)csh support
	outfile = atomic_ofstream(os.path.join(target_root, "etc", "csh.env"))
	outfile.write(cenvnotice)
		outfile.write("setenv %s '%s'\n" % (x, env[x]))
def ExtractKernelVersion(base_dir):
	"""
	Try to figure out what kernel version we are running

	@param base_dir: Path to sources (usually /usr/src/linux)
	@type base_dir: string
	@rtype: tuple( version[string], error[string])
	@returns:
	1. tuple( version[string], error[string])
		Either version or error is populated (but never both)

	NOTE(review): the try statements framing the opens/reads below, the
	version-string assembly lines and the localversion sort/filter lines are
	elided in this view.
	"""
	pathname = os.path.join(base_dir, 'Makefile')
		f = open(pathname, 'r')
	except OSError, details:
		return (None, str(details))
	except IOError, details:
		return (None, str(details))
		# only the first few Makefile lines are needed (VERSION et al.)
		lines.append(f.readline())
	except OSError, details:
		return (None, str(details))
	except IOError, details:
		return (None, str(details))
	lines = [l.strip() for l in lines]
	#XXX: The following code relies on the ordering of vars within the Makefile
		# split on the '=' then remove annoying whitespace
		items = line.split("=")
		items = [i.strip() for i in items]
		if items[0] == 'VERSION' or \
			items[0] == 'PATCHLEVEL':
		elif items[0] == 'SUBLEVEL':
		elif items[0] == 'EXTRAVERSION' and \
			items[-1] != items[0]:
	# Grab a list of files named localversion* and sort them
	localversions = os.listdir(base_dir)
	for x in range(len(localversions)-1,-1,-1):
		if localversions[x][:12] != "localversion":
	# Append the contents of each to the version string, stripping ALL whitespace
	for lv in localversions:
		version += "".join( " ".join( grabfile( base_dir+ "/" + lv ) ).split() )
	# Check the .config for a CONFIG_LOCALVERSION and append that too, also stripping whitespace
	kernelconfig = getconfig(base_dir+"/.config")
	if kernelconfig and kernelconfig.has_key("CONFIG_LOCALVERSION"):
		version += "".join(kernelconfig["CONFIG_LOCALVERSION"].split())

	return (version,None)
def autouse(myvartree, use_cache=1, mysettings=None):
	"""
	autouse returns a list of USE variables auto-enabled to packages being installed

	@param myvartree: Instance of the vartree class (from /var/db/pkg...)
	@type myvartree: vartree
	@param use_cache: read values from cache
	@type use_cache: Boolean
	@param mysettings: Instance of config
	@type mysettings: config
	@returns: A string containing a list of USE variables that are enabled via use.defaults

	NOTE(review): early-return lines and the loop's break/else framing are
	elided in this view.
	"""
	if mysettings is None:
		mysettings = settings
	if mysettings.profile_path is None:
	usedefaults = mysettings.use_defs
	for myuse in usedefaults:
		for mydep in usedefaults[myuse]:
			# a USE flag is auto-enabled only if the deps listed for it in
			# use.defaults match installed packages
			if not myvartree.dep_match(mydep,use_cache=True):
		myusevars += " "+myuse
def check_config_instance(test):
	"""Validate that *test* is a portage.config instance.

	The check compares the class's string form rather than using
	isinstance(), preserving the original behavior (which also tolerates
	module-reload situations where identity checks would fail).

	@param test: object expected to be a portage.config instance
	@raises TypeError: if test is falsy or is not a portage.config
	"""
	if not test or (str(test.__class__) != 'portage.config'):
		# call-form raise replaces the legacy "raise TypeError, msg" comma
		# syntax; behavior is identical and the form is also valid Python 3
		raise TypeError("Invalid type for config object: %s" % test.__class__)
	# NOTE(review): the "class config" header line is elided in this view;
	# the docstring fragment below belongs to that class (its opening and
	# closing triple quotes are also elided).
	This class encompasses the main portage configuration. Data is pulled from
	ROOT/PORTDIR/profiles/, from ROOT/etc/make.profile incrementally through all
	parent profiles as well as from ROOT/PORTAGE_CONFIGROOT/* for user specified

	Generally if you need data like USE flags, FEATURES, environment variables,
	virtuals ...etc you look in here.
874 def __init__(self, clone=None, mycpv=None, config_profile_path=None,
875 config_incrementals=None, config_root=None, target_root=None,
878 @param clone: If provided, init will use deepcopy to copy by value the instance.
879 @type clone: Instance of config class.
880 @param mycpv: CPV to load up (see setcpv), this is the same as calling init with mycpv=None
881 and then calling instance.setcpv(mycpv).
883 @param config_profile_path: Configurable path to the profile (usually PROFILE_PATH from portage_const)
884 @type config_profile_path: String
885 @param config_incrementals: List of incremental variables (usually portage_const.INCREMENTALS)
886 @type config_incrementals: List
887 @param config_root: path to read local config from (defaults to "/", see PORTAGE_CONFIGROOT)
888 @type config_root: String
889 @param target_root: __init__ override of $ROOT env variable.
890 @type target_root: String
891 @param local_config: Enables loading of local config (/etc/portage); used most by repoman to
892 ignore local config (keywording and unmasking)
893 @type local_config: Boolean
896 debug = os.environ.get("PORTAGE_DEBUG") == "1"
898 self.already_in_regenerate = 0
903 self.modifiedkeys = []
908 self.dirVirtuals = None
911 # Virtuals obtained from the vartree
912 self.treeVirtuals = {}
913 # Virtuals by user specification. Includes negatives.
914 self.userVirtuals = {}
915 # Virtual negatives from user specifications.
916 self.negVirtuals = {}
918 self.user_profile_dir = None
919 self.local_config = local_config
922 self.incrementals = copy.deepcopy(clone.incrementals)
923 self.profile_path = copy.deepcopy(clone.profile_path)
924 self.user_profile_dir = copy.deepcopy(clone.user_profile_dir)
925 self.local_config = copy.deepcopy(clone.local_config)
927 self.module_priority = copy.deepcopy(clone.module_priority)
928 self.modules = copy.deepcopy(clone.modules)
930 self.depcachedir = copy.deepcopy(clone.depcachedir)
932 self.packages = copy.deepcopy(clone.packages)
933 self.virtuals = copy.deepcopy(clone.virtuals)
935 self.treeVirtuals = copy.deepcopy(clone.treeVirtuals)
936 self.userVirtuals = copy.deepcopy(clone.userVirtuals)
937 self.negVirtuals = copy.deepcopy(clone.negVirtuals)
939 self.use_defs = copy.deepcopy(clone.use_defs)
940 self.usemask = copy.deepcopy(clone.usemask)
941 self.usemask_list = copy.deepcopy(clone.usemask_list)
942 self.pusemask_list = copy.deepcopy(clone.pusemask_list)
943 self.useforce = copy.deepcopy(clone.useforce)
944 self.useforce_list = copy.deepcopy(clone.useforce_list)
945 self.puseforce_list = copy.deepcopy(clone.puseforce_list)
946 self.puse = copy.deepcopy(clone.puse)
947 self.make_defaults_use = copy.deepcopy(clone.make_defaults_use)
948 self.pkgprofileuse = copy.deepcopy(clone.pkgprofileuse)
949 self.mycpv = copy.deepcopy(clone.mycpv)
951 self.configlist = copy.deepcopy(clone.configlist)
952 self.lookuplist = self.configlist[:]
953 self.lookuplist.reverse()
955 "env.d": self.configlist[0],
956 "pkginternal": self.configlist[1],
957 "globals": self.configlist[2],
958 "defaults": self.configlist[3],
959 "conf": self.configlist[4],
960 "pkg": self.configlist[5],
961 "auto": self.configlist[6],
962 "backupenv": self.configlist[7],
963 "env": self.configlist[8] }
964 self.profiles = copy.deepcopy(clone.profiles)
965 self.backupenv = self.configdict["backupenv"]
966 self.pusedict = copy.deepcopy(clone.pusedict)
967 self.categories = copy.deepcopy(clone.categories)
968 self.pkeywordsdict = copy.deepcopy(clone.pkeywordsdict)
969 self.pmaskdict = copy.deepcopy(clone.pmaskdict)
970 self.punmaskdict = copy.deepcopy(clone.punmaskdict)
971 self.prevmaskdict = copy.deepcopy(clone.prevmaskdict)
972 self.pprovideddict = copy.deepcopy(clone.pprovideddict)
973 self.dirVirtuals = copy.deepcopy(clone.dirVirtuals)
974 self.treeVirtuals = copy.deepcopy(clone.treeVirtuals)
975 self.features = copy.deepcopy(clone.features)
978 # backupenv is for calculated incremental variables.
979 self.backupenv = os.environ.copy()
981 def check_var_directory(varname, var):
982 if not os.path.isdir(var):
983 writemsg(("!!! Error: %s='%s' is not a directory. " + \
984 "Please correct this.\n") % (varname, var),
986 raise portage_exception.DirectoryNotFound(var)
988 if config_root is None:
992 normalize_path(config_root).rstrip(os.path.sep) + os.path.sep
994 check_var_directory("PORTAGE_CONFIGROOT", config_root)
996 self.depcachedir = DEPCACHE_PATH
998 if not config_profile_path:
999 config_profile_path = \
1000 os.path.join(config_root, PROFILE_PATH.lstrip(os.path.sep))
1001 if os.path.isdir(config_profile_path):
1002 self.profile_path = config_profile_path
1004 self.profile_path = None
1006 self.profile_path = config_profile_path[:]
1008 if not config_incrementals:
1009 writemsg("incrementals not specified to class config\n")
1010 self.incrementals = copy.deepcopy(portage_const.INCREMENTALS)
1012 self.incrementals = copy.deepcopy(config_incrementals)
1014 self.module_priority = ["user","default"]
1016 self.modules["user"] = getconfig(
1017 os.path.join(config_root, MODULES_FILE_PATH.lstrip(os.path.sep)))
1018 if self.modules["user"] is None:
1019 self.modules["user"] = {}
1020 self.modules["default"] = {
1021 "portdbapi.metadbmodule": "cache.metadata.database",
1022 "portdbapi.auxdbmodule": "cache.flat_hash.database",
1028 # back up our incremental variables:
1030 # configlist will contain: [ env.d, globals, defaults, conf, pkg, auto, backupenv, env ]
1031 self.configlist.append({})
1032 self.configdict["env.d"] = self.configlist[-1]
1034 self.configlist.append({})
1035 self.configdict["pkginternal"] = self.configlist[-1]
1037 # The symlink might not exist or might not be a symlink.
1038 if self.profile_path is None:
1042 def addProfile(currentPath):
1043 parentsFile = os.path.join(currentPath, "parent")
1044 if os.path.exists(parentsFile):
1045 parents = grabfile(parentsFile)
1047 raise portage_exception.ParseError(
1048 "Empty parent file: '%s'" % parents_file)
1049 for parentPath in parents:
1050 parentPath = normalize_path(os.path.join(
1051 currentPath, parentPath))
1052 if os.path.exists(parentPath):
1053 addProfile(parentPath)
1055 raise portage_exception.ParseError(
1056 "Parent '%s' not found: '%s'" % \
1057 (parentPath, parentsFile))
1058 self.profiles.append(currentPath)
1059 addProfile(os.path.realpath(self.profile_path))
1061 custom_prof = os.path.join(
1062 config_root, CUSTOM_PROFILE_PATH.lstrip(os.path.sep))
1063 if os.path.exists(custom_prof):
1064 self.user_profile_dir = custom_prof
1065 self.profiles.append(custom_prof)
1068 self.packages_list = [grabfile_package(os.path.join(x, "packages")) for x in self.profiles]
1069 self.packages = stack_lists(self.packages_list, incremental=1)
1070 del self.packages_list
1071 #self.packages = grab_stacked("packages", self.profiles, grabfile, incremental_lines=1)
1074 self.prevmaskdict={}
1075 for x in self.packages:
1076 mycatpkg=dep_getkey(x)
1077 if not self.prevmaskdict.has_key(mycatpkg):
1078 self.prevmaskdict[mycatpkg]=[x]
1080 self.prevmaskdict[mycatpkg].append(x)
1082 # get profile-masked use flags -- INCREMENTAL Child over parent
1083 self.usemask_list = [grabfile(os.path.join(x, "use.mask")) \
1084 for x in self.profiles]
1085 self.usemask = set(stack_lists(
1086 self.usemask_list, incremental=True))
1087 use_defs_lists = [grabdict(os.path.join(x, "use.defaults")) for x in self.profiles]
1088 self.use_defs = stack_dictlist(use_defs_lists, incremental=True)
1091 self.pusemask_list = []
1092 rawpusemask = [grabdict_package(
1093 os.path.join(x, "package.use.mask")) \
1094 for x in self.profiles]
1095 for i in xrange(len(self.profiles)):
1097 for k, v in rawpusemask[i].iteritems():
1098 cpdict.setdefault(dep_getkey(k), {})[k] = v
1099 self.pusemask_list.append(cpdict)
1102 self.pkgprofileuse = []
1103 rawprofileuse = [grabdict_package(
1104 os.path.join(x, "package.use"), juststrings=True) \
1105 for x in self.profiles]
1106 for i in xrange(len(self.profiles)):
1108 for k, v in rawprofileuse[i].iteritems():
1109 cpdict.setdefault(dep_getkey(k), {})[k] = v
1110 self.pkgprofileuse.append(cpdict)
1113 self.useforce_list = [grabfile(os.path.join(x, "use.force")) \
1114 for x in self.profiles]
1115 self.useforce = set(stack_lists(
1116 self.useforce_list, incremental=True))
1118 self.puseforce_list = []
1119 rawpuseforce = [grabdict_package(
1120 os.path.join(x, "package.use.force")) \
1121 for x in self.profiles]
1122 for i in xrange(len(self.profiles)):
1124 for k, v in rawpuseforce[i].iteritems():
1125 cpdict.setdefault(dep_getkey(k), {})[k] = v
1126 self.puseforce_list.append(cpdict)
1130 self.mygcfg = getconfig(os.path.join(config_root, "etc", "make.globals"))
1132 if self.mygcfg is None:
1134 except SystemExit, e:
1136 except Exception, e:
1139 writemsg("!!! %s\n" % (e), noiselevel=-1)
1140 if not isinstance(e, EnvironmentError):
1141 writemsg("!!! Incorrect multiline literals can cause " + \
1142 "this. Do not use them.\n", noiselevel=-1)
1144 self.configlist.append(self.mygcfg)
1145 self.configdict["globals"]=self.configlist[-1]
1147 self.make_defaults_use = []
1151 mygcfg_dlists = [getconfig(os.path.join(x, "make.defaults")) for x in self.profiles]
1152 for cfg in mygcfg_dlists:
1154 self.make_defaults_use.append(cfg.get("USE", ""))
1156 self.make_defaults_use.append("")
1157 self.mygcfg = stack_dicts(mygcfg_dlists, incrementals=portage_const.INCREMENTALS, ignore_none=1)
1158 #self.mygcfg = grab_stacked("make.defaults", self.profiles, getconfig)
1159 if self.mygcfg is None:
1161 except SystemExit, e:
1163 except Exception, e:
1166 writemsg("!!! %s\n" % (e), noiselevel=-1)
1167 if not isinstance(e, EnvironmentError):
1168 writemsg("!!! 'rm -Rf /usr/portage/profiles; " + \
1169 "emerge sync' may fix this. If it does\n",
1171 writemsg("!!! not then please report this to " + \
1172 "bugs.gentoo.org and, if possible, a dev\n",
1174 writemsg("!!! on #gentoo (irc.freenode.org)\n",
1177 self.configlist.append(self.mygcfg)
1178 self.configdict["defaults"]=self.configlist[-1]
1181 self.mygcfg = getconfig(
1182 os.path.join(config_root, MAKE_CONF_FILE.lstrip(os.path.sep)),
1183 allow_sourcing=True)
1184 if self.mygcfg is None:
1186 except SystemExit, e:
1188 except Exception, e:
1191 writemsg("!!! %s\n" % (e), noiselevel=-1)
1192 if not isinstance(e, EnvironmentError):
1193 writemsg("!!! Incorrect multiline literals can cause " + \
1194 "this. Do not use them.\n", noiselevel=-1)
1197 # Allow ROOT setting to come from make.conf if it's not overridden
1198 # by the constructor argument (from the calling environment). As a
1199 # special exception for a very common use case, config_root == "/"
1200 # implies that ROOT in make.conf should be ignored. That way, the
1201 # user can chroot into $ROOT and the ROOT setting in make.conf will
1202 # be automatically ignored (unless config_root is other than "/").
1203 if config_root != "/" and \
1204 target_root is None and "ROOT" in self.mygcfg:
1205 target_root = self.mygcfg["ROOT"]
1207 self.configlist.append(self.mygcfg)
1208 self.configdict["conf"]=self.configlist[-1]
1210 self.configlist.append({})
1211 self.configdict["pkg"]=self.configlist[-1]
1214 self.configlist.append({})
1215 self.configdict["auto"]=self.configlist[-1]
1217 self.configlist.append(self.backupenv) # XXX Why though?
1218 self.configdict["backupenv"]=self.configlist[-1]
1220 self.configlist.append(os.environ.copy())
1221 self.configdict["env"]=self.configlist[-1]
1224 # make lookuplist for loading package.*
1225 self.lookuplist=self.configlist[:]
1226 self.lookuplist.reverse()
1228 # Blacklist vars that could interfere with portage internals.
1229 for blacklisted in ["PKGUSE", "PORTAGE_CONFIGROOT", "ROOT"]:
1230 for cfg in self.lookuplist:
1232 del cfg[blacklisted]
1235 del blacklisted, cfg
1237 if target_root is None:
1241 normalize_path(target_root).rstrip(os.path.sep) + os.path.sep
1243 check_var_directory("ROOT", target_root)
1246 os.path.join(target_root, "etc", "profile.env"), expand=False)
1247 # env_d will be None if profile.env doesn't exist.
1249 self.configdict["env.d"].update(env_d)
1250 # Remove duplicate values so they don't override updated
1251 # profile.env values later (profile.env is reloaded in each
1252 # call to self.regenerate).
1253 for cfg in (self.configdict["backupenv"],
1254 self.configdict["env"]):
1255 for k, v in env_d.iteritems():
1263 self["PORTAGE_CONFIGROOT"] = config_root
1264 self.backup_changes("PORTAGE_CONFIGROOT")
1265 self["ROOT"] = target_root
1266 self.backup_changes("ROOT")
1269 self.pkeywordsdict = {}
1270 self.punmaskdict = {}
1271 abs_user_config = os.path.join(config_root,
1272 USER_CONFIG_PATH.lstrip(os.path.sep))
1274 # locations for "categories" and "arch.list" files
1275 locations = [os.path.join(self["PORTDIR"], "profiles")]
1276 pmask_locations = [os.path.join(self["PORTDIR"], "profiles")]
1277 pmask_locations.extend(self.profiles)
1279 """ repoman controls PORTDIR_OVERLAY via the environment, so no
1280 special cases are needed here."""
1281 overlay_profiles = []
1282 for ov in self["PORTDIR_OVERLAY"].split():
1283 ov = normalize_path(ov)
1284 profiles_dir = os.path.join(ov, "profiles")
1285 if os.path.isdir(profiles_dir):
1286 overlay_profiles.append(profiles_dir)
1287 locations += overlay_profiles
1289 pmask_locations.extend(overlay_profiles)
1292 locations.append(abs_user_config)
1293 pmask_locations.append(abs_user_config)
1294 pusedict = grabdict_package(
1295 os.path.join(abs_user_config, "package.use"), recursive=1)
1296 for key in pusedict.keys():
1297 cp = dep_getkey(key)
1298 if not self.pusedict.has_key(cp):
1299 self.pusedict[cp] = {}
1300 self.pusedict[cp][key] = pusedict[key]
1303 pkgdict = grabdict_package(
1304 os.path.join(abs_user_config, "package.keywords"),
1306 for key in pkgdict.keys():
1307 # default to ~arch if no specific keyword is given
1308 if not pkgdict[key]:
1310 if self.configdict["defaults"] and self.configdict["defaults"].has_key("ACCEPT_KEYWORDS"):
1311 groups = self.configdict["defaults"]["ACCEPT_KEYWORDS"].split()
1314 for keyword in groups:
1315 if not keyword[0] in "~-":
1316 mykeywordlist.append("~"+keyword)
1317 pkgdict[key] = mykeywordlist
1318 cp = dep_getkey(key)
1319 if not self.pkeywordsdict.has_key(cp):
1320 self.pkeywordsdict[cp] = {}
1321 self.pkeywordsdict[cp][key] = pkgdict[key]
1324 pkgunmasklines = grabfile_package(
1325 os.path.join(abs_user_config, "package.unmask"),
1327 for x in pkgunmasklines:
1328 mycatpkg=dep_getkey(x)
1329 if self.punmaskdict.has_key(mycatpkg):
1330 self.punmaskdict[mycatpkg].append(x)
1332 self.punmaskdict[mycatpkg]=[x]
1334 #getting categories from an external file now
1335 categories = [grabfile(os.path.join(x, "categories")) for x in locations]
1336 self.categories = stack_lists(categories, incremental=1)
1339 archlist = [grabfile(os.path.join(x, "arch.list")) for x in locations]
1340 archlist = stack_lists(archlist, incremental=1)
1341 self.configdict["conf"]["PORTAGE_ARCHLIST"] = " ".join(archlist)
1345 for x in pmask_locations:
1346 pkgmasklines.append(grabfile_package(
1347 os.path.join(x, "package.mask"), recursive=1))
1348 pkgmasklines = stack_lists(pkgmasklines, incremental=1)
1351 for x in pkgmasklines:
1352 mycatpkg=dep_getkey(x)
1353 if self.pmaskdict.has_key(mycatpkg):
1354 self.pmaskdict[mycatpkg].append(x)
1356 self.pmaskdict[mycatpkg]=[x]
1358 pkgprovidedlines = [grabfile(os.path.join(x, "package.provided")) for x in self.profiles]
1359 pkgprovidedlines = stack_lists(pkgprovidedlines, incremental=1)
1360 has_invalid_data = False
1361 for x in range(len(pkgprovidedlines)-1, -1, -1):
1362 myline = pkgprovidedlines[x]
1363 if not isvalidatom("=" + myline):
1364 writemsg("Invalid package name in package.provided:" + \
1365 " %s\n" % myline, noiselevel=-1)
1366 has_invalid_data = True
1367 del pkgprovidedlines[x]
1369 cpvr = catpkgsplit(pkgprovidedlines[x])
1370 if not cpvr or cpvr[0] == "null":
1371 writemsg("Invalid package name in package.provided: "+pkgprovidedlines[x]+"\n",
1373 has_invalid_data = True
1374 del pkgprovidedlines[x]
1376 if cpvr[0] == "virtual":
1377 writemsg("Virtual package in package.provided: %s\n" % \
1378 myline, noiselevel=-1)
1379 has_invalid_data = True
1380 del pkgprovidedlines[x]
1382 if has_invalid_data:
1383 writemsg("See portage(5) for correct package.provided usage.\n",
1385 self.pprovideddict = {}
1386 for x in pkgprovidedlines:
1390 mycatpkg=dep_getkey(x)
1391 if self.pprovideddict.has_key(mycatpkg):
1392 self.pprovideddict[mycatpkg].append(x)
1394 self.pprovideddict[mycatpkg]=[x]
1396 # reasonable defaults; this is important as without USE_ORDER,
1397 # USE will always be "" (nothing set)!
1398 if "USE_ORDER" not in self:
1399 self.backupenv["USE_ORDER"] = "env:pkg:conf:defaults:pkginternal"
1401 self["PORTAGE_GID"] = str(portage_gid)
1402 self.backup_changes("PORTAGE_GID")
1404 if self.get("PORTAGE_DEPCACHEDIR", None):
1405 self.depcachedir = self["PORTAGE_DEPCACHEDIR"]
1406 self["PORTAGE_DEPCACHEDIR"] = self.depcachedir
1407 self.backup_changes("PORTAGE_DEPCACHEDIR")
1409 overlays = self.get("PORTDIR_OVERLAY","").split()
1413 ov = normalize_path(ov)
1414 if os.path.isdir(ov):
1417 writemsg("!!! Invalid PORTDIR_OVERLAY" + \
1418 " (not a dir): '%s'\n" % ov, noiselevel=-1)
1419 self["PORTDIR_OVERLAY"] = " ".join(new_ov)
1420 self.backup_changes("PORTDIR_OVERLAY")
1422 if "CBUILD" not in self and "CHOST" in self:
1423 self["CBUILD"] = self["CHOST"]
1424 self.backup_changes("CBUILD")
1426 self["PORTAGE_BIN_PATH"] = PORTAGE_BIN_PATH
1427 self.backup_changes("PORTAGE_BIN_PATH")
1428 self["PORTAGE_PYM_PATH"] = PORTAGE_PYM_PATH
1429 self.backup_changes("PORTAGE_PYM_PATH")
1431 for var in ("PORTAGE_INST_UID", "PORTAGE_INST_GID"):
1433 self[var] = str(int(self.get(var, "0")))
1435 writemsg(("!!! %s='%s' is not a valid integer. " + \
1436 "Falling back to '0'.\n") % (var, self[var]),
1439 self.backup_changes(var)
1442 self.features = portage_util.unique_array(self["FEATURES"].split())
1444 if "gpg" in self.features:
1445 if not os.path.exists(self["PORTAGE_GPG_DIR"]) or \
1446 not os.path.isdir(self["PORTAGE_GPG_DIR"]):
1447 writemsg(colorize("BAD", "PORTAGE_GPG_DIR is invalid." + \
1448 " Removing gpg from FEATURES.\n"), noiselevel=-1)
1449 self.features.remove("gpg")
1451 if not portage_exec.sandbox_capable and \
1452 ("sandbox" in self.features or "usersandbox" in self.features):
1453 if self.profile_path is not None and \
1454 os.path.realpath(self.profile_path) == \
1455 os.path.realpath(PROFILE_PATH):
1456 """ Don't show this warning when running repoman and the
1457 sandbox feature came from a profile that doesn't belong to
1459 writemsg(colorize("BAD", "!!! Problem with sandbox" + \
1460 " binary. Disabling...\n\n"), noiselevel=-1)
1461 if "sandbox" in self.features:
1462 self.features.remove("sandbox")
1463 if "usersandbox" in self.features:
1464 self.features.remove("usersandbox")
1466 self.features.sort()
1467 self["FEATURES"] = " ".join(self.features)
1468 self.backup_changes("FEATURES")
1475 def _init_dirs(self):
# Purpose: create a handful of directories critical to portage operation
# (tmp, var/tmp, var/lib/portage, var/cache/edb) under ROOT, applying the
# owner gid / mode / mode-mask taken from dir_mode_map.
# NOTE(review): this listing is sampled -- lines are missing between the
# numbered lines below (e.g. the dict header and try/return statements).
1477 Create a few directories that are critical to portage operation
# Bail out when ROOT is not writable (e.g. read-only filesystem).
1479 if not os.access(self["ROOT"], os.W_OK):
# Map of relative path -> (gid, mode, modemask); gid -1 leaves ownership
# alone. 01777 is the conventional sticky world-writable tmp mode.
1483 "tmp" :(-1, 01777, 0),
1484 "var/tmp" :(-1, 01777, 0),
1485 "var/lib/portage" :(portage_gid, 02750, 02),
1486 "var/cache/edb" :(portage_gid, 0755, 02)
1489 for mypath, (gid, mode, modemask) in dir_mode_map.iteritems():
1491 mydir = os.path.join(self["ROOT"], mypath)
1492 portage_util.ensure_dirs(mydir, gid=gid, mode=mode, mask=modemask)
# A failure to create one directory is reported, not fatal.
1493 except portage_exception.PortageException, e:
1494 writemsg("!!! Directory initialization failed: '%s'\n" % mydir,
1496 writemsg("!!! %s\n" % str(e),
# Body of validate() -- the def line itself is not visible in this sampled
# listing. Checks ACCEPT_KEYWORDS against the arch list, warns when the
# profile symlink is missing/odd, and warns about the deprecated
# /etc/portage/virtuals file. Warnings only; nothing is raised here.
1500 """Validate miscellaneous settings and display warnings if necessary.
1501 (This code was previously in the global scope of portage.py)"""
1503 groups = self["ACCEPT_KEYWORDS"].split()
1504 archlist = self.archlist()
1506 writemsg("--- 'profiles/arch.list' is empty or not available. Empty portage tree?\n")
# Keywords beginning with '-' are removals, so they are exempt from the
# arch-list membership check.
1508 for group in groups:
1509 if group not in archlist and group[0] != '-':
1510 writemsg("!!! INVALID ACCEPT_KEYWORDS: %s\n" % str(group),
1513 abs_profile_path = os.path.join(self["PORTAGE_CONFIGROOT"],
1514 PROFILE_PATH.lstrip(os.path.sep))
# A profile that is neither a symlink nor a cascading profile (no
# "parent" file) will break most merges -- warn loudly (\a = bell).
1515 if not os.path.islink(abs_profile_path) and \
1516 not os.path.exists(os.path.join(abs_profile_path, "parent")) and \
1517 os.path.exists(os.path.join(self["PORTDIR"], "profiles")):
1518 writemsg("\a\n\n!!! %s is not a symlink and will probably prevent most merges.\n" % abs_profile_path,
1520 writemsg("!!! It should point into a profile within %s/profiles/\n" % self["PORTDIR"])
1521 writemsg("!!! (You can safely ignore this message when syncing. It's harmless.)\n\n\n")
# Deprecation notice for the old user virtuals file location.
1523 abs_user_virtuals = os.path.join(self["PORTAGE_CONFIGROOT"],
1524 USER_VIRTUALS_FILE.lstrip(os.path.sep))
1525 if os.path.exists(abs_user_virtuals):
1526 writemsg("\n!!! /etc/portage/virtuals is deprecated in favor of\n")
1527 writemsg("!!! /etc/portage/profile/virtuals. Please move it to\n")
1528 writemsg("!!! this new location.\n\n")
def loadVirtuals(self,root):
	"""Deprecated entry point retained only for API compatibility; portage
	itself no longer calls it. Emits a deprecation notice and then simply
	delegates to getvirtuals() (the *root* argument is passed through but
	getvirtuals ignores it)."""
	writemsg("DEPRECATED: portage.config.loadVirtuals\n")
	self.getvirtuals(root)
# load_best_module: resolve the highest-priority backend module named by
# property_string (via self.modules / self.module_priority) and import it.
# Sampled listing -- the try/except around load_mod is partially missing.
1535 def load_best_module(self,property_string):
1536 best_mod = best_from_dict(property_string,self.modules,self.module_priority)
1538 mod = load_mod(best_mod)
# On import failure, dump a traceback naming the offending module.
1540 dump_traceback(red("Error: Failed to import module '%s'") % best_mod, noiselevel=0)
# modifying: guard called by mutators; raises once the config is locked.
1550 def modifying(self):
1552 raise Exception, "Configuration is locked."
# backup_changes: copy one key from the live "env" configdict into
# self.backupenv so it survives reset(); raises KeyError for unknown keys.
1554 def backup_changes(self,key=None):
1556 if key and self.configdict["env"].has_key(key):
1557 self.backupenv[key] = copy.deepcopy(self.configdict["env"][key])
1559 raise KeyError, "No such key defined in environment: %s" % key
# reset: restore the "env" layer from self.backupenv, optionally clear the
# per-package layers, rebuild usemask/useforce from the profile lists, and
# finish with self.regenerate().
1561 def reset(self,keeping_pkg=0,use_cache=1):
1563 Restore environment from self.backupenv, call self.regenerate()
1564 @param keeping_pkg: Should we keep the set_cpv() data or delete it.
1565 @type keeping_pkg: Boolean
1566 @param use_cache: Should self.regenerate use the cache or not
1567 @type use_cache: Boolean
1571 self.configdict["env"].clear()
1572 self.configdict["env"].update(self.backupenv)
1574 self.modifiedkeys = []
# When not keeping_pkg, drop everything setcpv() loaded and restore the
# profile defaults/masks to their un-specialized (per-profile) values.
1578 self.configdict["pkg"].clear()
1579 self.configdict["pkginternal"].clear()
1580 self.configdict["defaults"]["USE"] = \
1581 " ".join(self.make_defaults_use)
1582 self.usemask = set(stack_lists(
1583 self.usemask_list, incremental=True))
1584 self.useforce = set(stack_lists(
1585 self.useforce_list, incremental=True))
1586 self.regenerate(use_cache=use_cache)
# load_infodir: populate configdict["pkg"]/["env"] from the metadata files
# of an installed package's infodir (vardb build-info directory). Only
# ALL-CAPS filenames are considered; USE gets "-* " prepended so the binary
# package's recorded flags fully replace the current ones.
1588 def load_infodir(self,infodir):
1590 if self.configdict.has_key("pkg"):
1591 for x in self.configdict["pkg"].keys():
1592 del self.configdict["pkg"][x]
1594 writemsg("No pkg setup for settings instance?\n",
1598 if os.path.exists(infodir):
1599 if os.path.exists(infodir+"/environment"):
1600 self.configdict["pkg"]["PORT_ENV_FILE"] = infodir+"/environment"
1602 myre = re.compile('^[A-Z]+$')
1604 for filename in listdir(infodir,filesonly=1,EmptyOnError=1):
1605 if myre.match(filename):
1607 file_path = os.path.join(infodir, filename)
1608 mydata = open(file_path).read().strip()
# Size guard: skip oversized values except USE, which must always load.
1609 if len(mydata) < 2048 or filename == "USE":
# Reject corrupt metadata containing NUL bytes.
1610 if null_byte in mydata:
1611 writemsg("!!! Null byte found in metadata " + \
1612 "file: '%s'\n" % file_path, noiselevel=-1)
1614 if filename == "USE":
1615 binpkg_flags = "-* " + mydata
1616 self.configdict["pkg"][filename] = binpkg_flags
1617 self.configdict["env"][filename] = mydata
1619 self.configdict["pkg"][filename] = mydata
1620 self.configdict["env"][filename] = mydata
1621 # CATEGORY is important because it's used in doebuild
1622 # to infer the cpv. If it's corrupted, it leads to
1623 # strange errors later on, so we'll validate it and
1624 # print a warning if necessary.
1625 if filename == "CATEGORY":
1626 matchobj = re.match("[-a-zA-Z0-9_.+]+", mydata)
1627 if not matchobj or matchobj.start() != 0 or \
1628 matchobj.end() != len(mydata):
1629 writemsg("!!! CATEGORY file is corrupt: %s\n" % \
1630 os.path.join(infodir, filename), noiselevel=-1)
1631 except (OSError, IOError):
1632 writemsg("!!! Unable to read file: %s\n" % infodir+"/"+filename,
# setcpv: specialize this config for one package (cpv). Stacks, in order:
# IUSE "+flag" defaults (pkginternal), per-profile package.use
# (defaults), per-profile package.use.force / package.use.mask, and the
# user's package.use (puse). Ends with reset(keeping_pkg=1) so USE is
# regenerated with the package-specific layers in place.
1638 def setcpv(self, mycpv, use_cache=1, mydb=None):
1640 Load a particular CPV into the config, this lets us see the
1641 Default USE flags for a particular ebuild as well as the USE
1642 flags from package.use.
1644 @param mycpv: A cpv to load
1646 @param use_cache: Enables caching
1647 @type use_cache: Boolean
1648 @param mydb: a dbapi instance that supports aux_get with the IUSE key.
1649 @type mydb: dbapi or derivative.
# Fast path: already specialized for this cpv.
1654 if self.mycpv == mycpv:
1658 cp = dep_getkey(mycpv)
# "+flag" entries in IUSE become package-internal USE defaults.
1661 pkginternaluse = " ".join([x[1:] \
1662 for x in mydb.aux_get(mycpv, ["IUSE"])[0].split() \
1663 if x.startswith("+")])
1664 if pkginternaluse != self.configdict["pkginternal"].get("USE", ""):
1665 self.configdict["pkginternal"]["USE"] = pkginternaluse
# Per-profile package.use: the best-matching atom per profile wins.
1668 for i in xrange(len(self.profiles)):
1669 defaults.append(self.make_defaults_use[i])
1670 cpdict = self.pkgprofileuse[i].get(cp, None)
1672 best_match = best_match_to_list(self.mycpv, cpdict.keys())
1674 defaults.append(cpdict[best_match])
1675 defaults = " ".join(defaults)
1676 if defaults != self.configdict["defaults"].get("USE",""):
1677 self.configdict["defaults"]["USE"] = defaults
# package.use.force, stacked the same way.
1680 for i in xrange(len(self.profiles)):
1681 useforce.append(self.useforce_list[i])
1682 cpdict = self.puseforce_list[i].get(cp, None)
1684 best_match = best_match_to_list(self.mycpv, cpdict.keys())
1686 useforce.append(cpdict[best_match])
1687 useforce = set(stack_lists(useforce, incremental=True))
1688 if useforce != self.useforce:
1689 self.useforce = useforce
# package.use.mask, stacked the same way.
1692 for i in xrange(len(self.profiles)):
1693 usemask.append(self.usemask_list[i])
1694 cpdict = self.pusemask_list[i].get(cp, None)
1696 best_match = best_match_to_list(self.mycpv, cpdict.keys())
1698 usemask.append(cpdict[best_match])
1699 usemask = set(stack_lists(usemask, incremental=True))
1700 if usemask != self.usemask:
1701 self.usemask = usemask
# User package.use (puse) for this cp, best-matching atom wins.
1705 if self.pusedict.has_key(cp):
1706 self.pusekey = best_match_to_list(self.mycpv, self.pusedict[cp].keys())
1708 self.puse = " ".join(self.pusedict[cp][self.pusekey])
1709 if oldpuse != self.puse:
1711 self.configdict["pkg"]["PKGUSE"] = self.puse[:] # For saving to PUSE file
1712 self.configdict["pkg"]["USE"] = self.puse[:] # this gets appended to USE
1713 # CATEGORY is essential for doebuild calls
1714 self.configdict["pkg"]["CATEGORY"] = mycpv.split("/")[0]
1716 self.reset(keeping_pkg=1,use_cache=use_cache)
# setinst: fold the PROVIDE entries of an installed/merged package into
# self.treeVirtuals (permanently for this instance) and recompile
# self.virtuals. USE-conditional provides are reduced with the package's
# recorded USE flags.
1718 def setinst(self,mycpv,mydbapi):
1720 if len(self.virtuals) == 0:
1722 # Grab the virtuals this package provides and add them into the tree virtuals.
1723 provides = mydbapi.aux_get(mycpv, ["PROVIDE"])[0]
1724 if isinstance(mydbapi, portdbapi):
1727 myuse = mydbapi.aux_get(mycpv, ["USE"])[0]
1728 virts = flatten(portage_dep.use_reduce(portage_dep.paren_reduce(provides), uselist=myuse.split()))
1730 cp = dep_getkey(mycpv)
1732 virt = dep_getkey(virt)
1733 if not self.treeVirtuals.has_key(virt):
1734 self.treeVirtuals[virt] = []
1735 # XXX: Is this bad? -- It's a permanent modification
1736 if cp not in self.treeVirtuals[virt]:
1737 self.treeVirtuals[virt].append(cp)
# Recompute the effective virtuals mapping with the new provider known.
1739 self.virtuals = self.__getvirtuals_compile()
# regenerate: recompute all incremental variables (USE last, since it
# depends on USE_EXPAND), reload profile.env, apply autouse when enabled,
# protect USE_EXPAND-derived flags, apply useforce/usemask, and write the
# final USE back into the last configlist element (the environment layer).
# Statement order here is load-bearing; comments only, code untouched.
1742 def regenerate(self,useonly=0,use_cache=1):
1745 This involves regenerating valid USE flags, re-expanding USE_EXPAND flags
1746 re-stacking USE flags (-flag and -*), as well as any other INCREMENTAL
1747 variables. This also updates the env.d configdict; useful in case an ebuild
1748 changes the environment.
1750 If FEATURES has already stacked, it is not stacked twice.
1752 @param useonly: Only regenerate USE flags (not any other incrementals)
1753 @type useonly: Boolean
1754 @param use_cache: Enable Caching (only for autouse)
1755 @type use_cache: Boolean
# Re-entrancy guard: autouse() can call back into regenerate().
1760 if self.already_in_regenerate:
1761 # XXX: THIS REALLY NEEDS TO GET FIXED. autouse() loops.
1762 writemsg("!!! Looping in regenerate.\n",1)
1765 self.already_in_regenerate = 1
1767 # We grab the latest profile.env here since it changes frequently.
1768 self.configdict["env.d"].clear()
1770 os.path.join(self["ROOT"], "etc", "profile.env"), expand=False)
1772 # env_d will be None if profile.env doesn't exist.
1773 self.configdict["env.d"].update(env_d)
# useonly restricts the work to USE; otherwise all INCREMENTALS stack.
1776 myincrementals=["USE"]
1778 myincrementals = self.incrementals
1779 myincrementals = set(myincrementals)
1780 # If self.features exists, it has already been stacked and may have
1781 # been mutated, so don't stack it again or else any mutations will be
1783 if "FEATURES" in myincrementals and hasattr(self, "features"):
1784 myincrementals.remove("FEATURES")
1786 if "USE" in myincrementals:
1787 # Process USE last because it depends on USE_EXPAND which is also
1789 myincrementals.remove("USE")
# Generic incremental stacking across all config layers but the last.
1791 for mykey in myincrementals:
1793 mydbs=self.configlist[:-1]
1797 if mykey not in curdb:
1799 #variables are already expanded
1800 mysplit = curdb[mykey].split()
1804 # "-*" is a special "minus" var that means "unset all settings".
1805 # so USE="-* gnome" will have *just* gnome enabled.
1810 # Not legal. People assume too much. Complain.
1811 writemsg(red("USE flags should not start with a '+': %s\n" % x),
# "-flag" removes a previously stacked flag.
1818 if (x[1:] in myflags):
1820 del myflags[myflags.index(x[1:])]
1823 # We got here, so add it now.
1824 if x not in myflags:
1828 #store setting in last element of configlist, the original environment:
1829 if myflags or mykey in self:
1830 self.configlist[-1][mykey] = " ".join(myflags)
1833 # Do the USE calculation last because it depends on USE_EXPAND.
1834 if "auto" in self["USE_ORDER"].split(":"):
1835 self.configdict["auto"]["USE"] = autouse(
1836 vartree(root=self["ROOT"], categories=self.categories,
1838 use_cache=use_cache, mysettings=self)
1840 self.configdict["auto"]["USE"] = ""
# Flags derived from USE_EXPAND vars (e.g. LINGUAS -> linguas_en) are
# collected as "protected" so the USE stacking below cannot drop them.
1842 use_expand_protected = []
1843 use_expand = self.get("USE_EXPAND", "").split()
1844 for var in use_expand:
1845 var_lower = var.lower()
1846 for x in self.get(var, "").split():
1847 # Any incremental USE_EXPAND variables have already been
1848 # processed, so leading +/- operators are invalid here.
1850 writemsg(colorize("BAD", "Invalid '+' operator in " + \
1851 "non-incremental variable '%s': '%s'\n" % (var, x)),
1855 writemsg(colorize("BAD", "Invalid '-' operator in " + \
1856 "non-incremental variable '%s': '%s'\n" % (var, x)),
1859 mystr = var_lower + "_" + x
1860 if mystr not in use_expand_protected:
1861 use_expand_protected.append(mystr)
# Build the USE lookup order from USE_ORDER (reversed: lowest first).
1864 for x in self["USE_ORDER"].split(":"):
1865 if x in self.configdict:
1866 self.uvlist.append(self.configdict[x])
1867 self.uvlist.reverse()
1869 myflags = use_expand_protected[:]
1870 for curdb in self.uvlist:
1871 if "USE" not in curdb:
1873 mysplit = curdb["USE"].split()
# "-*" resets to just the protected USE_EXPAND flags.
1876 myflags = use_expand_protected[:]
1880 writemsg(colorize("BAD", "USE flags should not start " + \
1881 "with a '+': %s\n" % x), noiselevel=-1)
1888 myflags.remove(x[1:])
1893 if x not in myflags:
# Force-on flags always win; masked flags are stripped afterwards.
1896 myflags = set(myflags)
1897 myflags.update(self.useforce)
1899 # FEATURES=test should imply USE=test
1900 if "test" in self.configlist[-1].get("FEATURES","").split():
1903 usesplit = [ x for x in myflags if \
1904 x not in self.usemask]
1908 # Use the calculated USE flags to regenerate the USE_EXPAND flags so
1909 # that they are consistent.
1910 for var in use_expand:
1911 prefix = var.lower() + "_"
1912 prefix_len = len(prefix)
1913 expand_flags = set([ x[prefix_len:] for x in usesplit \
1914 if x.startswith(prefix) ])
1915 var_split = self.get(var, "").split()
1916 # Preserve the order of var_split because it can matter for things
1918 var_split = [ x for x in var_split if x in expand_flags ]
1919 var_split.extend(expand_flags.difference(var_split))
1920 if var_split or var in self:
1921 # Don't export empty USE_EXPAND vars unless the user config
1922 # exports them as empty. This is required for vars such as
1923 # LINGUAS, where unset and empty have different meanings.
1924 self[var] = " ".join(var_split)
1926 # Pre-Pend ARCH variable to USE settings so '-*' in env doesn't kill arch.
1927 if self.configdict["defaults"].has_key("ARCH"):
1928 if self.configdict["defaults"]["ARCH"]:
1929 if self.configdict["defaults"]["ARCH"] not in usesplit:
1930 usesplit.insert(0,self.configdict["defaults"]["ARCH"])
1932 self.configlist[-1]["USE"]= " ".join(usesplit)
# Clear the re-entrancy guard on the way out.
1934 self.already_in_regenerate = 0
# get_virts_p: cached mapping of package-name-only virtual keys
# ("virtual/foo" -> keyed by "foo") to their provider lists, derived from
# getvirtuals(). Sampled listing -- the cache check/return lines are
# missing from this view.
1936 def get_virts_p(self, myroot):
1939 virts = self.getvirtuals(myroot)
1941 myvkeys = virts.keys()
# Keep only the package part of "category/pkg" as the lookup key.
1943 vkeysplit = x.split("/")
1944 if not self.virts_p.has_key(vkeysplit[1]):
1945 self.virts_p[vkeysplit[1]] = virts[x]
# getvirtuals: build (once, then cache in self.virtuals) the effective
# virtuals mapping: profile "virtuals" files are stacked, validated, and
# ordered by decreasing preference; installed-package providers are folded
# in (except under repoman/local_config=False), then compiled together.
1948 def getvirtuals(self, myroot=None):
1949 """myroot is now ignored because, due to caching, it has always been
1950 broken for all but the first call."""
1951 myroot = self["ROOT"]
# Cached result from a prior call.
1953 return self.virtuals
1956 for x in self.profiles:
1957 virtuals_file = os.path.join(x, "virtuals")
1958 virtuals_dict = grabdict(virtuals_file)
# A virtuals key must itself be a bare, valid category/pkg atom.
1959 for k in virtuals_dict.keys():
1960 if not isvalidatom(k) or dep_getkey(k) != k:
1961 writemsg("--- Invalid virtuals atom in %s: %s\n" % \
1962 (virtuals_file, k), noiselevel=-1)
1963 del virtuals_dict[k]
1965 myvalues = virtuals_dict[k]
# Provider entries may carry a leading "-" for incremental removal.
1968 if x.startswith("-"):
1969 # allow incrementals
1971 if not isvalidatom(myatom):
1972 writemsg("--- Invalid atom in %s: %s\n" % \
1973 (virtuals_file, x), noiselevel=-1)
1976 del virtuals_dict[k]
1978 virtuals_list.append(virtuals_dict)
1980 self.dirVirtuals = stack_dictlist(virtuals_list, incremental=True)
1983 for virt in self.dirVirtuals:
1984 # Preference for virtuals decreases from left to right.
1985 self.dirVirtuals[virt].reverse()
1987 # Repoman does not use user or tree virtuals.
1988 if self.local_config and not self.treeVirtuals:
1989 temp_vartree = vartree(myroot, None,
1990 categories=self.categories, settings=self)
1991 # Reduce the provides into a list by CP.
1992 self.treeVirtuals = map_dictlist_vals(getCPFromCPV,temp_vartree.get_all_provides())
1994 self.virtuals = self.__getvirtuals_compile()
1995 return self.virtuals
# __getvirtuals_compile: merge installed (tree) and profile (dir) virtuals
# into one mapping. Providers that are both installed and listed in a
# profile (ptVirtuals) get top preference, then installed, then profile.
1997 def __getvirtuals_compile(self):
1998 """Stack installed and profile virtuals. Preference for virtuals
1999 decreases from left to right.
2000 Order of preference:
2001 1. installed and in profile
2006 # Virtuals by profile+tree preferences.
2009 for virt, installed_list in self.treeVirtuals.iteritems():
2010 profile_list = self.dirVirtuals.get(virt, None)
# No profile entry for this virtual -> nothing to intersect.
2011 if not profile_list:
2013 for cp in installed_list:
2014 if cp in profile_list:
2015 ptVirtuals.setdefault(virt, [])
2016 ptVirtuals[virt].append(cp)
# Stack with ptVirtuals first so it wins on conflicts.
2018 virtuals = stack_dictlist([ptVirtuals, self.treeVirtuals,
# Mapping protocol: deletion and lookup walk self.lookuplist (the config
# layers in priority order). Sampled listing -- bodies are only partially
# visible here.
2022 def __delitem__(self,mykey):
2024 for x in self.lookuplist:
# __getitem__: first layer that has the key wins.
2029 def __getitem__(self,mykey):
2031 for x in self.lookuplist:
2033 writemsg("!!! lookuplist is null.\n")
2034 elif x.has_key(mykey):
# has_key: true if any layer defines the key.
2039 def has_key(self,mykey):
2040 for x in self.lookuplist:
2041 if x.has_key(mykey):
def __contains__(self, mykey):
	"""Membership test hook (`in` / `not in`); delegates to has_key()
	and normalizes the result to a real bool."""
	if self.has_key(mykey):
		return True
	return False
# dict-style accessors (setdefault / get / keys tail) plus __setitem__ and
# the environ() body. Sampled listing -- several bodies are partial.
2049 def setdefault(self, k, x=None):
2056 def get(self, k, x=None):
# keys(): union of keys across all layers, de-duplicated.
2063 return unique_array(flatten([x.keys() for x in self.lookuplist]))
# __setitem__: writes land in the volatile "env" layer and are discarded
# on reset(); only string values are accepted.
2065 def __setitem__(self,mykey,myvalue):
2066 "set a value; will be thrown away at reset() time"
2067 if type(myvalue) != types.StringType:
2068 raise ValueError("Invalid type being used as a value: '%s': '%s'" % (str(mykey),str(myvalue)))
2070 self.modifiedkeys += [mykey]
2071 self.configdict["env"][mykey]=myvalue
# environ() body: flatten the config into a plain dict for child
# processes, warning on non-string values and defaulting HOME.
2074 "return our locally-maintained environment"
2076 for x in self.keys():
2078 if not isinstance(myvalue, basestring):
2079 writemsg("!!! Non-string value in config: %s=%s\n" % \
2080 (x, myvalue), noiselevel=-1)
2083 if not mydict.has_key("HOME") and mydict.has_key("BUILD_PREFIX"):
2084 writemsg("*** HOME not set. Setting to "+mydict["BUILD_PREFIX"]+"\n")
2085 mydict["HOME"]=mydict["BUILD_PREFIX"][:]
def thirdpartymirrors(self):
	"""Return the stacked thirdpartymirrors mapping (mirror name ->
	list of mirror URIs), built lazily from PORTDIR's profiles dir plus
	every PORTDIR_OVERLAY profiles dir and cached on the instance."""
	if getattr(self, "_thirdpartymirrors", None) is None:
		# Overlays are prepended so their entries take precedence
		# over the main PORTDIR tree when stacking.
		profileroots = [os.path.join(self["PORTDIR"], "profiles")]
		for overlay in self["PORTDIR_OVERLAY"].split():
			profileroots.insert(0, os.path.join(overlay, "profiles"))
		thirdparty_lists = []
		for root in profileroots:
			thirdparty_lists.append(grabdict(os.path.join(root, "thirdpartymirrors")))
		self._thirdpartymirrors = stack_dictlist(thirdparty_lists, incremental=True)
	return self._thirdpartymirrors
# Tail of archlist(): expand PORTAGE_ARCHLIST into [arch, ~arch] pairs.
# (The def line is not visible in this sampled listing.)
2099 return flatten([[myarch, "~" + myarch] \
2100 for myarch in self["PORTAGE_ARCHLIST"].split()])
# selinux_enabled: cached 0/1 check -- requires USE=selinux, the selinux
# module imported, and the kernel reporting SELinux enabled. When the
# answer is 0 the module is unloaded from sys.modules.
2102 def selinux_enabled(self):
2103 if getattr(self, "_selinux_enabled", None) is None:
2104 self._selinux_enabled = 0
2105 if "selinux" in self["USE"].split():
2106 if "selinux" in globals():
2107 if selinux.is_selinux_enabled() == 1:
2108 self._selinux_enabled = 1
2110 self._selinux_enabled = 0
2112 writemsg("!!! SELinux module not found. Please verify that it was installed.\n",
2114 self._selinux_enabled = 0
2115 if self._selinux_enabled == 0:
2117 del sys.modules["selinux"]
2120 return self._selinux_enabled
2122 # XXX This would be to replace getstatusoutput completely.
2123 # XXX Issue: cannot block execution. Deadlock condition.
# Module-level spawn(): portage's wrapper around the portage_exec spawn*
# family, selecting bash/sandbox variants and privilege/SELinux handling
# based on FEATURES and RESTRICT. Sampled listing -- several lines are
# missing between the numbered lines below.
2124 def spawn(mystring, mysettings, debug=0, free=0, droppriv=0, sesandbox=0, **keywords):
2126 Spawn a subprocess with extra portage-specific options.
2129 Sandbox: Sandbox means the spawned process will be limited in its ability t
2130 read and write files (normally this means it is restricted to ${IMAGE}/)
2131 SElinux Sandbox: Enables sandboxing on SElinux
2132 Reduced Privileges: Drops privileges such that the process runs as portage:portage
2135 Notes: os.system cannot be used because it messes with signal handling. Instead we
2136 use the portage_exec spawn* family of functions.
2138 This function waits for the process to terminate.
2140 @param mystring: Command to run
2141 @type mystring: String
2142 @param mysettings: Either a Dict of Key,Value pairs or an instance of portage.config
2143 @type mysettings: Dictionary or config instance
2144 @param debug: Ignored
2145 @type debug: Boolean
2146 @param free: Enable sandboxing for this process
2148 @param droppriv: Drop to portage:portage when running this command
2149 @type droppriv: Boolean
2150 @param sesandbox: Enable SELinux Sandboxing (toggles a context switch)
2151 @type sesandbox: Boolean
2152 @param keywords: Extra options encoded as a dict, to be passed to spawn
2153 @type keywords: Dictionary
2156 1. The return code of the spawned process.
# Plain dicts get a generic opt_name; config instances contribute PF.
2159 if type(mysettings) == types.DictType:
2161 keywords["opt_name"]="[ %s ]" % "portage"
2163 check_config_instance(mysettings)
2164 env=mysettings.environ()
2165 keywords["opt_name"]="[%s]" % mysettings["PF"]
2167 features = mysettings.features
2168 # XXX: Negative RESTRICT word
# droppriv only holds when FEATURES=userpriv and RESTRICT does not
# mention userpriv/nouserpriv.
2169 droppriv=(droppriv and ("userpriv" in features) and not \
2170 (("nouserpriv" in mysettings["RESTRICT"].split()) or \
2171 ("userpriv" in mysettings["RESTRICT"].split())))
# Only drop when running as root (uid 0) and portage ids are known.
2173 if droppriv and not uid and portage_gid and portage_uid:
2174 keywords.update({"uid":portage_uid,"gid":portage_gid,"groups":userpriv_groups,"umask":002})
# "free" (no sandbox) is derived from the sandbox/usersandbox FEATURES.
2177 free=((droppriv and "usersandbox" not in features) or \
2178 (not droppriv and "sandbox" not in features and "usersandbox" not in features))
2181 keywords["opt_name"] += " bash"
2182 spawn_func = portage_exec.spawn_bash
2184 keywords["opt_name"] += " sandbox"
2185 spawn_func = portage_exec.spawn_sandbox
# SELinux sandbox: swap into the sandbox context for the child, then
# restore the previous exec context afterwards.
2188 con = selinux.getcontext()
2189 con = con.replace(mysettings["PORTAGE_T"], mysettings["PORTAGE_SANDBOX_T"])
2190 selinux.setexec(con)
2192 retval = spawn_func(mystring, env=env, **keywords)
2195 selinux.setexec(None)
# fetch(): download every URI in myuris into DISTDIR, honoring RESTRICT=mirror/
# RESTRICT=fetch, custom ("local") mirrors, GENTOO_MIRRORS, third-party
# mirror:// groups, filesystem mirrors, optional per-file distfile locks, and
# Manifest ("DIST") digest verification of already-present/just-fetched files.
# NOTE(review): this listing is elided -- many original source lines are
# missing between the numbered lines below (try/else/return lines among them),
# so comments describe only the visible logic; confirm against the full file.
2199 def fetch(myuris, mysettings, listonly=0, fetchonly=0, locks_in_subdir=".locks",use_locks=1, try_mirrors=1):
2200 "fetch files. Will use digest file if available."
2202 features = mysettings.features
2203 # 'nomirror' is bad/negative logic. You Restrict mirroring, not no-mirroring.
# FEATURES=mirror + RESTRICT=mirror (without FEATURES=lmirror) means the
# fetch is skipped entirely for this package.
2204 if ("mirror" in mysettings["RESTRICT"].split()) or \
2205 ("nomirror" in mysettings["RESTRICT"].split()):
2206 if ("mirror" in features) and ("lmirror" not in features):
2207 # lmirror should allow you to bypass mirror restrictions.
2208 # XXX: This is not a good thing, and is temporary at best.
2209 print ">>> \"mirror\" mode desired and \"mirror\" restriction found; skipping fetch."
2212 thirdpartymirrors = mysettings.thirdpartymirrors()
2214 check_config_instance(mysettings)
# User-defined mirror groups from CUSTOM_MIRRORS_FILE (mirror name -> URL list).
2216 custommirrors = grabdict(os.path.join(mysettings["PORTAGE_CONFIGROOT"],
2217 CUSTOM_MIRRORS_FILE.lstrip(os.path.sep)), recursive=1)
# Listing-only mode or no distlocks feature disables file locking
# (fetch_to_ro: presumably set for read-only DISTDIR -- elided here, confirm).
2221 if listonly or ("distlocks" not in features):
2225 if "skiprocheck" in features:
2228 if not os.access(mysettings["DISTDIR"],os.W_OK) and fetch_to_ro:
2230 writemsg(red("!!! For fetching to a read-only filesystem, " + \
2231 "locking should be turned off.\n"), noiselevel=-1)
2232 writemsg("!!! This can be done by adding -distlocks to " + \
2233 "FEATURES in /etc/make.conf\n", noiselevel=-1)
2236 # local mirrors are always added
2237 if custommirrors.has_key("local"):
2238 mymirrors += custommirrors["local"]
# With mirror restriction only local mirrors are used; otherwise
# GENTOO_MIRRORS entries are appended (trailing slashes stripped).
2240 if ("nomirror" in mysettings["RESTRICT"].split()) or \
2241 ("mirror" in mysettings["RESTRICT"].split()):
2242 # We don't add any mirrors.
2246 mymirrors += [x.rstrip("/") for x in mysettings["GENTOO_MIRRORS"].split() if x]
# DIST-type digests from this package's Manifest: file -> {hashname: value}.
2248 mydigests = Manifest(
2249 mysettings["O"], mysettings["DISTDIR"]).getTypeDigests("DIST")
# Mirrors given as absolute paths are local filesystem mirrors (fsmirrors);
# iterated in reverse, presumably so they can be removed in place -- confirm.
2252 for x in range(len(mymirrors)-1,-1,-1):
2253 if mymirrors[x] and mymirrors[x][0]=='/':
2254 fsmirrors += [mymirrors[x]]
2257 restrict_fetch = "fetch" in mysettings["RESTRICT"].split()
2258 custom_local_mirrors = custommirrors.get("local", [])
2260 # With fetch restriction, a normal uri may only be fetched from
2261 # custom local mirrors (if available). A mirror:// uri may also
2262 # be fetched from specific mirrors (effectively overriding fetch
2263 # restriction, but only for specific mirrors).
2264 locations = custom_local_mirrors
2266 locations = mymirrors
# Build filedict: distfile basename -> ordered list of candidate URIs.
2269 primaryuri_indexes={}
2270 for myuri in myuris:
2271 myfile=os.path.basename(myuri)
2272 if not filedict.has_key(myfile):
2274 for y in range(0,len(locations)):
2275 filedict[myfile].append(locations[y]+"/distfiles/"+myfile)
# mirror://name/path URIs expand through custom then official mirror groups.
2276 if myuri[:9]=="mirror://":
2277 eidx = myuri.find("/", 9)
2279 mirrorname = myuri[9:eidx]
2281 # Try user-defined mirrors first
2282 if custommirrors.has_key(mirrorname):
2283 for cmirr in custommirrors[mirrorname]:
2284 filedict[myfile].append(cmirr+"/"+myuri[eidx+1:])
2285 # remove the mirrors we tried from the list of official mirrors
2286 if cmirr.strip() in thirdpartymirrors[mirrorname]:
2287 thirdpartymirrors[mirrorname].remove(cmirr)
2288 # now try the official mirrors
2289 if thirdpartymirrors.has_key(mirrorname):
# Shuffle for crude load balancing across official mirrors.
2290 shuffle(thirdpartymirrors[mirrorname])
2292 for locmirr in thirdpartymirrors[mirrorname]:
2293 filedict[myfile].append(locmirr+"/"+myuri[eidx+1:])
2295 if not filedict[myfile]:
2296 writemsg("No known mirror by the name: %s\n" % (mirrorname))
2298 writemsg("Invalid mirror definition in SRC_URI:\n", noiselevel=-1)
2299 writemsg(" %s\n" % (myuri), noiselevel=-1)
2302 # Only fetch from specific mirrors is allowed.
# RESTRICT=primaryuri: prefer the upstream (source) URI over mirrors by
# inserting it near the front of the candidate list, in original order.
2304 if "primaryuri" in mysettings["RESTRICT"].split():
2305 # Use the source site first.
2306 if primaryuri_indexes.has_key(myfile):
2307 primaryuri_indexes[myfile] += 1
2309 primaryuri_indexes[myfile] = 0
2310 filedict[myfile].insert(primaryuri_indexes[myfile], myuri)
2312 filedict[myfile].append(myuri)
# Sanity: FETCHCOMMAND/RESUMECOMMAND must be defined (normally from
# make.globals); the handling branch is elided here.
2319 for var_name in ("FETCHCOMMAND", "RESUMECOMMAND"):
2320 if not mysettings.get(var_name, None):
# Ensure DISTDIR (and its .locks subdir when distlocks is enabled) exists
# with portage-group permissions, recursing only when the top level needed
# adjustment.
2328 if "distlocks" in features:
2329 distdir_dirs.append(".locks")
2332 for x in distdir_dirs:
2333 mydir = os.path.join(mysettings["DISTDIR"], x)
2334 if portage_util.ensure_dirs(mydir, gid=portage_gid, mode=dirmode, mask=modemask):
2335 writemsg("Adjusting permissions recursively: '%s'\n" % mydir,
2338 raise # bail out on the first error that occurs during recursion
2339 if not apply_recursive_permissions(mydir,
2340 gid=portage_gid, dirmode=dirmode, dirmask=modemask,
2341 filemode=filemode, filemask=modemask, onerror=onerror):
2342 raise portage_exception.OperationNotPermitted(
2343 "Failed to apply recursive permissions for the portage group.")
2344 except portage_exception.PortageException, e:
2345 if not os.path.isdir(mysettings["DISTDIR"]):
2346 writemsg("!!! %s\n" % str(e), noiselevel=-1)
2347 writemsg("!!! Directory Not Found: DISTDIR='%s'\n" % mysettings["DISTDIR"], noiselevel=-1)
2348 writemsg("!!! Fetching will fail!\n", noiselevel=-1)
2351 not fetch_to_ro and \
2352 not os.access(mysettings["DISTDIR"], os.W_OK):
2353 writemsg("!!! No write access to '%s'\n" % mysettings["DISTDIR"],
2357 if can_fetch and use_locks and locks_in_subdir:
2358 distlocks_subdir = os.path.join(mysettings["DISTDIR"], locks_in_subdir)
2359 if not os.access(distlocks_subdir, os.W_OK):
2360 writemsg("!!! No write access to write to %s. Aborting.\n" % distlocks_subdir,
2363 del distlocks_subdir
# Main per-file loop. Visible fragment of the 'fetched' state docstring:
# 1 = partially downloaded, 2 = completely downloaded.
2364 for myfile in filedict.keys():
2368 1 partially downloaded
2369 2 completely downloaded
2371 myfile_path = os.path.join(mysettings["DISTDIR"], myfile)
2375 writemsg_stdout("\n", noiselevel=-1)
# Take a per-file lock (in the .locks subdir when configured).
2377 if use_locks and can_fetch:
2379 file_lock = portage_locks.lockfile(mysettings["DISTDIR"]+"/"+locks_in_subdir+"/"+myfile,wantnewlockfile=1)
2381 file_lock = portage_locks.lockfile(mysettings["DISTDIR"]+"/"+myfile,wantnewlockfile=1)
# Try to satisfy the file from a local filesystem mirror before any
# network fetch (missing-file errors are ignored via ENOENT check below).
2384 if fsmirrors and not os.path.exists(myfile_path):
2385 for mydir in fsmirrors:
2386 mirror_file = os.path.join(mydir, myfile)
2388 shutil.copyfile(mirror_file, myfile_path)
2389 writemsg(_("Local mirror has file:" + \
2390 " %(file)s\n" % {"file":myfile}))
2392 except (IOError, OSError), e:
2393 if e.errno != errno.ENOENT:
2398 mystat = os.stat(myfile_path)
2400 if e.errno != errno.ENOENT:
# Make an existing distfile group-readable/writable for the portage group;
# only complain if we also cannot read it.
2405 apply_secpass_permissions(
2406 myfile_path, gid=portage_gid, mode=0664, mask=02,
2408 except portage_exception.PortageException, e:
2409 if not os.access(myfile_path, os.R_OK):
2410 writemsg("!!! Failed to adjust permissions:" + \
2411 " %s\n" % str(e), noiselevel=-1)
2412 if myfile not in mydigests:
2413 # We don't have a digest, but the file exists. We must
2414 # assume that it is fully downloaded.
# Smaller than the Manifest size -> resume candidate; otherwise verify
# the existing file and refetch (unlink) on checksum failure.
2417 if mystat.st_size < mydigests[myfile]["size"] and \
2419 fetched = 1 # Try to resume this download.
2421 verified_ok, reason = portage_checksum.verify_all(
2422 myfile_path, mydigests[myfile])
2424 writemsg("!!! Previously fetched" + \
2425 " file: '%s'\n" % myfile, noiselevel=-1)
2426 writemsg("!!! Reason: %s\n" % reason[0],
2428 writemsg(("!!! Got: %s\n" + \
2429 "!!! Expected: %s\n") % \
2430 (reason[1], reason[2]), noiselevel=-1)
2431 if can_fetch and not restrict_fetch:
2432 writemsg("Refetching...\n\n",
2434 os.unlink(myfile_path)
# Verified OK: report each digest via EOutput and move on.
2436 eout = output.EOutput()
2438 mysettings.get("PORTAGE_QUIET", None) == "1"
2439 for digest_name in mydigests[myfile]:
2441 "%s %s ;-)" % (myfile, digest_name))
2443 continue # fetch any remaining files
# Try each candidate URI in order until the file verifies.
2445 for loc in filedict[myfile]:
2447 writemsg_stdout(loc+" ", noiselevel=-1)
2449 # allow different fetchcommands per protocol
2450 protocol = loc[0:loc.find("://")]
2451 if mysettings.has_key("FETCHCOMMAND_"+protocol.upper()):
2452 fetchcommand=mysettings["FETCHCOMMAND_"+protocol.upper()]
2454 fetchcommand=mysettings["FETCHCOMMAND"]
2455 if mysettings.has_key("RESUMECOMMAND_"+protocol.upper()):
2456 resumecommand=mysettings["RESUMECOMMAND_"+protocol.upper()]
2458 resumecommand=mysettings["RESUMECOMMAND"]
# Substitute ${DISTDIR} now; ${URI}/${FILE} are substituted per attempt.
2460 fetchcommand=fetchcommand.replace("${DISTDIR}",mysettings["DISTDIR"])
2461 resumecommand=resumecommand.replace("${DISTDIR}",mysettings["DISTDIR"])
2466 writemsg("!!! File %s isn't fetched but unable to get it.\n" % myfile,
2469 writemsg("!!! File %s isn't fully fetched, but unable to complete it\n" % myfile,
2471 for var_name in ("FETCHCOMMAND", "RESUMECOMMAND"):
2472 if not mysettings.get(var_name, None):
2473 writemsg(("!!! %s is unset. It should " + \
2474 "have been defined in /etc/make.globals.\n") \
2475 % var_name, noiselevel=-1)
2481 #we either need to resume or start the download
2482 #you can't use "continue" when you're inside a "try" block
2485 writemsg(">>> Resuming download...\n")
2486 locfetch=resumecommand
2489 locfetch=fetchcommand
# Mask any userinfo password in the displayed URI.
2490 writemsg_stdout(">>> Downloading '%s'\n" % \
2491 re.sub(r'//(.+):.+@(.+)/',r'//\1:*password*@\2/', loc))
2492 myfetch=locfetch.replace("${URI}",loc)
2493 myfetch=myfetch.replace("${FILE}",myfile)
# FEATURES=userfetch: drop root privileges to the portage user/group
# for the spawned fetch command.
2496 if "userfetch" in mysettings.features and \
2497 os.getuid() == 0 and portage_gid and portage_uid:
2498 spawn_keywords.update({
2499 "uid" : portage_uid,
2500 "gid" : portage_gid,
2501 "groups" : userpriv_groups,
# SELinux: switch to the fetch context around the spawn, restore after.
2506 if mysettings.selinux_enabled():
2507 con = selinux.getcontext()
2508 con = con.replace(mysettings["PORTAGE_T"], mysettings["PORTAGE_FETCH_T"])
2509 selinux.setexec(con)
2511 myret = portage_exec.spawn_bash(myfetch,
2512 env=mysettings.environ(), **spawn_keywords)
2514 if mysettings.selinux_enabled():
2515 selinux.setexec(None)
2519 apply_secpass_permissions(myfile_path,
2520 gid=portage_gid, mode=0664, mask=02)
2521 except portage_exception.FileNotFound, e:
2523 except portage_exception.PortageException, e:
2524 if not os.access(myfile_path, os.R_OK):
2525 writemsg("!!! Failed to adjust permissions:" + \
2526 " %s\n" % str(e), noiselevel=-1)
# Post-fetch digest handling for files with Manifest entries.
2528 if mydigests!=None and mydigests.has_key(myfile):
2530 mystat = os.stat(myfile_path)
2532 if e.errno != errno.ENOENT:
2537 # no exception? file exists. let digestcheck() report
2538 # an appropriately for size or checksum errors
2539 if (mystat[stat.ST_SIZE]<mydigests[myfile]["size"]):
2540 # Fetch failed... Try the next one... Kill 404 files though.
# Small non-HTML files that contain an HTML 404 title are deleted as
# bogus redirect pages rather than kept as partial downloads.
2541 if (mystat[stat.ST_SIZE]<100000) and (len(myfile)>4) and not ((myfile[-5:]==".html") or (myfile[-4:]==".htm")):
2542 html404=re.compile("<title>.*(not found|404).*</title>",re.I|re.M)
2543 if html404.search(open(mysettings["DISTDIR"]+"/"+myfile).read()):
2545 os.unlink(mysettings["DISTDIR"]+"/"+myfile)
2546 writemsg(">>> Deleting invalid distfile. (Improper 404 redirect from server.)\n")
2549 except (IOError, OSError):
2557 # File is the correct size--check the checksums for the fetched
2558 # file NOW, for those users who don't have a stable/continuous
2559 # net connection. This way we have a chance to try to download
2560 # from another mirror...
2561 verified_ok,reason = portage_checksum.verify_all(mysettings["DISTDIR"]+"/"+myfile, mydigests[myfile])
2564 writemsg("!!! Fetched file: "+str(myfile)+" VERIFY FAILED!\n",
2566 writemsg("!!! Reason: "+reason[0]+"\n",
2568 writemsg("!!! Got: %s\n!!! Expected: %s\n" % \
2569 (reason[1], reason[2]), noiselevel=-1)
2570 writemsg("Removing corrupt distfile...\n", noiselevel=-1)
2571 os.unlink(mysettings["DISTDIR"]+"/"+myfile)
2574 eout = output.EOutput()
2575 eout.quiet = mysettings.get("PORTAGE_QUIET", None) == "1"
2576 for x_key in mydigests[myfile].keys():
2577 eout.ebegin("%s %s ;-)" % (myfile, x_key))
2585 elif mydigests!=None:
2586 writemsg("No digest file available and download failed.\n\n",
2589 if use_locks and file_lock:
2590 portage_locks.unlockfile(file_lock)
2593 writemsg_stdout("\n", noiselevel=-1)
# Fetch-restricted file could not be obtained: explain and run the
# ebuild's pkg_nofetch phase via ebuild.sh.
2596 print "\n!!!", mysettings["CATEGORY"] + "/" + \
2597 mysettings["PF"], "has fetch restriction turned on."
2598 print "!!! This probably means that this " + \
2599 "ebuild's files must be downloaded"
2600 print "!!! manually. See the comments in" + \
2601 " the ebuild for more information.\n"
2602 spawn(EBUILD_SH_BINARY + " nofetch", mysettings)
2605 elif not filedict[myfile]:
2606 writemsg("Warning: No mirrors available for file" + \
2607 " '%s'\n" % (myfile), noiselevel=-1)
2609 writemsg("!!! Couldn't download '%s'. Aborting.\n" % myfile,
# digestgen(): (re)generate the package Manifest, fetching any distfiles whose
# required hashes are missing. NOTE(review): elided listing -- some original
# lines (try/except/return scaffolding, distfiles_map init) are missing here.
2614 def digestgen(myarchives, mysettings, overwrite=1, manifestonly=0, myportdb=None):
2616 Generates a digest file if missing. Assumes all files are available.
2617 DEPRECATED: this now only is a compatibility wrapper for
2618 portage_manifest.Manifest()
2619 NOTE: manifestonly and overwrite are useless with manifest2 and
2620 are therefore ignored."""
2621 if myportdb is None:
2622 writemsg("Warning: myportdb not specified to digestgen\n")
# Suppress doebuild()'s Manifest verification while we are rewriting the
# Manifest ourselves (decremented again at the end of this function).
2625 global _doebuild_manifest_exempt_depend
2627 _doebuild_manifest_exempt_depend += 1
# distfiles_map: distfile basename -> list of cpvs that reference it.
2629 fetchlist_dict = FetchlistDict(mysettings["O"], mysettings, myportdb)
2630 for cpv, fetchlist in fetchlist_dict.iteritems():
2631 for myfile in fetchlist:
2632 distfiles_map.setdefault(myfile, []).append(cpv)
2633 mf = Manifest(mysettings["O"], mysettings["DISTDIR"],
2634 fetchlist_dict=fetchlist_dict)
# A distfile's Manifest entry is complete only if it has every manifest1 +
# manifest2 hash plus a size field.
2635 required_hash_types = set(portage_const.MANIFEST1_HASH_FUNCTIONS)
2636 required_hash_types.update(portage_const.MANIFEST2_HASH_FUNCTIONS)
2637 required_hash_types.add("size")
2638 dist_hashes = mf.fhashdict.get("DIST", {})
2639 missing_hashes = set()
2640 for myfile in distfiles_map:
2641 myhashes = dist_hashes.get(myfile)
2643 missing_hashes.add(myfile)
2645 if required_hash_types.difference(myhashes):
2646 missing_hashes.add(myfile)
# Of the files with missing hashes, find which are also absent on disk
# (ENOENT from os.stat); those must be fetched before hashing.
2649 for myfile in missing_hashes:
2651 os.stat(os.path.join(mysettings["DISTDIR"], myfile))
2653 if e.errno != errno.ENOENT:
2656 missing_files.append(myfile)
2658 mytree = os.path.realpath(os.path.dirname(
2659 os.path.dirname(mysettings["O"])))
# Collect the full URI list for each missing file from every cpv that
# references it, then fetch; failure means the Manifest can't be updated.
2661 for myfile in missing_files:
2662 for cpv in distfiles_map[myfile]:
2663 alluris, aalist = myportdb.getfetchlist(
2664 cpv, mytree=mytree, all=True,
2665 mysettings=mysettings)
2667 if os.path.basename(uri) == myfile:
2669 if not fetch(myuris, mysettings):
2670 writemsg(("!!! File %s doesn't exist, can't update " + \
2671 "Manifest\n") % myfile, noiselevel=-1)
2673 writemsg_stdout(">>> Creating Manifest for %s\n" % mysettings["O"])
2675 mf.create(requiredDistfiles=myarchives,
2676 assumeDistHashesSometimes=True,
2677 assumeDistHashesAlways=(
2678 "assume-digests" in mysettings.features))
2679 except portage_exception.FileNotFound, e:
2680 writemsg(("!!! File %s doesn't exist, can't update " + \
2681 "Manifest\n") % e, noiselevel=-1)
2683 mf.write(sign=False)
# Without FEATURES=assume-digests, report any DIST entries whose files are
# not present locally (their hashes were carried over, not recomputed).
2684 if "assume-digests" not in mysettings.features:
2685 distlist = mf.fhashdict.get("DIST", {}).keys()
2688 for filename in distlist:
2689 if not os.path.exists(
2690 os.path.join(mysettings["DISTDIR"], filename)):
2691 auto_assumed.append(filename)
2693 mytree = os.path.realpath(
2694 os.path.dirname(os.path.dirname(mysettings["O"])))
# cp = "category/package" derived from the last two path components of O.
2695 cp = os.path.sep.join(mysettings["O"].split(os.path.sep)[-2:])
2696 pkgs = myportdb.cp_list(cp, mytree=mytree)
2698 writemsg_stdout(" digest.assumed" + output.colorize("WARN",
2699 str(len(auto_assumed)).rjust(18)) + "\n")
2700 for pkg_key in pkgs:
2701 fetchlist = myportdb.getfetchlist(pkg_key,
2702 mysettings=mysettings, all=True, mytree=mytree)[1]
2703 pv = pkg_key.split("/")[1]
2704 for filename in auto_assumed:
2705 if filename in fetchlist:
2707 " digest-%s::%s\n" % (pv, filename))
# Re-enable doebuild()'s Manifest verification.
2710 _doebuild_manifest_exempt_depend -= 1
# digestParseFile(): derive the package directory from a digest/Manifest file
# path and return that package's digest dict via Manifest.getDigests().
# NOTE(review): elided listing -- the docstring close and some lines between
# the visible ones are missing here.
2712 def digestParseFile(myfilename, mysettings=None):
2713 """(filename) -- Parses a given file for entries matching:
2714 <checksumkey> <checksum_hex_string> <filename> <filesize>
2715 Ignores lines that don't start with a valid checksum identifier
2716 and returns a dict with the filenames as keys and {checksumkey:checksum}
2718 DEPRECATED: this function is now only a compatibility wrapper for
2719 portage_manifest.Manifest()."""
# Infer pkgdir from the filename: either .../<pkg>/files/digest-* (old-style
# digest) or .../<pkg>/Manifest.
2721 mysplit = myfilename.split(os.sep)
2722 if mysplit[-2] == "files" and mysplit[-1].startswith("digest-"):
2723 pkgdir = os.sep + os.sep.join(mysplit[:-2]).strip(os.sep)
2724 elif mysplit[-1] == "Manifest":
2725 pkgdir = os.sep + os.sep.join(mysplit[:-1]).strip(os.sep)
# Fall back to a clone of the global settings when none were supplied.
2727 if mysettings is None:
2729 mysettings = config(clone=settings)
2731 return Manifest(pkgdir, mysettings["DISTDIR"]).getDigests()
# digestcheck(): verify Manifest checksums for ebuilds, aux files, misc files
# and the given distfiles, then cross-check that every on-disk ebuild and
# files/ entry is actually listed in the Manifest. NOTE(review): elided
# listing -- return statements and some eout.eend calls are missing here.
2733 def digestcheck(myfiles, mysettings, strict=0, justmanifest=0):
2734 """Verifies checksums. Assumes all files have been downloaded.
2735 DEPRECATED: this is now only a compatibility wrapper for
2736 portage_manifest.Manifest()."""
2739 pkgdir = mysettings["O"]
2740 manifest_path = os.path.join(pkgdir, "Manifest")
2741 if not os.path.exists(manifest_path):
2742 writemsg("!!! Manifest file not found: '%s'\n" % manifest_path,
2746 mf = Manifest(pkgdir, mysettings["DISTDIR"])
2747 eout = output.EOutput()
2748 eout.quiet = mysettings.get("PORTAGE_QUIET", None) == "1"
# Hash checks by Manifest type; MISC entries may legitimately be absent.
2750 eout.ebegin("checking ebuild checksums ;-)")
2751 mf.checkTypeHashes("EBUILD")
2753 eout.ebegin("checking auxfile checksums ;-)")
2754 mf.checkTypeHashes("AUX")
2756 eout.ebegin("checking miscfile checksums ;-)")
2757 mf.checkTypeHashes("MISC", ignoreMissingFiles=True)
# Per-distfile checks for the files requested by the caller.
2760 eout.ebegin("checking %s ;-)" % f)
2761 mf.checkFileHashes(mf.findFile(f), f)
2765 writemsg("\n!!! Missing digest for %s\n" % str(e), noiselevel=-1)
2767 except portage_exception.FileNotFound, e:
2769 writemsg("\n!!! A file listed in the Manifest could not be found: %s\n" % str(e),
2772 except portage_exception.DigestException, e:
# DigestException.value layout (as used here): [path, reason, got, expected].
2774 writemsg("\n!!! Digest verification failed:\n", noiselevel=-1)
2775 writemsg("!!! %s\n" % e.value[0], noiselevel=-1)
2776 writemsg("!!! Reason: %s\n" % e.value[1], noiselevel=-1)
2777 writemsg("!!! Got: %s\n" % e.value[2], noiselevel=-1)
2778 writemsg("!!! Expected: %s\n" % e.value[3], noiselevel=-1)
2780 # Make sure that all of the ebuilds are actually listed in the Manifest.
2781 for f in os.listdir(pkgdir):
2782 if f.endswith(".ebuild") and not mf.hasFile("EBUILD", f):
2783 writemsg("!!! A file is not listed in the Manifest: '%s'\n" % \
2784 os.path.join(pkgdir, f), noiselevel=-1)
2786 """ epatch will just grab all the patches out of a directory, so we have to
2787 make sure there aren't any foreign files that it might grab."""
# Walk files/ (skipping dot-dirs/CVS and dot-files) and flag anything not
# recorded as an AUX entry in the Manifest.
2788 filesdir = os.path.join(pkgdir, "files")
2789 for parent, dirs, files in os.walk(filesdir):
2791 if d.startswith(".") or d == "CVS":
2794 if f.startswith("."):
2796 f = os.path.join(parent, f)[len(filesdir) + 1:]
2797 file_type = mf.findFile(f)
2798 if file_type != "AUX" and not f.startswith("digest-"):
2799 writemsg("!!! A file is not listed in the Manifest: '%s'\n" % \
2800 os.path.join(filesdir, f), noiselevel=-1)
2804 # parse actionmap to spawn ebuild with the appropriate args
# spawnebuild(): run ebuild phase `mydo` (and, unless FEATURES=noauto, its
# "dep" prerequisite phase recursively first) via spawn(), then perform
# post-phase permission fixups and, after "install", the QA checks.
# NOTE(review): elided listing -- early returns, retval checks, and the
# myuid/mygid initialization lines are missing from this excerpt.
2805 def spawnebuild(mydo,actionmap,mysettings,debug,alwaysdep=0,logfile=None):
2806 if alwaysdep or "noauto" not in mysettings.features:
2807 # process dependency first
2808 if "dep" in actionmap[mydo].keys():
2809 retval=spawnebuild(actionmap[mydo]["dep"],actionmap,mysettings,debug,alwaysdep=alwaysdep,logfile=logfile)
2812 kwargs = actionmap[mydo]["args"]
# EBUILD_PHASE is exported for the duration of the phase, then cleared.
2813 mysettings["EBUILD_PHASE"] = mydo
2814 phase_retval = spawn(actionmap[mydo]["cmd"] % mydo, mysettings, debug=debug, logfile=logfile, **kwargs)
2815 mysettings["EBUILD_PHASE"] = ""
2817 if not kwargs["droppriv"] and secpass >= 2:
2818 """ Privileged phases may have left files that need to be made
2819 writable to a less privileged user."""
2820 apply_recursive_permissions(mysettings["T"],
2821 uid=portage_uid, gid=portage_gid, dirmode=070, dirmask=0,
2822 filemode=060, filemask=0)
2824 if phase_retval == os.EX_OK:
2825 if mydo == "install":
2826 # User and group bits that match the "portage" user or group are
2827 # automatically mapped to PORTAGE_INST_UID and PORTAGE_INST_GID if
2828 # necessary. The chown system call may clear S_ISUID and S_ISGID
2829 # bits, so those bits are restored if necessary.
2830 inst_uid = int(mysettings["PORTAGE_INST_UID"])
2831 inst_gid = int(mysettings["PORTAGE_INST_GID"])
2832 for parent, dirs, files in os.walk(mysettings["D"]):
2833 for fname in chain(dirs, files):
2834 fpath = os.path.join(parent, fname)
2835 mystat = os.lstat(fpath)
# Skip entries owned by neither the portage user nor group.
2836 if mystat.st_uid != portage_uid and \
2837 mystat.st_gid != portage_gid:
2841 if mystat.st_uid == portage_uid:
2843 if mystat.st_gid == portage_gid:
2845 apply_secpass_permissions(fpath, uid=myuid, gid=mygid,
2846 mode=mystat.st_mode, stat_cached=mystat,
# Post-install QA: run misc-functions.sh checks over the image dir.
2848 mycommand = " ".join([MISC_SH_BINARY, "install_qa_check", "install_symlink_html_docs"])
2849 qa_retval = spawn(mycommand, mysettings, debug=debug, logfile=logfile, **kwargs)
2851 writemsg("!!! install_qa_check failed; exiting.\n",
def eapi_is_supported(eapi):
    """Return True if the given EAPI matches the EAPI supported by this
    portage version (portage_const.EAPI), ignoring surrounding whitespace."""
    supported = str(portage_const.EAPI).strip()
    candidate = str(eapi).strip()
    return candidate == supported
# doebuild_environment(): populate mysettings with every variable an ebuild
# phase needs (CATEGORY/P/PN/PV/PR/PVR, paths like FILESDIR/WORKDIR/D/T,
# EAPI/RESTRICT handling, PATH, KV, color map, ...). NOTE(review): elided
# listing -- several lines (else branches, KVERS detection, try scaffolding)
# are missing from this excerpt.
2860 def doebuild_environment(myebuild, mydo, myroot, mysettings, debug, use_cache, mydbapi):
2862 ebuild_path = os.path.abspath(myebuild)
2863 pkg_dir = os.path.dirname(ebuild_path)
# CATEGORY comes from the pkg config layer when present, otherwise from the
# parent directory of the ebuild's package dir.
2865 if mysettings.configdict["pkg"].has_key("CATEGORY"):
2866 cat = mysettings.configdict["pkg"]["CATEGORY"]
2868 cat = os.path.basename(normalize_path(os.path.join(pkg_dir, "..")))
# mypv = ebuild filename without the ".ebuild" suffix (7 chars).
2869 mypv = os.path.basename(ebuild_path)[:-7]
2870 mycpv = cat+"/"+mypv
2871 mysplit=pkgsplit(mypv,silent=0)
2873 raise portage_exception.IncorrectParameter(
2874 "Invalid ebuild path: '%s'" % myebuild)
2876 if mydo != "depend":
2877 """For performance reasons, setcpv only triggers reset when it
2878 detects a package-specific change in config. For the ebuild
2879 environment, a reset call is forced in order to ensure that the
2880 latest env.d variables are used."""
2881 mysettings.reset(use_cache=use_cache)
2882 mysettings.setcpv(mycpv, use_cache=use_cache, mydb=mydbapi)
2884 mysettings["EBUILD_PHASE"] = mydo
2886 mysettings["PORTAGE_MASTER_PID"] = str(os.getpid())
2888 # We are disabling user-specific bashrc files.
2889 mysettings["BASH_ENV"] = INVALID_ENV_FILE
2891 if debug: # Otherwise it overrides emerge's settings.
2892 # We have no other way to set debug... debug can't be passed in
2893 # due to how it's coded... Don't overwrite this so we can use it.
2894 mysettings["PORTAGE_DEBUG"] = "1"
# Core path/identity variables exported to the ebuild environment.
2896 mysettings["ROOT"] = myroot
2897 mysettings["STARTDIR"] = getcwd()
2899 mysettings["EBUILD"] = ebuild_path
2900 mysettings["O"] = pkg_dir
2901 mysettings.configdict["pkg"]["CATEGORY"] = cat
2902 mysettings["FILESDIR"] = pkg_dir+"/files"
2903 mysettings["PF"] = mypv
2905 mysettings["ECLASSDIR"] = mysettings["PORTDIR"]+"/eclass"
2906 mysettings["SANDBOX_LOG"] = mycpv.replace("/", "_-_")
2908 mysettings["PROFILE_PATHS"] = "\n".join(mysettings.profiles)+"\n"+CUSTOM_PROFILE_PATH
# pkgsplit() result: [name, version, revision].
2909 mysettings["P"] = mysplit[0]+"-"+mysplit[1]
2910 mysettings["PN"] = mysplit[0]
2911 mysettings["PV"] = mysplit[1]
2912 mysettings["PR"] = mysplit[2]
2914 if portage_util.noiselimit < 0:
2915 mysettings["PORTAGE_QUIET"] = "1"
# Outside the depend phase, pull EAPI/INHERITED/SLOT/RESTRICT from the db,
# refuse unsupported EAPIs, and reduce RESTRICT against the active USE.
2917 if mydo != "depend":
2918 eapi, mysettings["INHERITED"], mysettings["SLOT"], mysettings["RESTRICT"] = \
2919 mydbapi.aux_get(mycpv, ["EAPI", "INHERITED", "SLOT", "RESTRICT"])
2920 if not eapi_is_supported(eapi):
2921 # can't do anything with this.
2922 raise portage_exception.UnsupportedAPIException(mycpv, eapi)
2923 mysettings["PORTAGE_RESTRICT"] = " ".join(flatten(
2924 portage_dep.use_reduce(portage_dep.paren_reduce(
2925 mysettings["RESTRICT"]), uselist=mysettings["USE"].split())))
# PVR omits the revision suffix for -r0.
2927 if mysplit[2] == "r0":
2928 mysettings["PVR"]=mysplit[1]
2930 mysettings["PVR"]=mysplit[1]+"-"+mysplit[2]
# Ensure the portage bin dir is on PATH.
2932 if mysettings.has_key("PATH"):
2933 mysplit=mysettings["PATH"].split(":")
2936 if PORTAGE_BIN_PATH not in mysplit:
2937 mysettings["PATH"]=PORTAGE_BIN_PATH+":"+mysettings["PATH"]
2939 # Sandbox needs cannonical paths.
2940 mysettings["PORTAGE_TMPDIR"] = os.path.realpath(
2941 mysettings["PORTAGE_TMPDIR"])
2942 mysettings["BUILD_PREFIX"] = mysettings["PORTAGE_TMPDIR"]+"/portage"
2943 mysettings["PKG_TMPDIR"] = mysettings["PORTAGE_TMPDIR"]+"/binpkgs"
2945 # Package {pre,post}inst and {pre,post}rm may overlap, so they must have separate
2946 # locations in order to prevent interference.
2947 if mydo in ("unmerge", "prerm", "postrm", "cleanrm"):
2948 mysettings["PORTAGE_BUILDDIR"] = os.path.join(
2949 mysettings["PKG_TMPDIR"],
2950 mysettings["CATEGORY"], mysettings["PF"])
2952 mysettings["PORTAGE_BUILDDIR"] = os.path.join(
2953 mysettings["BUILD_PREFIX"],
2954 mysettings["CATEGORY"], mysettings["PF"])
# Standard per-build directories under PORTAGE_BUILDDIR.
2956 mysettings["HOME"] = os.path.join(mysettings["PORTAGE_BUILDDIR"], "homedir")
2957 mysettings["WORKDIR"] = os.path.join(mysettings["PORTAGE_BUILDDIR"], "work")
2958 mysettings["D"] = os.path.join(mysettings["PORTAGE_BUILDDIR"], "image") + os.sep
2959 mysettings["T"] = os.path.join(mysettings["PORTAGE_BUILDDIR"], "temp")
2961 mysettings["PORTAGE_BASHRC"] = os.path.join(
2962 mysettings["PORTAGE_CONFIGROOT"], EBUILD_SH_ENV_FILE.lstrip(os.path.sep))
2964 #set up KV variable -- DEP SPEEDUP :: Don't waste time. Keep var persistent.
2965 if (mydo!="depend") or not mysettings.has_key("KV"):
2966 mykv,err1=ExtractKernelVersion(os.path.join(myroot, "usr/src/linux"))
2968 # Regular source tree
2969 mysettings["KV"]=mykv
2973 if (mydo!="depend") or not mysettings.has_key("KVERS"):
2975 mysettings["KVERS"]=myso[1]
2977 # Allow color.map to control colors associated with einfo, ewarn, etc...
2979 for c in ("GOOD", "WARN", "BAD", "HILITE", "BRACKET"):
2980 mycolors.append("%s=$'%s'" % (c, output.codes[c]))
2981 mysettings["PORTAGE_COLORMAP"] = "\n".join(mycolors)
# prepare_build_dirs(): create/clean the per-package build directories with
# portage-group permissions, set up optional feature dirs (ccache, confcache,
# distcc), apply PORTAGE_WORKDIR_MODE to WORKDIR, and configure the build-log
# location (PORT_LOGDIR or T/build.log). NOTE(review): elided listing -- try
# scaffolding, several else branches and a distcc/env section are missing.
2983 def prepare_build_dirs(myroot, mysettings, cleanup):
2985 clean_dirs = [mysettings["HOME"]]
2987 # We enable cleanup when we want to make sure old cruft (such as the old
2988 # environment) doesn't interfere with the current phase.
2990 clean_dirs.append(mysettings["T"])
# Remove stale dirs; ENOENT is fine, EPERM is reported.
2992 for clean_dir in clean_dirs:
2994 shutil.rmtree(clean_dir)
2996 if errno.ENOENT == oe.errno:
2998 elif errno.EPERM == oe.errno:
2999 writemsg("%s\n" % oe, noiselevel=-1)
3000 writemsg("Operation Not Permitted: rmtree('%s')\n" % \
3001 clean_dir, noiselevel=-1)
# Local helper: mkdir -p with EEXIST tolerated and EPERM reported.
3006 def makedirs(dir_path):
3008 os.makedirs(dir_path)
3010 if errno.EEXIST == oe.errno:
3012 elif errno.EPERM == oe.errno:
3013 writemsg("%s\n" % oe, noiselevel=-1)
3014 writemsg("Operation Not Permitted: makedirs('%s')\n" % \
3015 dir_path, noiselevel=-1)
3021 mysettings["PKG_LOGDIR"] = os.path.join(mysettings["T"], "logging")
# Ancestors of PORTAGE_BUILDDIR get portage-group ownership too.
3023 mydirs = [os.path.dirname(mysettings["PORTAGE_BUILDDIR"])]
3024 mydirs.append(os.path.dirname(mydirs[-1]))
3027 for mydir in mydirs:
3028 portage_util.ensure_dirs(mydir)
3029 portage_util.apply_secpass_permissions(mydir,
3030 gid=portage_gid, uid=portage_uid, mode=070, mask=0)
3031 for dir_key in ("PORTAGE_BUILDDIR", "HOME", "PKG_LOGDIR", "T"):
3032 """These directories don't necessarily need to be group writable.
3033 However, the setup phase is commonly run as a privileged user prior
3034 to the other phases being run by an unprivileged user. Currently,
3035 we use the portage group to ensure that the unprivleged user still
3036 has write access to these directories in any case."""
3037 portage_util.ensure_dirs(mysettings[dir_key], mode=0775)
3038 portage_util.apply_secpass_permissions(mysettings[dir_key],
3039 uid=portage_uid, gid=portage_gid)
3040 except portage_exception.PermissionDenied, e:
3041 writemsg("Permission Denied: %s\n" % str(e), noiselevel=-1)
3043 except portage_exception.OperationNotPermitted, e:
3044 writemsg("Operation Not Permitted: %s\n" % str(e), noiselevel=-1)
3046 except portage_exception.FileNotFound, e:
3047 writemsg("File Not Found: '%s'\n" % str(e), noiselevel=-1)
# features_dirs: per-FEATURE directory specs -- which settings variable
# names the base dir, its default location, optional subdirs, and whether
# permissions are always fixed recursively.
3052 "basedir_var":"CCACHE_DIR",
3053 "default_dir":os.path.join(mysettings["PORTAGE_TMPDIR"], "ccache"),
3054 "always_recurse":False},
3056 "basedir_var":"CONFCACHE_DIR",
3057 "default_dir":os.path.join(mysettings["PORTAGE_TMPDIR"], "confcache"),
3058 "always_recurse":True},
3060 "basedir_var":"DISTCC_DIR",
3061 "default_dir":os.path.join(mysettings["BUILD_PREFIX"], ".distcc"),
3062 "subdirs":("lock", "state"),
3063 "always_recurse":True}
3068 for myfeature, kwargs in features_dirs.iteritems():
3069 if myfeature in mysettings.features:
3070 basedir = mysettings[kwargs["basedir_var"]]
3072 basedir = kwargs["default_dir"]
3073 mysettings[kwargs["basedir_var"]] = basedir
3075 mydirs = [mysettings[kwargs["basedir_var"]]]
3076 if "subdirs" in kwargs:
3077 for subdir in kwargs["subdirs"]:
3078 mydirs.append(os.path.join(basedir, subdir))
3079 for mydir in mydirs:
3080 modified = portage_util.ensure_dirs(mydir,
3081 gid=portage_gid, mode=dirmode, mask=modemask)
3082 # To avoid excessive recursive stat calls, we trigger
3083 # recursion when the top level directory does not initially
3084 # match our permission requirements.
3085 if modified or kwargs["always_recurse"]:
3087 writemsg("Adjusting permissions recursively: '%s'\n" % mydir,
3090 raise # The feature is disabled if a single error
3091 # occurs during permissions adjustment.
3092 if not apply_recursive_permissions(mydir,
3093 gid=portage_gid, dirmode=dirmode, dirmask=modemask,
3094 filemode=filemode, filemask=modemask, onerror=onerror):
3095 raise portage_exception.OperationNotPermitted(
3096 "Failed to apply recursive permissions for the portage group.")
# Any permission failure disables the feature for this run rather than
# aborting the build.
3097 except portage_exception.PortageException, e:
3098 mysettings.features.remove(myfeature)
3099 mysettings["FEATURES"] = " ".join(mysettings.features)
3100 writemsg("!!! %s\n" % str(e), noiselevel=-1)
3101 writemsg("!!! Failed resetting perms on %s='%s'\n" % \
3102 (kwargs["basedir_var"], basedir), noiselevel=-1)
3103 writemsg("!!! Disabled FEATURES='%s'\n" % myfeature,
# Parse PORTAGE_WORKDIR_MODE as an octal permission mask; invalid or unset
# values fall back to the previous workdir_mode (default elided here).
3109 mode = mysettings["PORTAGE_WORKDIR_MODE"]
3111 parsed_mode = int(mode, 8)
3116 if parsed_mode & 07777 != parsed_mode:
3117 raise ValueError("Invalid file mode: %s" % mode)
3119 workdir_mode = parsed_mode
3121 writemsg("!!! PORTAGE_WORKDIR_MODE is unset, using %s.\n" % oct(workdir_mode))
3122 except ValueError, e:
3124 writemsg("%s\n" % e)
3125 writemsg("!!! Unable to parse PORTAGE_WORKDIR_MODE='%s', using %s.\n" % \
3126 (mysettings["PORTAGE_WORKDIR_MODE"], oct(workdir_mode)))
3127 mysettings["PORTAGE_WORKDIR_MODE"] = oct(workdir_mode)
3129 apply_secpass_permissions(mysettings["WORKDIR"],
3130 uid=portage_uid, gid=portage_gid, mode=workdir_mode)
3131 except portage_exception.FileNotFound:
3132 pass # ebuild.sh will create it
# Empty PORT_LOGDIR is treated as unset (deleted from all config layers).
3134 if mysettings.get("PORT_LOGDIR", "") == "":
3135 while "PORT_LOGDIR" in mysettings:
3136 del mysettings["PORT_LOGDIR"]
3137 if "PORT_LOGDIR" in mysettings:
3139 portage_util.ensure_dirs(mysettings["PORT_LOGDIR"],
3140 uid=portage_uid, gid=portage_gid, mode=02770)
3141 except portage_exception.PortageException, e:
3142 writemsg("!!! %s\n" % str(e), noiselevel=-1)
3143 writemsg("!!! Permission issues with PORT_LOGDIR='%s'\n" % \
3144 mysettings["PORT_LOGDIR"], noiselevel=-1)
3145 writemsg("!!! Disabling logging.\n", noiselevel=-1)
3146 while "PORT_LOGDIR" in mysettings:
3147 del mysettings["PORT_LOGDIR"]
# PORTAGE_LOG_FILE: timestamped per-package log in PORT_LOGDIR (the .logid
# marker file's mtime fixes the timestamp), else T/build.log.
3148 if "PORT_LOGDIR" in mysettings:
3149 logid_path = os.path.join(mysettings["PORTAGE_BUILDDIR"], ".logid")
3150 if not os.path.exists(logid_path):
3151 f = open(logid_path, "w")
3154 logid_time = time.strftime("%Y%m%d-%H%M%S",
3155 time.gmtime(os.stat(logid_path).st_mtime))
3156 mysettings["PORTAGE_LOG_FILE"] = os.path.join(
3157 mysettings["PORT_LOGDIR"], "%s:%s:%s.log" % \
3158 (mysettings["CATEGORY"], mysettings["PF"], logid_time))
3159 del logid_path, logid_time
3161 mysettings["PORTAGE_LOG_FILE"] = os.path.join(mysettings["T"], "build.log")
# Module-level state shared by digestgen() and doebuild():
# _doebuild_manifest_exempt_depend -- counter; while nonzero, doebuild()
# skips its Manifest verification (digestgen() increments it while it is
# rewriting the Manifest and decrements it when done).
3163 _doebuild_manifest_exempt_depend = 0
# _doebuild_manifest_checked -- path of the most recently verified Manifest,
# used to avoid re-checking the same Manifest on consecutive doebuild() calls.
3164 _doebuild_manifest_checked = None
3166 def doebuild(myebuild, mydo, myroot, mysettings, debug=0, listonly=0,
3167 fetchonly=0, cleanup=0, dbkey=None, use_cache=1, fetchall=0, tree=None,
3168 mydbapi=None, vartree=None, prev_mtimes=None):
3171 Wrapper function that invokes specific ebuild phases through the spawning
3174 @param myebuild: name of the ebuild to invoke the phase on (CPV)
3175 @type myebuild: String
3176 @param mydo: Phase to run
3178 @param myroot: $ROOT (usually '/', see man make.conf)
3179 @type myroot: String
3180 @param mysettings: Portage Configuration
3181 @type mysettings: instance of portage.config
3182 @param debug: Turns on various debug information (eg, debug for spawn)
3183 @type debug: Boolean
3184 @param listonly: Used to wrap fetch(); passed such that fetch only lists files required.
3185 @type listonly: Boolean
3186 @param fetchonly: Used to wrap fetch(); passed such that files are only fetched (no other actions)
3187 @type fetchonly: Boolean
3188 @param cleanup: Passed to prepare_build_dirs (TODO: what does it do?)
3189 @type cleanup: Boolean
3190 @param dbkey: A dict (usually keys and values from the depend phase, such as KEYWORDS, USE, etc..)
3191 @type dbkey: Dict or String
3192 @param use_cache: Enables the cache
3193 @type use_cache: Boolean
3194 @param fetchall: Used to wrap fetch(), fetches all URI's (even ones invalid due to USE conditionals)
3195 @type fetchall: Boolean
3196 @param tree: Which tree to use ('vartree','porttree','bintree', etc..), defaults to 'porttree'
3198 @param mydbapi: a dbapi instance to pass to various functions; this should be a portdbapi instance.
3199 @type mydbapi: portdbapi instance
3200 @param vartree: A instance of vartree; used for aux_get calls, defaults to db[myroot]['vartree']
3201 @type vartree: vartree instance
3202 @param prev_mtimes: A dict of { filename:mtime } keys used by merge() to do config_protection
3203 @type prev_mtimes: dictionary
3209 Most errors have an accompanying error message.
3211 listonly and fetchonly are only really necessary for operations involving 'fetch'
3212 prev_mtimes are only necessary for merge operations.
3213 Other variables may not be strictly required, many have defaults that are set inside of doebuild.
3218 writemsg("Warning: tree not specified to doebuild\n")
3222 # chunked out deps for each phase, so that ebuild binary can use it
3223 # to collapse targets down.
3227 "unpack": ["setup"],
3228 "compile":["unpack"],
3229 "test": ["compile"],
3232 "package":["install"],
3236 mydbapi = db[myroot][tree].dbapi
3238 if vartree is None and mydo in ("merge", "qmerge", "unmerge"):
3239 vartree = db[myroot]["vartree"]
3241 features = mysettings.features
3243 validcommands = ["help","clean","prerm","postrm","cleanrm","preinst","postinst",
3244 "config","setup","depend","fetch","digest",
3245 "unpack","compile","test","install","rpm","qmerge","merge",
3246 "package","unmerge", "manifest"]
3248 if mydo not in validcommands:
3249 validcommands.sort()
3250 writemsg("!!! doebuild: '%s' is not one of the following valid commands:" % mydo,
3252 for vcount in range(len(validcommands)):
3254 writemsg("\n!!! ", noiselevel=-1)
3255 writemsg(validcommands[vcount].ljust(11), noiselevel=-1)
3256 writemsg("\n", noiselevel=-1)
3259 if not os.path.exists(myebuild):
3260 writemsg("!!! doebuild: %s not found for %s\n" % (myebuild, mydo),
3264 global _doebuild_manifest_exempt_depend
3266 if "strict" in features and \
3267 "digest" not in features and \
3268 tree == "porttree" and \
3269 mydo not in ("digest", "manifest", "help") and \
3270 not _doebuild_manifest_exempt_depend:
3271 # Always verify the ebuild checksums before executing it.
3272 pkgdir = os.path.dirname(myebuild)
3273 manifest_path = os.path.join(pkgdir, "Manifest")
3274 global _doebuild_manifest_checked
3275 # Avoid checking the same Manifest several times in a row during a
3276 # regen with an empty cache.
3277 if _doebuild_manifest_checked != manifest_path:
3278 if not os.path.exists(manifest_path):
3279 writemsg("!!! Manifest file not found: '%s'\n" % manifest_path,
3282 mf = Manifest(pkgdir, mysettings["DISTDIR"])
3284 mf.checkTypeHashes("EBUILD")
3285 except portage_exception.FileNotFound, e:
3286 writemsg("!!! A file listed in the Manifest " + \
3287 "could not be found: %s\n" % str(e), noiselevel=-1)
3289 except portage_exception.DigestException, e:
3290 writemsg("!!! Digest verification failed:\n", noiselevel=-1)
3291 writemsg("!!! %s\n" % e.value[0], noiselevel=-1)
3292 writemsg("!!! Reason: %s\n" % e.value[1], noiselevel=-1)
3293 writemsg("!!! Got: %s\n" % e.value[2], noiselevel=-1)
3294 writemsg("!!! Expected: %s\n" % e.value[3], noiselevel=-1)
3296 # Make sure that all of the ebuilds are actually listed in the
3298 for f in os.listdir(pkgdir):
3299 if f.endswith(".ebuild") and not mf.hasFile("EBUILD", f):
3300 writemsg("!!! A file is not listed in the " + \
3301 "Manifest: '%s'\n" % os.path.join(pkgdir, f),
3304 _doebuild_manifest_checked = manifest_path
3307 builddir_lock = None
3309 if mydo in ("digest", "manifest", "help"):
3310 # Temporarily exempt the depend phase from manifest checks, in case
3311 # aux_get calls trigger cache generation.
3312 _doebuild_manifest_exempt_depend += 1
3314 doebuild_environment(myebuild, mydo, myroot, mysettings, debug,
3317 # get possible slot information from the deps file
3318 if mydo == "depend":
3319 writemsg("!!! DEBUG: dbkey: %s\n" % str(dbkey), 2)
3320 if isinstance(dbkey, dict):
3321 mysettings["dbkey"] = ""
3323 fd_pipes = {0:0, 1:1, 2:2, 9:pw}
3324 mypids = spawn(EBUILD_SH_BINARY + " depend", mysettings,
3325 fd_pipes=fd_pipes, returnpid=True)
3326 os.close(pw) # belongs exclusively to the child process now
3330 mybytes.append(os.read(pr, maxbytes))
3334 mybytes = "".join(mybytes)
3336 for k, v in izip(auxdbkeys, mybytes.splitlines()):
3338 retval = os.waitpid(mypids[0], 0)[1]
3339 portage_exec.spawned_pids.remove(mypids[0])
3340 # If it got a signal, return the signal that was sent, but
3341 # shift in order to distinguish it from a return value. (just
3342 # like portage_exec.spawn() would do).
3344 return (retval & 0xff) << 8
3345 # Otherwise, return its exit code.
3348 mysettings["dbkey"] = dbkey
3350 mysettings["dbkey"] = \
3351 os.path.join(mysettings.depcachedir, "aux_db_key_temp")
3353 return spawn(EBUILD_SH_BINARY + " depend", mysettings)
3355 mycpv = mysettings["CATEGORY"] + "/" + mysettings["PF"]
3356 dep_keys = ["DEPEND", "RDEPEND", "PDEPEND"]
3357 metadata = dict(izip(dep_keys, mydbapi.aux_get(mycpv, dep_keys)))
3358 class FakeTree(object):
3359 def __init__(self, mydb):
3361 dep_check_trees = {myroot:{}}
3362 dep_check_trees[myroot]["porttree"] = \
3363 FakeTree(fakedbapi(settings=mysettings))
3364 for dep_type in dep_keys:
3365 mycheck = dep_check(metadata[dep_type], None, mysettings,
3366 myuse="all", myroot=myroot, trees=dep_check_trees)
3368 writemsg("%s: %s\n%s\n" % (
3369 dep_type, metadata[dep_type], mycheck[1]), noiselevel=-1)
3372 if "PORTAGE_TMPDIR" not in mysettings or \
3373 not os.path.isdir(mysettings["PORTAGE_TMPDIR"]):
3374 writemsg("The directory specified in your " + \
3375 "PORTAGE_TMPDIR variable, '%s',\n" % \
3376 mysettings.get("PORTAGE_TMPDIR", ""), noiselevel=-1)
3377 writemsg("does not exist. Please create this directory or " + \
3378 "correct your PORTAGE_TMPDIR setting.\n", noiselevel=-1)
3381 # Build directory creation isn't required for any of these.
3382 if mydo not in ("digest", "fetch", "help", "manifest"):
3383 mystatus = prepare_build_dirs(myroot, mysettings, cleanup)
3386 # PORTAGE_LOG_FILE is set above by the prepare_build_dirs() call.
3387 logfile = mysettings.get("PORTAGE_LOG_FILE", None)
3388 if mydo == "unmerge":
3389 return unmerge(mysettings["CATEGORY"],
3390 mysettings["PF"], myroot, mysettings, vartree=vartree)
3392 # if any of these are being called, handle them -- running them out of
3393 # the sandbox -- and stop now.
3394 if mydo in ["clean","cleanrm"]:
3395 return spawn(EBUILD_SH_BINARY + " clean", mysettings,
3396 debug=debug, free=1, logfile=None)
3397 elif mydo == "help":
3398 return spawn(EBUILD_SH_BINARY + " " + mydo, mysettings,
3399 debug=debug, free=1, logfile=logfile)
3400 elif mydo == "setup":
3401 infodir = os.path.join(
3402 mysettings["PORTAGE_BUILDDIR"], "build-info")
3403 if os.path.isdir(infodir):
3404 """Load USE flags for setup phase of a binary package.
3405 Ideally, the environment.bz2 would be used instead."""
3406 mysettings.load_infodir(infodir)
3407 retval = spawn(EBUILD_SH_BINARY + " " + mydo, mysettings,
3408 debug=debug, free=1, logfile=logfile)
3410 """ Privileged phases may have left files that need to be made
3411 writable to a less privileged user."""
3412 apply_recursive_permissions(mysettings["T"],
3413 uid=portage_uid, gid=portage_gid, dirmode=070, dirmask=0,
3414 filemode=060, filemask=0)
3416 elif mydo == "preinst":
3417 mysettings["IMAGE"] = mysettings["D"]
3418 phase_retval = spawn(" ".join((EBUILD_SH_BINARY, mydo)),
3419 mysettings, debug=debug, free=1, logfile=logfile)
3420 if phase_retval == os.EX_OK:
3421 # Post phase logic and tasks that have been factored out of
3423 myargs = [MISC_SH_BINARY, "preinst_bsdflags", "preinst_mask",
3424 "preinst_sfperms", "preinst_selinux_labels",
3425 "preinst_suid_scan"]
3426 mysettings["EBUILD_PHASE"] = ""
3427 phase_retval = spawn(" ".join(myargs),
3428 mysettings, debug=debug, free=1, logfile=logfile)
3429 if phase_retval != os.EX_OK:
3430 writemsg("!!! post preinst failed; exiting.\n",
3432 del mysettings["IMAGE"]
3434 elif mydo == "postinst":
3435 mysettings.load_infodir(mysettings["O"])
3436 phase_retval = spawn(" ".join((EBUILD_SH_BINARY, mydo)),
3437 mysettings, debug=debug, free=1, logfile=logfile)
3438 if phase_retval == os.EX_OK:
3439 # Post phase logic and tasks that have been factored out of
3441 myargs = [MISC_SH_BINARY, "postinst_bsdflags"]
3442 mysettings["EBUILD_PHASE"] = ""
3443 phase_retval = spawn(" ".join(myargs),
3444 mysettings, debug=debug, free=1, logfile=logfile)
3445 if phase_retval != os.EX_OK:
3446 writemsg("!!! post postinst failed; exiting.\n",
3449 elif mydo in ["prerm","postrm","config"]:
3450 mysettings.load_infodir(mysettings["O"])
3451 return spawn(EBUILD_SH_BINARY + " " + mydo,
3452 mysettings, debug=debug, free=1, logfile=logfile)
3454 mycpv = "/".join((mysettings["CATEGORY"], mysettings["PF"]))
3456 # Make sure we get the correct tree in case there are overlays.
3457 mytree = os.path.realpath(
3458 os.path.dirname(os.path.dirname(mysettings["O"])))
3459 newuris, alist = mydbapi.getfetchlist(
3460 mycpv, mytree=mytree, mysettings=mysettings)
3461 alluris, aalist = mydbapi.getfetchlist(
3462 mycpv, mytree=mytree, all=True, mysettings=mysettings)
3463 mysettings["A"] = " ".join(alist)
3464 mysettings["AA"] = " ".join(aalist)
3465 if ("mirror" in features) or fetchall:
3466 fetchme = alluris[:]
3468 elif mydo == "digest":
3469 fetchme = alluris[:]
3471 # Skip files that we already have digests for.
3472 mf = Manifest(mysettings["O"], mysettings["DISTDIR"])
3473 mydigests = mf.getTypeDigests("DIST")
3474 for filename, hashes in mydigests.iteritems():
3475 if len(hashes) == len(mf.hashes):
3476 checkme = [i for i in checkme if i != filename]
3477 fetchme = [i for i in fetchme \
3478 if os.path.basename(i) != filename]
3479 del filename, hashes
3481 fetchme = newuris[:]
3484 # Only try and fetch the files if we are going to need them ...
3485 # otherwise, if user has FEATURES=noauto and they run `ebuild clean
3486 # unpack compile install`, we will try and fetch 4 times :/
3487 need_distfiles = (mydo in ("fetch", "unpack") or \
3488 mydo not in ("digest", "manifest") and "noauto" not in features)
3489 if need_distfiles and not fetch(
3490 fetchme, mysettings, listonly=listonly, fetchonly=fetchonly):
3493 if mydo == "fetch" and listonly:
3497 if mydo == "manifest":
3498 return not digestgen(aalist, mysettings, overwrite=1,
3499 manifestonly=1, myportdb=mydbapi)
3500 elif mydo == "digest":
3501 return not digestgen(aalist, mysettings, overwrite=1,
3503 elif "digest" in mysettings.features:
3504 digestgen(aalist, mysettings, overwrite=0, myportdb=mydbapi)
3505 except portage_exception.PermissionDenied, e:
3506 writemsg("!!! %s\n" % str(e), noiselevel=-1)
3507 if mydo in ("digest", "manifest"):
3510 # See above comment about fetching only when needed
3511 if not digestcheck(checkme, mysettings, ("strict" in features),
3512 (mydo not in ["digest","fetch","unpack"] and \
3513 mysettings.get("PORTAGE_CALLER", None) == "ebuild" and \
3514 "noauto" in features)):
3520 # remove PORTAGE_ACTUAL_DISTDIR once cvs/svn is supported via SRC_URI
3521 if (mydo != "setup" and "noauto" not in features) or mydo == "unpack":
3522 orig_distdir = mysettings["DISTDIR"]
3523 mysettings["PORTAGE_ACTUAL_DISTDIR"] = orig_distdir
3524 edpath = mysettings["DISTDIR"] = \
3525 os.path.join(mysettings["PORTAGE_BUILDDIR"], "distdir")
3526 if os.path.exists(edpath):
3528 if os.path.isdir(edpath) and not os.path.islink(edpath):
3529 shutil.rmtree(edpath)
3533 print "!!! Failed reseting ebuild distdir path, " + edpath
3536 apply_secpass_permissions(edpath, uid=portage_uid, mode=0755)
3539 os.symlink(os.path.join(orig_distdir, file),
3540 os.path.join(edpath, file))
3542 print "!!! Failed symlinking in '%s' to ebuild distdir" % file
3545 #initial dep checks complete; time to process main commands
3547 nosandbox = (("userpriv" in features) and \
3548 ("usersandbox" not in features) and \
3549 ("userpriv" not in mysettings["RESTRICT"]) and \
3550 ("nouserpriv" not in mysettings["RESTRICT"]))
3551 if nosandbox and ("userpriv" not in features or \
3552 "userpriv" in mysettings["RESTRICT"] or \
3553 "nouserpriv" in mysettings["RESTRICT"]):
3554 nosandbox = ("sandbox" not in features and \
3555 "usersandbox" not in features)
3557 sesandbox = mysettings.selinux_enabled() and \
3558 "sesandbox" in mysettings.features
3559 ebuild_sh = EBUILD_SH_BINARY + " %s"
3560 misc_sh = MISC_SH_BINARY + " dyn_%s"
3562 # args are for the to spawn function
3564 "depend": {"cmd":ebuild_sh, "args":{"droppriv":1, "free":0, "sesandbox":0}},
3565 "setup": {"cmd":ebuild_sh, "args":{"droppriv":0, "free":1, "sesandbox":0}},
3566 "unpack": {"cmd":ebuild_sh, "args":{"droppriv":1, "free":0, "sesandbox":sesandbox}},
3567 "compile":{"cmd":ebuild_sh, "args":{"droppriv":1, "free":nosandbox, "sesandbox":sesandbox}},
3568 "test": {"cmd":ebuild_sh, "args":{"droppriv":1, "free":nosandbox, "sesandbox":sesandbox}},
3569 "install":{"cmd":ebuild_sh, "args":{"droppriv":0, "free":0, "sesandbox":sesandbox}},
3570 "rpm": {"cmd":misc_sh, "args":{"droppriv":0, "free":0, "sesandbox":0}},
3571 "package":{"cmd":misc_sh, "args":{"droppriv":0, "free":0, "sesandbox":0}},
3574 # merge the deps in so we have again a 'full' actionmap
3575 # be glad when this can die.
3576 for x in actionmap.keys():
3577 if len(actionmap_deps.get(x, [])):
3578 actionmap[x]["dep"] = ' '.join(actionmap_deps[x])
3580 if mydo in actionmap.keys():
3582 portage_util.ensure_dirs(
3583 os.path.join(mysettings["PKGDIR"], mysettings["CATEGORY"]))
3584 portage_util.ensure_dirs(
3585 os.path.join(mysettings["PKGDIR"], "All"))
3586 retval = spawnebuild(mydo,
3587 actionmap, mysettings, debug, logfile=logfile)
3588 elif mydo=="qmerge":
3589 # check to ensure install was run. this *only* pops up when users
3590 # forget it and are using ebuild
3591 if not os.path.exists(
3592 os.path.join(mysettings["PORTAGE_BUILDDIR"], ".installed")):
3593 writemsg("!!! mydo=qmerge, but install phase hasn't been ran\n",
3596 # qmerge is a special phase that implies noclean.
3597 if "noclean" not in mysettings.features:
3598 mysettings.features.append("noclean")
3599 #qmerge is specifically not supposed to do a runtime dep check
3601 mysettings["CATEGORY"], mysettings["PF"], mysettings["D"],
3602 os.path.join(mysettings["PORTAGE_BUILDDIR"], "build-info"),
3603 myroot, mysettings, myebuild=mysettings["EBUILD"], mytree=tree,
3604 mydbapi=mydbapi, vartree=vartree, prev_mtimes=prev_mtimes)
3606 retval = spawnebuild("install", actionmap, mysettings, debug,
3607 alwaysdep=1, logfile=logfile)
3608 if retval == os.EX_OK:
3609 retval = merge(mysettings["CATEGORY"], mysettings["PF"],
3610 mysettings["D"], os.path.join(mysettings["PORTAGE_BUILDDIR"],
3611 "build-info"), myroot, mysettings,
3612 myebuild=mysettings["EBUILD"], mytree=tree, mydbapi=mydbapi,
3613 vartree=vartree, prev_mtimes=prev_mtimes)
3615 print "!!! Unknown mydo:",mydo
3618 if retval != os.EX_OK and tree == "porttree":
3619 for i in xrange(len(mydbapi.porttrees)-1):
3620 t = mydbapi.porttrees[i+1]
3621 if myebuild.startswith(t):
3622 # Display the non-cannonical path, in case it's different, to
3623 # prevent confusion.
3624 overlays = mysettings["PORTDIR_OVERLAY"].split()
3626 writemsg("!!! This ebuild is from an overlay: '%s'\n" % \
3627 overlays[i], noiselevel=-1)
3635 portage_locks.unlockdir(builddir_lock)
3637 # Make sure that DISTDIR is restored to it's normal value before we return!
3638 if "PORTAGE_ACTUAL_DISTDIR" in mysettings:
3639 mysettings["DISTDIR"] = mysettings["PORTAGE_ACTUAL_DISTDIR"]
3640 del mysettings["PORTAGE_ACTUAL_DISTDIR"]
3644 if os.stat(logfile).st_size == 0:
3649 if mydo in ("digest", "manifest", "help"):
3650 # If necessary, depend phase has been triggered by aux_get calls
3651 # and the exemption is no longer needed.
3652 _doebuild_manifest_exempt_depend -= 1
def movefile(src,dest,newmtime=None,sstat=None,mysettings=None):
	"""moves a file from src to dest, preserving all permissions and attributes; mtime will
	be preserved even when moving across filesystems.  Returns true on success and false on
	failure.  Move is atomic."""
	#print "movefile("+str(src)+","+str(dest)+","+str(newmtime)+","+str(sstat)+")"
	# Fall back to the module-global configuration when no config is passed.
	if mysettings is None:
		mysettings = settings
	selinux_enabled = mysettings.selinux_enabled()
	# lstat the source (sstat may be a caller-supplied cached stat); a
	# failure here aborts the move with a diagnostic.
	except SystemExit, e:
	except Exception, e:
		print "!!! Stating source file failed... movefile()"
	# Stat the destination; when dest does not exist yet, stat its parent
	# directory instead so filesystem flags can still be inspected.
		dstat=os.lstat(dest)
	except (OSError, IOError):
		dstat=os.lstat(os.path.dirname(dest))
		# Check that we can actually unset schg etc flags...
		# Clear the flags on source and destination; we'll reinstate them after merging
		if destexists and dstat.st_flags != 0:
			if bsd_chflags.lchflags(dest, 0) < 0:
				writemsg("!!! Couldn't clear flags on file being merged: \n ",
		# We might have an immutable flag on the parent dir; save and clear.
		pflags=bsd_chflags.lgetflags(os.path.dirname(dest))
			bsd_chflags.lchflags(os.path.dirname(dest), 0)
		if (destexists and bsd_chflags.lhasproblems(dest) > 0) or \
			bsd_chflags.lhasproblems(os.path.dirname(dest)) > 0:
			# This is bad: we can't merge the file with these flags set.
			writemsg("!!! Can't merge file "+dest+" because of flags set\n",
		# An existing symlink at dest is removed before the move proper.
		if stat.S_ISLNK(dstat[stat.ST_MODE]):
		except SystemExit, e:
		except Exception, e:
	# Symlink sources are recreated at dest rather than moved.
	if stat.S_ISLNK(sstat[stat.ST_MODE]):
			target=os.readlink(src)
			# Strip the image-directory ($D) prefix from the link target so
			# the installed symlink points into the live filesystem.
			if mysettings and mysettings["D"]:
				if target.find(mysettings["D"])==0:
					target=target[len(mysettings["D"]):]
			if destexists and not stat.S_ISDIR(dstat[stat.ST_MODE]):
			# With SELinux, recreate the link carrying the source's context.
				sid = selinux.get_lsid(src)
				selinux.secure_symlink(target,dest,sid)
				os.symlink(target,dest)
			lchown(dest,sstat[stat.ST_UID],sstat[stat.ST_GID])
			# Success: report the new link's mtime.
			return os.lstat(dest)[stat.ST_MTIME]
		except SystemExit, e:
		except Exception, e:
			print "!!! failed to properly create symlink:"
			print "!!!",dest,"->",target
	# Same device (or SELinux in use): try an atomic rename first.
	if sstat[stat.ST_DEV]==dstat[stat.ST_DEV] or selinux_enabled:
				ret=selinux.secure_rename(src,dest)
				ret=os.rename(src,dest)
		except SystemExit, e:
		except Exception, e:
			# Only EXDEV (cross-device link) falls through to the copy path.
			if e[0]!=errno.EXDEV:
				# Some random error.
				print "!!! Failed to move",src,"to",dest
			# Invalid cross-device-link 'bind' mounted or actually Cross-Device
	# Cross-device fallback: copy to "<dest>#new" then rename into place so
	# the final replacement stays atomic.
		if stat.S_ISREG(sstat[stat.ST_MODE]):
			try: # For safety copy then move it over.
					selinux.secure_copy(src,dest+"#new")
					selinux.secure_rename(dest+"#new",dest)
					shutil.copyfile(src,dest+"#new")
					os.rename(dest+"#new",dest)
			except SystemExit, e:
			except Exception, e:
				print '!!! copy',src,'->',dest,'failed.'
			#we don't yet handle special, so we need to fall back to /bin/mv
				a=commands.getstatusoutput(MOVE_BINARY+" -c -f "+"'"+src+"' '"+dest+"'")
				a=commands.getstatusoutput(MOVE_BINARY+" -f "+"'"+src+"' '"+dest+"'")
				print "!!! Failed to move special file:"
				print "!!! '"+src+"' to '"+dest+"'"
				return None # failure
	# After a copy-based move, re-apply ownership and mode from the source.
		if stat.S_ISLNK(sstat[stat.ST_MODE]):
			lchown(dest,sstat[stat.ST_UID],sstat[stat.ST_GID])
			os.chown(dest,sstat[stat.ST_UID],sstat[stat.ST_GID])
			os.chmod(dest, stat.S_IMODE(sstat[stat.ST_MODE])) # Sticky is reset on chown
	except SystemExit, e:
	except Exception, e:
		print "!!! Failed to chown/chmod/unlink in movefile()"
	# Preserve mtime: either the explicit newmtime, or the source's
	# atime/mtime pair (in which case newmtime is taken from the source).
			os.utime(dest,(newmtime,newmtime))
			os.utime(dest, (sstat[stat.ST_ATIME], sstat[stat.ST_MTIME]))
			newmtime=sstat[stat.ST_MTIME]
		# Restore the flags we saved before moving
		if pflags and bsd_chflags.lchflags(os.path.dirname(dest), pflags) < 0:
			writemsg("!!! Couldn't restore flags (%s) on '%s'\n" % \
				(str(pflags), os.path.dirname(dest)), noiselevel=-1)
def merge(mycat, mypkg, pkgloc, infloc, myroot, mysettings, myebuild=None,
	mytree=None, mydbapi=None, vartree=None, prev_mtimes=None):
	"""Merge a built package image into the target root by delegating to
	dblink.merge().  pkgloc is the image directory ($D) and infloc the
	build-info directory; prev_mtimes is forwarded for config protection.
	Refuses to proceed when myroot is not writable."""
	# A read-only ROOT makes any merge impossible; fail early with a message.
	if not os.access(myroot, os.W_OK):
		writemsg("Permission denied: access('%s', W_OK)\n" % myroot,
	mylink = dblink(mycat, mypkg, myroot, mysettings, treetype=mytree,
	return mylink.merge(pkgloc, infloc, myroot, myebuild,
		mydbapi=mydbapi, prev_mtimes=prev_mtimes)
def unmerge(cat, pkg, myroot, mysettings, mytrimworld=1, vartree=None, ldpath_mtimes=None):
	"""Unmerge the installed package cat/pkg from myroot via a vartree
	dblink.  mytrimworld is forwarded as trimworld; ldpath_mtimes lets the
	caller track ldconfig-related mtimes across the unmerge."""
		cat, pkg, myroot, mysettings, treetype="vartree", vartree=vartree)
	retval = mylink.unmerge(trimworld=mytrimworld, cleanup=1,
		ldpath_mtimes=ldpath_mtimes)
	# Post-unmerge handling runs only when the unmerge itself succeeded.
	if retval == os.EX_OK:
def getCPFromCPV(mycpv):
	"""Reduce a cpv ("cat/pkg-ver[-rev]") to its cp ("cat/pkg") via pkgsplit."""
	split_result = pkgsplit(mycpv)
	return split_result[0]
def dep_virtual(mysplit, mysettings):
	"Does virtual dependency conversion"
	# Mapping of virtual key -> list of providing packages from the config.
	myvirtuals = mysettings.getvirtuals()
		# Recurse into nested dependency sublists.
		if type(x)==types.ListType:
			newsplit.append(dep_virtual(x, mysettings))
			mychoices = myvirtuals.get(mykey, None)
				# Exactly one provider: substitute it directly into the atom.
				if len(mychoices) == 1:
					a = x.replace(mykey, mychoices[0])
					# blocker needs "and" not "or(||)".
						a.append(x.replace(mykey, y))
def _expand_new_virtuals(mysplit, edebug, mydbapi, mysettings, myroot="/",
	trees=None, **kwargs):
	"""Recursively expand new-style virtuals so as to collapse one or more
	levels of indirection.  In dep_zapdeps, new-style virtuals will be assigned
	zero cost regardless of whether or not they are currently installed. Virtual
	blockers are supported but only when the virtual expands to a single
	atom because it wouldn't necessarily make sense to block all the components
	of a compound virtual.  When more than one new-style virtual is matched,
	the matches are sorted from highest to lowest versions and the atom is
	expanded to || ( highest match ... lowest match )."""
	# According to GLEP 37, RDEPEND is the only dependency type that is valid
	# for new-style virtuals.  Repoman should enforce this.
	dep_keys = ["RDEPEND", "DEPEND", "PDEPEND"]
	# Sort helper: descending version order (b compared against a).
	def compare_pkgs(a, b):
		return pkgcmp(b[1], a[1])
	portdb = trees[myroot]["porttree"].dbapi
	myvirtuals = mysettings.getvirtuals()
		# Nested sublists are expanded recursively with the same arguments.
		elif isinstance(x, list):
			newsplit.append(_expand_new_virtuals(x, edebug, mydbapi,
				mysettings, myroot=myroot, trees=trees, **kwargs))
		# In strict mode, malformed atoms are a hard parse error.
		if portage_dep._dep_check_strict and \
			not isvalidatom(x, allow_blockers=True):
			raise portage_exception.ParseError(
				"invalid atom: '%s'" % x)
		mykey = dep_getkey(x)
		# Only virtual/* atoms are candidates for expansion.
		if not mykey.startswith("virtual/"):
		mychoices = myvirtuals.get(mykey, [])
		isblocker = x.startswith("!")
		for cpv in portdb.match(match_atom):
			# only use new-style matches
			if cpv.startswith("virtual/"):
				pkgs.append((cpv, pkgsplit(cpv)))
		if not (pkgs or mychoices):
			# This one couldn't be expanded as a new-style virtual.  Old-style
			# virtuals have already been expanded by dep_virtual, so this one
			# is unavailable and dep_zapdeps will identify it as such.  The
			# atom is not eliminated here since it may still represent a
			# dependency that needs to be satisfied.
		# Old-style single provider: simple textual substitution suffices.
		if not pkgs and len(mychoices) == 1:
			newsplit.append(x.replace(mykey, mychoices[0]))
		pkgs.sort(compare_pkgs) # Prefer higher versions.
			# Pull the virtual package's own dependency strings and expand
			# them via a recursive dep_check.
			depstring = " ".join(portdb.aux_get(y[0], dep_keys))
				print "Virtual Parent: ", y[0]
				print "Virtual Depstring:", depstring
			mycheck = dep_check(depstring, mydbapi, mysettings, myroot=myroot,
				trees=trees, **kwargs)
				raise portage_exception.ParseError(
					"%s: %s '%s'" % (y[0], mycheck[1], depstring))
				virtual_atoms = [atom for atom in mycheck[1] \
					if not atom.startswith("!")]
				if len(virtual_atoms) == 1:
					# It wouldn't make sense to block all the components of a
					# compound virtual, so only a single atom block is allowed.
					a.append("!" + virtual_atoms[0])
			mycheck[1].append("="+y[0]) # pull in the new-style virtual
			a.append(mycheck[1])
		# Plain old-style virtuals.  New-style virtuals are preferred.
			a.append(x.replace(mykey, y))
		if isblocker and not a:
			# Probably a compound virtual.  Pass the atom through unprocessed.
def dep_eval(deplist):
	# Evaluates a (possibly nested) dependency list that dep_wordreduce has
	# reduced to ones and zeros; used by dep_zapdeps to test satisfaction.
	if deplist[0]=="||":
		#or list; we just need one "1"
		for x in deplist[1:]:
			# Nested sublists are evaluated recursively.
			if type(x)==types.ListType:
		#XXX: unless there's no available atoms in the list
		#in which case we need to assume that everything is
		#okay as some ebuilds are relying on an old bug.
		if len(deplist) == 1:
		# "and" list: every member must evaluate true.
			if type(x)==types.ListType:
def dep_zapdeps(unreduced, reduced, myroot, use_binaries=0, trees=None):
	"""Takes an unreduced and reduced deplist and removes satisfied dependencies.
	Returned deplist contains steps that must be taken to satisfy dependencies."""
	writemsg("ZapDeps -- %s\n" % (use_binaries), 2)
	# A fully satisfied (or empty) branch contributes no further steps.
	if not reduced or unreduced == ["||"] or dep_eval(reduced):
	# An "and" list: recurse into sublists, keep unsatisfied flat atoms.
	if unreduced[0] != "||":
		for dep, satisfied in izip(unreduced, reduced):
			if isinstance(dep, list):
				unresolved += dep_zapdeps(dep, satisfied, myroot,
					use_binaries=use_binaries, trees=trees)
				unresolved.append(dep)
	# We're at a ( || atom ... ) type level and need to make a choice
	deps = unreduced[1:]
	satisfieds = reduced[1:]
	# Our preference order is for the first item that:
	# a) contains all unmasked packages with the same key as installed packages
	# b) contains all unmasked packages
	# c) contains masked installed packages
	# d) is the first item
	possible_upgrades = []
	# Alias the trees we'll be checking availability against
	if "vartree" in trees[myroot]:
		vardb = trees[myroot]["vartree"].dbapi
		# use_binaries selects the binary-package tree; otherwise ports tree.
		mydbapi = trees[myroot]["bintree"].dbapi
		mydbapi = trees[myroot]["porttree"].dbapi
	# Sort the deps into preferred (installed) and other
	# with values of [[required_atom], availability]
	for dep, satisfied in izip(deps, satisfieds):
		# A sublist choice is itself resolved recursively into atoms.
		if isinstance(dep, list):
			atoms = dep_zapdeps(dep, satisfied, myroot,
				use_binaries=use_binaries, trees=trees)
		# A choice is "available" only if every atom matches in mydbapi.
		all_available = True
			if not mydbapi.match(atom):
				all_available = False
			preferred.append((atoms, None, all_available))
		""" The package names rather than the exact atoms are used for an
		initial rough match against installed packages. More specific
		preference selection is handled later via slot and version comparison."""
		all_installed = True
		for atom in set([dep_getkey(atom) for atom in atoms]):
			# New-style virtuals have zero cost to install.
			if not vardb.match(atom) and not atom.startswith("virtual/"):
				all_installed = False
		# Check if the set of atoms will result in a downgrade of
		# an installed package. If they will then don't prefer them
		has_downgrade = False
		if all_installed or all_available:
				mykey = dep_getkey(atom)
				avail_pkg = best(mydbapi.match(atom))
				# Track the best available version per cp:SLOT pair.
				avail_slot = "%s:%s" % (mykey,
					mydbapi.aux_get(avail_pkg, ["SLOT"])[0])
				versions[avail_slot] = avail_pkg
				inst_pkg = vardb.match(avail_slot)
					# emerge guarantees 1 package per slot here (highest counter)
					inst_pkg = inst_pkg[0]
					# A choice whose best match is older than what is
					# installed would be a downgrade.
					if avail_pkg != inst_pkg and \
						avail_pkg != best([avail_pkg, inst_pkg]):
						has_downgrade = True
		this_choice = (atoms, versions, all_available)
		if not has_downgrade:
				preferred.append(this_choice)
				possible_upgrades.append(this_choice)
			other.append(this_choice)
	# Compare the "all_installed" choices against the "all_available" choices
	# for possible missed upgrades. The main purpose of this code is to find
	# upgrades of new-style virtuals since _expand_new_virtuals() expands them
	# into || ( highest version ... lowest version ). We want to prefer the
	# highest all_available version of the new-style virtual when there is a
	# lower all_installed version.
	for possible_upgrade in list(possible_upgrades):
		atoms, versions, all_available = possible_upgrade
		myslots = set(versions)
		for other_choice in preferred:
			o_atoms, o_versions, o_all_available = other_choice
			intersecting_slots = myslots.intersection(o_versions)
			if not intersecting_slots:
			has_downgrade = False
			# Compare per-slot versions to classify this candidate as an
			# upgrade and/or downgrade relative to the preferred choice.
			for myslot in intersecting_slots:
				myversion = versions[myslot]
				o_version = o_versions[myslot]
				if myversion != o_version:
					if myversion == best([myversion, o_version]):
						has_downgrade = True
			# A pure upgrade is promoted ahead of the choice it beats.
			if has_upgrade and not has_downgrade:
				o_index = preferred.index(other_choice)
				preferred.insert(o_index, possible_upgrade)
				possible_upgrades.remove(possible_upgrade)
	preferred.extend(possible_upgrades)
	# preferred now contains a) and c) from the order above with
	# the masked flag differentiating the two. other contains b)
	# and d) so adding other to preferred will give us a suitable
	# list to iterate over.
	preferred.extend(other)
	# First pass refuses masked choices; the second pass allows them.
	for allow_masked in (False, True):
		for atoms, versions, all_available in preferred:
			if all_available or allow_masked:
	assert(False) # This point should not be reachable
def dep_expand(mydep, mydb=None, use_cache=1, settings=None):
	mydep = dep_getcpv(orig_dep)
	# Split orig_dep into prefix + cpv + postfix so only the embedded cpv
	# is run through cpv_expand; any operator/version decoration around it
	# is preserved verbatim.
	myindex = orig_dep.index(mydep)
	prefix = orig_dep[:myindex]
	postfix = orig_dep[myindex+len(mydep):]
	return prefix + cpv_expand(
		mydep, mydb=mydb, use_cache=use_cache, settings=settings) + postfix
def dep_check(depstring, mydbapi, mysettings, use="yes", mode=None, myuse=None,
	use_cache=1, use_binaries=0, myroot="/", trees=None):
	"""Takes a depend string and parses the condition.

	Returns a 2-element list: [0, error-message] on failure, or
	[1, atom-list] where the atoms are the unsatisfied dependency steps
	selected by dep_zapdeps (see the return statements below).
	"""
	edebug = mysettings.get("PORTAGE_DEBUG", None) == "1"
	#check_config_instance(mysettings)
	# Default to the global tree database when no trees are supplied.
		trees = globals()["db"]
			# No explicit USE list given: take it from the configuration.
			myusesplit = mysettings["USE"].split()
			# We've been given useflags to use.
			#print "USE FLAGS PASSED IN."
			#if "bindist" in myusesplit:
			#	print "BINDIST is set!"
			#	print "BINDIST NOT set."
			#we are being run by autouse(), don't consult USE vars yet.
			# WE ALSO CANNOT USE SETTINGS
	#convert parenthesis to sublists
	mysplit = portage_dep.paren_reduce(depstring)
		useforce.add(mysettings["ARCH"])
		# This masking/forcing is only for repoman.  In other cases, relevant
		# masking/forcing should have already been applied via
		# config.regenerate().  Also, binary or installed packages may have
		# been built with flags that are now masked, and it would be
		# inconsistent to mask them now.  Additionally, myuse may consist of
		# flags from a parent package that is being merged to a $ROOT that is
		# different from the one that mysettings represents.
		mymasks.update(mysettings.usemask)
		mymasks.update(mysettings.archlist())
		mymasks.discard(mysettings["ARCH"])
		useforce.update(mysettings.useforce)
		useforce.difference_update(mymasks)
		# Reduce USE-conditional groups against the effective flag sets.
		mysplit = portage_dep.use_reduce(mysplit, uselist=myusesplit,
			masklist=mymasks, matchall=(use=="all"), excludeall=useforce)
	except portage_exception.InvalidDependString, e:
	# Do the || conversions
	mysplit=portage_dep.dep_opconvert(mysplit)
		#dependencies were reduced to nothing
	# Recursively expand new-style virtuals so as to
	# collapse one or more levels of indirection.
		mysplit = _expand_new_virtuals(mysplit, edebug, mydbapi, mysettings,
			use=use, mode=mode, myuse=myuse, use_cache=use_cache,
			use_binaries=use_binaries, myroot=myroot, trees=trees)
	except portage_exception.ParseError, e:
	# Reduce each atom to 1/0 depending on whether it is already satisfied.
	mysplit2=dep_wordreduce(mysplit2,mysettings,mydbapi,mode,use_cache=use_cache)
	if mysplit2 is None:
		return [0,"Invalid token"]
	writemsg("\n\n\n", 1)
	writemsg("mysplit: %s\n" % (mysplit), 1)
	writemsg("mysplit2: %s\n" % (mysplit2), 1)
	# Pick the concrete set of unsatisfied atoms that must be merged.
	myzaps = dep_zapdeps(mysplit, mysplit2, myroot,
		use_binaries=use_binaries, trees=trees)
	mylist = flatten(myzaps)
	writemsg("myzaps: %s\n" % (myzaps), 1)
	writemsg("mylist: %s\n" % (mylist), 1)
	writemsg("mydict: %s\n" % (mydict), 1)
	return [1,mydict.keys()]
def dep_wordreduce(mydeplist,mysettings,mydbapi,mode,use_cache=1):
	"Reduces the deplist to ones and zeros"
	# Work on a shallow copy so the caller's list is left untouched.
	deplist=mydeplist[:]
	for mypos in xrange(len(deplist)):
		# Sublists are reduced recursively; "||" markers pass through.
		if type(deplist[mypos])==types.ListType:
			deplist[mypos]=dep_wordreduce(deplist[mypos],mysettings,mydbapi,mode,use_cache=use_cache)
		elif deplist[mypos]=="||":
			mykey = dep_getkey(deplist[mypos])
			# package.provided entries satisfy an atom without a db match.
			if mysettings and mysettings.pprovideddict.has_key(mykey) and \
				match_from_list(deplist[mypos], mysettings.pprovideddict[mykey]):
			elif mydbapi is None:
				# Assume nothing is satisfied.  This forces dep_zapdeps to
				# return all of deps the deps that have been selected
				# (excluding those satisfied by package.provided).
				deplist[mypos] = False
					# xmatch is used when a match mode is given (portdbapi);
					# otherwise a plain (cached) match.
					mydep=mydbapi.xmatch(mode,deplist[mypos])
					mydep=mydbapi.match(deplist[mypos],use_cache=use_cache)
				# Blockers ("!atom") invert the satisfaction result.
				if deplist[mypos][0]=="!":
	#encountered invalid string
4271 def cpv_getkey(mycpv):
4272 myslash=mycpv.split("/")
4273 mysplit=pkgsplit(myslash[-1])
4276 return myslash[0]+"/"+mysplit[0]
4282 def key_expand(mykey, mydb=None, use_cache=1, settings=None):
4283 mysplit=mykey.split("/")
4284 if settings is None:
4285 settings = globals()["settings"]
4286 virts = settings.getvirtuals("/")
4287 virts_p = settings.get_virts_p("/")
4289 if mydb and type(mydb)==types.InstanceType:
4290 for x in settings.categories:
4291 if mydb.cp_list(x+"/"+mykey,use_cache=use_cache):
4293 if virts_p.has_key(mykey):
4294 return(virts_p[mykey][0])
4295 return "null/"+mykey
4297 if type(mydb)==types.InstanceType:
4298 if (not mydb.cp_list(mykey,use_cache=use_cache)) and virts and virts.has_key(mykey):
4299 return virts[mykey][0]
4302 def cpv_expand(mycpv, mydb=None, use_cache=1, settings=None):
4303 """Given a string (packagename or virtual) expand it into a valid
4304 cat/package string. Virtuals use the mydb to determine which provided
4305 virtual is a valid choice and defaults to the first element when there
4306 are no installed/available candidates."""
4307 myslash=mycpv.split("/")
4308 mysplit=pkgsplit(myslash[-1])
4309 if settings is None:
4310 settings = globals()["settings"]
4311 virts = settings.getvirtuals("/")
4312 virts_p = settings.get_virts_p("/")
4314 # this is illegal case.
4317 elif len(myslash)==2:
4319 mykey=myslash[0]+"/"+mysplit[0]
4322 if mydb and virts and mykey in virts:
4323 writemsg("mydb.__class__: %s\n" % (mydb.__class__), 1)
4324 if type(mydb)==types.InstanceType:
4325 if not mydb.cp_list(mykey, use_cache=use_cache):
4326 writemsg("virts[%s]: %s\n" % (str(mykey),virts[mykey]), 1)
4327 mykey_orig = mykey[:]
4328 for vkey in virts[mykey]:
4329 if mydb.cp_list(vkey,use_cache=use_cache):
4331 writemsg("virts chosen: %s\n" % (mykey), 1)
4333 if mykey == mykey_orig:
4334 mykey=virts[mykey][0]
4335 writemsg("virts defaulted: %s\n" % (mykey), 1)
4336 #we only perform virtual expansion if we are passed a dbapi
4338 #specific cpv, no category, ie. "foo-1.0"
4347 for x in settings.categories:
4348 if mydb.cp_list(x+"/"+myp,use_cache=use_cache):
4349 matches.append(x+"/"+myp)
4350 if (len(matches)>1):
4351 raise ValueError, matches
4355 if not mykey and type(mydb)!=types.ListType:
4356 if virts_p.has_key(myp):
4357 mykey=virts_p[myp][0]
4358 #again, we only perform virtual expansion if we have a dbapi (not a list)
4362 if mysplit[2]=="r0":
4363 return mykey+"-"+mysplit[1]
4365 return mykey+"-"+mysplit[1]+"-"+mysplit[2]
4369 def getmaskingreason(mycpv, settings=None, portdb=None):
4370 from portage_util import grablines
4371 if settings is None:
4372 settings = globals()["settings"]
4374 portdb = globals()["portdb"]
4375 mysplit = catpkgsplit(mycpv)
4377 raise ValueError("invalid CPV: %s" % mycpv)
4378 if not portdb.cpv_exists(mycpv):
4379 raise KeyError("CPV %s does not exist" % mycpv)
4380 mycp=mysplit[0]+"/"+mysplit[1]
4382 # XXX- This is a temporary duplicate of code from the config constructor.
4383 locations = [os.path.join(settings["PORTDIR"], "profiles")]
4384 locations.extend(settings.profiles)
4385 for ov in settings["PORTDIR_OVERLAY"].split():
4386 profdir = os.path.join(normalize_path(ov), "profiles")
4387 if os.path.isdir(profdir):
4388 locations.append(profdir)
4389 locations.append(os.path.join(settings["PORTAGE_CONFIGROOT"],
4390 USER_CONFIG_PATH.lstrip(os.path.sep)))
4392 pmasklists = [grablines(os.path.join(x, "package.mask"), recursive=1) for x in locations]
4394 while pmasklists: # stack_lists doesn't preserve order so it can't be used
4395 pmasklines.extend(pmasklists.pop(0))
4398 if settings.pmaskdict.has_key(mycp):
4399 for x in settings.pmaskdict[mycp]:
4400 if mycpv in portdb.xmatch("match-all", x):
4404 for i in xrange(len(pmasklines)):
4405 l = pmasklines[i].strip()
4411 comment_valid = i + 1
4413 if comment_valid != i:
4416 elif comment_valid != -1:
4417 # Apparently this comment applies to muliple masks, so
4418 # it remains valid until a blank line is encountered.
4422 def getmaskingstatus(mycpv, settings=None, portdb=None):
4423 if settings is None:
4424 settings = globals()["settings"]
4426 portdb = globals()["portdb"]
4427 mysplit = catpkgsplit(mycpv)
4429 raise ValueError("invalid CPV: %s" % mycpv)
4430 if not portdb.cpv_exists(mycpv):
4431 raise KeyError("CPV %s does not exist" % mycpv)
4432 mycp=mysplit[0]+"/"+mysplit[1]
4437 revmaskdict=settings.prevmaskdict
4438 if revmaskdict.has_key(mycp):
4439 for x in revmaskdict[mycp]:
4444 if not match_to_list(mycpv, [myatom]):
4445 rValue.append("profile")
4448 # package.mask checking
4449 maskdict=settings.pmaskdict
4450 unmaskdict=settings.punmaskdict
4451 if maskdict.has_key(mycp):
4452 for x in maskdict[mycp]:
4453 if mycpv in portdb.xmatch("match-all", x):
4455 if unmaskdict.has_key(mycp):
4456 for z in unmaskdict[mycp]:
4457 if mycpv in portdb.xmatch("match-all",z):
4461 rValue.append("package.mask")
4465 mygroups, eapi = portdb.aux_get(mycpv, ["KEYWORDS", "EAPI"])
4467 # The "depend" phase apparently failed for some reason. An associated
4468 # error message will have already been printed to stderr.
4469 return ["corruption"]
4470 if not eapi_is_supported(eapi):
4471 return ["required EAPI %s, supported EAPI %s" % (eapi, portage_const.EAPI)]
4472 mygroups = mygroups.split()
4473 pgroups = settings["ACCEPT_KEYWORDS"].split()
4474 myarch = settings["ARCH"]
4475 if pgroups and myarch not in pgroups:
4476 """For operating systems other than Linux, ARCH is not necessarily a
4478 myarch = pgroups[0].lstrip("~")
4479 pkgdict = settings.pkeywordsdict
4481 cp = dep_getkey(mycpv)
4482 if pkgdict.has_key(cp):
4483 matches = match_to_list(mycpv, pkgdict[cp].keys())
4484 for match in matches:
4485 pgroups.extend(pkgdict[cp][match])
4489 if x != "-*" and x.startswith("-"):
4491 inc_pgroups.remove(x[1:])
4494 if x not in inc_pgroups:
4495 inc_pgroups.append(x)
4496 pgroups = inc_pgroups
4501 for keyword in pgroups:
4502 if keyword in mygroups:
4511 elif gp=="-"+myarch:
4514 elif gp=="~"+myarch:
4519 rValue.append(kmask+" keyword")
4523 def __init__(self, root="/", virtual=None, clone=None, settings=None):
4526 self.root=clone.root
4527 self.portroot=clone.portroot
4528 self.pkglines=clone.pkglines
4531 if settings is None:
4532 settings = globals()["settings"]
4533 self.settings = settings
4534 self.portroot=settings["PORTDIR"]
4535 self.virtual=virtual
4536 self.dbapi = portdbapi(
4537 settings["PORTDIR"], mysettings=settings)
4539 def dep_bestmatch(self,mydep):
4540 "compatibility method"
4541 mymatch=self.dbapi.xmatch("bestmatch-visible",mydep)
4546 def dep_match(self,mydep):
4547 "compatibility method"
4548 mymatch=self.dbapi.xmatch("match-visible",mydep)
def exists_specific(self, cpv):
    """Report whether this exact category/package-version exists in the tree."""
    found = self.dbapi.cpv_exists(cpv)
    return found
def getallnodes(self):
    """Return every *unmasked* cat/pkg node.

    Note: a node listed here may or may not also have masked versions
    available.
    """
    return self.dbapi.cp_all()
4561 def getname(self,pkgname):
4562 "returns file location for this particular package (DEPRECATED)"
4565 mysplit=pkgname.split("/")
4566 psplit=pkgsplit(mysplit[1])
4567 return self.portroot+"/"+mysplit[0]+"/"+psplit[0]+"/"+mysplit[1]+".ebuild"
4569 def resolve_specific(self,myspec):
4570 cps=catpkgsplit(myspec)
4573 mykey = key_expand(cps[0]+"/"+cps[1], mydb=self.dbapi,
4574 settings=self.settings)
4575 mykey=mykey+"-"+cps[2]
4577 mykey=mykey+"-"+cps[3]
def depcheck(self, mycheck, use="yes", myusesplit=None):
    """Compatibility wrapper that forwards to the module-level dep_check()."""
    return dep_check(mycheck, self.dbapi, use=use, myuse=myusesplit)
4583 def getslot(self,mycatpkg):
4584 "Get a slot for a catpkg; assume it exists."
4587 myslot=self.dbapi.aux_get(mycatpkg,["SLOT"])[0]
4588 except SystemExit, e:
4590 except Exception, e:
4599 def close_caches(self):
4602 def cp_list(self,cp,use_cache=1):
4607 for cp in self.cp_all():
4608 cpv_list.extend(self.cp_list(cp))
def aux_get(self, mycpv, mylist):
    """Abstract hook: return auxiliary metadata (SLOT, DEPEND, etc.) for a cpv.

    input:  "sys-apps/foo-1.0", ["SLOT", "DEPEND", "HOMEPAGE"]
    return: ["0", ">=sys-libs/bar-1.0", "http://www.foo.com"],
            or [] if mycpv is not found.  Subclasses must override this stub.
    """
    raise NotImplementedError
4617 def match(self,origdep,use_cache=1):
4618 mydep = dep_expand(origdep, mydb=self, settings=self.settings)
4619 mykey=dep_getkey(mydep)
4620 mylist = match_from_list(mydep,self.cp_list(mykey,use_cache=use_cache))
4621 myslot = portage_dep.dep_getslot(mydep)
4622 if myslot is not None:
4623 mylist = [cpv for cpv in mylist \
4624 if self.aux_get(cpv, ["SLOT"])[0] == myslot]
def match2(self, mydep, mykey, mylist):
    """Deprecated: filter mylist down to the cpvs matching atom mydep.

    Bug fix: the result of match_from_list() was previously computed and
    then discarded, so this method always returned None; the match list
    is now returned to the caller (backward compatible, since callers
    could only ever have observed None before).
    """
    writemsg("DEPRECATED: dbapi.match2\n")
    return match_from_list(mydep, mylist)
4631 def invalidentry(self, mypath):
4632 if re.search("portage_lockfile$",mypath):
4633 if not os.environ.has_key("PORTAGE_MASTER_PID"):
4634 writemsg("Lockfile removed: %s\n" % mypath, 1)
4635 portage_locks.unlockfile((mypath,None,None))
4637 # Nothing we can do about it. We're probably sandboxed.
4639 elif re.search(".*/-MERGING-(.*)",mypath):
4640 if os.path.exists(mypath):
4641 writemsg(red("INCOMPLETE MERGE:")+" "+mypath+"\n", noiselevel=-1)
4643 writemsg("!!! Invalid db entry: %s\n" % mypath, noiselevel=-1)
4647 class fakedbapi(dbapi):
4648 "This is a dbapi to use for the emptytree function. It's empty, but things can be added to it."
4649 def __init__(self, settings=None):
4652 if settings is None:
4653 settings = globals()["settings"]
4654 self.settings = settings
4655 self._match_cache = {}
def _clear_cache(self):
    """Drop any memoized match() results."""
    if not self._match_cache:
        return
    self._match_cache = {}
4661 def match(self, origdep, use_cache=1):
4662 result = self._match_cache.get(origdep, None)
4663 if result is not None:
4665 result = dbapi.match(self, origdep, use_cache=use_cache)
4666 self._match_cache[origdep] = result
def cpv_exists(self, mycpv):
    """Return True if mycpv has been injected into this fake database."""
    return mycpv in self.cpvdict
4672 def cp_list(self,mycp,use_cache=1):
4673 if not self.cpdict.has_key(mycp):
4676 return self.cpdict[mycp]
4680 for x in self.cpdict.keys():
4681 returnme.extend(self.cpdict[x])
4685 return self.cpvdict.keys()
4687 def cpv_inject(self, mycpv, metadata=None):
4688 """Adds a cpv from the list of available packages."""
4690 mycp=cpv_getkey(mycpv)
4691 self.cpvdict[mycpv] = metadata
4694 myslot = metadata.get("SLOT", None)
4695 if myslot and mycp in self.cpdict:
4696 # If necessary, remove another package in the same SLOT.
4697 for cpv in self.cpdict[mycp]:
4699 other_metadata = self.cpvdict[cpv]
4701 if myslot == other_metadata.get("SLOT", None):
4702 self.cpv_remove(cpv)
4704 if mycp not in self.cpdict:
4705 self.cpdict[mycp] = []
4706 if not mycpv in self.cpdict[mycp]:
4707 self.cpdict[mycp].append(mycpv)
4709 def cpv_remove(self,mycpv):
4710 """Removes a cpv from the list of available packages."""
4712 mycp=cpv_getkey(mycpv)
4713 if self.cpvdict.has_key(mycpv):
4714 del self.cpvdict[mycpv]
4715 if not self.cpdict.has_key(mycp):
4717 while mycpv in self.cpdict[mycp]:
4718 del self.cpdict[mycp][self.cpdict[mycp].index(mycpv)]
4719 if not len(self.cpdict[mycp]):
4720 del self.cpdict[mycp]
4722 def aux_get(self, mycpv, wants):
4723 if not self.cpv_exists(mycpv):
4724 raise KeyError(mycpv)
4725 metadata = self.cpvdict[mycpv]
4727 return ["" for x in wants]
4728 return [metadata.get(x, "") for x in wants]
4730 def aux_update(self, cpv, values):
4732 self.cpvdict[cpv].update(values)
4734 class bindbapi(fakedbapi):
4735 def __init__(self, mybintree=None, settings=None):
4736 self.bintree = mybintree
4739 if settings is None:
4740 settings = globals()["settings"]
4741 self.settings = settings
4742 self._match_cache = {}
4743 # Selectively cache metadata in order to optimize dep matching.
4744 self._aux_cache_keys = set(["SLOT"])
4745 self._aux_cache = {}
def match(self, *pargs, **kwargs):
    """Lazily populate the binary package tree, then defer to fakedbapi.match."""
    tree = self.bintree
    if tree and not tree.populated:
        tree.populate()
    return fakedbapi.match(self, *pargs, **kwargs)
4752 def aux_get(self,mycpv,wants):
4753 if self.bintree and not self.bintree.populated:
4754 self.bintree.populate()
4756 if not set(wants).difference(self._aux_cache_keys):
4757 aux_cache = self._aux_cache.get(mycpv)
4758 if aux_cache is not None:
4759 return [aux_cache[x] for x in wants]
4761 mysplit = mycpv.split("/")
4763 tbz2name = mysplit[1]+".tbz2"
4764 if self.bintree and not self.bintree.isremote(mycpv):
4765 tbz2 = xpak.tbz2(self.bintree.getname(mycpv))
4766 getitem = tbz2.getfile
4768 getitem = self.bintree.remotepkgs[tbz2name].get
4772 mykeys = self._aux_cache_keys.union(wants)
4775 # myval is None if the key doesn't exist
4776 # or the tbz2 is corrupt.
4778 mydata[x] = " ".join(myval.split())
4779 if "EAPI" in mykeys:
4780 if not mydata.setdefault("EAPI", "0"):
4781 mydata["EAPI"] = "0"
4784 for x in self._aux_cache_keys:
4785 aux_cache[x] = mydata.get(x, "")
4786 self._aux_cache[mycpv] = aux_cache
4787 return [mydata.get(x, "") for x in wants]
4789 def aux_update(self, cpv, values):
4790 if not self.bintree.populated:
4791 self.bintree.populate()
4792 tbz2path = self.bintree.getname(cpv)
4793 if not os.path.exists(tbz2path):
4795 mytbz2 = xpak.tbz2(tbz2path)
4796 mydata = mytbz2.get_data()
4797 mydata.update(values)
4798 mytbz2.recompose_mem(xpak.xpak_mem(mydata))
def cp_list(self, *pargs, **kwargs):
    """Ensure the binary package tree is populated, then delegate to fakedbapi."""
    tree = self.bintree
    if not tree.populated:
        tree.populate()
    return fakedbapi.cp_list(self, *pargs, **kwargs)
4806 if not self.bintree.populated:
4807 self.bintree.populate()
4808 return fakedbapi.cpv_all(self)
4810 class vardbapi(dbapi):
4811 def __init__(self, root, categories=None, settings=None, vartree=None):
4813 #cache for category directory mtimes
4814 self.mtdircache = {}
4815 #cache for dependency checks
4816 self.matchcache = {}
4817 #cache for cp_list results
4819 self.blockers = None
4820 if settings is None:
4821 settings = globals()["settings"]
4822 self.settings = settings
4823 if categories is None:
4824 categories = settings.categories
4825 self.categories = categories[:]
4827 vartree = globals()["db"][root]["vartree"]
4828 self.vartree = vartree
4829 self._aux_cache_keys = set(["SLOT", "COUNTER", "PROVIDE", "USE",
4830 "IUSE", "DEPEND", "RDEPEND", "PDEPEND"])
4831 self._aux_cache = None
4832 self._aux_cache_version = "1"
4833 self._aux_cache_filename = os.path.join(self.root,
4834 CACHE_PATH.lstrip(os.path.sep), "vdb_metadata.pickle")
def cpv_exists(self, mykey):
    """Tell whether the package's vdb directory actually exists on disk
    (no masking is considered)."""
    pkgdir = "%s%s/%s" % (self.root, VDB_PATH, mykey)
    return os.path.exists(pkgdir)
4840 def cpv_counter(self,mycpv):
4841 "This method will grab the COUNTER. Returns a counter value."
4843 return long(self.aux_get(mycpv, ["COUNTER"])[0])
4844 except KeyError, ValueError:
4846 cdir=self.root+VDB_PATH+"/"+mycpv
4847 cpath=self.root+VDB_PATH+"/"+mycpv+"/COUNTER"
4849 # We write our new counter value to a new file that gets moved into
4850 # place to avoid filesystem corruption on XFS (unexpected reboot.)
4852 if os.path.exists(cpath):
4853 cfile=open(cpath, "r")
4855 counter=long(cfile.readline())
4857 print "portage: COUNTER for",mycpv,"was corrupted; resetting to value of 0"
4861 elif os.path.exists(cdir):
4862 mys = pkgsplit(mycpv)
4863 myl = self.match(mys[0],use_cache=0)
4867 # Only one package... Counter doesn't matter.
4868 write_atomic(cpath, "1")
4870 except SystemExit, e:
4872 except Exception, e:
4873 writemsg("!!! COUNTER file is missing for "+str(mycpv)+" in /var/db.\n",
4875 writemsg("!!! Please run /usr/lib/portage/bin/fix-db.py or\n",
4877 writemsg("!!! unmerge this exact version.\n", noiselevel=-1)
4878 writemsg("!!! %s\n" % e, noiselevel=-1)
4881 writemsg("!!! COUNTER file is missing for "+str(mycpv)+" in /var/db.\n",
4883 writemsg("!!! Please run /usr/lib/portage/bin/fix-db.py or\n",
4885 writemsg("!!! remerge the package.\n", noiselevel=-1)
4890 # update new global counter file
4891 write_atomic(cpath, str(counter))
def cpv_inject(self, mycpv):
    """Inject a real package into the on-disk database.

    Assumes mycpv is valid and does not already exist.
    """
    os.makedirs(self.root + VDB_PATH + "/" + mycpv)
    # Record a per-package counter so that emerge clean behaves correctly.
    new_counter = self.counter_tick(self.root, mycpv=mycpv)
    write_atomic(
        os.path.join(self.root, VDB_PATH, mycpv, "COUNTER"), str(new_counter))
4901 def isInjected(self,mycpv):
4902 if self.cpv_exists(mycpv):
4903 if os.path.exists(self.root+VDB_PATH+"/"+mycpv+"/INJECTED"):
4905 if not os.path.exists(self.root+VDB_PATH+"/"+mycpv+"/CONTENTS"):
4909 def move_ent(self,mylist):
4914 for cp in [origcp,newcp]:
4915 if not (isvalidatom(cp) and isjustname(cp)):
4916 raise portage_exception.InvalidPackageName(cp)
4917 origmatches=self.match(origcp,use_cache=0)
4920 for mycpv in origmatches:
4921 mycpsplit=catpkgsplit(mycpv)
4922 mynewcpv=newcp+"-"+mycpsplit[2]
4923 mynewcat=newcp.split("/")[0]
4924 if mycpsplit[3]!="r0":
4925 mynewcpv += "-"+mycpsplit[3]
4926 mycpsplit_new = catpkgsplit(mynewcpv)
4927 origpath=self.root+VDB_PATH+"/"+mycpv
4928 if not os.path.exists(origpath):
4930 writemsg_stdout("@")
4931 if not os.path.exists(self.root+VDB_PATH+"/"+mynewcat):
4932 #create the directory
4933 os.makedirs(self.root+VDB_PATH+"/"+mynewcat)
4934 newpath=self.root+VDB_PATH+"/"+mynewcpv
4935 if os.path.exists(newpath):
4936 #dest already exists; keep this puppy where it is.
4938 os.rename(origpath, newpath)
4940 # We need to rename the ebuild now.
4941 old_pf = catsplit(mycpv)[1]
4942 new_pf = catsplit(mynewcpv)[1]
4943 if new_pf != old_pf:
4945 os.rename(os.path.join(newpath, old_pf + ".ebuild"),
4946 os.path.join(newpath, new_pf + ".ebuild"))
4948 if e.errno != errno.ENOENT:
4951 write_atomic(os.path.join(newpath, "PF"), new_pf+"\n")
4953 write_atomic(os.path.join(newpath, "CATEGORY"), mynewcat+"\n")
4954 fixdbentries([mylist], newpath)
def update_ents(self, update_iter):
    """Run fixdbentries on every installed package (time consuming).

    Like fixpackages, this is meant to be driven from a helper script
    that displays a progress indicator.
    """
    dbdir = os.path.join(self.root, VDB_PATH)
    for cat_name in listdir(dbdir):
        cat_path = dbdir + "/" + cat_name
        if not os.path.isdir(cat_path):
            continue
        for pkg_name in listdir(cat_path):
            pkg_path = cat_path + "/" + pkg_name
            if os.path.isdir(pkg_path):
                fixdbentries(update_iter, pkg_path)
4969 def move_slot_ent(self,mylist):
4974 if not isvalidatom(pkg):
4975 raise portage_exception.InvalidAtom(pkg)
4977 origmatches=self.match(pkg,use_cache=0)
4981 for mycpv in origmatches:
4982 origpath=self.root+VDB_PATH+"/"+mycpv
4983 if not os.path.exists(origpath):
4986 slot=grabfile(origpath+"/SLOT");
4990 if (slot[0]!=origslot):
4993 writemsg_stdout("s")
4994 write_atomic(os.path.join(origpath, "SLOT"), newslot+"\n")
4996 def cp_list(self,mycp,use_cache=1):
4997 mysplit=mycp.split("/")
4998 if mysplit[0] == '*':
4999 mysplit[0] = mysplit[0][1:]
5001 mystat=os.stat(self.root+VDB_PATH+"/"+mysplit[0])[stat.ST_MTIME]
5004 if use_cache and self.cpcache.has_key(mycp):
5005 cpc=self.cpcache[mycp]
5008 list=listdir(self.root+VDB_PATH+"/"+mysplit[0],EmptyOnError=1)
5014 if x.startswith("."):
5017 #writemsg(red("INCOMPLETE MERGE:")+str(x[len("-MERGING-"):])+"\n")
5021 self.invalidentry(self.root+VDB_PATH+"/"+mysplit[0]+"/"+x)
5023 if len(mysplit) > 1:
5024 if ps[0]==mysplit[1]:
5025 returnme.append(mysplit[0]+"/"+x)
5027 self.cpcache[mycp]=[mystat,returnme]
5028 elif self.cpcache.has_key(mycp):
5029 del self.cpcache[mycp]
5032 def cpv_all(self,use_cache=1):
5034 basepath = self.root+VDB_PATH+"/"
5036 for x in self.categories:
5037 for y in listdir(basepath+x,EmptyOnError=1):
5038 if y.startswith("."):
5041 # -MERGING- should never be a cpv, nor should files.
5042 if os.path.isdir(basepath+subpath) and (pkgsplit(y) is not None):
5043 returnme += [subpath]
5046 def cp_all(self,use_cache=1):
5047 mylist = self.cpv_all(use_cache=use_cache)
5052 mysplit=catpkgsplit(y)
5054 self.invalidentry(self.root+VDB_PATH+"/"+y)
5056 d[mysplit[0]+"/"+mysplit[1]] = None
5059 def checkblockers(self,origdep):
5062 def match(self,origdep,use_cache=1):
5063 "caching match function"
5065 origdep, mydb=self, use_cache=use_cache, settings=self.settings)
5066 mykey=dep_getkey(mydep)
5067 mycat=mykey.split("/")[0]
5069 if self.matchcache.has_key(mycat):
5070 del self.mtdircache[mycat]
5071 del self.matchcache[mycat]
5072 mymatch = match_from_list(mydep,
5073 self.cp_list(mykey, use_cache=use_cache))
5074 myslot = portage_dep.dep_getslot(mydep)
5075 if myslot is not None:
5076 mymatch = [cpv for cpv in mymatch \
5077 if self.aux_get(cpv, ["SLOT"])[0] == myslot]
5080 curmtime=os.stat(self.root+VDB_PATH+"/"+mycat)[stat.ST_MTIME]
5081 except (IOError, OSError):
5084 if not self.matchcache.has_key(mycat) or not self.mtdircache[mycat]==curmtime:
5086 self.mtdircache[mycat]=curmtime
5087 self.matchcache[mycat]={}
5088 if not self.matchcache[mycat].has_key(mydep):
5089 mymatch=match_from_list(mydep,self.cp_list(mykey,use_cache=use_cache))
5090 myslot = portage_dep.dep_getslot(mydep)
5091 if myslot is not None:
5092 mymatch = [cpv for cpv in mymatch \
5093 if self.aux_get(cpv, ["SLOT"])[0] == myslot]
5094 self.matchcache[mycat][mydep]=mymatch
5095 return self.matchcache[mycat][mydep][:]
def findname(self, mycpv):
    """Return the path of the ebuild saved in the vdb for mycpv."""
    pkg_name = mycpv.split("/")[1]
    return "%s%s/%s/%s.ebuild" % (self.root, VDB_PATH, str(mycpv), pkg_name)
5100 def flush_cache(self):
5101 """If the current user has permission and the internal aux_get cache has
5102 been updated, save it to disk and mark it unmodified. This is called
5103 by emerge after it has loaded the full vdb for use in dependency
5104 calculations. Currently, the cache is only written if the user has
5105 superuser privileges (since that's required to obtain a lock), but all
5106 users have read access and benefit from faster metadata lookups (as
5107 long as at least part of the cache is still valid)."""
5108 if self._aux_cache is not None and \
5109 self._aux_cache["modified"] and \
5111 valid_nodes = set(self.cpv_all())
5112 for cpv in self._aux_cache["packages"].keys():
5113 if cpv not in valid_nodes:
5114 del self._aux_cache["packages"][cpv]
5115 del self._aux_cache["modified"]
5117 f = atomic_ofstream(self._aux_cache_filename)
5118 cPickle.dump(self._aux_cache, f, -1)
5120 portage_util.apply_secpass_permissions(
5121 self._aux_cache_filename, gid=portage_gid, mode=0644)
5122 except (IOError, OSError), e:
5124 self._aux_cache["modified"] = False
5126 def aux_get(self, mycpv, wants):
5127 """This automatically caches selected keys that are frequently needed
5128 by emerge for dependency calculations. The cached metadata is
5129 considered valid if the mtime of the package directory has not changed
5130 since the data was cached. The cache is stored in a pickled dict
5131 object with the following format:
5133 {version:"1", "packages":{cpv1:(mtime,{k1,v1, k2,v2, ...}), cpv2...}}
5135 If an error occurs while loading the cache pickle or the version is
5136 unrecognized, the cache will simple be recreated from scratch (it is
5137 completely disposable).
5139 if not self._aux_cache_keys.intersection(wants):
5140 return self._aux_get(mycpv, wants)
5141 if self._aux_cache is None:
5143 f = open(self._aux_cache_filename)
5144 mypickle = cPickle.Unpickler(f)
5145 mypickle.find_global = None
5146 self._aux_cache = mypickle.load()
5149 except (IOError, OSError, EOFError, cPickle.UnpicklingError):
5151 if not self._aux_cache or \
5152 not isinstance(self._aux_cache, dict) or \
5153 self._aux_cache.get("version") != self._aux_cache_version or \
5154 not self._aux_cache.get("packages"):
5155 self._aux_cache = {"version":self._aux_cache_version}
5156 self._aux_cache["packages"] = {}
5157 self._aux_cache["modified"] = False
5158 mydir = os.path.join(self.root, VDB_PATH, mycpv)
5161 mydir_stat = os.stat(mydir)
5163 if e.errno != errno.ENOENT:
5165 raise KeyError(mycpv)
5166 mydir_mtime = long(mydir_stat.st_mtime)
5167 pkg_data = self._aux_cache["packages"].get(mycpv)
5171 cache_mtime, metadata = pkg_data
5172 cache_valid = cache_mtime == mydir_mtime
5173 if cache_valid and set(metadata) != self._aux_cache_keys:
5174 # Allow self._aux_cache_keys to change without a cache version
5178 mydata.update(metadata)
5179 pull_me = set(wants).difference(self._aux_cache_keys)
5181 pull_me = self._aux_cache_keys.union(wants)
5183 # pull any needed data and cache it
5184 aux_keys = list(pull_me)
5185 for k, v in izip(aux_keys, self._aux_get(mycpv, aux_keys)):
5189 for aux_key in self._aux_cache_keys:
5190 cache_data[aux_key] = mydata[aux_key]
5191 self._aux_cache["packages"][mycpv] = (mydir_mtime, cache_data)
5192 self._aux_cache["modified"] = True
5193 return [mydata[x] for x in wants]
5195 def _aux_get(self, mycpv, wants):
5196 mydir = os.path.join(self.root, VDB_PATH, mycpv)
5197 if not os.path.isdir(mydir):
5198 raise KeyError(mycpv)
5202 myf = open(os.path.join(mydir, x), "r")
5207 myd = " ".join(myd.split())
5210 if x == "EAPI" and not myd:
5216 def aux_update(self, cpv, values):
5217 cat, pkg = cpv.split("/")
5218 mylink = dblink(cat, pkg, self.root, self.settings,
5219 treetype="vartree", vartree=self.vartree)
5220 if not mylink.exists():
5222 for k, v in values.iteritems():
5223 mylink.setfile(k, v)
def counter_tick(self, myroot, mycpv=None):
    """Increment the global package counter and return the new value."""
    return self.counter_tick_core(myroot, mycpv=mycpv, incrementing=1)
def get_counter_tick_core(self, myroot, mycpv=None):
    """Peek at the next counter value without recording it."""
    current = self.counter_tick_core(myroot, mycpv=mycpv, incrementing=0)
    return current + 1
5231 def counter_tick_core(self,myroot,incrementing=1,mycpv=None):
5232 "This method will grab the next COUNTER value and record it back to the global file. Returns new counter value."
5233 cpath=myroot+"var/cache/edb/counter"
5237 mysplit = pkgsplit(mycpv)
5238 for x in self.match(mysplit[0],use_cache=0):
5242 old_counter = long(self.aux_get(x,["COUNTER"])[0])
5243 writemsg("COUNTER '%d' '%s'\n" % (old_counter, x),1)
5244 except (ValueError, KeyError): # valueError from long(), KeyError from aux_get
5246 writemsg("!!! BAD COUNTER in '%s'\n" % (x), noiselevel=-1)
5247 if old_counter > min_counter:
5248 min_counter = old_counter
5250 # We write our new counter value to a new file that gets moved into
5251 # place to avoid filesystem corruption.
5252 find_counter = ("find '%s' -type f -name COUNTER | " + \
5253 "while read f; do echo $(<\"${f}\"); done | " + \
5254 "sort -n | tail -n1") % os.path.join(self.root, VDB_PATH)
5255 if os.path.exists(cpath):
5256 cfile=open(cpath, "r")
5258 counter=long(cfile.readline())
5259 except (ValueError,OverflowError):
5261 counter = long(commands.getoutput(find_counter).strip())
5262 writemsg("!!! COUNTER was corrupted; resetting to value of %d\n" % counter,
5265 except (ValueError,OverflowError):
5266 writemsg("!!! COUNTER data is corrupt in pkg db. The values need to be\n",
5268 writemsg("!!! corrected/normalized so that portage can operate properly.\n",
5270 writemsg("!!! A simple solution is not yet available so try #gentoo on IRC.\n")
5275 counter = long(commands.getoutput(find_counter).strip())
5276 writemsg("!!! Global counter missing. Regenerated from counter files to: %s\n" % counter,
5278 except ValueError: # Value Error for long(), probably others for commands.getoutput
5279 writemsg("!!! Initializing global counter.\n", noiselevel=-1)
5283 if counter < min_counter:
5284 counter = min_counter+1000
5287 if incrementing or changed:
5291 # update new global counter file
5292 write_atomic(cpath, str(counter))
5295 class vartree(object):
5296 "this tree will scan a var/db/pkg database located at root (passed to init)"
5297 def __init__(self, root="/", virtual=None, clone=None, categories=None,
5300 self.root = clone.root[:]
5301 self.dbapi = copy.deepcopy(clone.dbapi)
5303 self.settings = config(clone=clone.settings)
5306 if settings is None:
5307 settings = globals()["settings"]
5308 self.settings = settings # for key_expand calls
5309 if categories is None:
5310 categories = settings.categories
5311 self.dbapi = vardbapi(self.root, categories=categories,
5312 settings=settings, vartree=self)
5315 def zap(self,mycpv):
5318 def inject(self,mycpv):
5321 def get_provide(self,mycpv):
5325 mylines, myuse = self.dbapi.aux_get(mycpv, ["PROVIDE","USE"])
5327 myuse = myuse.split()
5328 mylines = flatten(portage_dep.use_reduce(portage_dep.paren_reduce(mylines), uselist=myuse))
5329 for myprovide in mylines:
5330 mys = catpkgsplit(myprovide)
5332 mys = myprovide.split("/")
5333 myprovides += [mys[0] + "/" + mys[1]]
5335 except SystemExit, e:
5337 except Exception, e:
5338 mydir = os.path.join(self.root, VDB_PATH, mycpv)
5339 writemsg("\nParse Error reading PROVIDE and USE in '%s'\n" % mydir,
5342 writemsg("Possibly Invalid: '%s'\n" % str(mylines),
5344 writemsg("Exception: %s\n\n" % str(e), noiselevel=-1)
5347 def get_all_provides(self):
5349 for node in self.getallcpv():
5350 for mykey in self.get_provide(node):
5351 if myprovides.has_key(mykey):
5352 myprovides[mykey] += [node]
5354 myprovides[mykey] = [node]
5357 def dep_bestmatch(self,mydep,use_cache=1):
5358 "compatibility method -- all matches, not just visible ones"
5359 #mymatch=best(match(dep_expand(mydep,self.dbapi),self.dbapi))
5360 mymatch = best(self.dbapi.match(
5361 dep_expand(mydep, mydb=self.dbapi, settings=self.settings),
5362 use_cache=use_cache))
5368 def dep_match(self,mydep,use_cache=1):
5369 "compatibility method -- we want to see all matches, not just visible ones"
5370 #mymatch=match(mydep,self.dbapi)
5371 mymatch=self.dbapi.match(mydep,use_cache=use_cache)
def exists_specific(self, cpv):
    """Report whether this exact cpv is recorded in the installed-package db."""
    found = self.dbapi.cpv_exists(cpv)
    return found
def getallcpv(self):
    """Temporary function, probably to be renamed --- list every
    category/package-version installed on the system."""
    return self.dbapi.cpv_all()
def getallnodes(self):
    """Return every *unmasked* cat/pkg node.

    Note: a node listed here may or may not also have masked versions
    available.
    """
    return self.dbapi.cp_all()
5390 def exists_specific_cat(self,cpv,use_cache=1):
5391 cpv = key_expand(cpv, mydb=self.dbapi, use_cache=use_cache,
5392 settings=self.settings)
5396 mylist=listdir(self.root+VDB_PATH+"/"+a[0],EmptyOnError=1)
5400 self.dbapi.invalidentry(self.root+VDB_PATH+"/"+a[0]+"/"+x)
def getebuildpath(self, fullpackage):
    """Return the vdb path of the ebuild saved for an installed cpv."""
    category, package = fullpackage.split("/")
    return "%s%s/%s/%s.ebuild" % (self.root, VDB_PATH, fullpackage, package)
5410 def getnode(self,mykey,use_cache=1):
5411 mykey = key_expand(mykey, mydb=self.dbapi, use_cache=use_cache,
5412 settings=self.settings)
5415 mysplit=mykey.split("/")
5416 mydirlist=listdir(self.root+VDB_PATH+"/"+mysplit[0],EmptyOnError=1)
5419 mypsplit=pkgsplit(x)
5421 self.dbapi.invalidentry(self.root+VDB_PATH+"/"+mysplit[0]+"/"+x)
5423 if mypsplit[0]==mysplit[1]:
5424 appendme=[mysplit[0]+"/"+x,[mysplit[0],mypsplit[0],mypsplit[1],mypsplit[2]]]
5425 returnme.append(appendme)
5429 def getslot(self,mycatpkg):
5430 "Get a slot for a catpkg; assume it exists."
5432 return self.dbapi.aux_get(mycatpkg, ["SLOT"])[0]
5436 def hasnode(self,mykey,use_cache):
5437 """Does the particular node (cat/pkg key) exist?"""
5438 mykey = key_expand(mykey, mydb=self.dbapi, use_cache=use_cache,
5439 settings=self.settings)
5440 mysplit=mykey.split("/")
5441 mydirlist=listdir(self.root+VDB_PATH+"/"+mysplit[0],EmptyOnError=1)
5443 mypsplit=pkgsplit(x)
5445 self.dbapi.invalidentry(self.root+VDB_PATH+"/"+mysplit[0]+"/"+x)
5447 if mypsplit[0]==mysplit[1]:
5455 'DEPEND', 'RDEPEND', 'SLOT', 'SRC_URI',
5456 'RESTRICT', 'HOMEPAGE', 'LICENSE', 'DESCRIPTION',
5457 'KEYWORDS', 'INHERITED', 'IUSE', 'CDEPEND',
5458 'PDEPEND', 'PROVIDE', 'EAPI',
5459 'UNUSED_01', 'UNUSED_02', 'UNUSED_03', 'UNUSED_04',
5460 'UNUSED_05', 'UNUSED_06', 'UNUSED_07',
5462 auxdbkeylen=len(auxdbkeys)
def close_portdbapi_caches():
	# Flush the metadata caches of every live portdbapi instance
	# (each instance registers itself in portdbapi_instances).
	for i in portdbapi.portdbapi_instances:
class portdbapi(dbapi):
	"""this tree will scan a portage directory located at root (passed to init)"""
	# Every constructed instance appends itself here so that
	# close_portdbapi_caches() can reach all of them.
	portdbapi_instances = []
def __init__(self,porttree_root,mysettings=None):
	"""Set up a dbapi over the portage tree rooted at porttree_root.

	porttree_root -- path to PORTDIR; overlays are taken from the
	configuration's PORTDIR_OVERLAY.
	mysettings -- an existing config instance; when omitted, the
	module-level settings object is cloned instead.
	"""
	portdbapi.portdbapi_instances.append(self)
	self.mysettings = mysettings
		self.mysettings = config(clone=settings)
	# This is strictly for use in aux_get() doebuild calls when metadata
	# is generated by the depend phase. It's safest to use a clone for
	# this purpose because doebuild makes many changes to the config
	# instance that is passed in.
	self.doebuild_settings = config(clone=self.mysettings)
	# GPG Manifest verification state; configured from FEATURES below.
	self.manifestVerifyLevel = None
	self.manifestVerifier = None
	self.manifestCache = {}    # {location: [stat, md5]}
	self.manifestMissingCache = []
	if "gpg" in self.mysettings.features:
		self.manifestVerifyLevel = portage_gpg.EXISTS
		if "strict" in self.mysettings.features:
			self.manifestVerifyLevel = portage_gpg.MARGINAL
			self.manifestVerifier = portage_gpg.FileChecker(self.mysettings["PORTAGE_GPG_DIR"], "gentoo.gpg", minimumTrust=self.manifestVerifyLevel)
		elif "severe" in self.mysettings.features:
			self.manifestVerifyLevel = portage_gpg.TRUSTED
			self.manifestVerifier = portage_gpg.FileChecker(self.mysettings["PORTAGE_GPG_DIR"], "gentoo.gpg", requireSignedRing=True, minimumTrust=self.manifestVerifyLevel)
			self.manifestVerifier = portage_gpg.FileChecker(self.mysettings["PORTAGE_GPG_DIR"], "gentoo.gpg", minimumTrust=self.manifestVerifyLevel)
	#self.root=settings["PORTDIR"]
	self.porttree_root = os.path.realpath(porttree_root)
	# Copied so later changes to the config don't affect this instance.
	self.depcachedir = self.mysettings.depcachedir[:]
	self.tmpfs = self.mysettings["PORTAGE_TMPFS"]
	# The tmpfs path is only usable if it exists and is read/writable.
	if self.tmpfs and not os.path.exists(self.tmpfs):
	if self.tmpfs and not os.access(self.tmpfs, os.W_OK):
	if self.tmpfs and not os.access(self.tmpfs, os.R_OK):
	self.eclassdb = eclass_cache.cache(self.porttree_root,
		overlays=self.mysettings["PORTDIR_OVERLAY"].split())
	self.metadbmodule = self.mysettings.load_best_module("portdbapi.metadbmodule")
	#if the portdbapi is "frozen", then we assume that we can cache everything (that no updates to it are happening)
	# Search order: PORTDIR first, then each overlay.
	self.porttrees = [self.porttree_root] + \
		[os.path.realpath(t) for t in self.mysettings["PORTDIR_OVERLAY"].split()]
	self.auxdbmodule = self.mysettings.load_best_module("portdbapi.auxdbmodule")
	self._init_cache_dirs()
	# XXX: REMOVE THIS ONCE UNUSED_0 IS YANKED FROM auxdbkeys
	filtered_auxdbkeys = filter(lambda x: not x.startswith("UNUSED_0"), auxdbkeys)
	from cache import metadata_overlay, volatile
	# metadata_overlay mode: a read-only on-disk cache layered under a
	# volatile in-memory read/write cache, one database per tree.
	for x in self.porttrees:
		db_ro = self.auxdbmodule(self.depcachedir, x,
			filtered_auxdbkeys, gid=portage_gid, readonly=True)
		self.auxdb[x] = metadata_overlay.database(
			self.depcachedir, x, filtered_auxdbkeys,
			gid=portage_gid, db_rw=volatile.database,
	for x in self.porttrees:
		# location, label, auxdbkeys
		self.auxdb[x] = self.auxdbmodule(
			self.depcachedir, x, filtered_auxdbkeys, gid=portage_gid)
	# Selectively cache metadata in order to optimize dep matching.
	self._aux_cache_keys = set(["EAPI", "KEYWORDS", "SLOT"])
	self._aux_cache = {}
def _init_cache_dirs(self):
	"""Create /var/cache/edb/dep and adjust permissions for the portage
	group."""
	for mydir in (self.depcachedir,):
		# ensure_dirs() reports whether it had to modify the directory;
		# when it did, permissions are re-applied recursively below.
		if portage_util.ensure_dirs(mydir, gid=portage_gid, mode=dirmode, mask=modemask):
			writemsg("Adjusting permissions recursively: '%s'\n" % mydir,
				raise # bail out on the first error that occurs during recursion
			if not apply_recursive_permissions(mydir,
				gid=portage_gid, dirmode=dirmode, dirmask=modemask,
				filemode=filemode, filemask=modemask, onerror=onerror):
				raise portage_exception.OperationNotPermitted(
					"Failed to apply recursive permissions for the portage group.")
	except portage_exception.PortageException, e:
def close_caches(self):
	# Sync every per-tree metadata cache out to its backing store.
	for x in self.auxdb.keys():
		self.auxdb[x].sync()
def flush_cache(self):
	# Push pending writes of every per-tree metadata cache.
	for x in self.auxdb.values():
def finddigest(self,mycpv):
	# Return the path of the digest file for mycpv's ebuild:
	# <package dir>/files/digest-<pf>.
	mydig = self.findname2(mycpv)[0]
	# Strip the ebuild filename, keeping the package directory.
	mydigs = mydig.split("/")[:-1]
	mydig = "/".join(mydigs)
	mysplit = mycpv.split("/")
	return mydig+"/files/digest-"+mysplit[-1]
def findname(self,mycpv):
	"""Convenience wrapper around findname2(): return only the
	ebuild location, discarding the overlay it was found in."""
	ebuild_path, _overlay = self.findname2(mycpv)
	return ebuild_path
def findname2(self, mycpv, mytree=None):
	"""
	Returns the location of the CPV, and what overlay it was in.
	Searches overlays first, then PORTDIR; this allows us to return the first
	matching file. As opposed to starting in portdir and then doing overlays
	second, we would have to exhaustively search the overlays until we found
	the file we wanted (docstring continuation reconstructed).
	"""
	mysplit=mycpv.split("/")
	psplit=pkgsplit(mysplit[1])
	# Copy so the search order can be rearranged without side effects.
	mytrees = self.porttrees[:]
	# NOTE: "file" shadows the builtin of the same name.
	file=x+"/"+mysplit[0]+"/"+psplit[0]+"/"+mysplit[1]+".ebuild"
	if os.access(file, os.R_OK):
def aux_get(self, mycpv, mylist, mytree=None):
	"stub code for returning auxilliary db information, such as SLOT, DEPEND, etc."
	'input: "sys-apps/foo-1.0",["SLOT","DEPEND","HOMEPAGE"]'
	'return: ["0",">=sys-libs/bar-1.0","http://www.foo.com"] or raise KeyError if error'
	# Fast path: when every requested key is one of the selectively
	# cached keys (EAPI/KEYWORDS/SLOT), answer from the in-memory cache.
	if not mytree and not set(mylist).difference(self._aux_cache_keys):
		aux_cache = self._aux_cache.get(mycpv)
		if aux_cache is not None:
			return [aux_cache[x] for x in mylist]
	global auxdbkeys,auxdbkeylen
	cat,pkg = mycpv.split("/", 1)
	myebuild, mylocation = self.findname2(mycpv, mytree)
	# No ebuild found for this cpv: report and raise KeyError.
	writemsg("!!! aux_get(): ebuild path for '%(cpv)s' not specified:\n" % {"cpv":mycpv},
	writemsg("!!! %s\n" % myebuild, noiselevel=1)
	raise KeyError(mycpv)
	myManifestPath = "/".join(myebuild.split("/")[:-1])+"/Manifest"
	# Optional GPG verification of the package Manifest, driven by the
	# gpg/strict/severe FEATURES flags.
	if "gpg" in self.mysettings.features:
		mys = portage_gpg.fileStats(myManifestPath)
		# A cached, unchanged Manifest needs no re-verification.
		if (myManifestPath in self.manifestCache) and \
		   (self.manifestCache[myManifestPath] == mys):
		elif self.manifestVerifier:
			if not self.manifestVerifier.verify(myManifestPath):
				# Verification failed the desired level.
				raise portage_exception.UntrustedSignature, "Untrusted Manifest: %(manifest)s" % {"manifest":myManifestPath}
			# "severe" also guards against the file changing mid-check.
			if ("severe" in self.mysettings.features) and \
			   (mys != portage_gpg.fileStats(myManifestPath)):
				raise portage_exception.SecurityViolation, "Manifest changed: %(manifest)s" % {"manifest":myManifestPath}
		except portage_exception.InvalidSignature, e:
			if ("strict" in self.mysettings.features) or \
			   ("severe" in self.mysettings.features):
			writemsg("!!! INVALID MANIFEST SIGNATURE DETECTED: %(manifest)s\n" % {"manifest":myManifestPath})
		except portage_exception.MissingSignature, e:
			if ("severe" in self.mysettings.features):
			if ("strict" in self.mysettings.features):
				# Warn only once per Manifest path.
				if myManifestPath not in self.manifestMissingCache:
					writemsg("!!! WARNING: Missing signature in: %(manifest)s\n" % {"manifest":myManifestPath})
					self.manifestMissingCache.insert(0,myManifestPath)
		except (OSError,portage_exception.FileNotFound), e:
			if ("strict" in self.mysettings.features) or \
			   ("severe" in self.mysettings.features):
				raise portage_exception.SecurityViolation, "Error in verification of signatures: %(errormsg)s" % {"errormsg":str(e)}
			writemsg("!!! Manifest is missing or inaccessable: %(manifest)s\n" % {"manifest":myManifestPath},
	if os.access(myebuild, os.R_OK):
		emtime=os.stat(myebuild)[stat.ST_MTIME]
	writemsg("!!! aux_get(): ebuild for '%(cpv)s' does not exist at:\n" % {"cpv":mycpv},
	writemsg("!!! %s\n" % myebuild,
	# Cache validity: the stored entry must match the ebuild's mtime
	# and all inherited eclasses must still be current.
	mydata = self.auxdb[mylocation][mycpv]
	if emtime != long(mydata.get("_mtime_", 0)):
	elif len(mydata.get("_eclasses_", [])) > 0:
		doregen = not self.eclassdb.is_eclass_data_valid(mydata["_eclasses_"])
	# Drop a stale cache entry, ignoring one that is already gone.
	try: del self.auxdb[mylocation][mycpv]
	except KeyError: pass
	writemsg("auxdb is valid: "+str(not doregen)+" "+str(pkg)+"\n", 2)
	writemsg("doregen: %s %s\n" % (doregen,mycpv), 2)
	writemsg("Generating cache entry(0) for: "+str(myebuild)+"\n",1)
	# Regenerate metadata by running the ebuild's "depend" phase with a
	# cloned config (doebuild mutates the config it is handed).
	self.doebuild_settings.reset()
	myret = doebuild(myebuild, "depend",
		self.doebuild_settings["ROOT"], self.doebuild_settings,
		dbkey=mydata, tree="porttree", mydbapi=self)
	if myret != os.EX_OK:
		raise KeyError(mycpv)
	# A missing/blank EAPI means EAPI 0.
	if "EAPI" not in mydata or not mydata["EAPI"].strip():
		mydata["EAPI"] = "0"
	if not eapi_is_supported(mydata["EAPI"]):
		# if newer version, wipe everything and negate eapi
		eapi = mydata["EAPI"]
		map(lambda x:mydata.setdefault(x, ""), auxdbkeys)
		mydata["EAPI"] = "-"+eapi
	# Resolve INHERITED into concrete eclass data for cache validation.
	if mydata.get("INHERITED", False):
		mydata["_eclasses_"] = self.eclassdb.get_eclass_data(mydata["INHERITED"].split())
		mydata["_eclasses_"] = {}
		del mydata["INHERITED"]
	mydata["_mtime_"] = emtime
	self.auxdb[mylocation][mycpv] = mydata
	if not mydata.setdefault("EAPI", "0"):
		mydata["EAPI"] = "0"
	#finally, we look at our internal cache entry and return the requested data.
	# INHERITED is reconstructed from the resolved eclass names.
	if x == "INHERITED":
		returnme.append(' '.join(mydata.get("_eclasses_", {}).keys()))
	returnme.append(mydata.get(x,""))
	# Refresh the fast-path cache with the selectively cached keys.
	for x in self._aux_cache_keys:
		aux_cache[x] = mydata.get(x, "")
	self._aux_cache[mycpv] = aux_cache
def getfetchlist(self, mypkg, useflags=None, mysettings=None, all=0, mytree=None):
	# Return [uris, files]: the SRC_URI entries enabled by the given
	# USE flags (all of them when all is true) plus their basenames.
	if mysettings is None:
		mysettings = self.mysettings
	myuris = self.aux_get(mypkg, ["SRC_URI"], mytree=mytree)[0]
	print red("getfetchlist():")+" aux_get() error reading "+mypkg+"; aborting."
	if useflags is None:
		useflags = mysettings["USE"].split()
	# Reduce the SRC_URI dep string down to the URIs active for these flags.
	myurilist = portage_dep.paren_reduce(myuris)
	myurilist = portage_dep.use_reduce(myurilist,uselist=useflags,matchall=all)
	newuris = flatten(myurilist)
	# Collect distinct basenames of the fetched files.
	mya = os.path.basename(x)
	if not mya in myfiles:
	return [newuris, myfiles]
def getfetchsizes(self,mypkg,useflags=None,debug=0):
	# returns a filename:size dictionnary of remaining downloads
	myebuild = self.findname(mypkg)
	pkgdir = os.path.dirname(myebuild)
	# Digest sizes come from the package's Manifest.
	mf = Manifest(pkgdir, self.mysettings["DISTDIR"])
	checksums = mf.getDigests()
	if debug: print "[empty/missing/bad digest]: "+mypkg
	if useflags is None:
		myuris, myfiles = self.getfetchlist(mypkg,all=1)
		myuris, myfiles = self.getfetchlist(mypkg,useflags=useflags)
	#XXX: maybe this should be improved: take partial downloads
	# into account? check checksums?
	for myfile in myfiles:
		if myfile not in checksums:
			writemsg("[bad digest]: missing %s for %s\n" % (myfile, mypkg))
		file_path = os.path.join(self.mysettings["DISTDIR"], myfile)
		mystat = os.stat(file_path)
		existing_size = mystat.st_size
		# Positive remainder: resumable partial download.
		# Negative remainder: local file is larger than the digest says,
		# so it is corrupt and the full size must be fetched again.
		remaining_size = int(checksums[myfile]["size"]) - existing_size
		if remaining_size > 0:
			# Assume the download is resumable.
			filesdict[myfile] = remaining_size
		elif remaining_size < 0:
			# The existing file is too large and therefore corrupt.
			filesdict[myfile] = int(checksums[myfile]["size"])
def fetch_check(self, mypkg, useflags=None, mysettings=None, all=False):
	# Verify already-fetched distfiles of mypkg against the Manifest
	# digests, collecting per-file failure reasons.
	useflags = mysettings["USE"].split()
	myuri, myfiles = self.getfetchlist(mypkg, useflags=useflags, mysettings=mysettings, all=all)
	myebuild = self.findname(mypkg)
	pkgdir = os.path.dirname(myebuild)
	mf = Manifest(pkgdir, self.mysettings["DISTDIR"])
	mysums = mf.getDigests()
	# A file with no digest at all cannot be verified.
	if not mysums or x not in mysums:
		reason = "digest missing"
	ok, reason = portage_checksum.verify_all(
		os.path.join(self.mysettings["DISTDIR"], x), mysums[x])
	except portage_exception.FileNotFound, e:
		reason = "File Not Found: '%s'" % str(e)
	failures[x] = reason
def getsize(self,mypkg,useflags=None,debug=0):
	# returns the total size of remaining downloads
	# we use getfetchsizes() now, so this function would be obsoleted
	filesdict=self.getfetchsizes(mypkg,useflags=useflags,debug=debug)
	if filesdict is None:
		return "[empty/missing/bad digest]"
	# Sum the per-file remaining sizes.
	for myfile in filesdict.keys():
		mysum+=filesdict[myfile]
def cpv_exists(self,mykey):
	"Tells us whether an actual ebuild exists on disk (no masking)"
	cps2=mykey.split("/")
	cps=catpkgsplit(mykey,silent=0)
	# When the key carries a version, look for that exact ebuild.
	if self.findname(cps[0]+"/"+cps2[1]):
	# cp_all() (def line not shown in this excerpt):
	"returns a list of all keys in our tree"
	# Walk every category of every configured tree, collecting cat/pkg keys.
	for x in self.mysettings.categories:
		for oroot in self.porttrees:
			for y in listdir(oroot+"/"+x,EmptyOnError=1,ignorecvs=1,dirsonly=1):
def p_list(self,mycp):
	# Collect the PF names (ebuild basenames without ".ebuild")
	# available for mycp across every configured tree.
	for oroot in self.porttrees:
		for x in listdir(oroot+"/"+mycp,EmptyOnError=1,ignorecvs=1):
			if x[-7:]==".ebuild":
def cp_list(self, mycp, use_cache=1, mytree=None):
	# Return every cpv available for category/package mycp,
	# de-duplicated across trees via the dict d.
	mysplit=mycp.split("/")
	mytrees = self.porttrees
	for oroot in mytrees:
		for x in listdir(oroot+"/"+mycp,EmptyOnError=1,ignorecvs=1):
			if x[-7:]==".ebuild":
				d[mysplit[0]+"/"+x[:-7]] = None
# freeze(): one xcache bucket per cacheable xmatch level.
for x in ["list-visible","bestmatch-visible","match-visible","match-all"]:
def xmatch(self,level,origdep,mydep=None,mykey=None,mylist=None):
	"caching match function; very trick stuff"
	#if no updates are being made to the tree, we can consult our xcache...
	# Return a copy so callers cannot mutate the cached list.
	return self.xcache[level][origdep][:]
	#this stuff only runs on first call of xmatch()
	#create mydep, mykey from origdep
	mydep = dep_expand(origdep, mydb=self, settings=self.mysettings)
	mykey=dep_getkey(mydep)
	if level=="list-visible":
		#a list of all visible packages, not called directly (just by xmatch())
		#myval=self.visible(self.cp_list(mykey))
		myval=self.gvisible(self.visible(self.cp_list(mykey)))
	elif level=="bestmatch-visible":
		#dep match -- best match of all visible packages
		myval=best(self.xmatch("match-visible",None,mydep=mydep,mykey=mykey))
		#get all visible matches (from xmatch()), then choose the best one
	elif level=="bestmatch-list":
		#dep match -- find best match but restrict search to sublist
		myval=best(match_from_list(mydep,mylist))
		#no point is calling xmatch again since we're not caching list deps
	elif level=="match-list":
		#dep match -- find all matches but restrict search to sublist (used in 2nd half of visible())
		myval=match_from_list(mydep,mylist)
	elif level=="match-visible":
		#dep match -- find all visible matches
		myval = match_from_list(mydep,
			self.xmatch("list-visible", mykey, mydep=mykey, mykey=mykey))
		#get all visible packages, then get the matching ones
	elif level=="match-all":
		#match *all* visible *and* masked packages
		myval=match_from_list(mydep,self.cp_list(mykey))
		print "ERROR: xmatch doesn't handle",level,"query!"
	# When the dep carries a slot, filter matches down to that SLOT.
	myslot = portage_dep.dep_getslot(mydep)
	if myslot is not None:
		myval = [cpv for cpv in myval \
			if self.aux_get(cpv, ["SLOT"])[0] == myslot]
	# Cache results only while frozen, and never the sublist queries.
	if self.frozen and (level not in ["match-list","bestmatch-list"]):
		self.xcache[level][mydep]=myval
		if origdep and origdep != mydep:
			self.xcache[level][origdep] = myval
def match(self,mydep,use_cache=1):
	"""Return all visible packages matching *mydep*.

	use_cache is accepted for interface compatibility; result caching
	is handled inside xmatch() itself.
	"""
	query_level = "match-visible"
	return self.xmatch(query_level, mydep)
def visible(self,mylist):
	"""two functions in one. Accepts a list of cpv values and uses the package.mask *and*
	packages file to remove invisible entries, returning remaining items. This function assumes
	that all entries in mylist have the same category and package name."""
	if (mylist is None) or (len(mylist)==0):
	#first, we mask out packages in the package.mask file
	cpv=catpkgsplit(mykey)
	print "visible(): invalid cat/pkg-v:",mykey
	mycp=cpv[0]+"/"+cpv[1]
	maskdict=self.mysettings.pmaskdict
	unmaskdict=self.mysettings.punmaskdict
	if maskdict.has_key(mycp):
		for x in maskdict[mycp]:
			mymatches=self.xmatch("match-all",x)
			if mymatches is None:
				#error in package.mask file; print warning and continue:
				print "visible(): package.mask entry \""+x+"\" is invalid, ignoring..."
			# package.unmask entries override package.mask for matching cpvs.
			if unmaskdict.has_key(mycp):
				for z in unmaskdict[mycp]:
					mymatches_unmask=self.xmatch("match-all",z)
					if y in mymatches_unmask:
	# Profile "packages" entries act as a reverse mask here.
	revmaskdict=self.mysettings.prevmaskdict
	if revmaskdict.has_key(mycp):
		for x in revmaskdict[mycp]:
			#important: only match against the still-unmasked entries...
			#notice how we pass "newlist" to the xmatch() call below....
			#Without this, ~ deps in the packages files are broken.
			mymatches=self.xmatch("match-list",x,mylist=newlist)
			if mymatches is None:
				#error in packages file; print warning and continue:
				print "emerge: visible(): profile packages entry \""+x+"\" is invalid, ignoring..."
			while pos<len(newlist):
				if newlist[pos] not in mymatches:
def gvisible(self,mylist):
	"strip out group-masked (not in current group) entries"
	accept_keywords = self.mysettings["ACCEPT_KEYWORDS"].split()
	pkgdict = self.mysettings.pkeywordsdict
	for mycpv in mylist:
		keys, eapi = self.aux_get(mycpv, ["KEYWORDS", "EAPI"])
		except portage_exception.PortageException, e:
			writemsg("!!! Error: aux_get('%s', ['KEYWORDS', 'EAPI'])\n" % \
				mycpv, noiselevel=-1)
			writemsg("!!! %s\n" % str(e), noiselevel=-1)
		mygroups=keys.split()
		# Repoman may modify this attribute as necessary.
		pgroups = accept_keywords[:]
		cp = dep_getkey(mycpv)
		# package.keywords entries matching this cpv extend its groups.
		if pkgdict.has_key(cp):
			matches = match_to_list(mycpv, pkgdict[cp].keys())
			for atom in matches:
				pgroups.extend(pkgdict[cp][atom])
		# "-kw" entries (except "-*") remove a previously accepted keyword.
		if x != "-*" and x.startswith("-"):
			inc_pgroups.remove(x[1:])
		if x not in inc_pgroups:
			inc_pgroups.append(x)
		pgroups = inc_pgroups
		writemsg("--- WARNING: Package '%s' uses '*' keyword.\n" % mycpv,
		if not match and ((hastesting and "~*" in pgroups) or (hasstable and "*" in pgroups)):
		# Only keyword-visible packages with a supported EAPI survive.
		if match and eapi_is_supported(eapi):
			newlist.append(mycpv)
class binarytree(object):
	"this tree scans for a list of all packages available in PKGDIR"
	def __init__(self, root, pkgdir, virtual=None, settings=None, clone=None):
		# XXX This isn't cloning. It's an instance of the same thing.
		self.root=clone.root
		self.pkgdir=clone.pkgdir
		self.dbapi=clone.dbapi
		self.populated=clone.populated
		self.tree=clone.tree
		self.remotepkgs=clone.remotepkgs
		self.invalids=clone.invalids
		self.settings = clone.settings
		#self.pkgdir=settings["PKGDIR"]
		self.pkgdir = normalize_path(pkgdir)
		self.dbapi = bindbapi(self, settings=settings)
		self.settings = settings
		# Maps cpv -> relative tbz2 path, either "All/<pf>.tbz2" or
		# "<cat>/<pf>.tbz2" (see getname/prevent_collision).
		self._pkg_paths = {}
def move_ent(self,mylist):
	# Apply a "move" update (origcp -> newcp) to all matching binary
	# packages: rewrite their xpak metadata and rename the tbz2 files.
	if not self.populated:
	# Both sides of the move must be plain, valid cat/pkg atoms.
	for cp in [origcp,newcp]:
		if not (isvalidatom(cp) and isjustname(cp)):
			raise portage_exception.InvalidPackageName(cp)
	origcat = origcp.split("/")[0]
	mynewcat=newcp.split("/")[0]
	origmatches=self.dbapi.cp_list(origcp)
	for mycpv in origmatches:
		mycpsplit=catpkgsplit(mycpv)
		mynewcpv=newcp+"-"+mycpsplit[2]
		# Keep an explicit -rN suffix only for nonzero revisions.
		if mycpsplit[3]!="r0":
			mynewcpv += "-"+mycpsplit[3]
		myoldpkg=mycpv.split("/")[1]
		mynewpkg=mynewcpv.split("/")[1]
		# Refuse to clobber an existing destination package.
		if (mynewpkg != myoldpkg) and os.path.exists(self.getname(mynewcpv)):
			writemsg("!!! Cannot update binary: Destination exists.\n",
			writemsg("!!! "+mycpv+" -> "+mynewcpv+"\n", noiselevel=-1)
		tbz2path=self.getname(mycpv)
		if os.path.exists(tbz2path) and not os.access(tbz2path,os.W_OK):
			writemsg("!!! Cannot update readonly binary: "+mycpv+"\n",
		#print ">>> Updating data in:",mycpv
		writemsg_stdout("%")
		# Rewrite the xpak metadata inside the tbz2.
		mytbz2 = xpak.tbz2(tbz2path)
		mydata = mytbz2.get_data()
		updated_items = update_dbentries([mylist], mydata)
		mydata.update(updated_items)
		mydata["CATEGORY"] = mynewcat+"\n"
		if mynewpkg != myoldpkg:
			mydata[mynewpkg+".ebuild"] = mydata[myoldpkg+".ebuild"]
			del mydata[myoldpkg+".ebuild"]
			mydata["PF"] = mynewpkg + "\n"
		mytbz2.recompose_mem(xpak.xpak_mem(mydata))
		# Move the on-disk file and fix up path/symlink bookkeeping.
		self.dbapi.cpv_remove(mycpv)
		del self._pkg_paths[mycpv]
		new_path = self.getname(mynewcpv)
		self._pkg_paths[mynewcpv] = os.path.join(
			*new_path.split(os.path.sep)[-2:])
		if new_path != mytbz2:
			os.makedirs(os.path.dirname(new_path))
			if e.errno != errno.EEXIST:
			os.rename(tbz2path, new_path)
			self._remove_symlink(mycpv)
			if new_path.split(os.path.sep)[-2] == "All":
				self._create_symlink(mynewcpv)
		self.dbapi.cpv_inject(mynewcpv)
def _remove_symlink(self, cpv):
	"""Remove a ${PKGDIR}/${CATEGORY}/${PF}.tbz2 symlink and also remove
	the ${PKGDIR}/${CATEGORY} directory if empty. The file will not be
	removed if os.path.islink() returns False."""
	mycat, mypkg = catsplit(cpv)
	mylink = os.path.join(self.pkgdir, mycat, mypkg + ".tbz2")
	if os.path.islink(mylink):
		"""Only remove it if it's really a link so that this method never
		removes a real package that was placed here to avoid a collision."""
	os.rmdir(os.path.join(self.pkgdir, mycat))
	# A missing or non-empty category directory is normal here.
	if e.errno not in (errno.ENOENT, errno.ENOTEMPTY):
def _create_symlink(self, cpv):
	"""Create a ${PKGDIR}/${CATEGORY}/${PF}.tbz2 symlink (and
	${PKGDIR}/${CATEGORY} directory, if necessary). Any file that may
	exist in the location of the symlink will first be removed."""
	mycat, mypkg = catsplit(cpv)
	full_path = os.path.join(self.pkgdir, mycat, mypkg + ".tbz2")
	os.makedirs(os.path.dirname(full_path))
	if e.errno != errno.EEXIST:
	# Clear anything already occupying the symlink's location.
	os.unlink(full_path)
	if e.errno != errno.ENOENT:
	# A relative target keeps the link valid if PKGDIR is relocated.
	os.symlink(os.path.join("..", "All", mypkg + ".tbz2"), full_path)
def move_slot_ent(self, mylist):
	# Apply a "slotmove" update: for every binary package matching the
	# atom, rewrite its SLOT from origslot to newslot in the xpak data.
	if not self.populated:
	if not isvalidatom(pkg):
		raise portage_exception.InvalidAtom(pkg)
	origmatches=self.dbapi.match(pkg)
	for mycpv in origmatches:
		mycpsplit=catpkgsplit(mycpv)
		myoldpkg=mycpv.split("/")[1]
		tbz2path=self.getname(mycpv)
		if os.path.exists(tbz2path) and not os.access(tbz2path,os.W_OK):
			writemsg("!!! Cannot update readonly binary: "+mycpv+"\n",
		#print ">>> Updating data in:",mycpv
		mytbz2 = xpak.tbz2(tbz2path)
		mydata = mytbz2.get_data()
		slot = mydata["SLOT"]
		# Only rewrite packages currently in the origin slot.
		if (slot[0]!=origslot):
		writemsg_stdout("S")
		mydata["SLOT"] = newslot+"\n"
		mytbz2.recompose_mem(xpak.xpak_mem(mydata))
def update_ents(self, update_iter):
	# Apply a batch of profile updates (moves/slotmoves) to the xpak
	# metadata of every binary package in the tree.
	if len(update_iter) == 0:
	if not self.populated:
	for mycpv in self.dbapi.cp_all():
		tbz2path=self.getname(mycpv)
		if os.path.exists(tbz2path) and not os.access(tbz2path,os.W_OK):
			writemsg("!!! Cannot update readonly binary: "+mycpv+"\n",
		#print ">>> Updating binary data:",mycpv
		writemsg_stdout("*")
		mytbz2 = xpak.tbz2(tbz2path)
		mydata = mytbz2.get_data()
		updated_items = update_dbentries(update_iter, mydata)
		# Rewrite the tbz2 only when something actually changed.
		if len(updated_items) > 0:
			mydata.update(updated_items)
			mytbz2.recompose_mem(xpak.xpak_mem(mydata))
def prevent_collision(self, cpv):
	"""Make sure that the file location ${PKGDIR}/All/${PF}.tbz2 is safe to
	use for a given cpv. If a collision will occur with an existing
	package from another category, the existing package will be bumped to
	${PKGDIR}/${CATEGORY}/${PF}.tbz2 so that both can coexist."""
	full_path = self.getname(cpv)
	if "All" == full_path.split(os.path.sep)[-2]:
	"""Move a colliding package if it exists. Code below this point only
	executes in rare cases."""
	mycat, mypkg = catsplit(cpv)
	myfile = mypkg + ".tbz2"
	mypath = os.path.join("All", myfile)
	dest_path = os.path.join(self.pkgdir, mypath)
	if os.path.exists(dest_path):
		# For invalid packages, other_cat could be None.
		other_cat = xpak.tbz2(dest_path).getfile("CATEGORY")
			other_cat = other_cat.strip()
			self._move_from_all(other_cat + "/" + mypkg)
	"""The file may or may not exist. Move it if necessary and update
	internal state for future calls to getname()."""
	self._move_to_all(cpv)
def _move_to_all(self, cpv):
	"""If the file exists, move it. Whether or not it exists, update state
	for future getname() calls."""
	mycat , mypkg = catsplit(cpv)
	myfile = mypkg + ".tbz2"
	src_path = os.path.join(self.pkgdir, mycat, myfile)
	mystat = os.lstat(src_path)
	# Move only plain files; symlinks stay where they are.
	if mystat and stat.S_ISREG(mystat.st_mode):
		os.makedirs(os.path.join(self.pkgdir, "All"))
		if e.errno != errno.EEXIST:
		os.rename(src_path, os.path.join(self.pkgdir, "All", myfile))
		# Leave a category-dir symlink behind pointing at All/.
		self._create_symlink(cpv)
		self._pkg_paths[cpv] = os.path.join("All", myfile)
def _move_from_all(self, cpv):
	"""Move a package from ${PKGDIR}/All/${PF}.tbz2 to
	${PKGDIR}/${CATEGORY}/${PF}.tbz2 and update state from getname calls."""
	self._remove_symlink(cpv)
	mycat , mypkg = catsplit(cpv)
	myfile = mypkg + ".tbz2"
	mypath = os.path.join(mycat, myfile)
	dest_path = os.path.join(self.pkgdir, mypath)
	os.makedirs(os.path.dirname(dest_path))
	if e.errno != errno.EEXIST:
	os.rename(os.path.join(self.pkgdir, "All", myfile), dest_path)
	# Record the new relative location for future getname() calls.
	self._pkg_paths[cpv] = mypath
def populate(self, getbinpkgs=0,getbinpkgsonly=0):
	"populates the binarytree"
	if (not os.path.isdir(self.pkgdir) and not getbinpkgs):
	if (not os.path.isdir(self.pkgdir+"/All") and not getbinpkgs):
	# Pass 1: index the local ${PKGDIR}. "All" is scanned first so
	# its entries win when a cpv appears in both All/ and a category dir.
	if not getbinpkgsonly:
		dirs = listdir(self.pkgdir, dirsonly=True, EmptyOnError=True)
		dirs.insert(0, "All")
		for myfile in listdir(os.path.join(self.pkgdir, mydir)):
			if not myfile.endswith(".tbz2"):
			mypath = os.path.join(mydir, myfile)
			full_path = os.path.join(self.pkgdir, mypath)
			# Symlinks are the All/-redirects this class creates; skip them.
			if os.path.islink(full_path):
			mytbz2 = xpak.tbz2(full_path)
			# For invalid packages, mycat could be None.
			mycat = mytbz2.getfile("CATEGORY")
			#old-style or corrupt package
			writemsg("!!! Invalid binary package: '%s'\n" % full_path,
			writemsg("!!! This binary package is not " + \
				"recoverable and should be deleted.\n",
			self.invalids.append(mypkg)
			mycat = mycat.strip()
			# Consistency checks: recorded CATEGORY/PF must match location.
			if mycat != mydir and mydir != "All":
			if mypkg != mytbz2.getfile("PF").strip():
			mycpv = mycat + "/" + mypkg
			if mycpv in pkg_paths:
				# All is first, so it's preferred.
			pkg_paths[mycpv] = mypath
			self.dbapi.cpv_inject(mycpv)
		self._pkg_paths = pkg_paths
	# Pass 2: optionally merge the remote binhost package index.
	if getbinpkgs and not self.settings["PORTAGE_BINHOST"]:
		writemsg(red("!!! PORTAGE_BINHOST unset, but use is requested.\n"),
	self.settings["PORTAGE_BINHOST"] and not self.remotepkgs:
		chunk_size = long(self.settings["PORTAGE_BINHOST_CHUNKSIZE"])
		except (ValueError, KeyError):
		writemsg(green("Fetching binary packages info...\n"))
		self.remotepkgs = getbinpkg.dir_get_metadata(
			self.settings["PORTAGE_BINHOST"], chunk_size=chunk_size)
		writemsg(green(" -- DONE!\n\n"))
		for mypkg in self.remotepkgs.keys():
			if not self.remotepkgs[mypkg].has_key("CATEGORY"):
				#old-style or corrupt package
				writemsg("!!! Invalid remote binary package: "+mypkg+"\n",
				del self.remotepkgs[mypkg]
			mycat=self.remotepkgs[mypkg]["CATEGORY"].strip()
			fullpkg=mycat+"/"+mypkg[:-5]
			mykey=dep_getkey(fullpkg)
			# invalid tbz2's can hurt things.
			#print "cpv_inject("+str(fullpkg)+")"
			self.dbapi.cpv_inject(fullpkg)
			#print " -- Injected"
			except SystemExit, e:
				writemsg("!!! Failed to inject remote binary package:"+str(fullpkg)+"\n",
				del self.remotepkgs[mypkg]
def inject(self,cpv):
	"""Register *cpv* with the underlying bindbapi and return the
	result of the injection."""
	injected = self.dbapi.cpv_inject(cpv)
	return injected
def exists_specific(self,cpv):
	# True when an exact =cpv match exists in the (populated) tree.
	if not self.populated:
	return self.dbapi.match(
		dep_expand("="+cpv, mydb=self.dbapi, settings=self.settings))
def dep_bestmatch(self,mydep):
	"compatibility method -- all matches, not just visible ones"
	if not self.populated:
	writemsg("mydep: %s\n" % mydep, 1)
	mydep = dep_expand(mydep, mydb=self.dbapi, settings=self.settings)
	writemsg("mydep: %s\n" % mydep, 1)
	mykey=dep_getkey(mydep)
	writemsg("mykey: %s\n" % mykey, 1)
	# best() picks the highest version among all matches (masked or not).
	mymatch=best(match_from_list(mydep,self.dbapi.cp_list(mykey)))
	writemsg("mymatch: %s\n" % mymatch, 1)
def getname(self,pkgname):
	"""Returns a file location for this package. The default location is
	${PKGDIR}/All/${PF}.tbz2, but will be ${PKGDIR}/${CATEGORY}/${PF}.tbz2
	in the rare event of a collision. The prevent_collision() method can
	be called to ensure that ${PKGDIR}/All/${PF}.tbz2 is available for a
	specific cpv."""
	if not self.populated:
	# Fast path: a previously resolved location.
	mypath = self._pkg_paths.get(mycpv, None)
	return os.path.join(self.pkgdir, mypath)
	mycat, mypkg = catsplit(mycpv)
	mypath = os.path.join("All", mypkg + ".tbz2")
	# Fall back to the category path when another cpv already owns All/.
	if mypath in self._pkg_paths.values():
		mypath = os.path.join(mycat, mypkg + ".tbz2")
	self._pkg_paths[mycpv] = mypath # cache for future lookups
	return os.path.join(self.pkgdir, mypath)
def isremote(self,pkgname):
	"Returns true if the package is kept remotely."
	mysplit=pkgname.split("/")
	# Remote = listed in the binhost index and not already on disk.
	remote = (not os.path.exists(self.getname(pkgname))) and self.remotepkgs.has_key(mysplit[1]+".tbz2")
def get_use(self,pkgname):
	"""Return the USE flag list recorded for the binary package.

	Remote packages are answered from the fetched binhost metadata;
	local packages are read from the tbz2's xpak data.
	"""
	pf = pkgname.split("/")[1]
	if self.isremote(pkgname):
		return self.remotepkgs[pf+".tbz2"]["USE"][:].split()
	local_tbz2 = xpak.tbz2(self.getname(pkgname))
	return local_tbz2.getfile("USE").split()
def gettbz2(self,pkgname):
	"fetches the package from a remote site, if necessary."
	print "Fetching '"+str(pkgname)+"'"
	mysplit = pkgname.split("/")
	tbz2name = mysplit[1]+".tbz2"
	if not self.isremote(pkgname):
		if (tbz2name not in self.invalids):
		writemsg("Resuming download of this tbz2, but it is possible that it is corrupt.\n",
	mydest = self.pkgdir+"/All/"
	os.makedirs(mydest, 0775)
	except (OSError, IOError):
	# Delegate the actual transfer (with resume support) to getbinpkg.
	return getbinpkg.file_get(
		self.settings["PORTAGE_BINHOST"] + "/" + tbz2name,
		mydest, fcmd=self.settings["RESUMECOMMAND"])
def getslot(self,mycatpkg):
	"Get a slot for a catpkg; assume it exists."
	# SLOT comes from the binary package's recorded metadata.
	myslot=self.dbapi.aux_get(mycatpkg,["SLOT"])[0]
	except SystemExit, e:
	except Exception, e:
# Body of the dblink class docstring (the class statement itself is
# not shown in this excerpt):
This class provides an interface to the installed package database
At present this is implemented as a text backend in /var/db/pkg.
def __init__(self, cat, pkg, myroot, mysettings, treetype=None,
	"""
	Creates a DBlink object for a given CPV.
	The given CPV may not be present in the database already.

	@param cat: Category
	@param pkg: Package (PV)
	@param myroot: Typically ${ROOT}
	@type myroot: String (Path)
	@param mysettings: Typically portage.config
	@type mysettings: An instance of portage.config
	@param treetype: one of ['porttree','bintree','vartree']
	@type treetype: String
	@param vartree: an instance of vartree corresponding to myroot.
	@type vartree: vartree
	"""
	self.mycpv = self.cat+"/"+self.pkg
	self.mysplit = pkgsplit(self.mycpv)
	self.treetype = treetype
	# Default to the global vartree for this root when none was given.
	vartree = db[myroot]["vartree"]
	self.vartree = vartree
	# VDB layout: <root>/<VDB_PATH>/<cat>/<pkg>, plus a -MERGING-
	# staging directory used while a merge is in progress.
	self.dbroot = normalize_path(os.path.join(myroot, VDB_PATH))
	self.dbcatdir = self.dbroot+"/"+cat
	self.dbpkgdir = self.dbcatdir+"/"+pkg
	self.dbtmpdir = self.dbcatdir+"/-MERGING-"+pkg
	self.dbdir = self.dbpkgdir
	self._lock_vdb = None
	self.settings = mysettings
	if self.settings==1:
	# CONFIG_PROTECT handling used during merge/unmerge decisions.
	protect_obj = portage_util.ConfigProtect(myroot,
		mysettings.get("CONFIG_PROTECT","").split(),
		mysettings.get("CONFIG_PROTECT_MASK","").split())
	self.updateprotect = protect_obj.updateprotect
	self._config_protect = protect_obj
	self._installed_instance = None
	self.contentscache=[]
	self._contents_inodes = None
# NOTE(review): the `def lockdb(self):` / `def unlockdb(self):` lines and
# their guard conditions are missing from this extract; the visible lines
# are the bodies. lockdb() appears non-reentrant: it raises if the lock is
# already held, then takes a directory lock on the vdb root.
6579 raise AssertionError("Lock already held.")
6580 # At least the parent needs to exist for the lock file.
6581 portage_util.ensure_dirs(self.dbroot)
6582 self._lock_vdb = portage_locks.lockdir(self.dbroot)
# unlockdb() body: release the directory lock and mark it as not held.
6586 portage_locks.unlockdir(self._lock_vdb)
6587 self._lock_vdb = None
# NOTE(review): def lines for getpath() and exists() are missing from this
# extract; only their docstrings / bodies are visible.
6590 "return path to location of db information (for >>> informational display)"
# exists(): simple boolean check on the vdb entry directory.
6594 "does the db entry exist? boolean."
6595 return os.path.exists(self.dbdir)
# NOTE(review): the `def create(self)` line is missing from this extract.
# The method is deliberately disabled: it raises unconditionally, so the
# makedirs code below it is dead/unreachable by design.
6598 "create the skeleton db directory structure. No contents, virtuals, provides or anything. Also will create /var/db/pkg if necessary."
6600 This function should never get called (there is no reason to use it).
6602 # XXXXX Delete this eventually
6603 raise Exception, "This is bad. Don't use it."
6604 if not os.path.exists(self.dbdir):
6605 os.makedirs(self.dbdir)
# NOTE(review): the `def delete(self)` line and the try/except framing are
# missing from this extract. Visible logic: remove every file in the vdb
# entry dir, then the dir itself; on failure warn that a stale directory
# keeps the package registered as installed.
6609 Remove this entry from the database
6611 if not os.path.exists(self.dbdir):
# listdir() here is portage's cached listdir helper, not os.listdir.
6614 for x in listdir(self.dbdir):
6615 os.unlink(self.dbdir+"/"+x)
6616 os.rmdir(self.dbdir)
6618 print "!!! Unable to remove db entry for this package."
6619 print "!!! It is possible that a directory is in this one. Portage will still"
6620 print "!!! register this package as installed as long as this directory exists."
6621 print "!!! You may delete this directory with 'rm -Rf "+self.dbdir+"'"
def clearcontents(self):
	"""
	For a given db entry (self), erase the CONTENTS values.
	"""
	contents_file = self.dbdir + "/CONTENTS"
	if os.path.exists(contents_file):
		os.unlink(contents_file)
# NOTE(review): line-sampled extract — the try:, the `pos` counter, the
# "obj"/"sym" branch headers, the splitter computation and the final
# `return pkgfiles` are not visible here; do not assume contiguity.
6633 def getcontents(self):
6635 Get the installed files of a given package (aka what that package installed)
# No CONTENTS file -> nothing recorded (early-return branch body missing).
6637 if not os.path.exists(self.dbdir+"/CONTENTS"):
# Memoized: parse once per dblink instance.
6639 if self.contentscache != []:
6640 return self.contentscache
6642 myc=open(self.dbdir+"/CONTENTS","r")
6643 mylines=myc.readlines()
# Each CONTENTS line is "type path [extra fields...]"; the path is
# re-rooted under self.myroot so maintenance works on other partitions.
6646 for line in mylines:
6647 mydat = line.split()
6648 # we do this so we can remove from non-root filesystems
6649 # (use the ROOT var to allow maintenance on other partitions)
6651 mydat[1] = normalize_path(os.path.join(
6652 self.myroot, mydat[1].lstrip(os.path.sep)))
# "obj" entries store [type, mtime, md5sum]; the path may contain spaces,
# hence the join over the middle fields.
6654 #format: type, mtime, md5sum
6655 pkgfiles[" ".join(mydat[1:-2])]=[mydat[0], mydat[-1], mydat[-2]]
6656 elif mydat[0]=="dir":
6658 pkgfiles[" ".join(mydat[1:])]=[mydat[0] ]
6659 elif mydat[0]=="sym":
6660 #format: type, mtime, dest
# Repair pre-existing corrupt symlink lines (trailing ')' artifact).
6662 if (x >= 13) and (mydat[-1][-1]==')'): # Old/Broken symlink entry
6663 mydat = mydat[:-10]+[mydat[-10:][stat.ST_MTIME][:-1]]
6664 writemsg("FIXED SYMLINK LINE: %s\n" % mydat, 1)
# `splitter` (computed in lines missing from this view) locates the "->".
6674 pkgfiles[" ".join(mydat[1:splitter])]=[mydat[0], mydat[-1], " ".join(mydat[(splitter+1):-1])]
6675 elif mydat[0]=="dev":
6677 pkgfiles[" ".join(mydat[1:])]=[mydat[0] ]
6678 elif mydat[0]=="fif":
6680 pkgfiles[" ".join(mydat[1:])]=[mydat[0]]
# Malformed lines are reported but do not abort the parse.
6683 except (KeyError,IndexError):
6684 print "portage: CONTENTS line",pos,"corrupt!"
6686 self.contentscache=pkgfiles
# NOTE(review): line-sampled extract — try/except/finally nesting and
# several branch headers are missing; the comments below describe only the
# visible lines.
6689 def unmerge(self, pkgfiles=None, trimworld=1, cleanup=1,
6690 ldpath_mtimes=None):
6693 Unmerges a given package (CPV)
6698 @param pkgfiles: files to unmerge (generally self.getcontents() )
6699 @type pkgfiles: Dictionary
6700 @param trimworld: Remove CPV from world file if True, not if False
6701 @type trimworld: Boolean
6702 @param cleanup: cleanup to pass to doebuild (see doebuild)
6703 @type cleanup: Boolean
6704 @param ldpath_mtimes: mtimes to pass to env_update (see env_update)
6705 @type ldpath_mtimes: Dictionary
6708 1. os.EX_OK if everything went well.
6709 2. return code of the failed phase (for prerm, postrm, cleanrm)
6712 The caller must ensure that lockdb() and unlockdb() are called
6713 before and after this method.
# Default file set: the recorded CONTENTS of the installed instance.
6716 contents = self.getcontents()
6717 # Now, don't assume that the name of the ebuild is the same as the
6718 # name of the dir; the package may have been moved.
6720 mystuff = listdir(self.dbdir, EmptyOnError=1)
6722 if x.endswith(".ebuild"):
6723 myebuildpath = os.path.join(self.dbdir, self.pkg + ".ebuild")
6724 if x[:-7] != self.pkg:
6725 # Clean up after vardbapi.move_ent() breakage in
6726 # portage versions before 2.1.2
6727 os.rename(os.path.join(self.dbdir, x), myebuildpath)
6728 write_atomic(os.path.join(self.dbdir, "PF"), self.pkg+"\n")
6731 self.settings.load_infodir(self.dbdir)
# Prepare the ebuild environment for the prerm phase and make sure the
# build directory hierarchy exists (070 is an octal Python 2 mode).
6733 doebuild_environment(myebuildpath, "prerm", self.myroot,
6734 self.settings, 0, 0, self.vartree.dbapi)
6735 catdir = os.path.dirname(self.settings["PORTAGE_BUILDDIR"])
6736 portage_util.ensure_dirs(os.path.dirname(catdir),
6737 uid=portage_uid, gid=portage_gid, mode=070, mask=0)
6738 builddir_lock = None
# Lock ordering: category dir first, then PORTAGE_BUILDDIR, then release
# the category lock (the try/finally framing is not visible here).
6742 catdir_lock = portage_locks.lockdir(catdir)
6743 portage_util.ensure_dirs(catdir,
6744 uid=portage_uid, gid=portage_gid,
6746 builddir_lock = portage_locks.lockdir(
6747 self.settings["PORTAGE_BUILDDIR"])
6749 portage_locks.unlockdir(catdir_lock)
6752 # Eventually, we'd like to pass in the saved ebuild env here...
6753 retval = doebuild(myebuildpath, "prerm", self.myroot,
6754 self.settings, cleanup=cleanup, use_cache=0,
6755 mydbapi=self.vartree.dbapi, tree="vartree",
6756 vartree=self.vartree)
6757 # XXX: Decide how to handle failures here.
6758 if retval != os.EX_OK:
6759 writemsg("!!! FAILED prerm: %s\n" % retval, noiselevel=-1)
# Remove package files from the live filesystem and drop the vdb entry.
6762 self._unmerge_pkgfiles(pkgfiles)
6765 retval = doebuild(myebuildpath, "postrm", self.myroot,
6766 self.settings, use_cache=0, tree="vartree",
6767 mydbapi=self.vartree.dbapi, vartree=self.vartree)
6769 # process logs created during pre/postrm
6770 elog_process(self.mycpv, self.settings)
6772 # XXX: Decide how to handle failures here.
6773 if retval != os.EX_OK:
6774 writemsg("!!! FAILED postrm: %s\n" % retval, noiselevel=-1)
6776 doebuild(myebuildpath, "cleanrm", self.myroot, self.settings,
6777 tree="vartree", mydbapi=self.vartree.dbapi,
6778 vartree=self.vartree)
6782 portage_locks.unlockdir(builddir_lock)
6784 if myebuildpath and not catdir_lock:
6785 # Lock catdir for removal if empty.
6786 catdir_lock = portage_locks.lockdir(catdir)
# Removing a non-empty category dir is expected — only ENOTEMPTY is ignored.
6792 if e.errno != errno.ENOTEMPTY:
6795 portage_locks.unlockdir(catdir_lock)
# Refresh env.d / ldconfig state now that files have been removed.
6796 env_update(target_root=self.myroot, prev_mtimes=ldpath_mtimes,
# NOTE(review): line-sampled extract — the key sort (symlinks before
# directories), several guard headers and the actual unlink calls are
# missing from this view; only visible lines are annotated.
6800 def _unmerge_pkgfiles(self, pkgfiles):
6803 Unmerges the contents of a package from the liveFS
6804 Removes the VDB entry for self
6806 @param pkgfiles: typically self.getcontents()
6807 @type pkgfiles: Dictionary { filename: [ 'type', '?', 'md5sum' ] }
6814 writemsg_stdout("No package files given... Grabbing a set.\n")
6815 pkgfiles=self.getcontents()
6818 mykeys=pkgfiles.keys()
6822 #process symlinks second-to-last, directories last.
6824 modprotect="/lib/modules/"
6825 for objkey in mykeys:
6826 obj = normalize_path(objkey)
# stat() follows symlinks; lstat() does not. Both may legitimately fail
# (file already gone), hence the except clauses around each.
6831 statobj = os.stat(obj)
6836 lstatobj = os.lstat(obj)
6837 except (OSError, AttributeError):
6839 islink = lstatobj is not None and stat.S_ISLNK(lstatobj.st_mode)
6842 #we skip this if we're dealing with a symlink
6843 #because os.stat() will operate on the
6844 #link target rather than the link itself.
6845 writemsg_stdout("--- !found "+str(pkgfiles[objkey][0])+ " %s\n" % obj)
6847 # next line includes a tweak to protect modules from being unmerged,
6848 # but we don't protect modules from being overwritten if they are
6849 # upgraded. We effectively only want one half of the config protection
6850 # functionality for /lib/modules. For portage-ng both capabilities
6851 # should be able to be independently specified.
6852 if obj.startswith(modprotect):
6853 writemsg_stdout("--- cfgpro %s %s\n" % (pkgfiles[objkey][0], obj))
# Recorded mtime must match the on-disk mtime for file-like entries,
# otherwise the object was modified after install and is kept.
6856 lmtime=str(lstatobj[stat.ST_MTIME])
6857 if (pkgfiles[objkey][0] not in ("dir","fif","dev")) and (lmtime != pkgfiles[objkey][1]):
6858 writemsg_stdout("--- !mtime %s %s\n" % (pkgfiles[objkey][0], obj))
6861 if pkgfiles[objkey][0]=="dir":
6862 if statobj is None or not stat.S_ISDIR(statobj.st_mode):
6863 writemsg_stdout("--- !dir %s %s\n" % ("dir", obj))
6866 elif pkgfiles[objkey][0]=="sym":
6868 writemsg_stdout("--- !sym %s %s\n" % ("sym", obj))
6872 writemsg_stdout("<<< %s %s\n" % ("sym",obj))
6873 except (OSError,IOError),e:
6874 writemsg_stdout("!!! %s %s\n" % ("sym",obj))
6875 elif pkgfiles[objkey][0]=="obj":
6876 if statobj is None or not stat.S_ISREG(statobj.st_mode):
6877 writemsg_stdout("--- !obj %s %s\n" % ("obj", obj))
# Regular files are only removed when their md5 still matches the
# recorded one (prelink-aware checksum).
6881 mymd5 = portage_checksum.perform_md5(obj, calc_prelink=1)
6882 except portage_exception.FileNotFound, e:
6883 # the file has disappeared between now and our stat call
6884 writemsg_stdout("--- !obj %s %s\n" % ("obj", obj))
6887 # string.lower is needed because db entries used to be in upper-case. The
6888 # string.lower allows for backwards compatibility.
6889 if mymd5 != pkgfiles[objkey][2].lower():
6890 writemsg_stdout("--- !md5 %s %s\n" % ("obj", obj))
6894 except (OSError,IOError),e:
6896 writemsg_stdout("<<< %s %s\n" % ("obj",obj))
6897 elif pkgfiles[objkey][0]=="fif":
6898 if not stat.S_ISFIFO(lstatobj[stat.ST_MODE]):
6899 writemsg_stdout("--- !fif %s %s\n" % ("fif", obj))
6901 writemsg_stdout("--- %s %s\n" % ("fif",obj))
# Device nodes are never removed, only reported.
6902 elif pkgfiles[objkey][0]=="dev":
6903 writemsg_stdout("--- %s %s\n" % ("dev",obj))
6911 writemsg_stdout("<<< %s %s\n" % ("dir",obj))
6912 except (OSError, IOError):
6913 writemsg_stdout("--- !empty dir %s\n" % obj)
6915 #remove self from vartree database so that our own virtual gets zapped if we're the last node
6916 self.vartree.zap(self.mycpv)
# NOTE(review): line-sampled extract — the early returns, the loop that
# populates the inode set and the final return are missing from this view.
6918 def isowner(self,filename,destroot):
6920 Check if filename is a new file or belongs to this package
6921 (for this or a previous version)
6929 1. True if this package owns the file.
6930 2. False if this package does not own the file.
# Re-root the candidate path under destroot before comparing.
6932 destfile = normalize_path(
6933 os.path.join(destroot, filename.lstrip(os.path.sep)))
6935 mylstat = os.lstat(destfile)
6936 except (OSError, IOError):
# Fast path: exact path match against recorded CONTENTS.
6939 pkgfiles = self.getcontents()
6940 if pkgfiles and filename in pkgfiles:
# Slow path: build a (st_dev, st_ino) set once, so paths that differ only
# through symlinked directories still match by inode identity.
6943 if self._contents_inodes is None:
6944 self._contents_inodes = set()
6948 self._contents_inodes.add((lstat.st_dev, lstat.st_ino))
6951 if (mylstat.st_dev, mylstat.st_ino) in self._contents_inodes:
# NOTE(review): line-sampled extract — the early-return bodies, the try:
# around os.utime and the final return are missing from this view.
6956 def isprotected(self, filename):
6957 """In cases where an installed package in the same slot owns a
6958 protected file that will be merged, bump the mtime on the installed
6959 file in order to ensure that it isn't unmerged."""
# Not under CONFIG_PROTECT -> not protected (branch body missing here).
6960 if not self._config_protect.isprotected(filename):
# _installed_instance is set by treewalk() when a same-slot version is
# already installed; without one there is no ownership to preserve.
6962 if self._installed_instance is None:
6964 mydata = self._installed_instance.getcontents().get(filename, None)
6968 # Bump the mtime in order to ensure that the old config file doesn't
6969 # get unmerged. The user will have an opportunity to merge the new
6970 # config with the old one.
6972 os.utime(filename, None)
# Only ENOENT is tolerated; other errors presumably propagate.
6974 if e.errno != errno.ENOENT:
6977 # The file has disappeared, so it's not protected.
# NOTE(review): line-sampled extract — many interior lines (try/finally
# framing, loop headers, early returns, list initializations such as
# otherversions/mypkglist/collisions/secondhand) are missing from this
# view; do not assume the visible lines are contiguous.
6981 def treewalk(self, srcroot, destroot, inforoot, myebuild, cleanup=0,
6982 mydbapi=None, prev_mtimes=None):
6985 This function does the following:
6987 Collision Protection.
6988 calls doebuild(mydo=pkg_preinst)
6989 Merges the package to the livefs
6990 unmerges old version (if required)
6991 calls doebuild(mydo=pkg_postinst)
6994 @param srcroot: Typically this is ${D}
6995 @type srcroot: String (Path)
6996 @param destroot: Path to merge to (usually ${ROOT})
6997 @type destroot: String (Path)
6998 @param inforoot: root of the vardb entry ?
6999 @type inforoot: String (Path)
7000 @param myebuild: path to the ebuild that we are processing
7001 @type myebuild: String (Path)
7002 @param mydbapi: dbapi which is handed to doebuild.
7003 @type mydbapi: portdbapi instance
7004 @param prev_mtimes: { Filename:mtime } mapping for env_update
7005 @type prev_mtimes: Dictionary
7011 secondhand is a list of symlinks that have been skipped due to their target
7012 not existing; we will merge these symlinks at a later time.
# Sanity: the image directory (${D}) must exist.
7014 if not os.path.isdir(srcroot):
7015 writemsg("!!! Directory Not Found: D='%s'\n" % srcroot,
7019 if not os.path.exists(self.dbcatdir):
7020 os.makedirs(self.dbcatdir)
# Collect the other installed versions of this package.
7023 for v in self.vartree.dbapi.cp_list(self.mysplit[0]):
7024 otherversions.append(v.split("/")[1])
7026 slot_matches = self.vartree.dbapi.match(
7027 "%s:%s" % (self.mysplit[0], self.settings["SLOT"]))
7029 # Used by self.isprotected().
7030 self._installed_instance = dblink(self.cat,
7031 catsplit(slot_matches[0])[1], destroot, self.settings,
7032 vartree=self.vartree)
7034 # check for package collisions
7035 if "collision-protect" in self.settings.features:
7036 collision_ignore = set([normalize_path(myignore) for myignore in \
7037 self.settings.get("COLLISION_IGNORE", "").split()])
7038 myfilelist = listdir(srcroot, recursive=1, filesonly=1, followSymlinks=False)
7040 # the linkcheck only works if we are in srcroot
7043 mysymlinks = filter(os.path.islink, listdir(srcroot, recursive=1, filesonly=0, followSymlinks=False))
7044 myfilelist.extend(mysymlinks)
7045 mysymlinked_directories = [s + os.path.sep for s in mysymlinks]
7050 starttime=time.time()
7056 if self.pkg in otherversions:
7057 otherversions.remove(self.pkg) # we already checked this package
# Only same-slot installed versions may legitimately own the same files.
7059 myslot = self.settings["SLOT"]
7060 for v in otherversions:
7061 # only allow versions with same slot to overwrite files
7062 if myslot == self.vartree.dbapi.aux_get("/".join((self.cat, v)), ["SLOT"])[0]:
7064 dblink(self.cat, v, destroot, self.settings,
7065 vartree=self.vartree))
7069 print green("*")+" checking "+str(len(myfilelist))+" files for package collisions"
7070 for f in myfilelist:
7072 # listdir isn't intelligent enough to exclude symlinked dirs,
7073 # so we have to do it ourself
7074 for s in mysymlinked_directories:
7082 print str(i)+" files checked ..."
# A file is fine if this package (or a same-slot version) owns it or it
# is config-protected; otherwise it is a collision.
7086 for ver in [self]+mypkglist:
7087 if (ver.isowner(f, destroot) or ver.isprotected(f)):
7091 collisions.append(f)
7092 print "existing file "+f+" is not owned by this package"
7094 if collision_ignore:
7095 if f in collision_ignore:
7098 for myignore in collision_ignore:
7099 if f.startswith(myignore + os.path.sep):
7102 #print green("*")+" spent "+str(time.time()-starttime)+" seconds checking for file collisions"
7104 print red("*")+" This package is blocked because it wants to overwrite"
7105 print red("*")+" files belonging to other packages (see messages above)."
7106 print red("*")+" If you have no clue what this is all about report it "
7107 print red("*")+" as a bug for this package on http://bugs.gentoo.org"
7109 print red("package "+self.cat+"/"+self.pkg+" NOT merged")
# Diagnostic pass: scan every installed package to name the real owners.
7112 print "Searching all installed packages for file collisions..."
7113 print "Press Ctrl-C to Stop"
7115 """ Note: The isowner calls result in a stat call for *every*
7116 single installed file, since the inode numbers are used to work
7117 around the problem of ambiguous paths caused by symlinked files
7118 and/or directories. Though it is slow, it is as accurate as
7121 for cpv in self.vartree.dbapi.cpv_all():
7122 cat, pkg = catsplit(cpv)
7123 mylink = dblink(cat, pkg, destroot, self.settings,
7124 vartree=self.vartree)
7126 for f in collisions:
7127 if mylink.isowner(f, destroot):
7128 mycollisions.append(f)
7131 print " * %s:" % cpv
7133 for f in mycollisions:
7135 os.path.join(destroot, f.lstrip(os.path.sep))
7138 print "None of the installed packages claim the above file(s)."
7146 if os.stat(srcroot).st_dev == os.stat(destroot).st_dev:
7147 """ The merge process may move files out of the image directory,
7148 which causes invalidation of the .installed flag."""
7150 os.unlink(os.path.join(
7151 os.path.dirname(normalize_path(srcroot)), ".installed"))
7153 if e.errno != errno.ENOENT:
7157 # get old contents info for later unmerging
7158 oldcontents = self.getcontents()
# Write the new entry into the temporary "-MERGING-" dir first.
7160 self.dbdir = self.dbtmpdir
7162 if not os.path.exists(self.dbtmpdir):
7163 os.makedirs(self.dbtmpdir)
7165 writemsg_stdout(">>> Merging %s %s %s\n" % (self.mycpv,"to",destroot))
7167 # run preinst script
7168 if myebuild is None:
7169 myebuild = os.path.join(inforoot, self.pkg + ".ebuild")
7170 a = doebuild(myebuild, "preinst", destroot, self.settings, cleanup=cleanup,
7171 use_cache=0, tree=self.treetype, mydbapi=mydbapi,
7172 vartree=self.vartree)
7174 # XXX: Decide how to handle failures here.
7176 writemsg("!!! FAILED preinst: "+str(a)+"\n", noiselevel=-1)
7179 # copy "info" files (like SLOT, CFLAGS, etc.) into the database
7180 for x in listdir(inforoot):
7181 self.copyfile(inforoot+"/"+x)
7183 # get current counter value (counter_tick also takes care of incrementing it)
7184 # XXX Need to make this destroot, but it needs to be initialized first. XXX
7185 # XXX bis: leads to some invalidentry() call through cp_all().
7186 counter = self.vartree.dbapi.counter_tick(self.myroot, mycpv=self.mycpv)
7187 # write local package counter for recording
7188 lcfile = open(self.dbtmpdir+"/COUNTER","w")
7189 lcfile.write(str(counter))
7192 # open CONTENTS file (possibly overwriting old one) for recording
7193 outfile=open(self.dbtmpdir+"/CONTENTS","w")
7195 self.updateprotect()
7197 #if we have a file containing previously-merged config file md5sums, grab it.
7198 conf_mem_file = os.path.join(destroot, CONFIG_MEMORY_FILE)
7199 cfgfiledict = grabdict(conf_mem_file)
7200 if self.settings.has_key("NOCONFMEM"):
7201 cfgfiledict["IGNORE"]=1
7203 cfgfiledict["IGNORE"]=0
7205 # set umask to 0 for merging; back up umask, save old one in prevmask (since this is a global change)
7206 mymtime = long(time.time())
7207 prevmask = os.umask(0)
7210 # we do a first merge; this will recurse through all files in our srcroot but also build up a
7211 # "second hand" of symlinks to merge later
7212 if self.mergeme(srcroot,destroot,outfile,secondhand,"",cfgfiledict,mymtime):
7215 # now, it's time for dealing our second hand; we'll loop until we can't merge anymore. The rest are
7216 # broken symlinks. We'll merge them too.
7218 while len(secondhand) and len(secondhand)!=lastlen:
7219 # clear the thirdhand. Anything from our second hand that
7220 # couldn't get merged will be added to thirdhand.
7223 self.mergeme(srcroot,destroot,outfile,thirdhand,secondhand,cfgfiledict,mymtime)
7226 lastlen=len(secondhand)
7228 # our thirdhand now becomes our secondhand. It's ok to throw
7229 # away secondhand since thirdhand contains all the stuff that
7230 # couldn't be merged.
7231 secondhand = thirdhand
7234 # force merge of remaining symlinks (broken or circular; oh well)
7235 self.mergeme(srcroot,destroot,outfile,None,secondhand,cfgfiledict,mymtime)
7240 #if we opened it, close it
# If an old instance of the same cpv exists, unmerge it safely using the
# saved oldcontents, then swap the temporary db dir into place.
7244 if os.path.exists(self.dbpkgdir):
7245 writemsg_stdout(">>> Safely unmerging already-installed instance...\n")
7246 self.dbdir = self.dbpkgdir
7247 self.unmerge(oldcontents, trimworld=0, ldpath_mtimes=prev_mtimes)
7248 self.dbdir = self.dbtmpdir
7249 writemsg_stdout(">>> Original instance of package unmerged safely.\n")
7251 # We hold both directory locks.
7252 self.dbdir = self.dbpkgdir
7254 movefile(self.dbtmpdir, self.dbpkgdir, mysettings=self.settings)
7255 contents = self.getcontents()
7257 #write out our collection of md5sums
7258 if cfgfiledict.has_key("IGNORE"):
7259 del cfgfiledict["IGNORE"]
7261 my_private_path = os.path.join(destroot, PRIVATE_PATH)
7262 if not os.path.exists(my_private_path):
7263 os.makedirs(my_private_path)
7264 os.chown(my_private_path, os.getuid(), portage_gid)
7265 os.chmod(my_private_path, 02770)
7267 writedict(cfgfiledict, conf_mem_file)
7271 a = doebuild(myebuild, "postinst", destroot, self.settings, use_cache=0,
7272 tree=self.treetype, mydbapi=mydbapi, vartree=self.vartree)
7274 # XXX: Decide how to handle failures here.
7276 writemsg("!!! FAILED postinst: "+str(a)+"\n", noiselevel=-1)
# Detect downgrade: any remaining installed version newer than this one.
7280 for v in otherversions:
7281 if pkgcmp(catpkgsplit(self.pkg)[1:], catpkgsplit(v)[1:]) < 0:
7284 #update environment settings, library paths. DO NOT change symlinks.
7285 env_update(makelinks=(not downgrade),
7286 target_root=self.settings["ROOT"], prev_mtimes=prev_mtimes,
7288 #dircache may break autoclean because it remembers the -MERGING-pkg file
7290 if dircache.has_key(self.dbcatdir):
7291 del dircache[self.dbcatdir]
7292 writemsg_stdout(">>> %s %s\n" % (self.mycpv,"merged."))
7294 # Process ebuild logfiles
7295 elog_process(self.mycpv, self.settings)
7296 if "noclean" not in self.settings.features:
7297 doebuild(myebuild, "clean", destroot, self.settings,
7298 tree=self.treetype, mydbapi=mydbapi, vartree=self.vartree)
# NOTE(review): line-sampled extract — many interior lines (the per-entry
# loop header, try:/else: framing, returns, `moveme`/`zing` assignments)
# are missing from this view; do not assume the visible lines are
# contiguous.
7301 def mergeme(self,srcroot,destroot,outfile,secondhand,stufftomerge,cfgfiledict,thismtime):
7304 This function handles actual merging of the package contents to the livefs.
7305 It also handles config protection.
7307 @param srcroot: Where are we copying files from (usually ${D})
7308 @type srcroot: String (Path)
7309 @param destroot: Typically ${ROOT}
7310 @type destroot: String (Path)
7311 @param outfile: File to log operations to
7312 @type outfile: File Object
7313 @param secondhand: A set of items to merge in pass two (usually
7314 or symlinks that point to non-existing files that may get merged later)
7315 @type secondhand: List
7316 @param stufftomerge: Either a diretory to merge, or a list of items.
7317 @type stufftomerge: String or List
7318 @param cfgfiledict: { File:mtime } mapping for config_protected files
7319 @type cfgfiledict: Dictionary
7320 @param thismtime: The current time (typically long(time.time())
7321 @type thismtime: Long
7322 @rtype: None or Boolean
7328 from os.path import sep, join
# Normalize both roots to end in exactly one path separator.
7329 srcroot = normalize_path(srcroot).rstrip(sep) + sep
7330 destroot = normalize_path(destroot).rstrip(sep) + sep
7331 # this is supposed to merge a list of files. There will be 2 forms of argument passing.
7332 if type(stufftomerge)==types.StringType:
7333 #A directory is specified. Figure out protection paths, listdir() it and process it.
7334 mergelist = listdir(join(srcroot, stufftomerge))
7337 mergelist=stufftomerge
7340 mysrc = join(srcroot, offset, x)
7341 mydest = join(destroot, offset, x)
7342 # myrealdest is mydest without the $ROOT prefix (makes a difference if ROOT!="/")
7343 myrealdest = join(sep, offset, x)
7344 # stat file once, test using S_* macros many times (faster that way)
7346 mystat=os.lstat(mysrc)
# A listed file that cannot be lstat'd indicates filesystem corruption.
7349 writemsg(red("!!! ERROR: There appears to be ")+bold("FILE SYSTEM CORRUPTION.")+red(" A file that is listed\n"))
7350 writemsg(red("!!! as existing is not capable of being stat'd. If you are using an\n"))
7351 writemsg(red("!!! experimental kernel, please boot into a stable one, force an fsck,\n"))
7352 writemsg(red("!!! and ensure your filesystem is in a sane state. ")+bold("'shutdown -Fr now'\n"))
7353 writemsg(red("!!! File: ")+str(mysrc)+"\n", noiselevel=-1)
7354 writemsg(red("!!! Error: ")+str(e)+"\n", noiselevel=-1)
7356 except Exception, e:
7358 writemsg(red("!!! ERROR: An unknown error has occurred during the merge process.\n"))
7359 writemsg(red("!!! A stat call returned the following error for the following file:"))
7360 writemsg( "!!! Please ensure that your filesystem is intact, otherwise report\n")
7361 writemsg( "!!! this as a portage bug at bugs.gentoo.org. Append 'emerge info'.\n")
7362 writemsg( "!!! File: "+str(mysrc)+"\n", noiselevel=-1)
7363 writemsg( "!!! Error: "+str(e)+"\n", noiselevel=-1)
7367 mymode=mystat[stat.ST_MODE]
7368 # handy variables; mydest is the target object on the live filesystems;
7369 # mysrc is the source object in the temporary install dir
# lstat of the destination; ENOENT means "nothing in the way".
7371 mydmode = os.lstat(mydest).st_mode
7373 if e.errno != errno.ENOENT:
7376 #dest file doesn't exist
7379 if stat.S_ISLNK(mymode):
7380 # we are merging a symbolic link
7381 myabsto=abssymlink(mysrc)
7382 if myabsto.startswith(srcroot):
7383 myabsto=myabsto[len(srcroot):]
7384 myabsto = myabsto.lstrip(sep)
7385 myto=os.readlink(mysrc)
# Strip the ${D} image prefix from absolute link targets.
7386 if self.settings and self.settings["D"]:
7387 if myto.startswith(self.settings["D"]):
7388 myto=myto[len(self.settings["D"]):]
7389 # myrealto contains the path of the real file to which this symlink points.
7390 # we can simply test for existence of this file to see if the target has been merged yet
7391 myrealto = normalize_path(os.path.join(destroot, myabsto))
7394 if not stat.S_ISLNK(mydmode):
7395 if stat.S_ISDIR(mydmode):
7396 # directory in the way: we can't merge a symlink over a directory
7397 # we won't merge this, continue with next file...
7400 if os.path.exists(mysrc) and stat.S_ISDIR(os.stat(mysrc)[stat.ST_MODE]):
7401 # Kill file blocking installation of symlink to dir #71787
7403 elif self.isprotected(mydest):
7404 # Use md5 of the target in ${D} if it exists...
7406 newmd5 = portage_checksum.perform_md5(
7407 join(srcroot, myabsto))
7408 except portage_exception.FileNotFound:
7409 # Maybe the target is merged already.
7411 newmd5 = portage_checksum.perform_md5(
7413 except portage_exception.FileNotFound:
7415 mydest = new_protect_filename(mydest,newmd5=newmd5)
7417 # if secondhand is None it means we're operating in "force" mode and should not create a second hand.
7418 if (secondhand!=None) and (not os.path.exists(myrealto)):
7419 # either the target directory doesn't exist yet or the target file doesn't exist -- or
7420 # the target is a broken symlink. We will add this file to our "second hand" and merge
7422 secondhand.append(mysrc[len(srcroot):])
7424 # unlinking no longer necessary; "movefile" will overwrite symlinks atomically and correctly
7425 mymtime=movefile(mysrc,mydest,newmtime=thismtime,sstat=mystat, mysettings=self.settings)
7427 writemsg_stdout(">>> %s -> %s\n" % (mydest, myto))
7428 outfile.write("sym "+myrealdest+" -> "+myto+" "+str(mymtime)+"\n")
7430 print "!!! Failed to move file."
7431 print "!!!",mydest,"->",myto
7433 elif stat.S_ISDIR(mymode):
7434 # we are merging a directory
7436 # destination exists
7439 # Save then clear flags on dest.
7440 dflags=bsd_chflags.lgetflags(mydest)
7441 if dflags != 0 and bsd_chflags.lchflags(mydest, 0) < 0:
7442 writemsg("!!! Couldn't clear flags on '"+mydest+"'.\n",
7445 if not os.access(mydest, os.W_OK):
7446 pkgstuff = pkgsplit(self.pkg)
7447 writemsg("\n!!! Cannot write to '"+mydest+"'.\n", noiselevel=-1)
7448 writemsg("!!! Please check permissions and directories for broken symlinks.\n")
7449 writemsg("!!! You may start the merge process again by using ebuild:\n")
7450 writemsg("!!! ebuild "+self.settings["PORTDIR"]+"/"+self.cat+"/"+pkgstuff[0]+"/"+self.pkg+".ebuild merge\n")
7451 writemsg("!!! And finish by running this: env-update\n\n")
7454 if stat.S_ISLNK(mydmode) or stat.S_ISDIR(mydmode):
7455 # a symlink to an existing directory will work for us; keep it:
7456 writemsg_stdout("--- %s/\n" % mydest)
7458 bsd_chflags.lchflags(mydest, dflags)
7460 # a non-directory and non-symlink-to-directory. Won't work for us. Move out of the way.
7461 if movefile(mydest,mydest+".backup", mysettings=self.settings) is None:
7463 print "bak",mydest,mydest+".backup"
7464 #now create our directory
7465 if self.settings.selinux_enabled():
7466 sid = selinux.get_sid(mysrc)
7467 selinux.secure_mkdir(mydest,sid)
# mystat[0]/[4]/[5] are the source's mode/uid/gid, copied to the new dir.
7471 bsd_chflags.lchflags(mydest, dflags)
7472 os.chmod(mydest,mystat[0])
7473 os.chown(mydest,mystat[4],mystat[5])
7474 writemsg_stdout(">>> %s/\n" % mydest)
7476 #destination doesn't exist
7477 if self.settings.selinux_enabled():
7478 sid = selinux.get_sid(mysrc)
7479 selinux.secure_mkdir(mydest,sid)
7482 os.chmod(mydest,mystat[0])
7483 os.chown(mydest,mystat[4],mystat[5])
7484 writemsg_stdout(">>> %s/\n" % mydest)
7485 outfile.write("dir "+myrealdest+"\n")
7486 # recurse and merge this directory
7487 if self.mergeme(srcroot, destroot, outfile, secondhand,
7488 join(offset, x), cfgfiledict, thismtime):
7490 elif stat.S_ISREG(mymode):
7491 # we are merging a regular file
7492 mymd5=portage_checksum.perform_md5(mysrc,calc_prelink=1)
7493 # calculate config file protection stuff
7494 mydestdir=os.path.dirname(mydest)
7498 # destination file exists
7499 if stat.S_ISDIR(mydmode):
7500 # install of destination is blocked by an existing directory with the same name
7502 writemsg_stdout("!!! %s\n" % mydest)
7503 elif stat.S_ISREG(mydmode) or (stat.S_ISLNK(mydmode) and os.path.exists(mydest) and stat.S_ISREG(os.stat(mydest)[stat.ST_MODE])):
7505 # install of destination is blocked by an existing regular file,
7506 # or by a symlink to an existing regular file;
7507 # now, config file management may come into play.
7508 # we only need to tweak mydest if cfg file management is in play.
7509 if self.isprotected(mydest):
7510 # we have a protection path; enable config file management.
7511 destmd5=portage_checksum.perform_md5(mydest,calc_prelink=1)
7513 #file already in place; simply update mtimes of destination
7514 os.utime(mydest,(thismtime,thismtime))
# confmem: cfgfiledict remembers the md5 of previously merged config
# updates so identical updates are not re-offered to the user.
7518 if mymd5 == cfgfiledict.get(myrealdest, [None])[0]:
7519 """ An identical update has previously been
7520 merged. Skip it unless the user has chosen
7523 moveme = cfgfiledict["IGNORE"]
7524 cfgprot = cfgfiledict["IGNORE"]
7529 # Merging a new file, so update confmem.
7530 cfgfiledict[myrealdest] = [mymd5]
7531 elif destmd5 == cfgfiledict.get(myrealdest, [None])[0]:
7532 """A previously remembered update has been
7533 accepted, so it is removed from confmem."""
7534 del cfgfiledict[myrealdest]
# Divert the write to a ._cfg0000_-style protected filename.
7536 mydest = new_protect_filename(mydest, newmd5=mymd5)
7538 # whether config protection or not, we merge the new file the
7539 # same way. Unless moveme=0 (blocking directory)
7541 mymtime=movefile(mysrc,mydest,newmtime=thismtime,sstat=mystat, mysettings=self.settings)
7547 # We need to touch the destination so that on --update the
7548 # old package won't yank the file with it. (non-cfgprot related)
7549 os.utime(mydest,(thismtime,thismtime))
# Darwin-only kludge: rewrite ar-archive member mtimes in static libs so
# the linker does not consider the archive's table of contents stale.
7551 if self.settings["USERLAND"] == "Darwin" and myrealdest[-2:] == ".a":
7553 # XXX kludge, can be killed when portage stops relying on
7554 # md5+mtime, and uses refcounts
7555 # alright, we've fooled w/ mtime on the file; this pisses off static archives
7556 # basically internal mtime != file's mtime, so the linker (falsely) thinks
7557 # the archive is stale, and needs to have it's toc rebuilt.
7559 myf = open(mydest, "r+")
7561 # ar mtime field is digits padded with spaces, 12 bytes.
7562 lms=str(thismtime+5).ljust(12)
7565 if magic != "!<arch>\n":
7566 # not an archive (dolib.a from portage.py makes it here fex)
7569 st = os.stat(mydest)
7570 while myf.tell() < st.st_size - 12:
7577 # skip uid/gid/mperm
7580 # read the archive member's size
7581 x=long(myf.read(10))
7583 # skip the trailing newlines, and add the potential
7584 # extra padding byte if it's not an even size
7585 myf.seek(x + 2 + (x % 2),1)
7587 # and now we're at the end. yay.
7589 mymd5 = portage_checksum.perform_md5(mydest, calc_prelink=1)
7590 os.utime(mydest,(thismtime,thismtime))
7594 outfile.write("obj "+myrealdest+" "+mymd5+" "+str(mymtime)+"\n")
7595 writemsg_stdout("%s %s\n" % (zing,mydest))
7597 # we are merging a fifo or device node
7600 # destination doesn't exist
7601 if movefile(mysrc,mydest,newmtime=thismtime,sstat=mystat, mysettings=self.settings)!=None:
7605 if stat.S_ISFIFO(mymode):
7606 outfile.write("fif %s\n" % myrealdest)
7608 outfile.write("dev %s\n" % myrealdest)
7609 writemsg_stdout(zing+" "+mydest+"\n")
7611 def merge(self, mergeroot, inforoot, myroot, myebuild=None, cleanup=0,
7612 mydbapi=None, prev_mtimes=None):
# Thin entry point: delegates to treewalk(), which performs collision
# checks, the preinst/postinst phases and the actual livefs merge.
# NOTE(review): two lines are missing from this extract between the
# signature and the return (probably just a docstring, possibly
# try/finally lock handling) — confirm against the full file before
# assuming plain delegation.
7615 return self.treewalk(mergeroot, myroot, inforoot, myebuild,
7616 cleanup=cleanup, mydbapi=mydbapi, prev_mtimes=prev_mtimes)
# NOTE(review): line-sampled extract — the missing-file return value and
# the file close are not visible here.
7620 def getstring(self,name):
7621 "returns contents of a file with whitespace converted to spaces"
7622 if not os.path.exists(self.dbdir+"/"+name):
7624 myfile=open(self.dbdir+"/"+name,"r")
# split() with no args collapses any run of whitespace (incl. newlines).
7625 mydata=myfile.read().split()
7627 return " ".join(mydata)
def copyfile(self, fname):
	"""Copy the given file into this package's db directory.

	The copy keeps the source's basename; an existing file of the
	same name under self.dbdir is overwritten.
	"""
	destination = self.dbdir + "/" + os.path.basename(fname)
	shutil.copyfile(fname, destination)
# NOTE(review): line-sampled extract — the missing-file return value, the
# file close and the final return are not visible here.
7632 def getfile(self,fname):
7633 if not os.path.exists(self.dbdir+"/"+fname):
7635 myfile=open(self.dbdir+"/"+fname,"r")
7636 mydata=myfile.read()
def setfile(self, fname, data):
	"""Atomically replace the named file inside self.dbdir with *data*."""
	target_path = os.path.join(self.dbdir, fname)
	write_atomic(target_path, data)
# NOTE(review): line-sampled extract — the missing-file return, the result
# list initialization, the outer loop header, the append, the close and
# the final return are not visible here. Visible logic: read the file and
# split each line (minus its trailing newline) into whitespace tokens.
7643 def getelements(self,ename):
7644 if not os.path.exists(self.dbdir+"/"+ename):
7646 myelement=open(self.dbdir+"/"+ename,"r")
7647 mylines=myelement.readlines()
7650 for y in x[:-1].split():
# NOTE(review): line-sampled extract — the loop header over mylist and the
# file close are not visible here. Visible logic: write each element to
# the named db file, one per line.
7655 def setelements(self,mylist,ename):
7656 myelement=open(self.dbdir+"/"+ename,"w")
7658 myelement.write(x+"\n")
def isregular(self):
	"""Is this a regular package (does it have a CATEGORY file?
	A dblink can be virtual *and* regular)."""
	category_file = self.dbdir + "/CATEGORY"
	return os.path.exists(category_file)
class FetchlistDict(UserDict.DictMixin):
    """This provides a mapping interface to retrieve fetch lists.  It's used
    to allow portage_manifest.Manifest to access fetch lists via a standard
    mapping interface rather than use the dbapi directly."""
    def __init__(self, pkgdir, settings, mydbapi):
        """pkgdir is a directory containing ebuilds and settings is passed into
        portdbapi.getfetchlist for __getitem__ calls."""
        self.pkgdir = pkgdir
        # "category/package" derived from the last two path components.
        self.cp = os.sep.join(pkgdir.split(os.sep)[-2:])
        self.settings = settings
        # Repository root: two directory levels above pkgdir.
        self.mytree = os.path.realpath(os.path.dirname(os.path.dirname(pkgdir)))
        self.portdb = mydbapi
    def __getitem__(self, pkg_key):
        """Returns the complete fetch list for a given package."""
        return self.portdb.getfetchlist(pkg_key, mysettings=self.settings,
            all=True, mytree=self.mytree)[1]
    def has_key(self, pkg_key):
        """Returns true if the given package exists within pkgdir."""
        return pkg_key in self.keys()
    def keys(self):
        # Fix: the "def keys(self):" header was missing here, leaving the
        # docstring and return statement orphaned and breaking has_key(),
        # which relies on self.keys().
        """Returns keys for all packages within pkgdir"""
        return self.portdb.cp_list(self.cp, mytree=self.mytree)
def pkgmerge(mytbz2, myroot, mysettings, mydbapi=None, vartree=None, prev_mtimes=None):
    """will merge a .tbz2 file, returning a list of runtime dependencies
    that must be satisfied, or None if there was a merge error. This
    code assumes the package exists."""
    # NOTE(review): this excerpt omits many lines (the None-guards on
    # mydbapi/vartree, try/except/finally scaffolding, early returns).
    # Comments below describe only the visible code.
    mydbapi = db[myroot]["bintree"].dbapi
    vartree = db[myroot]["vartree"]
    if mytbz2[-5:]!=".tbz2":
        print "!!! Not a .tbz2 file"
    builddir_lock = None
    """ Don't lock the tbz2 file because the filesystem could be readonly or
    shared by a cluster."""
    #tbz2_lock = portage_locks.lockfile(mytbz2, wantnewlockfile=1)
    # Package name is the basename minus the ".tbz2" suffix.
    mypkg = os.path.basename(mytbz2)[:-5]
    xptbz2 = xpak.tbz2(mytbz2)
    mycat = xptbz2.getfile("CATEGORY")
    writemsg("!!! CATEGORY info missing from info chunk, aborting...\n",
    mycat = mycat.strip()
    # These are the same directories that would be used at build time.
    builddir = os.path.join(
        mysettings["PORTAGE_TMPDIR"], "portage", mycat, mypkg)
    catdir = os.path.dirname(builddir)
    pkgloc = os.path.join(builddir, "image")
    infloc = os.path.join(builddir, "build-info")
    myebuild = os.path.join(
        infloc, os.path.basename(mytbz2)[:-4] + "ebuild")
    # Lock the category dir while creating the build dir, then release it.
    portage_util.ensure_dirs(os.path.dirname(catdir),
        uid=portage_uid, gid=portage_gid, mode=070, mask=0)
    catdir_lock = portage_locks.lockdir(catdir)
    portage_util.ensure_dirs(catdir,
        uid=portage_uid, gid=portage_gid, mode=070, mask=0)
    builddir_lock = portage_locks.lockdir(builddir)
    portage_locks.unlockdir(catdir_lock)
    # Best-effort removal of a stale build dir; only ENOENT is tolerated.
    shutil.rmtree(builddir)
    except (IOError, OSError), e:
        if e.errno != errno.ENOENT:
    for mydir in (builddir, pkgloc, infloc):
        portage_util.ensure_dirs(mydir, uid=portage_uid,
            gid=portage_gid, mode=0755)
    writemsg_stdout(">>> Extracting info\n")
    xptbz2.unpackinfo(infloc)
    mysettings.load_infodir(infloc)
    # Store the md5sum in the vdb.
    fp = open(os.path.join(infloc, "BINPKGMD5"), "w")
    fp.write(str(portage_checksum.perform_md5(mytbz2))+"\n")
    debug = mysettings.get("PORTAGE_DEBUG", "") == "1"
    # Eventually we'd like to pass in the saved ebuild env here.
    retval = doebuild(myebuild, "setup", myroot, mysettings, debug=debug,
        tree="bintree", mydbapi=mydbapi, vartree=vartree)
    if retval != os.EX_OK:
        writemsg("!!! Setup failed: %s\n" % retval, noiselevel=-1)
    writemsg_stdout(">>> Extracting %s\n" % mypkg)
    # Unpack the binary image via an external bzip2|tar pipeline.
    retval = portage_exec.spawn_bash(
        "bzip2 -dqc -- '%s' | tar -xp -C '%s' -f -" % (mytbz2, pkgloc),
        env=mysettings.environ())
    if retval != os.EX_OK:
        writemsg("!!! Error Extracting '%s'\n" % mytbz2, noiselevel=-1)
    #portage_locks.unlockfile(tbz2_lock)
    # Merge the extracted image into the live filesystem via dblink.
    mylink = dblink(mycat, mypkg, myroot, mysettings, vartree=vartree,
    retval = mylink.merge(pkgloc, infloc, myroot, myebuild, cleanup=0,
        mydbapi=mydbapi, prev_mtimes=prev_mtimes)
    portage_locks.unlockfile(tbz2_lock)
    # Cleanup: remove the build dir (ENOENT tolerated) and release locks.
    shutil.rmtree(builddir)
    except (IOError, OSError), e:
        if e.errno != errno.ENOENT:
    portage_locks.unlockdir(builddir_lock)
    # Lock catdir for removal if empty.
    catdir_lock = portage_locks.lockdir(catdir)
    if e.errno != errno.ENOTEMPTY:
    portage_locks.unlockdir(catdir_lock)
def deprecated_profile_check():
    # Warn the user when the active profile carries a "deprecated" marker
    # file.  NOTE(review): several lines (the early return and the
    # noiselevel continuation arguments) are not visible in this excerpt.
    if not os.access(DEPRECATED_PROFILE_FILE, os.R_OK):
    deprecatedfile = open(DEPRECATED_PROFILE_FILE, "r")
    dcontent = deprecatedfile.readlines()
    deprecatedfile.close()
    # The first line of the marker file names the replacement profile.
    newprofile = dcontent[0]
    writemsg(red("\n!!! Your current profile is deprecated and not supported anymore.\n"),
    writemsg(red("!!! Please upgrade to the following profile if possible:\n"),
    writemsg(8*" "+green(newprofile)+"\n", noiselevel=-1)
    # Any remaining lines are printed verbatim as upgrade instructions.
    if len(dcontent) > 1:
        writemsg("To upgrade do the following steps:\n", noiselevel=-1)
        for myline in dcontent[1:]:
            writemsg(myline, noiselevel=-1)
        writemsg("\n\n", noiselevel=-1)
# gets virtual package settings
def getvirtuals(myroot):
    # Deprecated module-level shim kept for backward compatibility;
    # delegates to the global config instance.
    writemsg("--- DEPRECATED call to getvirtual\n")
    return settings.getvirtuals(myroot)
def commit_mtimedb(mydict=None, filename=None):
    # Persist the mtime database to disk atomically.
    # NOTE(review): this excerpt omits several lines (the mydict default,
    # the try block and the exception-handler body); comments describe
    # visible code only.
    if "mtimedb" not in globals() or mtimedb is None:
    if filename is None:
        filename = mtimedbfile
    # Stamp the payload with the portage version that wrote it.
    mydict["version"] = VERSION
    d = {} # for full backward compat, pickle it as a plain dict object.
    f = atomic_ofstream(filename)
    # Protocol -1 selects the highest pickle protocol available.
    cPickle.dump(d, f, -1)
    portage_util.apply_secpass_permissions(filename, uid=uid, gid=portage_gid, mode=0664)
    except (IOError, OSError), e:
# NOTE(review): the enclosing "def portageexit():" header is not visible in
# this excerpt; the statements below are its body, registered as an exit
# hook via atexit_register() underneath.
global uid,portage_gid,portdb,db
# Only flush portdbapi caches when running with privileges and outside
# the sandbox.
if secpass and not os.environ.has_key("SANDBOX_ACTIVE"):
    close_portdbapi_caches()
atexit_register(portageexit)
def global_updates(mysettings, trees, prev_mtimes):
    Perform new global updates if they exist in $PORTDIR/profiles/updates/.
    @param mysettings: A config instance for ROOT="/".
    @type mysettings: config
    @param trees: A dictionary containing portage trees.
    @param prev_mtimes: A dictionary containing mtimes of files located in
    $PORTDIR/profiles/updates/.
    @type prev_mtimes: dict
    @rtype: None or List
    @return: None if there were no updates, otherwise a list of update commands
    that have been performed.
    # NOTE(review): this excerpt omits lines (docstring quotes, try/else
    # scaffolding, initializations of myupd/timestamps, early returns);
    # comments describe visible code only.
    # only do this if we're root and not running repoman/ebuild digest
    if secpass < 2 or "SANDBOX_ACTIVE" in os.environ:
    updpath = os.path.join(mysettings["PORTDIR"], "profiles", "updates")
    # fixpackages reprocesses every update file; otherwise only those whose
    # mtimes changed relative to prev_mtimes are fetched.
    if mysettings["PORTAGE_CALLER"] == "fixpackages":
        update_data = grab_updates(updpath)
    update_data = grab_updates(updpath, prev_mtimes)
    except portage_exception.DirectoryNotFound:
        writemsg("--- 'profiles/updates' is empty or not available. Empty portage tree?\n")
    if len(update_data) > 0:
        do_upgrade_packagesmessage = 0
        for mykey, mystat, mycontent in update_data:
            writemsg_stdout("\n\n")
            writemsg_stdout(green("Performing Global Updates: ")+bold(mykey)+"\n")
            writemsg_stdout("(Could take a couple of minutes if you have a lot of binary packages.)\n")
            writemsg_stdout("  "+bold(".")+"='update pass'  "+bold("*")+"='binary update'  "+bold("@")+"='/var/db move'\n"+"  "+bold("s")+"='/var/db SLOT move' "+bold("S")+"='binary SLOT move' "+bold("p")+"='update /etc/portage/package.*'\n")
            valid_updates, errors = parse_updates(mycontent)
            myupd.extend(valid_updates)
            # One progress dot per parsed directive.
            writemsg_stdout(len(valid_updates) * "." + "\n")
            if len(errors) == 0:
                # Update our internal mtime since we
                # processed all of our directives.
                timestamps[mykey] = long(mystat.st_mtime)
                writemsg("%s\n" % msg, noiselevel=-1)
        # Propagate package moves into /etc/portage/package.* files.
        update_config_files("/",
            mysettings.get("CONFIG_PROTECT","").split(),
            mysettings.get("CONFIG_PROTECT_MASK","").split(),
        trees["/"]["bintree"] = binarytree("/", mysettings["PKGDIR"],
            settings=mysettings)
        # Apply each parsed command to the installed-package and binary trees.
        for update_cmd in myupd:
            if update_cmd[0] == "move":
                trees["/"]["vartree"].dbapi.move_ent(update_cmd)
                trees["/"]["bintree"].move_ent(update_cmd)
            elif update_cmd[0] == "slotmove":
                trees["/"]["vartree"].dbapi.move_slot_ent(update_cmd)
                trees["/"]["bintree"].move_slot_ent(update_cmd)
        # The above global updates proceed quickly, so they
        # are considered a single mtimedb transaction.
        if len(timestamps) > 0:
            # We do not update the mtime in the mtimedb
            # until after _all_ of the above updates have
            # been processed because the mtimedb will
            # automatically commit when killed by ctrl C.
            for mykey, mtime in timestamps.iteritems():
                prev_mtimes[mykey] = mtime
        # We gotta do the brute force updates for these now.
        if mysettings["PORTAGE_CALLER"] == "fixpackages" or \
        "fixpackages" in mysettings.features:
            trees["/"]["bintree"].update_ents(myupd)
            do_upgrade_packagesmessage = 1
        # Update progress above is indicated by characters written to stdout so
        # we print a couple new lines here to separate the progress output from
        if do_upgrade_packagesmessage and \
        listdir(os.path.join(mysettings["PKGDIR"], "All"), EmptyOnError=1):
            writemsg_stdout(" ** Skipping packages. Run 'fixpackages' or set it in FEATURES to fix the")
            writemsg_stdout("\n    tbz2's in the packages directory. "+bold("Note: This can take a very long time."))
            writemsg_stdout("\n")
7952 #continue setting up other trees
class MtimeDB(dict):
    # Dict subclass backed by a pickled file; tracks a pristine snapshot so
    # commit() only writes when the contents actually changed.
    # NOTE(review): this excerpt omits lines (dict.__init__ call, the
    # try/open scaffolding in _load, the "def commit" header); comments
    # describe visible code only.
    def __init__(self, filename):
        self.filename = filename
        self._load(filename)
    def _load(self, filename):
        mypickle = cPickle.Unpickler(f)
        # Restricted unpickler: with find_global unset, attempts to load
        # classes/instances from the (untrusted) file are rejected.
        mypickle.find_global = None
        except (IOError, OSError, EOFError, cPickle.UnpicklingError):
        # Migrate the legacy "old" key to its modern name.
        d["updates"] = d["old"]
        d.setdefault("starttime", 0)
        d.setdefault("version", "")
        for k in ("info", "ldpath", "updates"):
        # Whitelist of keys considered valid in a mtimedb.
        mtimedbkeys = set(("info", "ldpath", "resume", "resume_backup",
            "starttime", "updates", "version"))
        if k not in mtimedbkeys:
            writemsg("Deleting invalid mtimedb key: %s\n" % str(k))
        # Snapshot for change detection at commit time.
        self._clean_data = copy.deepcopy(d)
        if not self.filename:
        # Only commit if the internal state has changed.
        if d != self._clean_data:
            commit_mtimedb(mydict=d, filename=self.filename)
            self._clean_data = copy.deepcopy(d)
def create_trees(config_root=None, target_root=None, trees=None):
    # Build (or rebuild) the per-root dictionary of portage trees.
    # NOTE(review): this excerpt omits lines (the trees-is-None default,
    # several settings lock/clear calls, the final return); comments
    # describe visible code only.
    # clean up any existing portdbapi instances
    for myroot in trees:
        portdb = trees[myroot]["porttree"].dbapi
        portdb.close_caches()
        portdbapi.portdbapi_instances.remove(portdb)
        del trees[myroot]["porttree"], myroot, portdb
    settings = config(config_root=config_root, target_root=target_root,
        config_incrementals=portage_const.INCREMENTALS)
    myroots = [(settings["ROOT"], settings)]
    # When ROOT is not "/", also build a default configuration for "/".
    if settings["ROOT"] != "/":
        settings = config(config_root=None, target_root=None,
            config_incrementals=portage_const.INCREMENTALS)
        myroots.append((settings["ROOT"], settings))
    for myroot, mysettings in myroots:
        # Trees are registered lazily so they are only instantiated on
        # first access.
        trees[myroot] = portage_util.LazyItemsDict(trees.get(myroot, None))
        trees[myroot].addLazySingleton("virtuals", mysettings.getvirtuals, myroot)
        trees[myroot].addLazySingleton(
            "vartree", vartree, myroot, categories=mysettings.categories,
            settings=mysettings)
        trees[myroot].addLazySingleton("porttree",
            portagetree, myroot, settings=mysettings)
        trees[myroot].addLazySingleton("bintree",
            binarytree, myroot, mysettings["PKGDIR"], settings=mysettings)
8038 # Initialization of legacy globals. No functions/classes below this point
8039 # please! When the above functions and classes become independent of the
8040 # below global variables, it will be possible to make the below code
8041 # conditional on a backward compatibility flag (backward compatibility could
8042 # be disabled via an environment variable, for example). This will enable new
8043 # code that is aware of this flag to import portage without the unnecessary
8044 # overhead (and other issues!) of initializing the legacy globals.
def init_legacy_globals():
    # Populate the deprecated module-level globals for backward
    # compatibility with older API consumers.
    # NOTE(review): this excerpt omits lines (umask handling, kwargs
    # initialization, the conditional selecting myroot); comments describe
    # visible code only.
    global db, settings, root, portdb, selinux_enabled, mtimedbfile, mtimedb, \
    archlist, features, groups, pkglines, thirdpartymirrors, usedefaults, \
    profiledir, flushmtimedb
    # Portage needs to ensure a sane umask for the files it creates.
    for k, envvar in (("config_root", "PORTAGE_CONFIGROOT"), ("target_root", "ROOT")):
        kwargs[k] = os.environ.get(envvar, "/")
    db = create_trees(**kwargs)
    settings = db["/"]["vartree"].settings
    portdb = db["/"]["porttree"].dbapi
    settings = db[myroot]["vartree"].settings
    portdb = db[myroot]["porttree"].dbapi
    root = settings["ROOT"]
    mtimedbfile = os.path.join("/", CACHE_PATH.lstrip(os.path.sep), "mtimedb")
    mtimedb = MtimeDB(mtimedbfile)
    # ========================================================================
    # These attributes should not be used
    # within Portage under any circumstances.
    # ========================================================================
    # Legacy convenience aliases derived from the global settings object.
    archlist = settings.archlist()
    features = settings.features
    groups = settings["ACCEPT_KEYWORDS"].split()
    pkglines = settings.packages
    selinux_enabled = settings.selinux_enabled()
    thirdpartymirrors = settings.thirdpartymirrors()
    usedefaults = settings.use_defs
    if os.path.isdir(PROFILE_PATH):
        profiledir = PROFILE_PATH
    # Deprecated no-op stub kept so old callers do not break.
    def flushmtimedb(record):
        writemsg("portage.flushmtimedb() is DEPRECATED\n")
8091 # ========================================================================
8093 # These attributes should not be used
8094 # within Portage under any circumstances.
8095 # ========================================================================
8098 # The PORTAGE_LEGACY_GLOBALS environment variable is reserved for internal
8099 # use within Portage. External use of this variable is unsupported because
# it is experimental and its behavior is likely to change.
# Initialize the legacy globals at import time unless the caller has
# explicitly opted out via the environment.
if "PORTAGE_LEGACY_GLOBALS" not in os.environ:
    init_legacy_globals()
8107 # ============================================================================
8108 # ============================================================================