1 # portage.py -- core Portage functionality
2 # Copyright 1998-2004 Gentoo Foundation
3 # Distributed under the terms of the GNU General Public License v2
7 VERSION="$Rev$"[6:-2] + "-svn"
9 # ===========================================================================
10 # START OF IMPORTS -- START OF IMPORTS -- START OF IMPORTS -- START OF IMPORT
11 # ===========================================================================
24 import pickle as cPickle
28 from time import sleep
29 from random import shuffle
31 from itertools import chain, izip
32 except ImportError, e:
33 sys.stderr.write("\n\n")
34 sys.stderr.write("!!! Failed to complete python imports. These are internal modules for\n")
35 sys.stderr.write("!!! python and failure here indicates that you have a problem with python\n")
36 sys.stderr.write("!!! itself and thus portage is not able to continue processing.\n\n")
38 sys.stderr.write("!!! You might consider starting python with verbose flags to see what has\n")
39 sys.stderr.write("!!! gone wrong. Here is the information we got for this exception:\n")
40 sys.stderr.write(" "+str(e)+"\n\n");
# On FreeBSD, Portage manipulates file flags via the chflags(1) command;
# wrap it here and install the wrappers as the bsd_chflags module hooks.
if os.uname()[0] in ["FreeBSD"]:
	def _chflags(path, flags, opts=""):
		"""Run chflags(1) on path with the given numeric flags, raising
		OSError (or CommandNotFound) when the command fails."""
		cmd = "chflags %s %o '%s'" % (opts, flags, path)
		status, output = commands.getstatusoutput(cmd)
		# Command exited normally with EX_OK: success.
		if os.WIFEXITED(status) and os.WEXITSTATUS(status) == os.EX_OK:
		# Try to generate an ENOENT error if appropriate.
		# Make sure the binary exists.
		if not portage.process.find_binary("chflags"):
			raise portage.exception.CommandNotFound("chflags")
		# Now we're not sure exactly why it failed or what
		# the real errno was, so just report EPERM.
		e = OSError(errno.EPERM, output)
	def _lchflags(path, flags):
		# -h makes chflags operate on the symlink itself, not its target.
		return _chflags(path, flags, opts="-h")
	# Replace the module-level hooks with the command wrappers.
	bsd_chflags.chflags = _chflags
	bsd_chflags.lchflags = _lchflags
73 from portage.cache.cache_errors import CacheError
74 import portage.cvstree
76 import portage.getbinpkg
78 from portage.dep import dep_getcpv, dep_getkey, get_operator, \
79 isjustname, isspecific, isvalidatom, \
80 match_from_list, match_to_list, best_match_to_list
82 # XXX: This needs to get cleaned up.
84 from portage.output import bold, colorize, green, red, yellow
87 from portage.const import VDB_PATH, PRIVATE_PATH, CACHE_PATH, DEPCACHE_PATH, \
88 USER_CONFIG_PATH, MODULES_FILE_PATH, CUSTOM_PROFILE_PATH, PORTAGE_BASE_PATH, \
89 PORTAGE_BIN_PATH, PORTAGE_PYM_PATH, PROFILE_PATH, LOCALE_DATA_PATH, \
90 EBUILD_SH_BINARY, SANDBOX_BINARY, BASH_BINARY, \
91 MOVE_BINARY, PRELINK_BINARY, WORLD_FILE, MAKE_CONF_FILE, MAKE_DEFAULTS_FILE, \
92 DEPRECATED_PROFILE_FILE, USER_VIRTUALS_FILE, EBUILD_SH_ENV_FILE, \
93 INVALID_ENV_FILE, CUSTOM_MIRRORS_FILE, CONFIG_MEMORY_FILE,\
94 INCREMENTALS, EAPI, MISC_SH_BINARY, REPO_NAME_LOC, REPO_NAME_FILE
96 from portage.data import ostype, lchown, userland, secpass, uid, wheelgid, \
97 portage_uid, portage_gid, userpriv_groups
98 from portage.manifest import Manifest
101 from portage.util import atomic_ofstream, apply_secpass_permissions, apply_recursive_permissions, \
102 dump_traceback, getconfig, grabdict, grabdict_package, grabfile, grabfile_package, \
103 map_dictlist_vals, new_protect_filename, normalize_path, \
104 pickle_read, pickle_write, stack_dictlist, stack_dicts, stack_lists, \
105 unique_array, varexpand, writedict, writemsg, writemsg_stdout, write_atomic
106 import portage.exception
109 import portage.process
110 from portage.process import atexit_register, run_exitfuncs
111 from portage.locks import unlockfile,unlockdir,lockfile,lockdir
112 import portage.checksum
113 from portage.checksum import perform_md5,perform_checksum,prelink_capable
114 import portage.eclass_cache
115 from portage.localization import _
116 from portage.update import dep_transform, fixdbentries, grab_updates, \
117 parse_updates, update_config_files, update_dbentries
119 # Need these functions directly in portage namespace to not break every external tool in existence
120 from portage.versions import best, catpkgsplit, catsplit, pkgcmp, \
121 pkgsplit, vercmp, ververify
123 # endversion and endversion_keys are for backward compatibility only.
124 from portage.versions import endversion_keys
125 from portage.versions import suffix_value as endversion
127 except ImportError, e:
128 sys.stderr.write("\n\n")
129 sys.stderr.write("!!! Failed to complete portage imports. There are internal modules for\n")
130 sys.stderr.write("!!! portage and failure here indicates that you have a problem with your\n")
131 sys.stderr.write("!!! installation of portage. Please try a rescue portage located in the\n")
132 sys.stderr.write("!!! portage tree under '/usr/portage/sys-apps/portage/files/' (default).\n")
133 sys.stderr.write("!!! There is a README.RESCUE file that details the steps required to perform\n")
134 sys.stderr.write("!!! a recovery of portage.\n")
135 sys.stderr.write(" "+str(e)+"\n\n")
140 import portage.selinux as selinux
142 writemsg("!!! SELinux not loaded: %s\n" % str(e), noiselevel=-1)
147 # ===========================================================================
148 # END OF IMPORTS -- END OF IMPORTS -- END OF IMPORTS -- END OF IMPORTS -- END
149 # ===========================================================================
153 modname = ".".join(name.split(".")[:-1])
154 mod = __import__(modname)
155 components = name.split('.')
156 for comp in components[1:]:
157 mod = getattr(mod, comp)
def best_from_dict(key, top_dict, key_order, EmptyOnError=1, FullCopy=1, AllowEmpty=1):
	"""Search the sub-dicts of top_dict, in key_order priority, for key.

	Returns the first match -- deep-copied when FullCopy is true,
	otherwise the value itself -- and raises KeyError when the key is
	found in none of the sub-dicts."""
	if top_dict.has_key(x) and top_dict[x].has_key(key):
		return copy.deepcopy(top_dict[x][key])
		return top_dict[x][key]
	raise KeyError, "Key not found in list; '%s'" % key
173 "this fixes situations where the current directory doesn't exist"
176 except OSError: #dir doesn't exist
def abssymlink(symlink):
	"""Read a symlink and return its target as a normalized absolute path.

	@param symlink: path of the symlink to read
	@type symlink: String
	@rtype: String
	@returns: absolute, normalized target of the symlink
	"""
	mylink = os.readlink(symlink)
	if mylink[0] != '/':
		# A relative target is resolved against the directory that
		# contains the symlink itself; absolute targets pass through
		# unchanged (prepending the dirname would mangle them).
		mydir = os.path.dirname(symlink)
		mylink = mydir + "/" + mylink
	return os.path.normpath(mylink)
def cacheddir(my_original_path, ignorecvs, ignorelist, EmptyOnError, followSymlinks=True):
	"""stat()/listdir() wrapper with an mtime-keyed directory cache.

	Returns a (names, ftypes) pair for the directory, where each ftype
	entry encodes the corresponding entry's type (regular file, dir,
	symlink, ...).  Cached results are reused while the directory mtime
	is unchanged; the global cacheHit/cacheMiss/cacheStale counters are
	updated accordingly."""
	global cacheHit,cacheMiss,cacheStale
	mypath = normalize_path(my_original_path)
	if dircache.has_key(mypath):
		cached_mtime, list, ftype = dircache[mypath]
	cached_mtime, list, ftype = -1, [], []
	pathstat = os.stat(mypath)
	if stat.S_ISDIR(pathstat[stat.ST_MODE]):
		mtime = pathstat[stat.ST_MTIME]
	raise portage.exception.DirectoryNotFound(mypath)
	except EnvironmentError, e:
		# Map EACCES-style failures to Portage's PermissionDenied.
		if e.errno == portage.exception.PermissionDenied.errno:
			raise portage.exception.PermissionDenied(mypath)
	except portage.exception.PortageException:
	# Python returns mtime in seconds, so if it was changed in the last few seconds, it could be invalid
	if mtime != cached_mtime or time.time() - mtime < 4:
		if dircache.has_key(mypath):
			list = os.listdir(mypath)
		except EnvironmentError, e:
			if e.errno != errno.EACCES:
			raise portage.exception.PermissionDenied(mypath)
		# stat() follows symlinks; lstat() is the no-follow variant.
		pathstat = os.stat(mypath+"/"+x)
		pathstat = os.lstat(mypath+"/"+x)
		if stat.S_ISREG(pathstat[stat.ST_MODE]):
		elif stat.S_ISDIR(pathstat[stat.ST_MODE]):
		elif stat.S_ISLNK(pathstat[stat.ST_MODE]):
		except (IOError, OSError):
		# Refresh the cache entry for this directory.
		dircache[mypath] = mtime, list, ftype
	for x in range(0, len(list)):
		# Names starting with ".#" are CVS lock/backup files.
		if(ignorecvs and (len(list[x]) > 2) and (list[x][:2]!=".#")):
			ret_list.append(list[x])
			ret_ftype.append(ftype[x])
		elif (list[x] not in ignorelist):
			ret_list.append(list[x])
			ret_ftype.append(ftype[x])
	writemsg("cacheddirStats: H:%d/M:%d/S:%d\n" % (cacheHit, cacheMiss, cacheStale),10)
	return ret_list, ret_ftype
def listdir(mypath, recursive=False, filesonly=False, ignorecvs=False, ignorelist=[], followSymlinks=True,
	EmptyOnError=False, dirsonly=False):
	"""
	Portage-specific implementation of os.listdir

	@param mypath: Path whose contents you wish to list
	@type mypath: String
	@param recursive: Recursively scan directories contained within mypath
	@type recursive: Boolean
	@param filesonly: Only return files, not more directories
	@type filesonly: Boolean
	@param ignorecvs: Ignore CVS directories ('CVS','.svn','SCCS')
	@type ignorecvs: Boolean
	@param ignorelist: List of filenames/directories to exclude
	@type ignorelist: List
	@param followSymlinks: Follow Symlink'd files and directories
	@type followSymlinks: Boolean
	@param EmptyOnError: Return [] if an error occurs.
	@type EmptyOnError: Boolean
	@param dirsonly: Only return directories.
	@type dirsonly: Boolean
	@rtype: List
	@returns: A list of files and directories (or just files or just directories) or an empty list.
	"""
	list, ftype = cacheddir(mypath, ignorecvs, ignorelist, EmptyOnError, followSymlinks)
	# Fast path: no filtering or recursion requested -- return as-is.
	if not (filesonly or dirsonly or recursive):
	# ftype code 1 marks a directory; recurse into it unless it is
	# version-control metadata and ignorecvs is set.
	if ftype[x]==1 and not (ignorecvs and os.path.basename(list[x]) in ('CVS','.svn','SCCS')):
		l,f = cacheddir(mypath+"/"+list[x], ignorecvs, ignorelist, EmptyOnError,
		# Prefix each recursive result with its parent directory name.
		for y in range(0,len(l)):
			l[y]=list[x]+"/"+l[y]
	for x in range(0,len(ftype)):
		rlist=rlist+[list[x]]
	for x in range(0, len(ftype)):
		rlist = rlist + [list[x]]
def flatten(mytokens):
	"""this function now turns a [1,[2,3]] list into
	a [1,2,3] list and returns it.

	Nested lists are flattened recursively (any depth); non-list
	elements are appended unchanged, so element order is preserved."""
	newlist = []
	for x in mytokens:
		if isinstance(x, list):
			newlist.extend(flatten(x))
		else:
			newlist.append(x)
	return newlist
337 #beautiful directed graph object
class digraph(object):
	# Directed graph with prioritized edges; used by the dependency
	# resolver.  Each node maps to a (children, parents) pair of dicts
	# keyed by neighbor node with the edge priority as the value.

		"""Create an empty digraph"""
		# { node : ( { child : priority } , { parent : priority } ) }

	def add(self, node, parent, priority=0):
		"""Adds the specified node with the specified parent.

		If the dep is a soft-dep and the node already has a hard
		relationship to the parent, the relationship is left as hard."""
		if node not in self.nodes:
			self.nodes[node] = ({}, {})
			self.order.append(node)
		if parent not in self.nodes:
			self.nodes[parent] = ({}, {})
			self.order.append(parent)
		# An existing edge keeps the strongest (highest) priority seen.
		if parent in self.nodes[node][1]:
			if priority > self.nodes[node][1][parent]:
				self.nodes[node][1][parent] = priority
			self.nodes[node][1][parent] = priority
		if node in self.nodes[parent][0]:
			if priority > self.nodes[parent][0][node]:
				self.nodes[parent][0][node] = priority
			self.nodes[parent][0][node] = priority

	def remove(self, node):
		"""Removes the specified node from the digraph, also removing
		and ties to other nodes in the digraph. Raises KeyError if the
		node doesn't exist."""
		if node not in self.nodes:
		# Drop the back-references held by neighbors on both sides.
		for parent in self.nodes[node][1]:
			del self.nodes[parent][0][node]
		for child in self.nodes[node][0]:
			del self.nodes[child][1][node]
		self.order.remove(node)

	def contains(self, node):
		"""Checks if the digraph contains mynode"""
		return node in self.nodes

		"""Return a list of all nodes in the graph"""

	def child_nodes(self, node, ignore_priority=None):
		"""Return all children of the specified node"""
		if ignore_priority is None:
			return self.nodes[node][0].keys()
		# Otherwise only edges stronger than ignore_priority count.
		for child, priority in self.nodes[node][0].iteritems():
			if priority > ignore_priority:
				children.append(child)

	def parent_nodes(self, node):
		"""Return all parents of the specified node"""
		return self.nodes[node][1].keys()

	def leaf_nodes(self, ignore_priority=None):
		"""Return all nodes that have no children

		If ignore_soft_deps is True, soft deps are not counted as
		children in calculations."""
		# Iterate in insertion order so results are deterministic.
		for node in self.order:
			for child in self.nodes[node][0]:
				if self.nodes[node][0][child] > ignore_priority:
			leaf_nodes.append(node)

	def root_nodes(self, ignore_priority=None):
		"""Return all nodes that have no parents.

		If ignore_soft_deps is True, soft deps are not counted as
		parents in calculations."""
		for node in self.order:
			for parent in self.nodes[node][1]:
				if self.nodes[node][1][parent] > ignore_priority:
			root_nodes.append(node)

		"""Checks if the digraph is empty"""
		return len(self.nodes) == 0

		# Copy the per-node edge dicts so the clone's adjacency is
		# independent of this graph's.
		for k, v in self.nodes.iteritems():
			clone.nodes[k] = (v[0].copy(), v[1].copy())
		clone.order = self.order[:]

	# Backward compatibility
	allzeros = leaf_nodes

	def delnode(self, node):

		leaf_nodes = self.leaf_nodes()

	def hasallzeros(self, ignore_priority=None):
		# True when every node is currently a leaf.
		return len(self.leaf_nodes(ignore_priority=ignore_priority)) == \

	def debug_print(self):
		# Dump each node with its children and edge priorities.
		for node in self.nodes:
			if self.nodes[node][0]:
				print "(no children)"
			for child in self.nodes[node][0]:
				print "(%s)" % self.nodes[node][0][child]
496 #parse /etc/env.d and generate /etc/profile.env
def env_update(makelinks=1, target_root=None, prev_mtimes=None, contents=None,
	# Rebuilds etc/profile.env, etc/csh.env, etc/ld.so.conf and (when
	# prelink is enabled) etc/prelink.conf under target_root from the
	# files in etc/env.d, then runs ldconfig when needed.
	if target_root is None:
	if prev_mtimes is None:
		prev_mtimes = mtimedb["ldpath"]
	envd_dir = os.path.join(target_root, "etc", "env.d")
	portage.util.ensure_dirs(envd_dir, mode=0755)
	fns = listdir(envd_dir, EmptyOnError=1)
	# env.d files are expected to carry a two-digit numeric prefix;
	# dotfiles, backups and *.bak files are skipped.
	if not x[0].isdigit() or not x[1].isdigit():
	if x.startswith(".") or x.endswith("~") or x.endswith(".bak"):
	# Variables that are merged cumulatively across env.d files rather
	# than overwritten, with their respective separators.
	space_separated = set(["CONFIG_PROTECT", "CONFIG_PROTECT_MASK"])
	colon_separated = set(["ADA_INCLUDE_PATH", "ADA_OBJECTS_PATH",
		"CLASSPATH", "INFODIR", "INFOPATH", "KDEDIRS", "LDPATH", "MANPATH",
		"PATH", "PKG_CONFIG_PATH", "PRELINK_PATH", "PRELINK_PATH_MASK",
		"PYTHONPATH", "ROOTPATH"])
	file_path = os.path.join(envd_dir, x)
		myconfig = getconfig(file_path, expand=False)
	except portage.exception.ParseError, e:
		writemsg("!!! '%s'\n" % str(e), noiselevel=-1)
		# broken symlink or file removed by a concurrent process
		writemsg("!!! File Not Found: '%s'\n" % file_path, noiselevel=-1)
	config_list.append(myconfig)
	# env.d files may extend the cumulative-variable sets themselves.
	if "SPACE_SEPARATED" in myconfig:
		space_separated.update(myconfig["SPACE_SEPARATED"].split())
		del myconfig["SPACE_SEPARATED"]
	if "COLON_SEPARATED" in myconfig:
		colon_separated.update(myconfig["COLON_SEPARATED"].split())
		del myconfig["COLON_SEPARATED"]
	# Merge the cumulative variables across all parsed files, dropping
	# duplicate items while preserving first-seen order.
	for var in space_separated:
		for myconfig in config_list:
			for item in myconfig[var].split():
				if item and not item in mylist:
			del myconfig[var] # prepare for env.update(myconfig)
		env[var] = " ".join(mylist)
		specials[var] = mylist
	for var in colon_separated:
		for myconfig in config_list:
			for item in myconfig[var].split(":"):
				if item and not item in mylist:
			del myconfig[var] # prepare for env.update(myconfig)
		env[var] = ":".join(mylist)
		specials[var] = mylist
	for myconfig in config_list:
		"""Cumulative variables have already been deleted from myconfig so that
		they won't be overwritten by this dict.update call."""
	ldsoconf_path = os.path.join(target_root, "etc", "ld.so.conf")
	myld = open(ldsoconf_path)
	myldlines=myld.readlines()
	#each line has at least one char (a newline)
	except (IOError, OSError), e:
		if e.errno != errno.ENOENT:
	ld_cache_update=False
	newld = specials["LDPATH"]
	#ld.so.conf needs updating and ldconfig needs to be run
	myfd = atomic_ofstream(ldsoconf_path)
	myfd.write("# ld.so.conf autogenerated by env-update; make all changes to\n")
	myfd.write("# contents of /etc/env.d directory\n")
	for x in specials["LDPATH"]:
	# Update prelink.conf if we are prelink-enabled
	newprelink = atomic_ofstream(
		os.path.join(target_root, "etc", "prelink.conf"))
	newprelink.write("# prelink.conf autogenerated by env-update; make all changes to\n")
	newprelink.write("# contents of /etc/env.d directory\n")
	for x in ["/bin","/sbin","/usr/bin","/usr/sbin","/lib","/usr/lib"]:
		newprelink.write("-l "+x+"\n");
	for x in specials["LDPATH"]+specials["PATH"]+specials["PRELINK_PATH"]:
		for y in specials["PRELINK_PATH_MASK"]:
		newprelink.write("-h "+x+"\n")
	for x in specials["PRELINK_PATH_MASK"]:
		newprelink.write("-b "+x+"\n")
	# Portage stores mtimes with 1 second granularity but in >=python-2.5 finer
	# granularity is possible. In order to avoid the potential ambiguity of
	# mtimes that differ by less than 1 second, sleep here if any of the
	# directories have been modified during the current second.
	sleep_for_mtime_granularity = False
	current_time = long(time.time())
	mtime_changed = False
	for lib_dir in portage.util.unique_array(specials["LDPATH"]+['usr/lib','usr/lib64','usr/lib32','lib','lib64','lib32']):
		x = os.path.join(target_root, lib_dir.lstrip(os.sep))
		newldpathtime = long(os.stat(x).st_mtime)
		lib_dirs.add(normalize_path(x))
		if oe.errno == errno.ENOENT:
			# ignore this path because it doesn't exist
		if newldpathtime == current_time:
			sleep_for_mtime_granularity = True
		if prev_mtimes[x] == newldpathtime:
			prev_mtimes[x] = newldpathtime
		prev_mtimes[x] = newldpathtime
		ld_cache_update = True
	# Skip the ldconfig run when no relevant library dir content changed.
		not ld_cache_update and \
		contents is not None:
		libdir_contents_changed = False
		for mypath, mydata in contents.iteritems():
			if mydata[0] not in ("obj","sym"):
			head, tail = os.path.split(mypath)
				libdir_contents_changed = True
		if not libdir_contents_changed:
	ldconfig = "/sbin/ldconfig"
	# When cross-compiling, prefer a CHOST-prefixed ldconfig binary.
	if "CHOST" in env and "CBUILD" in env and \
		env["CHOST"] != env["CBUILD"]:
		from portage.process import find_binary
		ldconfig = find_binary("%s-ldconfig" % env["CHOST"])
	# Only run ldconfig as needed
	if (ld_cache_update or makelinks) and ldconfig:
		# ldconfig has very different behaviour between FreeBSD and Linux
		if ostype=="Linux" or ostype.lower().endswith("gnu"):
			# We can't update links if we haven't cleaned other versions first, as
			# an older package installed ON TOP of a newer version will cause ldconfig
			# to overwrite the symlinks we just made. -X means no links. After 'clean'
			# we can safely create links.
			writemsg(">>> Regenerating %setc/ld.so.cache...\n" % target_root)
			os.system("cd / ; %s -r '%s'" % (ldconfig, target_root))
			os.system("cd / ; %s -X -r '%s'" % (ldconfig, target_root))
		elif ostype in ("FreeBSD","DragonFly"):
			writemsg(">>> Regenerating %svar/run/ld-elf.so.hints...\n" % \
			os.system(("cd / ; %s -elf -i " + \
				"-f '%svar/run/ld-elf.so.hints' '%setc/ld.so.conf'") % \
				(ldconfig, target_root, target_root))
	del specials["LDPATH"]
	penvnotice = "# THIS FILE IS AUTOMATICALLY GENERATED BY env-update.\n"
	penvnotice += "# DO NOT EDIT THIS FILE. CHANGES TO STARTUP PROFILES\n"
	cenvnotice = penvnotice[:]
	penvnotice += "# GO INTO /etc/profile NOT /etc/profile.env\n\n"
	cenvnotice += "# GO INTO /etc/csh.cshrc NOT /etc/csh.env\n\n"
	#create /etc/profile.env for bash support
	outfile = atomic_ofstream(os.path.join(target_root, "etc", "profile.env"))
	outfile.write(penvnotice)
	env_keys = [ x for x in env if x != "LDPATH" ]
	# Values starting with $ (but not ${) are written with bash's
	# $'...' quoting so the leading dollar is not exported literally.
	if v.startswith('$') and not v.startswith('${'):
		outfile.write("export %s=$'%s'\n" % (k, v[1:]))
		outfile.write("export %s='%s'\n" % (k, v))
	#create /etc/csh.env for (t)csh support
	outfile = atomic_ofstream(os.path.join(target_root, "etc", "csh.env"))
	outfile.write(cenvnotice)
	outfile.write("setenv %s '%s'\n" % (x, env[x]))
	# Wait out the current second so later mtime comparisons against the
	# directories touched above are unambiguous.
	if sleep_for_mtime_granularity:
		while current_time == long(time.time()):
def ExtractKernelVersion(base_dir):
	"""
	Try to figure out what kernel version we are running
	@param base_dir: Path to sources (usually /usr/src/linux)
	@type base_dir: string
	@rtype: tuple( version[string], error[string])
	@returns:
	1. tuple( version[string], error[string])
	Either version or error is populated (but never both)
	"""
	pathname = os.path.join(base_dir, 'Makefile')
		f = open(pathname, 'r')
	except OSError, details:
		return (None, str(details))
	except IOError, details:
		return (None, str(details))
			lines.append(f.readline())
	except OSError, details:
		return (None, str(details))
	except IOError, details:
		return (None, str(details))
	lines = [l.strip() for l in lines]
	#XXX: The following code relies on the ordering of vars within the Makefile
	# split on the '=' then remove annoying whitespace
	items = line.split("=")
	items = [i.strip() for i in items]
	if items[0] == 'VERSION' or \
		items[0] == 'PATCHLEVEL':
	elif items[0] == 'SUBLEVEL':
	elif items[0] == 'EXTRAVERSION' and \
		items[-1] != items[0]:
	# Grab a list of files named localversion* and sort them
	localversions = os.listdir(base_dir)
	for x in range(len(localversions)-1,-1,-1):
		if localversions[x][:12] != "localversion":
	# Append the contents of each to the version string, stripping ALL whitespace
	for lv in localversions:
		version += "".join( " ".join( grabfile( base_dir+ "/" + lv ) ).split() )
	# Check the .config for a CONFIG_LOCALVERSION and append that too, also stripping whitespace
	kernelconfig = getconfig(base_dir+"/.config")
	if kernelconfig and kernelconfig.has_key("CONFIG_LOCALVERSION"):
		version += "".join(kernelconfig["CONFIG_LOCALVERSION"].split())
	return (version,None)
def autouse(myvartree, use_cache=1, mysettings=None):
	"""
	autouse returns a list of USE variables auto-enabled to packages being installed

	@param myvartree: Instance of the vartree class (from /var/db/pkg...)
	@type myvartree: vartree
	@param use_cache: read values from cache
	@type use_cache: Boolean
	@param mysettings: Instance of config
	@type mysettings: config
	@rtype: string
	@returns: A string containing a list of USE variables that are enabled via use.defaults
	"""
	if mysettings is None:
		mysettings = settings
	if mysettings.profile_path is None:
	# use.defaults maps each USE flag to the dep atoms that trigger it.
	usedefaults = mysettings.use_defs
	for myuse in usedefaults:
		for mydep in usedefaults[myuse]:
			# A flag is only auto-enabled when every one of its trigger
			# atoms matches an installed package.
			if not myvartree.dep_match(mydep,use_cache=True):
			myusevars += " "+myuse
def check_config_instance(test):
	"""Assert that *test* is a config instance; raise TypeError otherwise."""
	if isinstance(test, config):
		return
	raise TypeError("Invalid type for config object: %s (should be %s)" % (test.__class__, config))
850 class config(object):
852 This class encompasses the main portage configuration. Data is pulled from
853 ROOT/PORTDIR/profiles/, from ROOT/etc/make.profile incrementally through all
854 parent profiles as well as from ROOT/PORTAGE_CONFIGROOT/* for user specified
857 Generally if you need data like USE flags, FEATURES, environment variables,
858 virtuals ...etc you look in here.
861 _environ_whitelist = []
863 # Whitelisted variables are always allowed to enter the ebuild
864 # environment. Generally, this only includes special portage
865 # variables. Ebuilds can unset variables that are not whitelisted
866 # and rely on them remaining unset for future phases, without them
867 # leaking back in from various locations (bug #189417). It's very
868 # important to set our special BASH_ENV variable in the ebuild
869 # environment in order to prevent sandbox from sourcing /etc/profile
870 # in it's bashrc (causing major leakage).
871 _environ_whitelist += [
872 "BASH_ENV", "BUILD_PREFIX", "D",
873 "DISTDIR", "DOC_SYMLINKS_DIR", "EBUILD",
874 "EBUILD_EXIT_STATUS_FILE", "EBUILD_FORCE_TEST",
875 "EBUILD_PHASE", "ECLASSDIR", "ECLASS_DEPTH", "EMERGE_FROM",
876 "FEATURES", "FILESDIR", "HOME", "PATH",
878 "PKGUSE", "PKG_LOGDIR", "PKG_TMPDIR",
879 "PORTAGE_ACTUAL_DISTDIR", "PORTAGE_ARCHLIST",
881 "PORTAGE_BINPKG_FILE", "PORTAGE_BINPKG_TAR_OPTS",
882 "PORTAGE_BINPKG_TMPFILE",
884 "PORTAGE_BUILDDIR", "PORTAGE_COLORMAP",
885 "PORTAGE_CONFIGROOT", "PORTAGE_DEBUG", "PORTAGE_DEPCACHEDIR",
886 "PORTAGE_GID", "PORTAGE_INST_GID", "PORTAGE_INST_UID",
888 "PORTAGE_LOG_FILE", "PORTAGE_MASTER_PID",
889 "PORTAGE_PYM_PATH", "PORTAGE_REPO_NAME", "PORTAGE_RESTRICT",
890 "PORTAGE_TMPDIR", "PORTAGE_UPDATE_ENV", "PORTAGE_WORKDIR_MODE",
891 "PORTDIR", "PORTDIR_OVERLAY", "PREROOTPATH", "PROFILE_PATHS",
892 "ROOT", "ROOTPATH", "STARTDIR", "T", "TMP", "TMPDIR",
893 "USE_EXPAND", "USE_ORDER", "WORKDIR",
897 _environ_whitelist += [
898 "A", "AA", "CATEGORY", "P", "PF", "PN", "PR", "PV", "PVR"
901 # misc variables inherited from the calling environment
902 _environ_whitelist += [
903 "COLORTERM", "DISPLAY", "EDITOR", "LESS",
904 "LESSOPEN", "LOGNAME", "LS_COLORS", "PAGER",
905 "TERM", "TERMCAP", "USER",
908 # other variables inherited from the calling environment
909 _environ_whitelist += [
910 "CVS_RSH", "ECHANGELOG_USER",
912 "SSH_AGENT_PID", "SSH_AUTH_SOCK",
913 "STY", "WINDOW", "XAUTHORITY",
916 _environ_whitelist = frozenset(_environ_whitelist)
918 _environ_whitelist_re = re.compile(r'^(CCACHE_|DISTCC_).*')
920 # Filter selected variables in the config.environ() method so that
921 # they don't needlessly propagate down into the ebuild environment.
924 # misc variables inherited from the calling environment
926 "INFOPATH", "MANPATH",
929 # portage config variables and variables set directly by portage
931 "ACCEPT_KEYWORDS", "AUTOCLEAN",
932 "CLEAN_DELAY", "COLLISION_IGNORE", "CONFIG_PROTECT",
933 "CONFIG_PROTECT_MASK", "EMERGE_DEFAULT_OPTS",
934 "EMERGE_WARNING_DELAY", "FETCHCOMMAND", "FETCHCOMMAND_FTP",
935 "FETCHCOMMAND_HTTP", "FETCHCOMMAND_SFTP",
936 "GENTOO_MIRRORS", "NOCONFMEM", "O",
937 "PORTAGE_BINHOST_CHUNKSIZE", "PORTAGE_CALLER",
938 "PORTAGE_ECLASS_WARNING_ENABLE", "PORTAGE_ELOG_CLASSES",
939 "PORTAGE_ELOG_MAILFROM", "PORTAGE_ELOG_MAILSUBJECT",
940 "PORTAGE_ELOG_MAILURI", "PORTAGE_ELOG_SYSTEM",
941 "PORTAGE_FETCH_CHECKSUM_TRY_MIRRORS", "PORTAGE_FETCH_RESUME_MIN_SIZE",
943 "PORTAGE_GPG_KEY", "PORTAGE_PACKAGE_EMPTY_ABORT",
944 "PORTAGE_RSYNC_EXTRA_OPTS", "PORTAGE_RSYNC_OPTS",
945 "PORTAGE_RSYNC_RETRIES", "PORTAGE_USE", "PORT_LOGDIR",
946 "QUICKPKG_DEFAULT_OPTS",
947 "RESUMECOMMAND", "RESUMECOMMAND_HTTP", "RESUMECOMMAND_HTTP",
948 "RESUMECOMMAND_SFTP", "SYNC", "USE_EXPAND_HIDDEN", "USE_ORDER",
951 _environ_filter = frozenset(_environ_filter)
953 def __init__(self, clone=None, mycpv=None, config_profile_path=None,
954 config_incrementals=None, config_root=None, target_root=None,
957 @param clone: If provided, init will use deepcopy to copy by value the instance.
958 @type clone: Instance of config class.
959 @param mycpv: CPV to load up (see setcpv), this is the same as calling init with mycpv=None
960 and then calling instance.setcpv(mycpv).
962 @param config_profile_path: Configurable path to the profile (usually PROFILE_PATH from portage.const)
963 @type config_profile_path: String
964 @param config_incrementals: List of incremental variables (usually portage.const.INCREMENTALS)
965 @type config_incrementals: List
966 @param config_root: path to read local config from (defaults to "/", see PORTAGE_CONFIGROOT)
967 @type config_root: String
968 @param target_root: __init__ override of $ROOT env variable.
969 @type target_root: String
970 @param local_config: Enables loading of local config (/etc/portage); used most by repoman to
971 ignore local config (keywording and unmasking)
972 @type local_config: Boolean
975 # When initializing the global portage.settings instance, avoid
976 # raising exceptions whenever possible since exceptions thrown
977 # from 'import portage' or 'import portage.exceptions' statements
978 # can practically render the api unusable for api consumers.
979 tolerant = "_initializing_globals" in globals()
981 self.already_in_regenerate = 0
983 self._filter_calling_env = False
987 self.modifiedkeys = []
992 self.dirVirtuals = None
995 # Virtuals obtained from the vartree
996 self.treeVirtuals = {}
997 # Virtuals by user specification. Includes negatives.
998 self.userVirtuals = {}
999 # Virtual negatives from user specifications.
1000 self.negVirtuals = {}
1001 # Virtuals added by the depgraph via self.setinst().
1002 self._depgraphVirtuals = {}
1004 self.user_profile_dir = None
1005 self.local_config = local_config
1006 self._use_wildcards = False
1009 self._filter_calling_env = copy.deepcopy(clone._filter_calling_env)
1010 self.incrementals = copy.deepcopy(clone.incrementals)
1011 self.profile_path = copy.deepcopy(clone.profile_path)
1012 self.user_profile_dir = copy.deepcopy(clone.user_profile_dir)
1013 self.local_config = copy.deepcopy(clone.local_config)
1015 self.module_priority = copy.deepcopy(clone.module_priority)
1016 self.modules = copy.deepcopy(clone.modules)
1018 self.depcachedir = copy.deepcopy(clone.depcachedir)
1020 self.packages = copy.deepcopy(clone.packages)
1021 self.virtuals = copy.deepcopy(clone.virtuals)
1023 self.dirVirtuals = copy.deepcopy(clone.dirVirtuals)
1024 self.treeVirtuals = copy.deepcopy(clone.treeVirtuals)
1025 self.userVirtuals = copy.deepcopy(clone.userVirtuals)
1026 self.negVirtuals = copy.deepcopy(clone.negVirtuals)
1027 self._depgraphVirtuals = copy.deepcopy(clone._depgraphVirtuals)
1029 self.use_defs = copy.deepcopy(clone.use_defs)
1030 self.usemask = copy.deepcopy(clone.usemask)
1031 self.usemask_list = copy.deepcopy(clone.usemask_list)
1032 self.pusemask_list = copy.deepcopy(clone.pusemask_list)
1033 self.useforce = copy.deepcopy(clone.useforce)
1034 self.useforce_list = copy.deepcopy(clone.useforce_list)
1035 self.puseforce_list = copy.deepcopy(clone.puseforce_list)
1036 self.puse = copy.deepcopy(clone.puse)
1037 self.make_defaults_use = copy.deepcopy(clone.make_defaults_use)
1038 self.pkgprofileuse = copy.deepcopy(clone.pkgprofileuse)
1039 self.mycpv = copy.deepcopy(clone.mycpv)
1041 self.configlist = copy.deepcopy(clone.configlist)
1042 self.lookuplist = self.configlist[:]
1043 self.lookuplist.reverse()
1045 "env.d": self.configlist[0],
1046 "pkginternal": self.configlist[1],
1047 "globals": self.configlist[2],
1048 "defaults": self.configlist[3],
1049 "conf": self.configlist[4],
1050 "pkg": self.configlist[5],
1051 "auto": self.configlist[6],
1052 "backupenv": self.configlist[7],
1053 "env": self.configlist[8] }
1054 self.profiles = copy.deepcopy(clone.profiles)
1055 self.backupenv = self.configdict["backupenv"]
1056 self.pusedict = copy.deepcopy(clone.pusedict)
1057 self.categories = copy.deepcopy(clone.categories)
1058 self.pkeywordsdict = copy.deepcopy(clone.pkeywordsdict)
1059 self.pmaskdict = copy.deepcopy(clone.pmaskdict)
1060 self.punmaskdict = copy.deepcopy(clone.punmaskdict)
1061 self.prevmaskdict = copy.deepcopy(clone.prevmaskdict)
1062 self.pprovideddict = copy.deepcopy(clone.pprovideddict)
1063 self.features = copy.deepcopy(clone.features)
1065 self._accept_license = copy.deepcopy(clone._accept_license)
1066 self._plicensedict = copy.deepcopy(clone._plicensedict)
1067 self._use_wildcards = copy.deepcopy(clone._use_wildcards)
1070 def check_var_directory(varname, var):
1071 if not os.path.isdir(var):
1072 writemsg(("!!! Error: %s='%s' is not a directory. " + \
1073 "Please correct this.\n") % (varname, var),
1075 raise portage.exception.DirectoryNotFound(var)
1077 if config_root is None:
1080 config_root = normalize_path(os.path.abspath(
1081 config_root)).rstrip(os.path.sep) + os.path.sep
1083 check_var_directory("PORTAGE_CONFIGROOT", config_root)
1085 self.depcachedir = DEPCACHE_PATH
1087 if not config_profile_path:
1088 config_profile_path = \
1089 os.path.join(config_root, PROFILE_PATH.lstrip(os.path.sep))
1090 if os.path.isdir(config_profile_path):
1091 self.profile_path = config_profile_path
1093 self.profile_path = None
1095 self.profile_path = config_profile_path[:]
1097 if not config_incrementals:
1098 writemsg("incrementals not specified to class config\n")
1099 self.incrementals = copy.deepcopy(portage.const.INCREMENTALS)
1101 self.incrementals = copy.deepcopy(config_incrementals)
1103 self.module_priority = ["user","default"]
1105 self.modules["user"] = getconfig(
1106 os.path.join(config_root, MODULES_FILE_PATH.lstrip(os.path.sep)))
1107 if self.modules["user"] is None:
1108 self.modules["user"] = {}
1109 self.modules["default"] = {
1110 "portdbapi.metadbmodule": "portage.cache.metadata.database",
1111 "portdbapi.auxdbmodule": "portage.cache.flat_hash.database",
1117 # back up our incremental variables:
1119 # configlist will contain: [ env.d, globals, defaults, conf, pkg, auto, backupenv, env ]
1120 self.configlist.append({})
1121 self.configdict["env.d"] = self.configlist[-1]
1123 self.configlist.append({})
1124 self.configdict["pkginternal"] = self.configlist[-1]
1126 # The symlink might not exist or might not be a symlink.
1127 if self.profile_path is None:
def addProfile(currentPath):
	# Recursively register currentPath and all of its parent profiles
	# (listed in its "parent" file) in self.profiles, parents first,
	# so that child-profile settings stack on top of their parents.
	parentsFile = os.path.join(currentPath, "parent")
	if os.path.exists(parentsFile):
		parents = grabfile(parentsFile)
		if not parents:
			# BUGFIX: this previously interpolated the undefined name
			# "parents_file", raising NameError instead of ParseError.
			raise portage.exception.ParseError(
				"Empty parent file: '%s'" % parentsFile)
		for parentPath in parents:
			# Parent entries are relative to the current profile dir.
			parentPath = normalize_path(os.path.join(
				currentPath, parentPath))
			if os.path.exists(parentPath):
				addProfile(parentPath)
			else:
				raise portage.exception.ParseError(
					"Parent '%s' not found: '%s'" % \
					(parentPath, parentsFile))
	self.profiles.append(currentPath)
1149 addProfile(os.path.realpath(self.profile_path))
1150 except portage.exception.ParseError, e:
1151 writemsg("!!! Unable to parse profile: '%s'\n" % \
1152 self.profile_path, noiselevel=-1)
1153 writemsg("!!! ParseError: %s\n" % str(e), noiselevel=-1)
1157 custom_prof = os.path.join(
1158 config_root, CUSTOM_PROFILE_PATH.lstrip(os.path.sep))
1159 if os.path.exists(custom_prof):
1160 self.user_profile_dir = custom_prof
1161 self.profiles.append(custom_prof)
1164 self.packages_list = [grabfile_package(os.path.join(x, "packages")) for x in self.profiles]
1165 self.packages = stack_lists(self.packages_list, incremental=1)
1166 del self.packages_list
1167 #self.packages = grab_stacked("packages", self.profiles, grabfile, incremental_lines=1)
1170 self.prevmaskdict={}
1171 for x in self.packages:
1172 mycatpkg=dep_getkey(x)
1173 if not self.prevmaskdict.has_key(mycatpkg):
1174 self.prevmaskdict[mycatpkg]=[x]
1176 self.prevmaskdict[mycatpkg].append(x)
1178 # get profile-masked use flags -- INCREMENTAL Child over parent
1179 self.usemask_list = [grabfile(os.path.join(x, "use.mask")) \
1180 for x in self.profiles]
1181 self.usemask = set(stack_lists(
1182 self.usemask_list, incremental=True))
1183 use_defs_lists = [grabdict(os.path.join(x, "use.defaults")) for x in self.profiles]
1184 self.use_defs = stack_dictlist(use_defs_lists, incremental=True)
1187 self.pusemask_list = []
1188 rawpusemask = [grabdict_package(
1189 os.path.join(x, "package.use.mask")) \
1190 for x in self.profiles]
1191 for i in xrange(len(self.profiles)):
1193 for k, v in rawpusemask[i].iteritems():
1194 cpdict.setdefault(dep_getkey(k), {})[k] = v
1195 self.pusemask_list.append(cpdict)
1198 self.pkgprofileuse = []
1199 rawprofileuse = [grabdict_package(
1200 os.path.join(x, "package.use"), juststrings=True) \
1201 for x in self.profiles]
1202 for i in xrange(len(self.profiles)):
1204 for k, v in rawprofileuse[i].iteritems():
1205 cpdict.setdefault(dep_getkey(k), {})[k] = v
1206 self.pkgprofileuse.append(cpdict)
1209 self.useforce_list = [grabfile(os.path.join(x, "use.force")) \
1210 for x in self.profiles]
1211 self.useforce = set(stack_lists(
1212 self.useforce_list, incremental=True))
1214 self.puseforce_list = []
1215 rawpuseforce = [grabdict_package(
1216 os.path.join(x, "package.use.force")) \
1217 for x in self.profiles]
1218 for i in xrange(len(self.profiles)):
1220 for k, v in rawpuseforce[i].iteritems():
1221 cpdict.setdefault(dep_getkey(k), {})[k] = v
1222 self.puseforce_list.append(cpdict)
1225 # make.globals should not be relative to config_root
1226 # because it only contains constants.
1227 self.mygcfg = getconfig(os.path.join("/etc", "make.globals"))
1229 if self.mygcfg is None:
1232 self.configlist.append(self.mygcfg)
1233 self.configdict["globals"]=self.configlist[-1]
1235 self.make_defaults_use = []
1238 mygcfg_dlists = [getconfig(os.path.join(x, "make.defaults")) \
1239 for x in self.profiles]
1240 for cfg in mygcfg_dlists:
1242 self.make_defaults_use.append(cfg.get("USE", ""))
1244 self.make_defaults_use.append("")
1245 self.mygcfg = stack_dicts(mygcfg_dlists,
1246 incrementals=portage.const.INCREMENTALS, ignore_none=1)
1247 if self.mygcfg is None:
1249 self.configlist.append(self.mygcfg)
1250 self.configdict["defaults"]=self.configlist[-1]
1252 self.mygcfg = getconfig(
1253 os.path.join(config_root, MAKE_CONF_FILE.lstrip(os.path.sep)),
1254 tolerant=tolerant, allow_sourcing=True)
1255 if self.mygcfg is None:
1258 # Don't allow the user to override certain variables in make.conf
1259 profile_only_variables = self.configdict["defaults"].get(
1260 "PROFILE_ONLY_VARIABLES", "").split()
1261 for k in profile_only_variables:
1262 self.mygcfg.pop(k, None)
1264 # Allow ROOT setting to come from make.conf if it's not overridden
1265 # by the constructor argument (from the calling environment).
1266 if target_root is None and "ROOT" in self.mygcfg:
1267 target_root = self.mygcfg["ROOT"]
1269 self.configlist.append(self.mygcfg)
1270 self.configdict["conf"]=self.configlist[-1]
1272 self.configlist.append({})
1273 self.configdict["pkg"]=self.configlist[-1]
1276 self.configlist.append({})
1277 self.configdict["auto"]=self.configlist[-1]
1279 # backupenv is used for calculating incremental variables.
1280 self.backupenv = os.environ.copy()
1281 self.configlist.append(self.backupenv) # XXX Why though?
1282 self.configdict["backupenv"]=self.configlist[-1]
1284 # Don't allow the user to override certain variables in the env
1285 for k in profile_only_variables:
1286 self.backupenv.pop(k, None)
1288 self.configlist.append(self.backupenv.copy())
1289 self.configdict["env"]=self.configlist[-1]
1291 # make lookuplist for loading package.*
1292 self.lookuplist=self.configlist[:]
1293 self.lookuplist.reverse()
1295 # Blacklist vars that could interfere with portage internals.
1296 for blacklisted in "CATEGORY", "PKGUSE", "PORTAGE_CONFIGROOT", \
1297 "PORTAGE_IUSE", "PORTAGE_USE", "ROOT":
1298 for cfg in self.lookuplist:
1299 cfg.pop(blacklisted, None)
1300 del blacklisted, cfg
1302 if target_root is None:
1305 target_root = normalize_path(os.path.abspath(
1306 target_root)).rstrip(os.path.sep) + os.path.sep
1308 portage.util.ensure_dirs(target_root)
1309 check_var_directory("ROOT", target_root)
1312 os.path.join(target_root, "etc", "profile.env"), expand=False)
1313 # env_d will be None if profile.env doesn't exist.
1315 self.configdict["env.d"].update(env_d)
1316 # Remove duplicate values so they don't override updated
1317 # profile.env values later (profile.env is reloaded in each
1318 # call to self.regenerate).
1319 for cfg in (self.configdict["backupenv"],
1320 self.configdict["env"]):
1321 for k, v in env_d.iteritems():
1329 self["PORTAGE_CONFIGROOT"] = config_root
1330 self.backup_changes("PORTAGE_CONFIGROOT")
1331 self["ROOT"] = target_root
1332 self.backup_changes("ROOT")
1335 self.pkeywordsdict = {}
1336 self._plicensedict = {}
1337 self.punmaskdict = {}
1338 abs_user_config = os.path.join(config_root,
1339 USER_CONFIG_PATH.lstrip(os.path.sep))
1341 # locations for "categories" and "arch.list" files
1342 locations = [os.path.join(self["PORTDIR"], "profiles")]
1343 pmask_locations = [os.path.join(self["PORTDIR"], "profiles")]
1344 pmask_locations.extend(self.profiles)
1346 """ repoman controls PORTDIR_OVERLAY via the environment, so no
1347 special cases are needed here."""
1348 overlay_profiles = []
1349 for ov in self["PORTDIR_OVERLAY"].split():
1350 ov = normalize_path(ov)
1351 profiles_dir = os.path.join(ov, "profiles")
1352 if os.path.isdir(profiles_dir):
1353 overlay_profiles.append(profiles_dir)
1354 locations += overlay_profiles
1356 pmask_locations.extend(overlay_profiles)
1359 locations.append(abs_user_config)
1360 pmask_locations.append(abs_user_config)
1361 pusedict = grabdict_package(
1362 os.path.join(abs_user_config, "package.use"), recursive=1)
1363 for key in pusedict.keys():
1364 cp = dep_getkey(key)
1365 if not self.pusedict.has_key(cp):
1366 self.pusedict[cp] = {}
1367 self.pusedict[cp][key] = pusedict[key]
1368 if not self._use_wildcards:
1369 for x in pusedict[key]:
1370 if x.endswith("_*"):
1371 self._use_wildcards = True
1375 pkgdict = grabdict_package(
1376 os.path.join(abs_user_config, "package.keywords"),
1378 for key in pkgdict.keys():
1379 # default to ~arch if no specific keyword is given
1380 if not pkgdict[key]:
1382 if self.configdict["defaults"] and self.configdict["defaults"].has_key("ACCEPT_KEYWORDS"):
1383 groups = self.configdict["defaults"]["ACCEPT_KEYWORDS"].split()
1386 for keyword in groups:
1387 if not keyword[0] in "~-":
1388 mykeywordlist.append("~"+keyword)
1389 pkgdict[key] = mykeywordlist
1390 cp = dep_getkey(key)
1391 if not self.pkeywordsdict.has_key(cp):
1392 self.pkeywordsdict[cp] = {}
1393 self.pkeywordsdict[cp][key] = pkgdict[key]
1396 licdict = grabdict_package(os.path.join(
1397 abs_user_config, "package.license"), recursive=1)
1398 for k, v in licdict.iteritems():
1400 cp_dict = self._plicensedict.get(cp)
1403 self._plicensedict[cp] = cp_dict
1404 cp_dict[k] = self.expandLicenseTokens(v)
1407 pkgunmasklines = grabfile_package(
1408 os.path.join(abs_user_config, "package.unmask"),
1410 for x in pkgunmasklines:
1411 mycatpkg=dep_getkey(x)
1412 if self.punmaskdict.has_key(mycatpkg):
1413 self.punmaskdict[mycatpkg].append(x)
1415 self.punmaskdict[mycatpkg]=[x]
1417 #getting categories from an external file now
1418 categories = [grabfile(os.path.join(x, "categories")) for x in locations]
1419 self.categories = stack_lists(categories, incremental=1)
1422 archlist = [grabfile(os.path.join(x, "arch.list")) for x in locations]
1423 archlist = stack_lists(archlist, incremental=1)
1424 self.configdict["conf"]["PORTAGE_ARCHLIST"] = " ".join(archlist)
1428 for x in pmask_locations:
1429 pkgmasklines.append(grabfile_package(
1430 os.path.join(x, "package.mask"), recursive=1))
1431 pkgmasklines = stack_lists(pkgmasklines, incremental=1)
1434 for x in pkgmasklines:
1435 mycatpkg=dep_getkey(x)
1436 if self.pmaskdict.has_key(mycatpkg):
1437 self.pmaskdict[mycatpkg].append(x)
1439 self.pmaskdict[mycatpkg]=[x]
1441 pkgprovidedlines = [grabfile(os.path.join(x, "package.provided")) for x in self.profiles]
1442 pkgprovidedlines = stack_lists(pkgprovidedlines, incremental=1)
1443 has_invalid_data = False
1444 for x in range(len(pkgprovidedlines)-1, -1, -1):
1445 myline = pkgprovidedlines[x]
1446 if not isvalidatom("=" + myline):
1447 writemsg("Invalid package name in package.provided:" + \
1448 " %s\n" % myline, noiselevel=-1)
1449 has_invalid_data = True
1450 del pkgprovidedlines[x]
1452 cpvr = catpkgsplit(pkgprovidedlines[x])
1453 if not cpvr or cpvr[0] == "null":
1454 writemsg("Invalid package name in package.provided: "+pkgprovidedlines[x]+"\n",
1456 has_invalid_data = True
1457 del pkgprovidedlines[x]
1459 if cpvr[0] == "virtual":
1460 writemsg("Virtual package in package.provided: %s\n" % \
1461 myline, noiselevel=-1)
1462 has_invalid_data = True
1463 del pkgprovidedlines[x]
1465 if has_invalid_data:
1466 writemsg("See portage(5) for correct package.provided usage.\n",
1468 self.pprovideddict = {}
1469 for x in pkgprovidedlines:
1473 mycatpkg=dep_getkey(x)
1474 if self.pprovideddict.has_key(mycatpkg):
1475 self.pprovideddict[mycatpkg].append(x)
1477 self.pprovideddict[mycatpkg]=[x]
1479 # parse licensegroups
1480 self._license_groups = {}
1482 self._license_groups.update(
1483 grabdict(os.path.join(x, "license_groups")))
1485 # reasonable defaults; this is important as without USE_ORDER,
1486 # USE will always be "" (nothing set)!
1487 if "USE_ORDER" not in self:
1488 self.backupenv["USE_ORDER"] = "env:pkg:conf:defaults:pkginternal:env.d"
1490 self["PORTAGE_GID"] = str(portage_gid)
1491 self.backup_changes("PORTAGE_GID")
1493 if self.get("PORTAGE_DEPCACHEDIR", None):
1494 self.depcachedir = self["PORTAGE_DEPCACHEDIR"]
1495 self["PORTAGE_DEPCACHEDIR"] = self.depcachedir
1496 self.backup_changes("PORTAGE_DEPCACHEDIR")
1498 overlays = self.get("PORTDIR_OVERLAY","").split()
1502 ov = normalize_path(ov)
1503 if os.path.isdir(ov):
1506 writemsg("!!! Invalid PORTDIR_OVERLAY" + \
1507 " (not a dir): '%s'\n" % ov, noiselevel=-1)
1508 self["PORTDIR_OVERLAY"] = " ".join(new_ov)
1509 self.backup_changes("PORTDIR_OVERLAY")
1511 if "CBUILD" not in self and "CHOST" in self:
1512 self["CBUILD"] = self["CHOST"]
1513 self.backup_changes("CBUILD")
1515 self["PORTAGE_BIN_PATH"] = PORTAGE_BIN_PATH
1516 self.backup_changes("PORTAGE_BIN_PATH")
1517 self["PORTAGE_PYM_PATH"] = PORTAGE_PYM_PATH
1518 self.backup_changes("PORTAGE_PYM_PATH")
1520 # Expand license groups
1521 # This has to do be done for each config layer before regenerate()
1522 # in order for incremental negation to work properly.
1524 for c in self.configdict.itervalues():
1525 v = c.get("ACCEPT_LICENSE")
1528 v = " ".join(self.expandLicenseTokens(v.split()))
1529 c["ACCEPT_LICENSE"] = v
1532 for var in ("PORTAGE_INST_UID", "PORTAGE_INST_GID"):
1534 self[var] = str(int(self.get(var, "0")))
1536 writemsg(("!!! %s='%s' is not a valid integer. " + \
1537 "Falling back to '0'.\n") % (var, self[var]),
1540 self.backup_changes(var)
1542 # initialize self.features
1546 self._accept_license = \
1547 set(self.get("ACCEPT_LICENSE", "").split())
1548 # In order to enforce explicit acceptance for restrictive
1549 # licenses that require it, "*" will not be allowed in the
1550 # user config. Don't enforce this until license groups are
1551 # fully implemented in the tree.
1552 #self._accept_license.discard("*")
1553 if not self._accept_license:
1554 self._accept_license = set(["*"])
1556 # repoman will accept any license
1557 self._accept_license = set(["*"])
1559 if "gpg" in self.features:
1560 if not os.path.exists(self["PORTAGE_GPG_DIR"]) or \
1561 not os.path.isdir(self["PORTAGE_GPG_DIR"]):
1562 writemsg(colorize("BAD", "PORTAGE_GPG_DIR is invalid." + \
1563 " Removing gpg from FEATURES.\n"), noiselevel=-1)
1564 self.features.remove("gpg")
1566 if not portage.process.sandbox_capable and \
1567 ("sandbox" in self.features or "usersandbox" in self.features):
1568 if self.profile_path is not None and \
1569 os.path.realpath(self.profile_path) == \
1570 os.path.realpath(PROFILE_PATH):
1571 """ Don't show this warning when running repoman and the
1572 sandbox feature came from a profile that doesn't belong to
1574 writemsg(colorize("BAD", "!!! Problem with sandbox" + \
1575 " binary. Disabling...\n\n"), noiselevel=-1)
1576 if "sandbox" in self.features:
1577 self.features.remove("sandbox")
1578 if "usersandbox" in self.features:
1579 self.features.remove("usersandbox")
1581 self.features.sort()
1582 if "gpg" in self.features:
1583 writemsg(colorize("WARN", "!!! FEATURES=gpg is unmaintained, incomplete and broken. Disabling it."), noiselevel=-1)
1584 self.features.remove("gpg")
1585 self["FEATURES"] = " ".join(self.features)
1586 self.backup_changes("FEATURES")
# Create the handful of directories (tmp, var/tmp, the portage private
# and cache paths) that portage needs at runtime, with the ownership
# and permissions given in the table below.
# NOTE(review): this listing is missing several structural lines
# (docstring close, early return, the dir_mode_map assignment and the
# try statement) — see the gaps in the embedded line numbers.
1593 def _init_dirs(self):
1595 Create a few directories that are critical to portage operation
# Nothing to do when ROOT is not writable by us (e.g. unprivileged user).
1597 if not os.access(self["ROOT"], os.W_OK):
# (gid, mode, modemask) per ROOT-relative path; gid -1 leaves the group
# unchanged.  Modes are Python 2 octal literals (01777 = sticky +
# world-writable, as for /tmp).
1601 "tmp" : (-1, 01777, 0),
1602 "var/tmp" : (-1, 01777, 0),
1603 PRIVATE_PATH : (portage_gid, 02750, 02),
1604 CACHE_PATH.lstrip(os.path.sep) : (portage_gid, 0755, 02)
# Apply ownership/permissions for each entry; failures are reported
# but deliberately non-fatal.
1607 for mypath, (gid, mode, modemask) in dir_mode_map.iteritems():
1609 mydir = os.path.join(self["ROOT"], mypath)
1610 portage.util.ensure_dirs(mydir, gid=gid, mode=mode, mask=modemask)
1611 except portage.exception.PortageException, e:
1612 writemsg("!!! Directory initialization failed: '%s'\n" % mydir,
1614 writemsg("!!! %s\n" % str(e),
def expandLicenseTokens(self, tokens):
	""" Take a token from ACCEPT_LICENSE or package.license and expand it
	if it's a group token (indicated by @) or just return it if it's not a
	group. If a group is negated then negate all group elements.

	@param tokens: iterable of license tokens (may contain @group names)
	@return: flat list with all group tokens expanded
	"""
	expanded_tokens = []
	# The iteration statement was missing here, leaving "x" undefined;
	# expand each token independently and concatenate the results.
	for x in tokens:
		expanded_tokens.extend(self._expandLicenseToken(x, None))
	return expanded_tokens
1626 def _expandLicenseToken(self, token, traversed_groups):
1629 if token.startswith("-"):
1631 license_name = token[1:]
1633 license_name = token
1634 if not license_name.startswith("@"):
1635 rValue.append(token)
1637 group_name = license_name[1:]
1638 if not traversed_groups:
1639 traversed_groups = set()
1640 license_group = self._license_groups.get(group_name)
1641 if group_name in traversed_groups:
1642 writemsg(("Circular license group reference" + \
1643 " detected in '%s'\n") % group_name, noiselevel=-1)
1644 rValue.append("@"+group_name)
1646 traversed_groups.add(group_name)
1647 for l in license_group:
1648 if l.startswith("-"):
1649 writemsg(("Skipping invalid element %s" + \
1650 " in license group '%s'\n") % (l, group_name),
1653 rValue.extend(self._expandLicenseToken(l, traversed_groups))
1655 writemsg("Undefined license group '%s'\n" % group_name,
1657 rValue.append("@"+group_name)
1659 rValue = ["-" + token for token in rValue]
# Body of config.validate() — NOTE(review): the "def validate(self):"
# line itself is not present in this listing; presumably this
# docstring belongs to it — confirm against the full source.
1663 """Validate miscellaneous settings and display warnings if necessary.
1664 (This code was previously in the global scope of portage.py)"""
# Warn about ACCEPT_KEYWORDS entries that name no known arch.
1666 groups = self["ACCEPT_KEYWORDS"].split()
1667 archlist = self.archlist()
1669 writemsg("--- 'profiles/arch.list' is empty or " + \
1670 "not available. Empty portage tree?\n", noiselevel=1)
# A keyword must be a known arch, a negated known arch, or one of the
# special tokens "*", "~*", "**".
1672 for group in groups:
1673 if group not in archlist and \
1674 not (group.startswith("-") and group[1:] in archlist) and \
1675 group not in ("*", "~*", "**"):
1676 writemsg("!!! INVALID ACCEPT_KEYWORDS: %s\n" % str(group),
# Warn when the profile link is not a symlink into $PORTDIR/profiles
# (harmless during sync, but breaks most merges).
1679 abs_profile_path = os.path.join(self["PORTAGE_CONFIGROOT"],
1680 PROFILE_PATH.lstrip(os.path.sep))
1681 if not self.profile_path or (not os.path.islink(abs_profile_path) and \
1682 not os.path.exists(os.path.join(abs_profile_path, "parent")) and \
1683 os.path.exists(os.path.join(self["PORTDIR"], "profiles"))):
1684 writemsg("\a\n\n!!! %s is not a symlink and will probably prevent most merges.\n" % abs_profile_path,
1686 writemsg("!!! It should point into a profile within %s/profiles/\n" % self["PORTDIR"])
1687 writemsg("!!! (You can safely ignore this message when syncing. It's harmless.)\n\n\n")
# Deprecation notice for the old /etc/portage/virtuals location.
1689 abs_user_virtuals = os.path.join(self["PORTAGE_CONFIGROOT"],
1690 USER_VIRTUALS_FILE.lstrip(os.path.sep))
1691 if os.path.exists(abs_user_virtuals):
1692 writemsg("\n!!! /etc/portage/virtuals is deprecated in favor of\n")
1693 writemsg("!!! /etc/portage/profile/virtuals. Please move it to\n")
1694 writemsg("!!! this new location.\n\n")
def loadVirtuals(self, root):
	"""Deprecated compatibility wrapper; not currently used by portage.

	Emits a deprecation notice, then simply delegates to getvirtuals().
	"""
	writemsg("DEPRECATED: portage.config.loadVirtuals\n")
	self.getvirtuals(root)
# Resolve a module property (e.g. "portdbapi.auxdbmodule") through the
# user/default module maps (in self.module_priority order) and import
# the winning module.  NOTE(review): the try/except ImportError lines
# around the two load_mod() calls are missing from this listing.
1701 def load_best_module(self,property_string):
1702 best_mod = best_from_dict(property_string,self.modules,self.module_priority)
# First attempt: import the configured module path as-is.
1705 mod = load_mod(best_mod)
# Backward compatibility: old configs may reference "cache.*" modules
# that now live under the "portage." namespace.
1707 if best_mod.startswith("cache."):
1708 best_mod = "portage." + best_mod
1710 mod = load_mod(best_mod)
def modifying(self):
	"""Raise an Exception if this config instance is currently locked.

	Mutating methods call this first so a locked configuration cannot
	be modified accidentally.  self.locked is presumably toggled by
	lock()/unlock() elsewhere in this class — confirm in full source.

	@raises Exception: if the configuration is locked
	"""
	# As listed, the lock guard was missing and the method raised
	# unconditionally; also modernized the Python-2-only
	# "raise Exception, msg" statement to the call form.
	if self.locked:
		raise Exception("Configuration is locked.")
def backup_changes(self, key=None):
	"""Copy the current value of an environment variable into backupenv
	so that it survives subsequent reset() calls.

	@param key: name of the variable in configdict["env"] to back up
	@raises KeyError: if key is empty/None or not present in the env layer
	"""
	# Refuse to mutate a locked configuration (see modifying()).
	self.modifying()
	if key and key in self.configdict["env"]:
		# deepcopy so later in-place changes to the env value cannot
		# silently alter the backup.
		self.backupenv[key] = copy.deepcopy(self.configdict["env"][key])
	else:
		# As listed, this raise ran unconditionally (missing "else:");
		# also replaced Python-2-only "raise KeyError, msg" syntax and
		# the removed dict.has_key() with the equivalent "in" test.
		raise KeyError("No such key defined in environment: %s" % key)
# Restore the "env" config layer from backupenv and recompute all
# derived (incremental) settings via regenerate().
1734 def reset(self,keeping_pkg=0,use_cache=1):
1736 Restore environment from self.backupenv, call self.regenerate()
1737 @param keeping_pkg: Should we keep the set_cpv() data or delete it.
1738 @type keeping_pkg: Boolean
1739 @param use_cache: Should self.regenerate use the cache or not
1740 @type use_cache: Boolean
1744 self.configdict["env"].clear()
1745 self.configdict["env"].update(self.backupenv)
1747 self.modifiedkeys = []
# Unless the caller wants to keep setcpv() state, clear the
# per-package layers and restore the profile defaults for USE plus
# the use.mask / use.force sets (setcpv() may have adjusted them).
1751 self.configdict["pkg"].clear()
1752 self.configdict["pkginternal"].clear()
1753 self.configdict["defaults"]["USE"] = \
1754 " ".join(self.make_defaults_use)
1755 self.usemask = set(stack_lists(
1756 self.usemask_list, incremental=True))
1757 self.useforce = set(stack_lists(
1758 self.useforce_list, incremental=True))
1759 self.regenerate(use_cache=use_cache)
# Load package metadata files (all-uppercase filenames such as USE,
# SLOT, CATEGORY) from a build-info directory into the "pkg" and
# "env" config layers.  NOTE(review): several original lines are
# missing from this listing (see gaps in the embedded numbering).
1761 def load_infodir(self,infodir):
# Preserve the existing pkg layer; CATEGORY is restored immediately
# because doebuild() relies on it to infer the cpv.
1763 backup_pkg_metadata = dict(self.configdict["pkg"].iteritems())
1764 if "pkg" in self.configdict and \
1765 "CATEGORY" in self.configdict["pkg"]:
1766 self.configdict["pkg"].clear()
1767 self.configdict["pkg"]["CATEGORY"] = \
1768 backup_pkg_metadata["CATEGORY"]
1770 raise portage.exception.PortageException(
1771 "No pkg setup for settings instance?")
1774 found_category_file = False
1775 if os.path.isdir(infodir):
1776 if os.path.exists(infodir+"/environment"):
1777 self.configdict["pkg"]["PORT_ENV_FILE"] = infodir+"/environment"
# Only files whose names are entirely uppercase letters are treated
# as metadata variables.
1779 myre = re.compile('^[A-Z]+$')
1781 for filename in listdir(infodir,filesonly=1,EmptyOnError=1):
1782 if filename == "FEATURES":
1783 # FEATURES from the build host shouldn't be interpreted as
1784 # FEATURES on the client system.
1786 if filename == "CATEGORY":
1787 found_category_file = True
1789 if myre.match(filename):
1791 file_path = os.path.join(infodir, filename)
1792 mydata = open(file_path).read().strip()
# Skip oversized values (except USE) and reject embedded NUL bytes.
1793 if len(mydata) < 2048 or filename == "USE":
1794 if null_byte in mydata:
1795 writemsg("!!! Null byte found in metadata " + \
1796 "file: '%s'\n" % file_path, noiselevel=-1)
# The saved USE is prefixed with "-*" so only the recorded flags
# apply when the package environment is reconstructed.
1798 if filename == "USE":
1799 binpkg_flags = "-* " + mydata
1800 self.configdict["pkg"][filename] = binpkg_flags
1801 self.configdict["env"][filename] = mydata
1803 self.configdict["pkg"][filename] = mydata
1804 self.configdict["env"][filename] = mydata
1805 except (OSError, IOError):
1806 writemsg("!!! Unable to read file: %s\n" % infodir+"/"+filename,
1811 # Missing or corrupt CATEGORY will cause problems for
1812 # doebuild(), which uses it to infer the cpv. We already
1813 # know the category, so there's no need to trust this
1814 # file. Show a warning if the file is missing though,
1815 # because it's required (especially for binary packages).
1816 if not found_category_file:
1817 writemsg("!!! CATEGORY file is missing: %s\n" % \
1818 os.path.join(infodir, "CATEGORY"), noiselevel=-1)
1819 self.configdict["pkg"].update(backup_pkg_metadata)
# Configure this instance for a specific package (cpv): pulls in the
# package's profile USE defaults, package.use entries, per-package
# use.mask/use.force and IUSE defaults, then recomputes USE.
# NOTE(review): this listing drops a number of original lines (see the
# gaps in the embedded line numbers); code kept verbatim.
1823 def setcpv(self, mycpv, use_cache=1, mydb=None):
1825 Load a particular CPV into the config, this lets us see the
1826 Default USE flags for a particular ebuild as well as the USE
1827 flags from package.use.
1829 @param mycpv: A cpv to load
1831 @param use_cache: Enables caching
1832 @type use_cache: Boolean
1833 @param mydb: a dbapi instance that supports aux_get with the IUSE key.
1834 @type mydb: dbapi or derivative.
# Fast path: nothing to do when the same cpv is already loaded.
1839 if self.mycpv == mycpv:
1841 ebuild_phase = self.get("EBUILD_PHASE")
1844 cp = dep_getkey(mycpv)
1845 cpv_slot = self.mycpv
# When a dbapi is supplied, fetch SLOT/IUSE so slot atoms can be
# matched and IUSE defaults ("+flag"/"-flag") can seed the
# "pkginternal" layer.
1849 if isinstance(mydb, dict):
1853 slot, iuse = mydb.aux_get(self.mycpv, ["SLOT", "IUSE"])
1854 cpv_slot = "%s:%s" % (self.mycpv, slot)
1856 for x in iuse.split():
1857 if x.startswith("+"):
1858 pkginternaluse.append(x[1:])
1859 elif x.startswith("-"):
1860 pkginternaluse.append(x)
1861 pkginternaluse = " ".join(pkginternaluse)
1862 if pkginternaluse != self.configdict["pkginternal"].get("USE", ""):
1863 self.configdict["pkginternal"]["USE"] = pkginternaluse
# Per-profile package.use entries: for each profile, splice the best
# matching atom's flags in ahead of that profile's make.defaults USE.
1867 for i in xrange(len(self.profiles)):
1868 cpdict = self.pkgprofileuse[i].get(cp, None)
1870 keys = cpdict.keys()
1872 bestmatch = best_match_to_list(cpv_slot, keys)
1874 keys.remove(bestmatch)
1875 defaults.insert(pos, cpdict[bestmatch])
1879 if self.make_defaults_use[i]:
1880 defaults.insert(pos, self.make_defaults_use[i])
1882 defaults = " ".join(defaults)
1883 if defaults != self.configdict["defaults"].get("USE",""):
1884 self.configdict["defaults"]["USE"] = defaults
# Per-profile package.use.force, stacked incrementally.
1888 for i in xrange(len(self.profiles)):
1889 cpdict = self.puseforce_list[i].get(cp, None)
1891 keys = cpdict.keys()
1893 best_match = best_match_to_list(cpv_slot, keys)
1895 keys.remove(best_match)
1896 useforce.insert(pos, cpdict[best_match])
1900 if self.useforce_list[i]:
1901 useforce.insert(pos, self.useforce_list[i])
1903 useforce = set(stack_lists(useforce, incremental=True))
1904 if useforce != self.useforce:
1905 self.useforce = useforce
# Per-profile package.use.mask, stacked incrementally.
1909 for i in xrange(len(self.profiles)):
1910 cpdict = self.pusemask_list[i].get(cp, None)
1912 keys = cpdict.keys()
1914 best_match = best_match_to_list(cpv_slot, keys)
1916 keys.remove(best_match)
1917 usemask.insert(pos, cpdict[best_match])
1921 if self.usemask_list[i]:
1922 usemask.insert(pos, self.usemask_list[i])
1924 usemask = set(stack_lists(usemask, incremental=True))
1925 if usemask != self.usemask:
1926 self.usemask = usemask
# User package.use (PUSE): the best matching atom contributes flags.
1930 cpdict = self.pusedict.get(cp)
1932 keys = cpdict.keys()
1934 self.pusekey = best_match_to_list(cpv_slot, keys)
1936 keys.remove(self.pusekey)
1937 self.puse = (" ".join(cpdict[self.pusekey])) + " " + self.puse
1941 if oldpuse != self.puse:
1943 self.configdict["pkg"]["PKGUSE"] = self.puse[:] # For saving to PUSE file
1944 self.configdict["pkg"]["USE"] = self.puse[:] # this gets appended to USE
1945 if iuse != self.configdict["pkg"].get("IUSE",""):
1946 self.configdict["pkg"]["IUSE"] = iuse
# FEATURES=test interacts with USE=test; detect when that changed.
1947 test_use_changed = False
1948 if "test" in self.features:
1949 test_use_changed = \
1950 bool(re.search(r'(^|\s)[-+]?test(\s|$)', iuse)) != \
1951 ("test" in self["USE"].split())
# Only pay for reset()/regenerate() when something relevant changed.
1952 if self.get("EBUILD_PHASE") or \
1953 self._use_wildcards or \
1955 # Without this conditional, regenerate() would be called
1958 # CATEGORY is essential for doebuild calls
1959 self.configdict["pkg"]["CATEGORY"] = mycpv.split("/")[0]
1961 self.reset(keeping_pkg=1,use_cache=use_cache)
1963 # Filter out USE flags that aren't part of IUSE. This has to
1964 # be done for every setcpv() call since practically every
1965 # package has different IUSE. Some flags are considered to
1966 # be implicit members of IUSE:
1968 # * Flags derived from ARCH
1969 # * Flags derived from USE_EXPAND_HIDDEN variables
1970 # * Masked flags, such as those from {,package}use.mask
1971 # * Forced flags, such as those from {,package}use.force
1972 # * build and bootstrap flags used by bootstrap.sh
1974 usesplit = self["USE"].split()
1975 iuse_implicit = set(x.lstrip("+-") for x in iuse.split())
1977 # Flags derived from ARCH.
1978 arch = self.configdict["defaults"].get("ARCH")
1980 iuse_implicit.add(arch)
1981 iuse_implicit.update(self.get("PORTAGE_ARCHLIST", "").split())
1983 # Flags derived from USE_EXPAND_HIDDEN variables
1984 # such as ELIBC, KERNEL, and USERLAND.
1985 use_expand_hidden = self.get("USE_EXPAND_HIDDEN", "").split()
1986 use_expand_hidden_raw = use_expand_hidden
1987 if use_expand_hidden:
1988 use_expand_hidden = re.compile("^(%s)_.*" % \
1989 ("|".join(x.lower() for x in use_expand_hidden)))
1991 if use_expand_hidden.match(x):
1992 iuse_implicit.add(x)
1994 # Flags that have been masked or forced.
1995 iuse_implicit.update(self.usemask)
1996 iuse_implicit.update(self.useforce)
1998 # build and bootstrap flags used by bootstrap.sh
1999 iuse_implicit.add("build")
2000 iuse_implicit.add("bootstrap")
# Regex handed to the ebuild environment (PORTAGE_IUSE) so USE can be
# filtered there as well.
2003 iuse_grep = iuse_implicit.copy()
2004 if use_expand_hidden_raw:
2005 for x in use_expand_hidden_raw:
2006 iuse_grep.add(x.lower() + "_.*")
2008 iuse_grep = "^(%s)$" % "|".join(sorted(iuse_grep))
2011 self.configdict["pkg"]["PORTAGE_IUSE"] = iuse_grep
2013 # Filtered for the ebuild environment. Store this in a separate
2014 # attribute since we still want to be able to see global USE
2015 # settings for things like emerge --info.
2016 self.configdict["pkg"]["PORTAGE_USE"] = " ".join(sorted(
2017 x for x in usesplit if \
2018 x in iuse_implicit))
# Return the package.mask atom matching cpv (honoring package.unmask),
# or None.
2020 def getMaskAtom(self, cpv, metadata):
2022 Take a package and return a matching package.mask atom, or None if no
2023 such atom exists or it has been cancelled by package.unmask. PROVIDE
2024 is not checked, so atoms will not be found for old-style virtuals.
2026 @param cpv: The package name
2028 @param metadata: A dictionary of raw package metadata
2029 @type metadata: dict
2031 @return: A matching atom string or None if one is not found.
# Mask atoms are indexed by category/package; match against a
# "cpv:SLOT" string so slot atoms work too.
2034 cp = cpv_getkey(cpv)
2035 mask_atoms = self.pmaskdict.get(cp)
2037 pkg_list = ["%s:%s" % (cpv, metadata["SLOT"])]
2038 unmask_atoms = self.punmaskdict.get(cp)
2039 for x in mask_atoms:
2040 if not match_from_list(x, pkg_list):
# A matching unmask atom cancels the mask atom.
2043 for y in unmask_atoms:
2044 if match_from_list(y, pkg_list):
# Return the profile "packages"-file atom that masks cpv, or None.
2049 def getProfileMaskAtom(self, cpv, metadata):
2051 Take a package and return a matching profile atom, or None if no
2052 such atom exists. Note that a profile atom may or may not have a "*"
2053 prefix. PROVIDE is not checked, so atoms will not be found for
2056 @param cpv: The package name
2058 @param metadata: A dictionary of raw package metadata
2059 @type metadata: dict
2061 @return: A matching profile atom string or None if one is not found.
# prevmaskdict is built from the profiles' "packages" files; entries
# may carry a "*" prefix which is stripped before matching.
2064 cp = cpv_getkey(cpv)
2065 profile_atoms = self.prevmaskdict.get(cp)
2067 pkg_list = ["%s:%s" % (cpv, metadata["SLOT"])]
2068 for x in profile_atoms:
2069 if match_from_list(x.lstrip("*"), pkg_list):
# Return the KEYWORDS from metadata that the current ACCEPT_KEYWORDS /
# package.keywords configuration does not accept.
2074 def getMissingKeywords(self, cpv, metadata):
2076 Take a package and return a list of any KEYWORDS that the user
2077 may need to accept for the given package. If the KEYWORDS are empty
2078 and the ** keyword has not been accepted, the returned list will
2079 contain ** alone (in order to distinguish from the case of "none
2082 @param cpv: The package name (for package.keywords support)
2084 @param metadata: A dictionary of raw package metadata
2085 @type metadata: dict
2087 @return: A list of KEYWORDS that have not been accepted.
2090 # Hack: Need to check the env directly here as otherwise stacking
2091 # doesn't work properly as negative values are lost in the config
2092 # object (bug #139600)
2093 egroups = self.configdict["backupenv"].get(
2094 "ACCEPT_KEYWORDS", "").split()
2095 mygroups = metadata["KEYWORDS"].split()
2096 # Repoman may modify this attribute as necessary.
2097 pgroups = self["ACCEPT_KEYWORDS"].split()
# Per-package accept keywords from package.keywords, matched against
# "cpv:SLOT" atoms.
2099 cp = dep_getkey(cpv)
2100 pkgdict = self.pkeywordsdict.get(cp)
2103 cpv_slot_list = ["%s:%s" % (cpv, metadata["SLOT"])]
2104 for atom, pkgkeywords in pkgdict.iteritems():
2105 if match_from_list(atom, cpv_slot_list):
2107 pgroups.extend(pkgkeywords)
2108 if matches or egroups:
2109 pgroups.extend(egroups)
# Apply incremental semantics: "-kw" removes a previously accepted kw.
2112 if x.startswith("-"):
2116 inc_pgroups.discard(x[1:])
2119 pgroups = inc_pgroups
# Compare the package's KEYWORDS against the accepted groups; "*",
# "~*" and "**" widen what counts as accepted.
2124 if gp == "*" or (gp == "-*" and len(mygroups) == 1):
2125 writemsg(("--- WARNING: Package '%s' uses" + \
2126 " '%s' keyword.\n") % (cpv, gp), noiselevel=-1)
2133 elif gp.startswith("~"):
2135 elif not gp.startswith("-"):
2138 ((hastesting and "~*" in pgroups) or \
2139 (hasstable and "*" in pgroups) or "**" in pgroups):
2145 # If KEYWORDS is empty then we still have to return something
2146 # in order to distinguish from the case of "none missing".
2147 mygroups.append("**")
# Return the licenses from metadata["LICENSE"] that the current
# ACCEPT_LICENSE / package.license configuration does not accept.
2151 def getMissingLicenses(self, cpv, metadata):
2153 Take a LICENSE string and return a list of any licenses that the user
2154 may need to accept for the given package. The returned list will not
2155 contain any licenses that have already been accepted. This method
2156 can throw an InvalidDependString exception.
2158 @param cpv: The package name (for package.license support)
2160 @param metadata: A dictionary of raw package metadata
2161 @type metadata: dict
2163 @return: A list of licenses that have not been accepted.
# "*" in ACCEPT_LICENSE accepts everything; nothing can be missing.
2165 if "*" in self._accept_license:
2167 acceptable_licenses = self._accept_license
# Extend the acceptable set with per-package package.license entries
# whose atoms match this "cpv:SLOT".
2168 cpdict = self._plicensedict.get(dep_getkey(cpv), None)
2170 acceptable_licenses = self._accept_license.copy()
2171 cpv_slot = "%s:%s" % (cpv, metadata["SLOT"])
2172 for atom in match_to_list(cpv_slot, cpdict.keys()):
2173 acceptable_licenses.update(cpdict[atom])
# Evaluate the LICENSE dependency string under the package's USE
# flags, then walk the structure collecting unaccepted licenses.
2174 license_struct = portage.dep.paren_reduce(metadata["LICENSE"])
2175 license_struct = portage.dep.use_reduce(
2176 license_struct, uselist=metadata["USE"].split())
2177 license_struct = portage.dep.dep_opconvert(license_struct)
2178 return self._getMissingLicenses(license_struct, acceptable_licenses)
# Recursive helper for getMissingLicenses(): walk a dep_opconvert-ed
# LICENSE structure and collect elements not in acceptable_licenses.
2180 def _getMissingLicenses(self, license_struct, acceptable_licenses):
2181 if not license_struct:
# "||" choice group: if any alternative is fully acceptable nothing
# is missing; otherwise report the candidates.
2183 if license_struct[0] == "||":
2185 for element in license_struct[1:]:
2186 if isinstance(element, list):
2188 ret.append(self._getMissingLicenses(
2189 element, acceptable_licenses))
2193 if element in acceptable_licenses:
2196 # Return all masked licenses, since we don't know which combination
2197 # (if any) the user will decide to unmask.
# AND group: every element must be acceptable; recurse into nested
# sub-structures.
2201 for element in license_struct:
2202 if isinstance(element, list):
2204 ret.extend(self._getMissingLicenses(element,
2205 acceptable_licenses))
2207 if element not in acceptable_licenses:
# NOTE(review): non-contiguous excerpt; else-branches and loop headers are
# partially elided — confirm against the full source.
2211 def setinst(self,mycpv,mydbapi):
2212 """This updates the preferences for old-style virtuals,
2213 affecting the behavior of dep_expand() and dep_check()
2214 calls. It can change dbapi.match() behavior since that
2215 calls dep_expand(). However, dbapi instances have
2216 internal match caches that are not invalidated when
2217 preferences are updated here. This can potentially
2218 lead to some inconsistency (relevant to bug #1343)."""
2220 if len(self.virtuals) == 0:
2222 # Grab the virtuals this package provides and add them into the tree virtuals.
# mydbapi may be a plain metadata dict or a dbapi object; PROVIDE and USE
# are fetched accordingly.
2223 if isinstance(mydbapi, dict):
2224 provides = mydbapi["PROVIDE"]
2226 provides = mydbapi.aux_get(mycpv, ["PROVIDE"])[0]
2229 if isinstance(mydbapi, portdbapi):
2230 self.setcpv(mycpv, mydb=mydbapi)
2231 myuse = self["PORTAGE_USE"]
2232 elif isinstance(mydbapi, dict):
2233 myuse = mydbapi["USE"]
2235 myuse = mydbapi.aux_get(mycpv, ["USE"])[0]
# Resolve USE-conditional PROVIDE atoms into a flat virtual list.
2236 virts = flatten(portage.dep.use_reduce(portage.dep.paren_reduce(provides), uselist=myuse.split()))
2239 cp = dep_getkey(mycpv)
2241 virt = dep_getkey(virt)
2242 providers = self.virtuals.get(virt)
2243 if providers and cp in providers:
# Record this cp as a depgraph-discovered provider if not already known.
2245 providers = self._depgraphVirtuals.get(virt)
2246 if providers is None:
2248 self._depgraphVirtuals[virt] = providers
2249 if cp not in providers:
2250 providers.append(cp)
# Recompile the merged virtuals mapping after the update.
2254 self.virtuals = self.__getvirtuals_compile()
# NOTE(review): the enclosing `def` line is elided from this excerpt —
# presumably a config.reload()-style method; confirm the actual signature.
2257 """Reload things like /etc/profile.env that can change during runtime."""
2258 env_d_filename = os.path.join(self["ROOT"], "etc", "profile.env")
2259 self.configdict["env.d"].clear()
2260 env_d = getconfig(env_d_filename, expand=False)
2262 # env_d will be None if profile.env doesn't exist.
2263 self.configdict["env.d"].update(env_d)
# NOTE(review): this is a line-sampled excerpt of regenerate(); many
# statements (loop headers, else-branches, early returns) are elided, so the
# visible lines are not contiguous code. Do not treat gaps as absent logic.
# Purpose: re-stack all INCREMENTAL variables (USE last, since it depends on
# USE_EXPAND), apply use.mask/use.force, and write the final USE string into
# the last configlist element (the original environment layer).
2265 def regenerate(self,useonly=0,use_cache=1):
2268 This involves regenerating valid USE flags, re-expanding USE_EXPAND flags
2269 re-stacking USE flags (-flag and -*), as well as any other INCREMENTAL
2270 variables. This also updates the env.d configdict; useful in case an ebuild
2271 changes the environment.
2273 If FEATURES has already stacked, it is not stacked twice.
2275 @param useonly: Only regenerate USE flags (not any other incrementals)
2276 @type useonly: Boolean
2277 @param use_cache: Enable Caching (only for autouse)
2278 @type use_cache: Boolean
# Reentrancy guard: autouse() can call back into regenerate().
2283 if self.already_in_regenerate:
2284 # XXX: THIS REALLY NEEDS TO GET FIXED. autouse() loops.
2285 writemsg("!!! Looping in regenerate.\n",1)
2288 self.already_in_regenerate = 1
2291 myincrementals=["USE"]
2293 myincrementals = self.incrementals
2294 myincrementals = set(myincrementals)
2295 # If self.features exists, it has already been stacked and may have
2296 # been mutated, so don't stack it again or else any mutations will be
2298 if "FEATURES" in myincrementals and hasattr(self, "features"):
2299 myincrementals.remove("FEATURES")
2301 if "USE" in myincrementals:
2302 # Process USE last because it depends on USE_EXPAND which is also
2304 myincrementals.remove("USE")
# Stack every remaining incremental across all config layers except the
# last (the environment layer receives the merged result).
2306 for mykey in myincrementals:
2308 mydbs=self.configlist[:-1]
2312 if mykey not in curdb:
2314 #variables are already expanded
2315 mysplit = curdb[mykey].split()
2319 # "-*" is a special "minus" var that means "unset all settings".
2320 # so USE="-* gnome" will have *just* gnome enabled.
2325 # Not legal. People assume too much. Complain.
2326 writemsg(red("USE flags should not start with a '+': %s\n" % x),
# "-flag" removes a previously stacked flag if present.
2333 if (x[1:] in myflags):
2335 del myflags[myflags.index(x[1:])]
2338 # We got here, so add it now.
2339 if x not in myflags:
2343 #store setting in last element of configlist, the original environment:
2344 if myflags or mykey in self:
2345 self.configlist[-1][mykey] = " ".join(myflags)
2348 # Do the USE calculation last because it depends on USE_EXPAND.
2349 if "auto" in self["USE_ORDER"].split(":"):
2350 self.configdict["auto"]["USE"] = autouse(
2351 vartree(root=self["ROOT"], categories=self.categories,
2353 use_cache=use_cache, mysettings=self)
2355 self.configdict["auto"]["USE"] = ""
2357 use_expand = self.get("USE_EXPAND", "").split()
# Build the USE stacking order from USE_ORDER, reversed so later entries
# (lower priority) are processed first.
2360 for x in self["USE_ORDER"].split(":"):
2361 if x in self.configdict:
2362 self.uvlist.append(self.configdict[x])
2363 self.uvlist.reverse()
2366 for curdb in self.uvlist:
2367 cur_use_expand = [x for x in use_expand if x in curdb]
2368 mysplit = curdb.get("USE", "").split()
2369 if not mysplit and not cur_use_expand:
2377 writemsg(colorize("BAD", "USE flags should not start " + \
2378 "with a '+': %s\n" % x), noiselevel=-1)
2384 myflags.discard(x[1:])
# Expand USE_EXPAND variables (e.g. LINGUAS) into prefixed USE flags
# (linguas_*). Non-incremental vars replace prior values wholesale.
2389 for var in cur_use_expand:
2390 var_lower = var.lower()
2391 is_not_incremental = var not in myincrementals
2392 if is_not_incremental:
2393 prefix = var_lower + "_"
2394 for x in list(myflags):
2395 if x.startswith(prefix):
2397 for x in curdb[var].split():
2399 if is_not_incremental:
2400 writemsg(colorize("BAD", "Invalid '+' " + \
2401 "operator in non-incremental variable " + \
2402 "'%s': '%s'\n" % (var, x)), noiselevel=-1)
2405 writemsg(colorize("BAD", "Invalid '+' " + \
2406 "operator in incremental variable " + \
2407 "'%s': '%s'\n" % (var, x)), noiselevel=-1)
2410 if is_not_incremental:
2411 writemsg(colorize("BAD", "Invalid '-' " + \
2412 "operator in non-incremental variable " + \
2413 "'%s': '%s'\n" % (var, x)), noiselevel=-1)
2415 myflags.discard(var_lower + "_" + x[1:])
2417 myflags.add(var_lower + "_" + x)
# use.force always wins over removals accumulated above.
2419 myflags.update(self.useforce)
2421 iuse = self.configdict["pkg"].get("IUSE","").split()
2422 iuse = [ x.lstrip("+-") for x in iuse ]
2423 # FEATURES=test should imply USE=test
2424 if not hasattr(self, "features"):
2425 self.features = list(sorted(set(
2426 self.configlist[-1].get("FEATURES","").split())))
2427 self["FEATURES"] = " ".join(self.features)
2428 ebuild_force_test = self.get("EBUILD_FORCE_TEST") == "1"
2429 if ebuild_force_test and \
2430 self.get("EBUILD_PHASE") == "test" and \
2431 not hasattr(self, "_ebuild_force_test_msg_shown"):
2432 self._ebuild_force_test_msg_shown = True
2433 writemsg("Forcing test.\n", noiselevel=-1)
2434 if "test" in self.features and "test" in iuse:
2435 if "test" in self.usemask and not ebuild_force_test:
2436 # "test" is in IUSE and USE=test is masked, so execution
2437 # of src_test() probably is not reliable. Therefore,
2438 # temporarily disable FEATURES=test just for this package.
2439 self["FEATURES"] = " ".join(x for x in self.features \
2441 myflags.discard("test")
2444 if ebuild_force_test:
2445 self.usemask.discard("test")
2447 usesplit = [ x for x in myflags if \
2448 x not in self.usemask]
2450 # Use the calculated USE flags to regenerate the USE_EXPAND flags so
2451 # that they are consistent.
2452 for var in use_expand:
2453 prefix = var.lower() + "_"
2454 prefix_len = len(prefix)
2455 expand_flags = set([ x[prefix_len:] for x in usesplit \
2456 if x.startswith(prefix) ])
2457 var_split = self.get(var, "").split()
2458 # Preserve the order of var_split because it can matter for things
2460 var_split = [ x for x in var_split if x in expand_flags ]
2461 var_split.extend(expand_flags.difference(var_split))
2462 has_wildcard = "*" in var_split
2464 var_split = [ x for x in var_split if x != "*" ]
2465 self._use_wildcards = True
2468 if x.startswith(prefix):
2472 # * means to enable everything in IUSE that's not masked
2475 if x.startswith(prefix) and x not in self.usemask:
2476 suffix = x[prefix_len:]
2477 if suffix in var_split:
2479 var_split.append(suffix)
2482 # If there is a wildcard and no matching flags in IUSE then
2483 # LINGUAS should be unset so that all .mo files are
2487 self[var] = " ".join(var_split)
2489 # Don't export empty USE_EXPAND vars unless the user config
2490 # exports them as empty. This is required for vars such as
2491 # LINGUAS, where unset and empty have different meanings.
2493 # ebuild.sh will see this and unset the variable so
2494 # that things like LINGUAS work properly
2500 # It's not in IUSE, so just allow the variable content
2501 # to pass through if it is defined somewhere. This
2502 # allows packages that support LINGUAS but don't
2503 # declare it in IUSE to use the variable outside of the
2504 # USE_EXPAND context.
# ARCH is always implicitly enabled in USE.
2507 arch = self.configdict["defaults"].get("ARCH")
2508 if arch and arch not in usesplit:
2509 usesplit.append(arch)
2511 usesplit = [x for x in usesplit if \
2512 x not in self.usemask]
2515 self.configlist[-1]["USE"]= " ".join(usesplit)
2517 self.already_in_regenerate = 0
# Build/return a mapping keyed on the short (package-name-only) part of each
# virtual. NOTE(review): caching check and loop header are elided in this
# excerpt — confirm against the full source.
2519 def get_virts_p(self, myroot):
2522 virts = self.getvirtuals(myroot)
2525 vkeysplit = x.split("/")
# First provider list seen for a short name wins; later ones are ignored.
2526 if not self.virts_p.has_key(vkeysplit[1]):
2527 self.virts_p[vkeysplit[1]] = virts[x]
# NOTE(review): non-contiguous excerpt; the cached-result check and some loop
# headers are elided. Purpose: build and cache self.virtuals by stacking
# profile `virtuals` files, installed-tree PROVIDEs, and depgraph additions.
2530 def getvirtuals(self, myroot=None):
2531 """myroot is now ignored because, due to caching, it has always been
2532 broken for all but the first call."""
2533 myroot = self["ROOT"]
2535 return self.virtuals
# Collect and validate each profile's virtuals file; malformed keys and
# atoms are dropped with a warning rather than aborting.
2538 for x in self.profiles:
2539 virtuals_file = os.path.join(x, "virtuals")
2540 virtuals_dict = grabdict(virtuals_file)
2541 for k in virtuals_dict.keys():
2542 if not isvalidatom(k) or dep_getkey(k) != k:
2543 writemsg("--- Invalid virtuals atom in %s: %s\n" % \
2544 (virtuals_file, k), noiselevel=-1)
2545 del virtuals_dict[k]
2547 myvalues = virtuals_dict[k]
2550 if x.startswith("-"):
2551 # allow incrementals
2553 if not isvalidatom(myatom):
2554 writemsg("--- Invalid atom in %s: %s\n" % \
2555 (virtuals_file, x), noiselevel=-1)
2558 del virtuals_dict[k]
2560 virtuals_list.append(virtuals_dict)
2562 self.dirVirtuals = stack_dictlist(virtuals_list, incremental=True)
2565 for virt in self.dirVirtuals:
2566 # Preference for virtuals decreases from left to right.
2567 self.dirVirtuals[virt].reverse()
2569 # Repoman does not use user or tree virtuals.
2570 if self.local_config and not self.treeVirtuals:
2571 temp_vartree = vartree(myroot, None,
2572 categories=self.categories, settings=self)
2573 # Reduce the provides into a list by CP.
2574 self.treeVirtuals = map_dictlist_vals(getCPFromCPV,temp_vartree.get_all_provides())
2576 self.virtuals = self.__getvirtuals_compile()
2577 return self.virtuals
# NOTE(review): excerpt is non-contiguous (ptVirtuals initialization and the
# final return are elided). Merges installed+profile ("pt") virtuals first,
# then tree, profile, and depgraph virtuals via stack_dictlist.
2579 def __getvirtuals_compile(self):
2580 """Stack installed and profile virtuals. Preference for virtuals
2581 decreases from left to right.
2582 Order of preference:
2583 1. installed and in profile
2588 # Virtuals by profile+tree preferences.
2591 for virt, installed_list in self.treeVirtuals.iteritems():
2592 profile_list = self.dirVirtuals.get(virt, None)
2593 if not profile_list:
# A provider that is both installed and listed in the profile gets top
# preference.
2595 for cp in installed_list:
2596 if cp in profile_list:
2597 ptVirtuals.setdefault(virt, [])
2598 ptVirtuals[virt].append(cp)
2600 virtuals = stack_dictlist([ptVirtuals, self.treeVirtuals,
2601 self.dirVirtuals, self._depgraphVirtuals])
# Delete mykey from every layer in lookuplist (deletion body elided in this
# excerpt — presumably guarded per-dict; confirm in full source).
2604 def __delitem__(self,mykey):
2606 for x in self.lookuplist:
# Layered lookup: first dict in lookuplist containing mykey wins (the hit/
# return line is elided in this excerpt).
2611 def __getitem__(self,mykey):
2612 for d in self.lookuplist:
2615 return '' # for backward compat, don't raise KeyError
# dict.get-style layered lookup; returns the default x on a miss (hit and
# fallback lines elided in this excerpt).
2617 def get(self, k, x=None):
2618 for d in self.lookuplist:
# dict.pop-compatible signature: at most one default argument is allowed
# (the raise and the per-layer pop logic are elided in this excerpt).
2623 def pop(self, key, *args):
2626 "pop expected at most 2 arguments, got " + \
2627 repr(1 + len(args)))
# Iterate layers in reverse so the lowest-priority layers are popped last —
# presumably to remove the key from every layer; confirm in full source.
2629 for d in reversed(self.lookuplist):
# Legacy dict API shim; delegates to __contains__.
2637 def has_key(self,mykey):
2638 return mykey in self
2640 def __contains__(self, mykey):
2641 """Called to implement membership test operators (in and not in)."""
# True if any layer contains mykey (the hit/return lines are elided here).
2642 for d in self.lookuplist:
# dict.setdefault-compatible layered lookup (body largely elided in this
# excerpt; presumably returns an existing value or stores and returns x).
2647 def setdefault(self, k, x=None):
2660 for d in self.lookuplist:
2667 def __setitem__(self,mykey,myvalue):
2668 "set a value; will be thrown away at reset() time"
# Only plain strings are valid config values.
2669 if not isinstance(myvalue, str):
2670 raise ValueError("Invalid type being used as a value: '%s': '%s'" % (str(mykey),str(myvalue)))
# Track modified keys so reset() can discard them; writes go to the "env"
# layer only.
2672 self.modifiedkeys += [mykey]
2673 self.configdict["env"][mykey]=myvalue
# NOTE(review): the enclosing `def` line is elided — presumably
# config.environ(); the mydict construction loop header is also missing.
# Purpose: build the filtered environment dict handed to ebuild subprocesses.
2676 "return our locally-maintained environment"
2678 environ_filter = self._environ_filter
2679 filter_calling_env = self._filter_calling_env
2680 environ_whitelist = self._environ_whitelist
2681 env_d = self.configdict["env.d"]
# Variables on the filter list never enter the child environment.
2683 if x in environ_filter:
2686 if not isinstance(myvalue, basestring):
2687 writemsg("!!! Non-string value in config: %s=%s\n" % \
2688 (x, myvalue), noiselevel=-1)
2690 if filter_calling_env and \
2691 x not in environ_whitelist and \
2692 not self._environ_whitelist_re.match(x):
2693 # Do not allow anything to leak into the ebuild
2694 # environment unless it is explicitly whitelisted.
2695 # This ensures that variables unset by the ebuild
# Fallback HOME so child processes always have one.
2699 if not mydict.has_key("HOME") and mydict.has_key("BUILD_PREFIX"):
2700 writemsg("*** HOME not set. Setting to "+mydict["BUILD_PREFIX"]+"\n")
2701 mydict["HOME"]=mydict["BUILD_PREFIX"][:]
2703 if filter_calling_env:
2704 phase = self.get("EBUILD_PHASE")
2708 whitelist.append("RPMDIR")
2714 # Filtered by IUSE and implicit IUSE.
2715 mydict["USE"] = self.get("PORTAGE_USE", "")
2717 # sandbox's bashrc sources /etc/profile which unsets ROOTPATH,
2718 # so we have to back it up and restore it.
2719 rootpath = mydict.get("ROOTPATH")
2721 mydict["PORTAGE_ROOTPATH"] = rootpath
# Lazily build and cache the merged thirdpartymirrors mapping, giving
# overlays priority over PORTDIR (overlay roots are inserted at the front).
2725 def thirdpartymirrors(self):
2726 if getattr(self, "_thirdpartymirrors", None) is None:
2727 profileroots = [os.path.join(self["PORTDIR"], "profiles")]
2728 for x in self["PORTDIR_OVERLAY"].split():
2729 profileroots.insert(0, os.path.join(x, "profiles"))
2730 thirdparty_lists = [grabdict(os.path.join(x, "thirdpartymirrors")) for x in profileroots]
2731 self._thirdpartymirrors = stack_dictlist(thirdparty_lists, incremental=True)
2732 return self._thirdpartymirrors
# NOTE(review): enclosing `def` line elided — presumably an archlist()-style
# accessor. Returns each arch plus its "~arch" (testing) variant.
2735 return flatten([[myarch, "~" + myarch] \
2736 for myarch in self["PORTAGE_ARCHLIST"].split()])
# Lazily probe for SELinux support, caching the result as 0/1.
# NOTE(review): excerpt is non-contiguous — the import/except scaffolding
# around the selinux probe is partially elided.
2738 def selinux_enabled(self):
2739 if getattr(self, "_selinux_enabled", None) is None:
2740 self._selinux_enabled = 0
# Only consult the selinux module if USE=selinux is set and the module
# was actually imported.
2741 if "selinux" in self["USE"].split():
2742 if "selinux" in globals():
2743 if selinux.is_selinux_enabled() == 1:
2744 self._selinux_enabled = 1
2746 self._selinux_enabled = 0
2748 writemsg("!!! SELinux module not found. Please verify that it was installed.\n",
2750 self._selinux_enabled = 0
# When disabled, drop the cached module so later imports re-resolve.
2751 if self._selinux_enabled == 0:
2753 del sys.modules["selinux"]
2756 return self._selinux_enabled
# Escape shell metacharacters; the surrounding double-quote wrapping /
# return line is elided in this excerpt.
2758 def _shell_quote(s):
2760 Quote a string in double-quotes and use backslashes to
2761 escape any backslashes, double-quotes, dollar signs, or
2762 backquotes in the string.
# Backslash first in the iteration order so later escapes are not doubled.
2764 for letter in "\\\"$`":
2766 s = s.replace(letter, "\\" + letter)
2769 # In some cases, openpty can be slow when it fails. Therefore,
2770 # stop trying to use it after the first failure.
# Module-level sticky flag consulted (and set) by spawn() below.
2771 _disable_openpty = False
2773 # XXX This would be to replace getstatusoutput completely.
2774 # XXX Issue: cannot block execution. Deadlock condition.
# NOTE(review): line-sampled excerpt — try/except/finally scaffolding, the
# logfile tee loop's surrounding control flow, and several branches are
# elided. Treat this fragment as a skeleton of the real spawn().
2775 def spawn(mystring, mysettings, debug=0, free=0, droppriv=0, sesandbox=0, fakeroot=0, **keywords):
2777 Spawn a subprocess with extra portage-specific options.
2780 Sandbox: Sandbox means the spawned process will be limited in its ability t
2781 read and write files (normally this means it is restricted to ${IMAGE}/)
2782 SElinux Sandbox: Enables sandboxing on SElinux
2783 Reduced Privileges: Drops privilages such that the process runs as portage:portage
2786 Notes: os.system cannot be used because it messes with signal handling. Instead we
2787 use the portage.process spawn* family of functions.
2789 This function waits for the process to terminate.
2791 @param mystring: Command to run
2792 @type mystring: String
2793 @param mysettings: Either a Dict of Key,Value pairs or an instance of portage.config
2794 @type mysettings: Dictionary or config instance
2795 @param debug: Ignored
2796 @type debug: Boolean
2797 @param free: Enable sandboxing for this process
2799 @param droppriv: Drop to portage:portage when running this command
2800 @type droppriv: Boolean
2801 @param sesandbox: Enable SELinux Sandboxing (toggles a context switch)
2802 @type sesandbox: Boolean
2803 @param fakeroot: Run this command with faked root privileges
2804 @type fakeroot: Boolean
2805 @param keywords: Extra options encoded as a dict, to be passed to spawn
2806 @type keywords: Dictionary
2809 1. The return code of the spawned process.
# Accept either a plain dict of env vars or a config instance.
2812 if isinstance(mysettings, dict):
2814 keywords["opt_name"]="[ %s ]" % "portage"
2816 check_config_instance(mysettings)
2817 env=mysettings.environ()
2818 keywords["opt_name"]="[%s]" % mysettings["PF"]
2820 fd_pipes = keywords.get("fd_pipes")
2821 if fd_pipes is None:
2823 0:sys.stdin.fileno(),
2824 1:sys.stdout.fileno(),
2825 2:sys.stderr.fileno(),
2827 # In some cases the above print statements don't flush stdout, so
2828 # it needs to be flushed before allowing a child process to use it
2829 # so that output always shows in the correct order.
2830 for fd in fd_pipes.itervalues():
2831 if fd == sys.stdout.fileno():
2833 if fd == sys.stderr.fileno():
2836 # The default policy for the sesandbox domain only allows entry (via exec)
2837 # from shells and from binaries that belong to portage (the number of entry
2838 # points is minimized). The "tee" binary is not among the allowed entry
2839 # points, so it is spawned outside of the sesandbox domain and reads from a
2840 # pseudo-terminal that connects two domains.
2841 logfile = keywords.get("logfile")
2845 fd_pipes_orig = None
2848 del keywords["logfile"]
2849 if 1 not in fd_pipes or 2 not in fd_pipes:
2850 raise ValueError(fd_pipes)
# Sticky module-level fallback: once openpty fails, use plain pipes.
2851 global _disable_openpty
2852 if _disable_openpty:
2853 master_fd, slave_fd = os.pipe()
2855 from pty import openpty
2857 master_fd, slave_fd = openpty()
2859 except EnvironmentError, e:
2860 _disable_openpty = True
2861 writemsg("openpty failed: '%s'\n" % str(e), noiselevel=1)
2863 master_fd, slave_fd = os.pipe()
2865 # Disable post-processing of output since otherwise weird
2866 # things like \n -> \r\n transformations may occur.
2868 mode = termios.tcgetattr(slave_fd)
2869 mode[1] &= ~termios.OPOST
2870 termios.tcsetattr(slave_fd, termios.TCSANOW, mode)
2872 # We must set non-blocking mode before we close the slave_fd
2873 # since otherwise the fcntl call can fail on FreeBSD (the child
2874 # process might have already exited and closed slave_fd so we
2875 # have to keep it open in order to avoid FreeBSD potentially
2876 # generating an EAGAIN exception).
2878 fcntl.fcntl(master_fd, fcntl.F_SETFL,
2879 fcntl.fcntl(master_fd, fcntl.F_GETFL) | os.O_NONBLOCK)
2881 fd_pipes.setdefault(0, sys.stdin.fileno())
2882 fd_pipes_orig = fd_pipes.copy()
# Propagate terminal size to the pty so curses-aware children render
# correctly; child's stdout/stderr both go to the slave end.
2883 if got_pty and os.isatty(fd_pipes_orig[1]):
2884 from portage.output import get_term_size, set_term_size
2885 rows, columns = get_term_size()
2886 set_term_size(rows, columns, slave_fd)
2887 fd_pipes[0] = fd_pipes_orig[0]
2888 fd_pipes[1] = slave_fd
2889 fd_pipes[2] = slave_fd
2890 keywords["fd_pipes"] = fd_pipes
2892 features = mysettings.features
2893 # TODO: Enable fakeroot to be used together with droppriv. The
2894 # fake ownership/permissions will have to be converted to real
2895 # permissions in the merge phase.
2896 fakeroot = fakeroot and uid != 0 and portage.process.fakeroot_capable
# Privilege drop only applies when running as root and the portage
# user/group ids are known.
2897 if droppriv and not uid and portage_gid and portage_uid:
2898 keywords.update({"uid":portage_uid,"gid":portage_gid,
2899 "groups":userpriv_groups,"umask":002})
2901 free=((droppriv and "usersandbox" not in features) or \
2902 (not droppriv and "sandbox" not in features and \
2903 "usersandbox" not in features))
# Choose the spawn backend: plain bash, fakeroot, or sandbox. Nested
# sandboxes are avoided when SANDBOX_ACTIVE is already set.
2905 if free or "SANDBOX_ACTIVE" in os.environ:
2906 keywords["opt_name"] += " bash"
2907 spawn_func = portage.process.spawn_bash
2909 keywords["opt_name"] += " fakeroot"
2910 keywords["fakeroot_state"] = os.path.join(mysettings["T"], "fakeroot.state")
2911 spawn_func = portage.process.spawn_fakeroot
2913 keywords["opt_name"] += " sandbox"
2914 spawn_func = portage.process.spawn_sandbox
# SELinux: switch exec context into the sandbox domain for the child.
2917 con = selinux.getcontext()
2918 con = con.replace(mysettings["PORTAGE_T"],
2919 mysettings["PORTAGE_SANDBOX_T"])
2920 selinux.setexec(con)
2922 returnpid = keywords.get("returnpid")
2923 keywords["returnpid"] = True
2925 mypids.extend(spawn_func(mystring, env=env, **keywords))
2930 selinux.setexec(None)
# Tee loop: copy child output from the pty master to both the terminal
# and the logfile (the enclosing while/try is elided in this excerpt).
2936 log_file = open(logfile, 'a')
2937 stdout_file = os.fdopen(os.dup(fd_pipes_orig[1]), 'w')
2938 master_file = os.fdopen(master_fd, 'r')
2939 iwtd = [master_file]
2942 import array, select
2946 events = select.select(iwtd, owtd, ewtd)
2948 # Use non-blocking mode to prevent read
2949 # calls from blocking indefinitely.
2950 buf = array.array('B')
2952 buf.fromfile(f, buffsize)
2958 if f is master_file:
2959 buf.tofile(stdout_file)
2961 buf.tofile(log_file)
# Reap the child and translate its wait status into the historical
# shifted return-code convention.
2967 retval = os.waitpid(pid, 0)[1]
2968 portage.process.spawned_pids.remove(pid)
2969 if retval != os.EX_OK:
2971 return (retval & 0xff) << 8
# NOTE(review): non-contiguous excerpt — the `checksum = None` init, the
# try: around perform_md5, and some continue/close lines are elided.
2975 def _checksum_failure_temp_file(distdir, basename):
2977 First try to find a duplicate temp file with the same checksum and return
2978 that filename if available. Otherwise, use mkstemp to create a new unique
2979 filename._checksum_failure_.$RANDOM, rename the given file, and return the
2980 new filename. In any case, filename will be renamed or removed before this
2981 function returns a temp filename.
2984 filename = os.path.join(distdir, basename)
2985 size = os.stat(filename).st_size
# Match previously renamed failures of this same distfile.
2987 tempfile_re = re.compile(re.escape(basename) + r'\._checksum_failure_\..*')
2988 for temp_filename in os.listdir(distdir):
2989 if not tempfile_re.match(temp_filename):
2991 temp_filename = os.path.join(distdir, temp_filename)
# Cheap size check before the md5 comparison.
2993 if size != os.stat(temp_filename).st_size:
2998 temp_checksum = portage.checksum.perform_md5(temp_filename)
2999 except portage.exception.FileNotFound:
3000 # Apparently the temp file disappeared. Let it go.
# The current file's md5 is computed lazily, once.
3002 if checksum is None:
3003 checksum = portage.checksum.perform_md5(filename)
3004 if checksum == temp_checksum:
3006 return temp_filename
3008 from tempfile import mkstemp
3009 fd, temp_filename = mkstemp("", basename + "._checksum_failure_.", distdir)
3011 os.rename(filename, temp_filename)
3012 return temp_filename
# Parses PORTAGE_FETCH_RESUME_MIN_SIZE values like "350K": numeric part plus
# an optional single size-suffix letter. The _size_suffix_map body (suffix ->
# power-of-two exponent, judging by its use in fetch()) is elided here.
3014 _fetch_resume_size_re = re.compile('(^[\d]+)([KMGTPEZY]?$)')
3016 _size_suffix_map = {
3028 def fetch(myuris, mysettings, listonly=0, fetchonly=0, locks_in_subdir=".locks",use_locks=1, try_mirrors=1):
3029 "fetch files. Will use digest file if available."
3031 features = mysettings.features
3032 restrict = mysettings.get("PORTAGE_RESTRICT","").split()
3033 # 'nomirror' is bad/negative logic. You Restrict mirroring, not no-mirroring.
3034 if "mirror" in restrict or \
3035 "nomirror" in restrict:
3036 if ("mirror" in features) and ("lmirror" not in features):
3037 # lmirror should allow you to bypass mirror restrictions.
3038 # XXX: This is not a good thing, and is temporary at best.
3039 print ">>> \"mirror\" mode desired and \"mirror\" restriction found; skipping fetch."
3042 # Generally, downloading the same file repeatedly from
3043 # every single available mirror is a waste of bandwidth
3044 # and time, so there needs to be a cap.
3045 checksum_failure_max_tries = 5
3046 v = checksum_failure_max_tries
3048 v = int(mysettings.get("PORTAGE_FETCH_CHECKSUM_TRY_MIRRORS",
3049 checksum_failure_max_tries))
3050 except (ValueError, OverflowError):
3051 writemsg("!!! Variable PORTAGE_FETCH_CHECKSUM_TRY_MIRRORS" + \
3052 " contains non-integer value: '%s'\n" % \
3053 mysettings["PORTAGE_FETCH_CHECKSUM_TRY_MIRRORS"], noiselevel=-1)
3054 writemsg("!!! Using PORTAGE_FETCH_CHECKSUM_TRY_MIRRORS " + \
3055 "default value: %s\n" % checksum_failure_max_tries,
3057 v = checksum_failure_max_tries
3059 writemsg("!!! Variable PORTAGE_FETCH_CHECKSUM_TRY_MIRRORS" + \
3060 " contains value less than 1: '%s'\n" % v, noiselevel=-1)
3061 writemsg("!!! Using PORTAGE_FETCH_CHECKSUM_TRY_MIRRORS " + \
3062 "default value: %s\n" % checksum_failure_max_tries,
3064 v = checksum_failure_max_tries
3065 checksum_failure_max_tries = v
3068 fetch_resume_size_default = "350K"
3069 fetch_resume_size = mysettings.get("PORTAGE_FETCH_RESUME_MIN_SIZE")
3070 if fetch_resume_size is not None:
3071 fetch_resume_size = "".join(fetch_resume_size.split())
3072 if not fetch_resume_size:
3073 # If it's undefined or empty, silently use the default.
3074 fetch_resume_size = fetch_resume_size_default
3075 match = _fetch_resume_size_re.match(fetch_resume_size)
3076 if match is None or \
3077 (match.group(2).upper() not in _size_suffix_map):
3078 writemsg("!!! Variable PORTAGE_FETCH_RESUME_MIN_SIZE" + \
3079 " contains an unrecognized format: '%s'\n" % \
3080 mysettings["PORTAGE_FETCH_RESUME_MIN_SIZE"], noiselevel=-1)
3081 writemsg("!!! Using PORTAGE_FETCH_RESUME_MIN_SIZE " + \
3082 "default value: %s\n" % fetch_resume_size_default,
3084 fetch_resume_size = None
3085 if fetch_resume_size is None:
3086 fetch_resume_size = fetch_resume_size_default
3087 match = _fetch_resume_size_re.match(fetch_resume_size)
3088 fetch_resume_size = int(match.group(1)) * \
3089 2 ** _size_suffix_map[match.group(2)]
3091 # Behave like the package has RESTRICT="primaryuri" after a
3092 # couple of checksum failures, to increase the probablility
3093 # of success before checksum_failure_max_tries is reached.
3094 checksum_failure_primaryuri = 2
3095 thirdpartymirrors = mysettings.thirdpartymirrors()
3097 # In the background parallel-fetch process, it's safe to skip checksum
3098 # verification of pre-existing files in $DISTDIR that have the correct
3099 # file size. The parent process will verify their checksums prior to
3102 parallel_fetchonly = fetchonly and \
3103 "PORTAGE_PARALLEL_FETCHONLY" in mysettings
3105 check_config_instance(mysettings)
3107 custommirrors = grabdict(os.path.join(mysettings["PORTAGE_CONFIGROOT"],
3108 CUSTOM_MIRRORS_FILE.lstrip(os.path.sep)), recursive=1)
3112 if listonly or ("distlocks" not in features):
3116 if "skiprocheck" in features:
3119 if not os.access(mysettings["DISTDIR"],os.W_OK) and fetch_to_ro:
3121 writemsg(red("!!! For fetching to a read-only filesystem, " + \
3122 "locking should be turned off.\n"), noiselevel=-1)
3123 writemsg("!!! This can be done by adding -distlocks to " + \
3124 "FEATURES in /etc/make.conf\n", noiselevel=-1)
3127 # local mirrors are always added
3128 if custommirrors.has_key("local"):
3129 mymirrors += custommirrors["local"]
3131 if "nomirror" in restrict or \
3132 "mirror" in restrict:
3133 # We don't add any mirrors.
3137 mymirrors += [x.rstrip("/") for x in mysettings["GENTOO_MIRRORS"].split() if x]
3139 pkgdir = mysettings.get("O")
3140 if pkgdir is not None:
3141 mydigests = Manifest(
3142 pkgdir, mysettings["DISTDIR"]).getTypeDigests("DIST")
3144 # no digests because fetch was not called for a specific package
3148 for x in range(len(mymirrors)-1,-1,-1):
3149 if mymirrors[x] and mymirrors[x][0]=='/':
3150 fsmirrors += [mymirrors[x]]
3153 restrict_fetch = "fetch" in restrict
3154 custom_local_mirrors = custommirrors.get("local", [])
3156 # With fetch restriction, a normal uri may only be fetched from
3157 # custom local mirrors (if available). A mirror:// uri may also
3158 # be fetched from specific mirrors (effectively overriding fetch
3159 # restriction, but only for specific mirrors).
3160 locations = custom_local_mirrors
3162 locations = mymirrors
3165 primaryuri_indexes={}
3166 primaryuri_dict = {}
3167 for myuri in myuris:
3168 myfile=os.path.basename(myuri)
3169 if not filedict.has_key(myfile):
3171 for y in range(0,len(locations)):
3172 filedict[myfile].append(locations[y]+"/distfiles/"+myfile)
3173 if myuri[:9]=="mirror://":
3174 eidx = myuri.find("/", 9)
3176 mirrorname = myuri[9:eidx]
3178 # Try user-defined mirrors first
3179 if custommirrors.has_key(mirrorname):
3180 for cmirr in custommirrors[mirrorname]:
3181 filedict[myfile].append(cmirr+"/"+myuri[eidx+1:])
3182 # remove the mirrors we tried from the list of official mirrors
3183 if cmirr.strip() in thirdpartymirrors[mirrorname]:
3184 thirdpartymirrors[mirrorname].remove(cmirr)
3185 # now try the official mirrors
3186 if thirdpartymirrors.has_key(mirrorname):
3187 shuffle(thirdpartymirrors[mirrorname])
3189 for locmirr in thirdpartymirrors[mirrorname]:
3190 filedict[myfile].append(locmirr+"/"+myuri[eidx+1:])
3192 if not filedict[myfile]:
3193 writemsg("No known mirror by the name: %s\n" % (mirrorname))
3195 writemsg("Invalid mirror definition in SRC_URI:\n", noiselevel=-1)
3196 writemsg(" %s\n" % (myuri), noiselevel=-1)
3199 # Only fetch from specific mirrors is allowed.
3201 if "primaryuri" in restrict:
3202 # Use the source site first.
3203 if primaryuri_indexes.has_key(myfile):
3204 primaryuri_indexes[myfile] += 1
3206 primaryuri_indexes[myfile] = 0
3207 filedict[myfile].insert(primaryuri_indexes[myfile], myuri)
3209 filedict[myfile].append(myuri)
3210 primaryuris = primaryuri_dict.get(myfile)
3211 if primaryuris is None:
3213 primaryuri_dict[myfile] = primaryuris
3214 primaryuris.append(myuri)
3221 for var_name in ("FETCHCOMMAND", "RESUMECOMMAND"):
3222 if not mysettings.get(var_name, None):
3230 if "distlocks" in features:
3231 distdir_dirs.append(".locks")
3234 for x in distdir_dirs:
3235 mydir = os.path.join(mysettings["DISTDIR"], x)
3236 if portage.util.ensure_dirs(mydir, gid=portage_gid, mode=dirmode, mask=modemask):
3237 writemsg("Adjusting permissions recursively: '%s'\n" % mydir,
3240 raise # bail out on the first error that occurs during recursion
3241 if not apply_recursive_permissions(mydir,
3242 gid=portage_gid, dirmode=dirmode, dirmask=modemask,
3243 filemode=filemode, filemask=modemask, onerror=onerror):
3244 raise portage.exception.OperationNotPermitted(
3245 "Failed to apply recursive permissions for the portage group.")
3246 except portage.exception.PortageException, e:
3247 if not os.path.isdir(mysettings["DISTDIR"]):
3248 writemsg("!!! %s\n" % str(e), noiselevel=-1)
3249 writemsg("!!! Directory Not Found: DISTDIR='%s'\n" % mysettings["DISTDIR"], noiselevel=-1)
3250 writemsg("!!! Fetching will fail!\n", noiselevel=-1)
3253 not fetch_to_ro and \
3254 not os.access(mysettings["DISTDIR"], os.W_OK):
3255 writemsg("!!! No write access to '%s'\n" % mysettings["DISTDIR"],
3259 if can_fetch and use_locks and locks_in_subdir:
3260 distlocks_subdir = os.path.join(mysettings["DISTDIR"], locks_in_subdir)
3261 if not os.access(distlocks_subdir, os.W_OK):
3262 writemsg("!!! No write access to write to %s. Aborting.\n" % distlocks_subdir,
3265 del distlocks_subdir
3267 for myfile in filedict:
3271 1 partially downloaded
3272 2 completely downloaded
3274 myfile_path = os.path.join(mysettings["DISTDIR"], myfile)
3279 writemsg_stdout("\n", noiselevel=-1)
3281 # check if there is enough space in DISTDIR to completely store myfile
3282 # overestimate the filesize so we aren't bitten by FS overhead
3283 if hasattr(os, "statvfs"):
3284 vfs_stat = os.statvfs(mysettings["DISTDIR"])
3286 mysize = os.stat(myfile_path).st_size
3288 if e.errno != errno.ENOENT:
3292 if myfile in mydigests \
3293 and (mydigests[myfile]["size"] - mysize + vfs_stat.f_bsize) >= \
3294 (vfs_stat.f_bsize * vfs_stat.f_bavail):
3295 writemsg("!!! Insufficient space to store %s in %s\n" % (myfile, mysettings["DISTDIR"]), noiselevel=-1)
3298 if use_locks and can_fetch:
3300 if not parallel_fetchonly and "parallel-fetch" in features:
3301 waiting_msg = ("Downloading '%s'... " + \
3302 "see /var/log/emerge-fetch.log for details.") % myfile
3304 file_lock = portage.locks.lockfile(
3305 os.path.join(mysettings["DISTDIR"],
3306 locks_in_subdir, myfile), wantnewlockfile=1,
3307 waiting_msg=waiting_msg)
3309 file_lock = portage.locks.lockfile(
3310 myfile_path, wantnewlockfile=1,
3311 waiting_msg=waiting_msg)
3314 if fsmirrors and not os.path.exists(myfile_path) and has_space:
3315 for mydir in fsmirrors:
3316 mirror_file = os.path.join(mydir, myfile)
3318 shutil.copyfile(mirror_file, myfile_path)
3319 writemsg(_("Local mirror has file:" + \
3320 " %(file)s\n" % {"file":myfile}))
3322 except (IOError, OSError), e:
3323 if e.errno != errno.ENOENT:
3328 mystat = os.stat(myfile_path)
3330 if e.errno != errno.ENOENT:
3335 apply_secpass_permissions(
3336 myfile_path, gid=portage_gid, mode=0664, mask=02,
3338 except portage.exception.PortageException, e:
3339 if not os.access(myfile_path, os.R_OK):
3340 writemsg("!!! Failed to adjust permissions:" + \
3341 " %s\n" % str(e), noiselevel=-1)
3343 # If the file is empty then it's obviously invalid. Remove
3344 # the empty file and try to download if possible.
3345 if mystat.st_size == 0:
3348 os.unlink(myfile_path)
3349 except EnvironmentError:
3351 elif myfile not in mydigests:
3352 # We don't have a digest, but the file exists. We must
3353 # assume that it is fully downloaded.
3356 if mystat.st_size < mydigests[myfile]["size"] and \
3358 fetched = 1 # Try to resume this download.
3359 elif parallel_fetchonly and \
3360 mystat.st_size == mydigests[myfile]["size"]:
3361 eout = portage.output.EOutput()
3363 mysettings.get("PORTAGE_QUIET") == "1"
3365 "%s size ;-)" % (myfile, ))
3369 verified_ok, reason = portage.checksum.verify_all(
3370 myfile_path, mydigests[myfile])
3372 writemsg("!!! Previously fetched" + \
3373 " file: '%s'\n" % myfile, noiselevel=-1)
3374 writemsg("!!! Reason: %s\n" % reason[0],
3376 writemsg(("!!! Got: %s\n" + \
3377 "!!! Expected: %s\n") % \
3378 (reason[1], reason[2]), noiselevel=-1)
3379 if reason[0] == "Insufficient data for checksum verification":
3381 if can_fetch and not restrict_fetch:
3383 _checksum_failure_temp_file(
3384 mysettings["DISTDIR"], myfile)
3385 writemsg_stdout("Refetching... " + \
3386 "File renamed to '%s'\n\n" % \
3387 temp_filename, noiselevel=-1)
3389 eout = portage.output.EOutput()
3391 mysettings.get("PORTAGE_QUIET", None) == "1"
3392 digests = mydigests.get(myfile)
3394 digests = digests.keys()
3397 "%s %s ;-)" % (myfile, " ".join(digests)))
3399 continue # fetch any remaining files
3401 # Create a reversed list since that is optimal for list.pop().
3402 uri_list = filedict[myfile][:]
3404 checksum_failure_count = 0
3405 tried_locations = set()
3407 loc = uri_list.pop()
3408 # Eliminate duplicates here in case we've switched to
3409 # "primaryuri" mode on the fly due to a checksum failure.
3410 if loc in tried_locations:
3412 tried_locations.add(loc)
3414 writemsg_stdout(loc+" ", noiselevel=-1)
3416 # allow different fetchcommands per protocol
3417 protocol = loc[0:loc.find("://")]
3418 if mysettings.has_key("FETCHCOMMAND_"+protocol.upper()):
3419 fetchcommand=mysettings["FETCHCOMMAND_"+protocol.upper()]
3421 fetchcommand=mysettings["FETCHCOMMAND"]
3422 if mysettings.has_key("RESUMECOMMAND_"+protocol.upper()):
3423 resumecommand=mysettings["RESUMECOMMAND_"+protocol.upper()]
3425 resumecommand=mysettings["RESUMECOMMAND"]
3430 writemsg("!!! File %s isn't fetched but unable to get it.\n" % myfile,
3433 writemsg("!!! File %s isn't fully fetched, but unable to complete it\n" % myfile,
3435 for var_name in ("FETCHCOMMAND", "RESUMECOMMAND"):
3436 if not mysettings.get(var_name, None):
3437 writemsg(("!!! %s is unset. It should " + \
3438 "have been defined in /etc/make.globals.\n") \
3439 % var_name, noiselevel=-1)
3444 if fetched != 2 and has_space:
3445 #we either need to resume or start the download
3448 mystat = os.stat(myfile_path)
3450 if e.errno != errno.ENOENT:
3455 if mystat.st_size < fetch_resume_size:
3456 writemsg((">>> Deleting distfile with size " + \
3457 "%d (smaller than " "PORTAGE_FETCH_RESU" + \
3458 "ME_MIN_SIZE)\n") % mystat.st_size)
3460 os.unlink(myfile_path)
3462 if e.errno != errno.ENOENT:
3468 writemsg(">>> Resuming download...\n")
3469 locfetch=resumecommand
3472 locfetch=fetchcommand
3473 writemsg_stdout(">>> Downloading '%s'\n" % \
3474 re.sub(r'//(.+):.+@(.+)/',r'//\1:*password*@\2/', loc))
3476 "DISTDIR": mysettings["DISTDIR"],
3480 import shlex, StringIO
3481 lexer = shlex.shlex(StringIO.StringIO(locfetch), posix=True)
3482 lexer.whitespace_split = True
3483 myfetch = [varexpand(x, mydict=variables) for x in lexer]
3486 # Redirect all output to stdout since some fetchers like
3487 # wget pollute stderr (if portage detects a problem then it
3488 # can send it's own message to stderr).
3489 spawn_keywords["fd_pipes"] = {
3490 0:sys.stdin.fileno(),
3491 1:sys.stdout.fileno(),
3492 2:sys.stdout.fileno()
3494 if "userfetch" in mysettings.features and \
3495 os.getuid() == 0 and portage_gid and portage_uid:
3496 spawn_keywords.update({
3497 "uid" : portage_uid,
3498 "gid" : portage_gid,
3499 "groups" : userpriv_groups,
3504 if mysettings.selinux_enabled():
3505 con = selinux.getcontext()
3506 con = con.replace(mysettings["PORTAGE_T"], mysettings["PORTAGE_FETCH_T"])
3507 selinux.setexec(con)
3508 # bash is an allowed entrypoint, while most binaries are not
3509 myfetch = ["bash", "-c", "exec \"$@\"", myfetch[0]] + myfetch
3511 myret = portage.process.spawn(myfetch,
3512 env=mysettings.environ(), **spawn_keywords)
3514 if mysettings.selinux_enabled():
3515 selinux.setexec(None)
3519 apply_secpass_permissions(myfile_path,
3520 gid=portage_gid, mode=0664, mask=02)
3521 except portage.exception.FileNotFound, e:
3523 except portage.exception.PortageException, e:
3524 if not os.access(myfile_path, os.R_OK):
3525 writemsg("!!! Failed to adjust permissions:" + \
3526 " %s\n" % str(e), noiselevel=-1)
3528 # If the file is empty then it's obviously invalid. Don't
3529 # trust the return value from the fetcher. Remove the
3530 # empty file and try to download again.
3532 if os.stat(myfile_path).st_size == 0:
3533 os.unlink(myfile_path)
3536 except EnvironmentError:
3539 if mydigests!=None and mydigests.has_key(myfile):
3541 mystat = os.stat(myfile_path)
3543 if e.errno != errno.ENOENT:
3548 # no exception? file exists. let digestcheck() report
3549 # an appropriately for size or checksum errors
3551 # If the fetcher reported success and the file is
3552 # too small, it's probably because the digest is
3553 # bad (upstream changed the distfile). In this
3554 # case we don't want to attempt to resume. Show a
3555 # digest verification failure to that the user gets
3556 # a clue about what just happened.
3557 if myret != os.EX_OK and \
3558 mystat.st_size < mydigests[myfile]["size"]:
3559 # Fetch failed... Try the next one... Kill 404 files though.
3560 if (mystat[stat.ST_SIZE]<100000) and (len(myfile)>4) and not ((myfile[-5:]==".html") or (myfile[-4:]==".htm")):
3561 html404=re.compile("<title>.*(not found|404).*</title>",re.I|re.M)
3562 if html404.search(open(mysettings["DISTDIR"]+"/"+myfile).read()):
3564 os.unlink(mysettings["DISTDIR"]+"/"+myfile)
3565 writemsg(">>> Deleting invalid distfile. (Improper 404 redirect from server.)\n")
3568 except (IOError, OSError):
3573 # File is the correct size--check the checksums for the fetched
3574 # file NOW, for those users who don't have a stable/continuous
3575 # net connection. This way we have a chance to try to download
3576 # from another mirror...
3577 verified_ok,reason = portage.checksum.verify_all(mysettings["DISTDIR"]+"/"+myfile, mydigests[myfile])
3580 writemsg("!!! Fetched file: "+str(myfile)+" VERIFY FAILED!\n",
3582 writemsg("!!! Reason: "+reason[0]+"\n",
3584 writemsg("!!! Got: %s\n!!! Expected: %s\n" % \
3585 (reason[1], reason[2]), noiselevel=-1)
3586 if reason[0] == "Insufficient data for checksum verification":
3589 _checksum_failure_temp_file(
3590 mysettings["DISTDIR"], myfile)
3591 writemsg_stdout("Refetching... " + \
3592 "File renamed to '%s'\n\n" % \
3593 temp_filename, noiselevel=-1)
3595 checksum_failure_count += 1
3596 if checksum_failure_count == \
3597 checksum_failure_primaryuri:
3598 # Switch to "primaryuri" mode in order
3599 # to increase the probablility of
3602 primaryuri_dict.get(myfile)
3605 reversed(primaryuris))
3606 if checksum_failure_count >= \
3607 checksum_failure_max_tries:
3610 eout = portage.output.EOutput()
3611 eout.quiet = mysettings.get("PORTAGE_QUIET", None) == "1"
3612 digests = mydigests.get(myfile)
3614 eout.ebegin("%s %s ;-)" % \
3615 (myfile, " ".join(sorted(digests))))
3623 elif mydigests!=None:
3624 writemsg("No digest file available and download failed.\n\n",
3627 if use_locks and file_lock:
3628 portage.locks.unlockfile(file_lock)
3631 writemsg_stdout("\n", noiselevel=-1)
3634 print "\n!!!", mysettings["CATEGORY"] + "/" + \
3635 mysettings["PF"], "has fetch restriction turned on."
3636 print "!!! This probably means that this " + \
3637 "ebuild's files must be downloaded"
3638 print "!!! manually. See the comments in" + \
3639 " the ebuild for more information.\n"
3640 mysettings["EBUILD_PHASE"] = "unpack"
3641 spawn(EBUILD_SH_BINARY + " nofetch", mysettings)
3644 elif not filedict[myfile]:
3645 writemsg("Warning: No mirrors available for file" + \
3646 " '%s'\n" % (myfile), noiselevel=-1)
3648 writemsg("!!! Couldn't download '%s'. Aborting.\n" % myfile,
# digestgen(): legacy compatibility wrapper around portage.manifest.Manifest
# that (re)generates Manifest entries for one package directory.
#   myarchives   - distfiles that must be listed in the Manifest
#   mysettings   - portage config; "O" is the package dir, "DISTDIR" the
#                  distfiles dir
#   overwrite, manifestonly - ignored with manifest2 (see docstring below)
#   myportdb     - portdbapi used to build fetch lists; only a warning is
#                  printed when it is missing
# NOTE(review): this dump elides source lines (original line numbers are
# non-contiguous), so control flow between visible statements is partial.
3653 def digestgen(myarchives, mysettings, overwrite=1, manifestonly=0, myportdb=None):
3655 	Generates a digest file if missing. Assumes all files are available.
3656 	DEPRECATED: this now only is a compability wrapper for
3657 	portage.manifest.Manifest()
3658 	NOTE: manifestonly and overwrite are useless with manifest2 and
3659 	are therefore ignored."""
3660 	if myportdb is None:
3661 		writemsg("Warning: myportdb not specified to digestgen\n")
# Suppress recursive manifest checking while doebuild() is invoked below
# (decremented again at the end of this function).
3664 	global _doebuild_manifest_exempt_depend
3666 		_doebuild_manifest_exempt_depend += 1
# Map each distfile name to the list of cpvs that reference it.
3668 		fetchlist_dict = FetchlistDict(mysettings["O"], mysettings, myportdb)
3669 		for cpv in fetchlist_dict:
3671 				for myfile in fetchlist_dict[cpv]:
3672 					distfiles_map.setdefault(myfile, []).append(cpv)
3673 			except portage.exception.InvalidDependString, e:
3674 				writemsg("!!! %s\n" % str(e), noiselevel=-1)
3677 		mytree = os.path.dirname(os.path.dirname(mysettings["O"]))
3678 		manifest1_compat = False
3679 		mf = Manifest(mysettings["O"], mysettings["DISTDIR"],
3680 			fetchlist_dict=fetchlist_dict, manifest1_compat=manifest1_compat)
3681 		# Don't require all hashes since that can trigger excessive
3682 		# fetches when sufficient digests already exist. To ease transition
3683 		# while Manifest 1 is being removed, only require hashes that will
3684 		# exist before and after the transition.
3685 		required_hash_types = set()
3686 		required_hash_types.add("size")
3687 		required_hash_types.add(portage.const.MANIFEST2_REQUIRED_HASH)
3688 		dist_hashes = mf.fhashdict.get("DIST", {})
# Collect distfiles whose existing DIST entries are absent, lack a
# required hash type, or record a zero size.
3689 		missing_hashes = set()
3690 		for myfile in distfiles_map:
3691 			myhashes = dist_hashes.get(myfile)
3693 				missing_hashes.add(myfile)
3695 			if required_hash_types.difference(myhashes):
3696 				missing_hashes.add(myfile)
3698 			if myhashes["size"] == 0:
3699 				missing_hashes.add(myfile)
# For each distfile missing hashes, check whether the file exists (and is
# non-empty) in DISTDIR; otherwise it must be fetched below.
3702 		for myfile in missing_hashes:
3704 				st = os.stat(os.path.join(mysettings["DISTDIR"], myfile))
3706 				if e.errno != errno.ENOENT:
3709 				missing_files.append(myfile)
3711 				# If the file is empty then it's obviously invalid.
3713 					missing_files.append(myfile)
3715 			mytree = os.path.realpath(os.path.dirname(
3716 				os.path.dirname(mysettings["O"])))
# Use a cloned config so doebuild_environment() below does not mutate
# the caller's mysettings.
3717 			fetch_settings = config(clone=mysettings)
3718 			debug = mysettings.get("PORTAGE_DEBUG") == "1"
3719 			for myfile in missing_files:
3721 				for cpv in distfiles_map[myfile]:
3722 					myebuild = os.path.join(mysettings["O"],
3723 						catsplit(cpv)[1] + ".ebuild")
3724 					# for RESTRICT=fetch, mirror, etc...
3725 					doebuild_environment(myebuild, "fetch",
3726 						mysettings["ROOT"], fetch_settings,
3728 					alluris, aalist = myportdb.getfetchlist(
3729 						cpv, mytree=mytree, all=True,
3730 						mysettings=fetch_settings)
3731 					myuris = [uri for uri in alluris \
3732 						if os.path.basename(uri) == myfile]
3733 					fetch_settings["A"] = myfile # for use by pkg_nofetch()
3734 					if fetch(myuris, fetch_settings):
3738 					writemsg(("!!! File %s doesn't exist, can't update " + \
3739 						"Manifest\n") % myfile, noiselevel=-1)
3741 		writemsg_stdout(">>> Creating Manifest for %s\n" % mysettings["O"])
3743 			mf.create(requiredDistfiles=myarchives,
3744 				assumeDistHashesSometimes=True,
3745 				assumeDistHashesAlways=(
3746 					"assume-digests" in mysettings.features))
3747 		except portage.exception.FileNotFound, e:
3748 			writemsg(("!!! File %s doesn't exist, can't update " + \
3749 				"Manifest\n") % e, noiselevel=-1)
3751 		mf.write(sign=False)
# Report which DIST entries were written without the file being present
# locally (only meaningful when assume-digests is not enabled).
3752 		if "assume-digests" not in mysettings.features:
3753 			distlist = mf.fhashdict.get("DIST", {}).keys()
3756 			for filename in distlist:
3757 				if not os.path.exists(
3758 					os.path.join(mysettings["DISTDIR"], filename)):
3759 					auto_assumed.append(filename)
3761 				mytree = os.path.realpath(
3762 					os.path.dirname(os.path.dirname(mysettings["O"])))
3763 				cp = os.path.sep.join(mysettings["O"].split(os.path.sep)[-2:])
3764 				pkgs = myportdb.cp_list(cp, mytree=mytree)
3766 				writemsg_stdout(" digest.assumed" + portage.output.colorize("WARN",
3767 					str(len(auto_assumed)).rjust(18)) + "\n")
3768 				for pkg_key in pkgs:
3769 					fetchlist = myportdb.getfetchlist(pkg_key,
3770 						mysettings=mysettings, all=True, mytree=mytree)[1]
3771 					pv = pkg_key.split("/")[1]
3772 					for filename in auto_assumed:
3773 						if filename in fetchlist:
3775 							" %s::%s\n" % (pv, filename))
# Presumably inside a finally: restore the manifest-exemption counter
# incremented at the top -- elided lines obscure the exact structure.
3778 		_doebuild_manifest_exempt_depend -= 1
# digestParseFile(): legacy wrapper that maps an old digest/Manifest file
# path to its package directory and returns Manifest().getDigests().
#   myfilename - path to either files/digest-* or a Manifest file
#   mysettings - optional config; cloned from the global settings when None
# NOTE(review): the else-branch for unrecognized paths (original lines
# 3794/3796/3798) is elided from this dump -- confirm upstream behavior.
3780 def digestParseFile(myfilename, mysettings=None):
3781 """(filename) -- Parses a given file for entries matching:
3782 <checksumkey> <checksum_hex_string> <filename> <filesize>
3783 Ignores lines that don't start with a valid checksum identifier
3784 and returns a dict with the filenames as keys and {checksumkey:checksum}
3786 DEPRECATED: this function is now only a compability wrapper for
3787 portage.manifest.Manifest()."""
# Derive the package dir: files/digest-* lies two levels below it, a
# Manifest file one level below it.
3789 mysplit = myfilename.split(os.sep)
3790 if mysplit[-2] == "files" and mysplit[-1].startswith("digest-"):
3791 pkgdir = os.sep + os.sep.join(mysplit[:-2]).strip(os.sep)
3792 elif mysplit[-1] == "Manifest":
3793 pkgdir = os.sep + os.sep.join(mysplit[:-1]).strip(os.sep)
3795 if mysettings is None:
3797 mysettings = config(clone=settings)
3799 return Manifest(pkgdir, mysettings["DISTDIR"]).getDigests()
# digestcheck(): legacy wrapper that verifies a package dir's Manifest.
#   myfiles    - files to check individually (iterated at 3827 below;
#                the loop header itself is elided from this dump)
#   mysettings - portage config; "O" is the package dir
#   strict, justmanifest - visible only in the signature here; their use
#                is in elided lines -- confirm against upstream
# Return values are in elided lines; visible writemsg() calls report the
# individual failure modes.
3801 def digestcheck(myfiles, mysettings, strict=0, justmanifest=0):
3802 """Verifies checksums. Assumes all files have been downloaded.
3803 DEPRECATED: this is now only a compability wrapper for
3804 portage.manifest.Manifest()."""
3805 pkgdir = mysettings["O"]
3806 manifest_path = os.path.join(pkgdir, "Manifest")
3807 if not os.path.exists(manifest_path):
3808 writemsg("!!! Manifest file not found: '%s'\n" % manifest_path,
3812 mf = Manifest(pkgdir, mysettings["DISTDIR"])
3813 eout = portage.output.EOutput()
3814 eout.quiet = mysettings.get("PORTAGE_QUIET", None) == "1"
# Check each manifest entry type in turn; MISC entries tolerate missing
# files (e.g. removed metadata.xml/ChangeLog).
3817 eout.ebegin("checking ebuild checksums ;-)")
3818 mf.checkTypeHashes("EBUILD")
3820 eout.ebegin("checking auxfile checksums ;-)")
3821 mf.checkTypeHashes("AUX")
3823 eout.ebegin("checking miscfile checksums ;-)")
3824 mf.checkTypeHashes("MISC", ignoreMissingFiles=True)
3827 eout.ebegin("checking %s ;-)" % f)
3828 mf.checkFileHashes(mf.findFile(f), f)
# Exception handlers: translate the Manifest exceptions into user-facing
# error messages (the except-clause headers for the first case are in
# elided lines).
3832 writemsg("\n!!! Missing digest for %s\n" % str(e), noiselevel=-1)
3834 except portage.exception.FileNotFound, e:
3836 writemsg("\n!!! A file listed in the Manifest could not be found: %s\n" % str(e),
3839 except portage.exception.DigestException, e:
3841 writemsg("\n!!! Digest verification failed:\n", noiselevel=-1)
3842 writemsg("!!! %s\n" % e.value[0], noiselevel=-1)
3843 writemsg("!!! Reason: %s\n" % e.value[1], noiselevel=-1)
3844 writemsg("!!! Got: %s\n" % e.value[2], noiselevel=-1)
3845 writemsg("!!! Expected: %s\n" % e.value[3], noiselevel=-1)
3847 # Make sure that all of the ebuilds are actually listed in the Manifest.
3848 for f in os.listdir(pkgdir):
3849 if f.endswith(".ebuild") and not mf.hasFile("EBUILD", f):
3850 writemsg("!!! A file is not listed in the Manifest: '%s'\n" % \
3851 os.path.join(pkgdir, f), noiselevel=-1)
3854 """ epatch will just grab all the patches out of a directory, so we have to
3855 make sure there aren't any foreign files that it might grab."""
# Walk files/ and flag anything not recorded as an AUX entry; hidden
# entries and CVS dirs are skipped (skip logic partially elided).
3856 filesdir = os.path.join(pkgdir, "files")
3857 for parent, dirs, files in os.walk(filesdir):
3859 if d.startswith(".") or d == "CVS":
3862 if f.startswith("."):
3864 f = os.path.join(parent, f)[len(filesdir) + 1:]
3865 file_type = mf.findFile(f)
3866 if file_type != "AUX" and not f.startswith("digest-"):
3867 writemsg("!!! A file is not listed in the Manifest: '%s'\n" % \
3868 os.path.join(filesdir, f), noiselevel=-1)
3873 # parse actionmap to spawn ebuild with the appropriate args
# spawnebuild(): run the ebuild phase `mydo` described by `actionmap`,
# recursing first into its "dep" phase unless FEATURES=noauto disables
# that (and alwaysdep is false). Phase command and spawn kwargs come from
# actionmap[mydo]["cmd"] / ["args"].
# NOTE(review): several lines are elided from this dump (e.g. the early
# return for a failed dep phase and the try/finally headers around the
# spawn calls) -- the visible statements show only part of the flow.
3874 def spawnebuild(mydo,actionmap,mysettings,debug,alwaysdep=0,logfile=None):
3875 if alwaysdep or "noauto" not in mysettings.features:
3876 # process dependency first
3877 if "dep" in actionmap[mydo]:
3878 retval=spawnebuild(actionmap[mydo]["dep"],actionmap,mysettings,debug,alwaysdep=alwaysdep,logfile=logfile)
3881 kwargs = actionmap[mydo]["args"]
3882 mysettings["EBUILD_PHASE"] = mydo
# Remove any stale exit-status marker so _doebuild_exit_status_check()
# below reflects only this phase's run.
3883 _doebuild_exit_status_unlink(
3884 mysettings.get("EBUILD_EXIT_STATUS_FILE"))
# Filter the calling environment when a saved build environment exists;
# restored to its prior value after the spawn (line 3893).
3885 filter_calling_env_state = mysettings._filter_calling_env
3886 if os.path.exists(os.path.join(mysettings["T"], "environment")):
3887 mysettings._filter_calling_env = True
3889 phase_retval = spawn(actionmap[mydo]["cmd"] % mydo,
3890 mysettings, debug=debug, logfile=logfile, **kwargs)
3892 mysettings["EBUILD_PHASE"] = ""
3893 mysettings._filter_calling_env = filter_calling_env_state
3894 msg = _doebuild_exit_status_check(mydo, mysettings)
# Presumably guarded by `if msg:` (elided) -- report abnormal shell exit
# through elog.
3897 from textwrap import wrap
3898 from portage.elog.messages import eerror
3899 for l in wrap(msg, 72):
3900 eerror(l, phase=mydo, key=mysettings.mycpv)
3902 if "userpriv" in mysettings.features and \
3903 not kwargs["droppriv"] and secpass >= 2:
3904 """ Privileged phases may have left files that need to be made
3905 writable to a less privileged user."""
3906 apply_recursive_permissions(mysettings["T"],
3907 uid=portage_uid, gid=portage_gid, dirmode=070, dirmask=0,
3908 filemode=060, filemask=0)
3910 if phase_retval == os.EX_OK:
3911 if mydo == "install":
3912 # User and group bits that match the "portage" user or group are
3913 # automatically mapped to PORTAGE_INST_UID and PORTAGE_INST_GID if
3914 # necessary. The chown system call may clear S_ISUID and S_ISGID
3915 # bits, so those bits are restored if necessary.
3916 inst_uid = int(mysettings["PORTAGE_INST_UID"])
3917 inst_gid = int(mysettings["PORTAGE_INST_GID"])
3918 for parent, dirs, files in os.walk(mysettings["D"]):
3919 for fname in chain(dirs, files):
3920 fpath = os.path.join(parent, fname)
3921 mystat = os.lstat(fpath)
3922 if mystat.st_uid != portage_uid and \
3923 mystat.st_gid != portage_gid:
3927 if mystat.st_uid == portage_uid:
3929 if mystat.st_gid == portage_gid:
3931 apply_secpass_permissions(fpath, uid=myuid, gid=mygid,
3932 mode=mystat.st_mode, stat_cached=mystat,
3934 # Note: PORTAGE_BIN_PATH may differ from the global
3935 # constant when portage is reinstalling itself.
3936 portage_bin_path = mysettings["PORTAGE_BIN_PATH"]
# After a successful install phase, run the QA checks implemented in
# misc-functions.sh via a second spawn.
3937 misc_sh_binary = os.path.join(portage_bin_path,
3938 os.path.basename(MISC_SH_BINARY))
3939 mycommand = " ".join([_shell_quote(misc_sh_binary),
3940 "install_qa_check", "install_symlink_html_docs"])
3941 _doebuild_exit_status_unlink(
3942 mysettings.get("EBUILD_EXIT_STATUS_FILE"))
3943 filter_calling_env_state = mysettings._filter_calling_env
3944 if os.path.exists(os.path.join(mysettings["T"], "environment")):
3945 mysettings._filter_calling_env = True
3947 qa_retval = spawn(mycommand, mysettings, debug=debug,
3948 logfile=logfile, **kwargs)
3950 mysettings._filter_calling_env = filter_calling_env_state
3951 msg = _doebuild_exit_status_check(mydo, mysettings)
3954 from textwrap import wrap
3955 from portage.elog.messages import eerror
3956 for l in wrap(msg, 72):
3957 eerror(l, phase=mydo, key=mysettings.mycpv)
3958 if qa_retval != os.EX_OK:
3959 writemsg("!!! install_qa_check failed; exiting.\n",
# eapi_is_supported(): True when the EAPI value, coerced via
# int(str(eapi).strip()), is at most portage.const.EAPI.
# NOTE(review): original lines 3966/3968-3971 are elided from this dump;
# presumably the int() conversion is wrapped in a try/except that returns
# False for unparsable values -- confirm against upstream before editing.
3965 def eapi_is_supported(eapi):
3967 	eapi = int(str(eapi).strip())
3972 	return eapi <= portage.const.EAPI
# doebuild_environment(): populate `mysettings` with all the variables an
# ebuild phase needs (P/PN/PV/PR/PVR, directory layout, repo name, KV,
# colormap, ...), derived from the ebuild path and phase `mydo`.
#   myebuild  - path to the .ebuild file
#   mydo      - phase name ("depend" gets a lighter setup)
#   myroot    - $ROOT, stored into mysettings["ROOT"]
#   mysettings- portage config, mutated in place
#   debug/use_cache/mydbapi - see inline uses below
# NOTE(review): some lines are elided from this dump (try headers, else
# branches), so the visible statements show only part of the flow.
3974 def doebuild_environment(myebuild, mydo, myroot, mysettings, debug, use_cache, mydbapi):
3976 ebuild_path = os.path.abspath(myebuild)
3977 pkg_dir = os.path.dirname(ebuild_path)
# Category: prefer the one recorded in the pkg config dict, else infer it
# from the directory layout (../<cat>/<pkg>/<pkg-ver>.ebuild).
3979 if mysettings.configdict["pkg"].has_key("CATEGORY"):
3980 cat = mysettings.configdict["pkg"]["CATEGORY"]
3982 cat = os.path.basename(normalize_path(os.path.join(pkg_dir, "..")))
3983 mypv = os.path.basename(ebuild_path)[:-7]
3984 mycpv = cat+"/"+mypv
3985 mysplit=pkgsplit(mypv,silent=0)
3987 raise portage.exception.IncorrectParameter(
3988 "Invalid ebuild path: '%s'" % myebuild)
3990 # Make a backup of PORTAGE_TMPDIR prior to calling config.reset()
3991 # so that the caller can override it.
3992 tmpdir = mysettings["PORTAGE_TMPDIR"]
3994 # This variable is a signal to setcpv where it triggers
3995 # filtering of USE for the ebuild environment.
3996 mysettings["EBUILD_PHASE"] = mydo
3997 mysettings.backup_changes("EBUILD_PHASE")
3999 if mydo != "depend":
4000 """For performance reasons, setcpv only triggers reset when it
4001 detects a package-specific change in config. For the ebuild
4002 environment, a reset call is forced in order to ensure that the
4003 latest env.d variables are used."""
4005 mysettings.reset(use_cache=use_cache)
4006 mysettings.setcpv(mycpv, use_cache=use_cache, mydb=mydbapi)
4008 # config.reset() might have reverted a change made by the caller,
4009 # so restore it to it's original value.
4010 mysettings["PORTAGE_TMPDIR"] = tmpdir
4012 mysettings.pop("EBUILD_PHASE", None) # remove from backupenv
4013 mysettings["EBUILD_PHASE"] = mydo
4015 mysettings["PORTAGE_MASTER_PID"] = str(os.getpid())
4017 # We are disabling user-specific bashrc files.
4018 mysettings["BASH_ENV"] = INVALID_ENV_FILE
4020 if debug: # Otherwise it overrides emerge's settings.
4021 # We have no other way to set debug... debug can't be passed in
4022 # due to how it's coded... Don't overwrite this so we can use it.
4023 mysettings["PORTAGE_DEBUG"] = "1"
4025 mysettings["ROOT"] = myroot
4026 mysettings["STARTDIR"] = getcwd()
# Resolve the originating repository name by matching the repo path two
# levels above the package dir against the dbapi's known repositories.
4028 mysettings["PORTAGE_REPO_NAME"] = ""
4029 # bindbapi has no getRepositories() method
4030 if mydbapi and hasattr(mydbapi, "getRepositories"):
4031 # do we have a origin repository name for the current package
4032 repopath = os.sep.join(pkg_dir.split(os.path.sep)[:-2])
4033 for reponame in mydbapi.getRepositories():
4034 if mydbapi.getRepositoryPath(reponame) == repopath:
4035 mysettings["PORTAGE_REPO_NAME"] = reponame
4038 mysettings["EBUILD"] = ebuild_path
4039 mysettings["O"] = pkg_dir
4040 mysettings.configdict["pkg"]["CATEGORY"] = cat
4041 mysettings["FILESDIR"] = pkg_dir+"/files"
4042 mysettings["PF"] = mypv
4044 mysettings["PORTDIR"] = os.path.realpath(mysettings["PORTDIR"])
4045 mysettings["DISTDIR"] = os.path.realpath(mysettings["DISTDIR"])
4046 mysettings["RPMDIR"] = os.path.realpath(mysettings["RPMDIR"])
4048 mysettings["ECLASSDIR"] = mysettings["PORTDIR"]+"/eclass"
4049 mysettings["SANDBOX_LOG"] = mycpv.replace("/", "_-_")
4051 mysettings["PROFILE_PATHS"] = "\n".join(mysettings.profiles)
# Standard ebuild version variables from pkgsplit(): [name, ver, rev].
4052 mysettings["P"] = mysplit[0]+"-"+mysplit[1]
4053 mysettings["PN"] = mysplit[0]
4054 mysettings["PV"] = mysplit[1]
4055 mysettings["PR"] = mysplit[2]
4057 if portage.util.noiselimit < 0:
4058 mysettings["PORTAGE_QUIET"] = "1"
4060 if mydo != "depend":
4061 eapi, mysettings["INHERITED"], mysettings["SLOT"], mysettings["RESTRICT"] = \
4062 mydbapi.aux_get(mycpv, ["EAPI", "INHERITED", "SLOT", "RESTRICT"])
4063 if not eapi_is_supported(eapi):
4064 # can't do anything with this.
4065 raise portage.exception.UnsupportedAPIException(mycpv, eapi)
4067 mysettings["PORTAGE_RESTRICT"] = " ".join(flatten(
4068 portage.dep.use_reduce(portage.dep.paren_reduce(
4069 mysettings.get("RESTRICT","")),
4070 uselist=mysettings.get("USE","").split())))
4071 except portage.exception.InvalidDependString:
4072 # RESTRICT is validated again inside doebuild, so let this go
4073 mysettings["PORTAGE_RESTRICT"] = ""
# PVR: version without the "-r0" suffix for revision zero.
4075 if mysplit[2] == "r0":
4076 mysettings["PVR"]=mysplit[1]
4078 mysettings["PVR"]=mysplit[1]+"-"+mysplit[2]
# Ensure portage's own bin dir is on PATH for the spawned phase.
4080 if mysettings.has_key("PATH"):
4081 mysplit=mysettings["PATH"].split(":")
4084 # Note: PORTAGE_BIN_PATH may differ from the global constant
4085 # when portage is reinstalling itself.
4086 portage_bin_path = mysettings["PORTAGE_BIN_PATH"]
4087 if portage_bin_path not in mysplit:
4088 mysettings["PATH"] = portage_bin_path + ":" + mysettings["PATH"]
4090 # Sandbox needs cannonical paths.
4091 mysettings["PORTAGE_TMPDIR"] = os.path.realpath(
4092 mysettings["PORTAGE_TMPDIR"])
4093 mysettings["BUILD_PREFIX"] = mysettings["PORTAGE_TMPDIR"]+"/portage"
4094 mysettings["PKG_TMPDIR"] = mysettings["PORTAGE_TMPDIR"]+"/binpkgs"
4096 # Package {pre,post}inst and {pre,post}rm may overlap, so they must have separate
4097 # locations in order to prevent interference.
4098 if mydo in ("unmerge", "prerm", "postrm", "cleanrm"):
4099 mysettings["PORTAGE_BUILDDIR"] = os.path.join(
4100 mysettings["PKG_TMPDIR"],
4101 mysettings["CATEGORY"], mysettings["PF"])
4103 mysettings["PORTAGE_BUILDDIR"] = os.path.join(
4104 mysettings["BUILD_PREFIX"],
4105 mysettings["CATEGORY"], mysettings["PF"])
4107 mysettings["HOME"] = os.path.join(mysettings["PORTAGE_BUILDDIR"], "homedir")
4108 mysettings["WORKDIR"] = os.path.join(mysettings["PORTAGE_BUILDDIR"], "work")
4109 mysettings["D"] = os.path.join(mysettings["PORTAGE_BUILDDIR"], "image") + os.sep
4110 mysettings["T"] = os.path.join(mysettings["PORTAGE_BUILDDIR"], "temp")
4112 mysettings["PORTAGE_BASHRC"] = os.path.join(
4113 mysettings["PORTAGE_CONFIGROOT"], EBUILD_SH_ENV_FILE.lstrip(os.path.sep))
4114 mysettings["EBUILD_EXIT_STATUS_FILE"] = os.path.join(
4115 mysettings["PORTAGE_BUILDDIR"], ".exit_status")
4117 #set up KV variable -- DEP SPEEDUP :: Don't waste time. Keep var persistent.
4118 if mydo != "depend" and "KV" not in mysettings:
4119 mykv,err1=ExtractKernelVersion(os.path.join(myroot, "usr/src/linux"))
4121 # Regular source tree
4122 mysettings["KV"]=mykv
4125 mysettings.backup_changes("KV")
4127 # Allow color.map to control colors associated with einfo, ewarn, etc...
4129 for c in ("GOOD", "WARN", "BAD", "HILITE", "BRACKET"):
4130 mycolors.append("%s=$'%s'" % (c, portage.output.codes[c]))
4131 mysettings["PORTAGE_COLORMAP"] = "\n".join(mycolors)
# prepare_build_dirs(): create and fix permissions on the temporary
# build-directory tree (PORTAGE_BUILDDIR, HOME, T, WORKDIR, ...), set up
# FEATURES-driven cache dirs (ccache/confcache/distcc) and configure
# build logging (PORT_LOGDIR / PORTAGE_LOG_FILE).
#   cleanup - when true, old state dirs (HOME, and T per the comment
#             below) are removed first
# NOTE(review): this dump elides lines (try/except headers, returns), so
# the visible statements show only part of the control flow.
4133 def prepare_build_dirs(myroot, mysettings, cleanup):
4135 clean_dirs = [mysettings["HOME"]]
4137 # We enable cleanup when we want to make sure old cruft (such as the old
4138 # environment) doesn't interfere with the current phase.
4140 clean_dirs.append(mysettings["T"])
# Remove stale dirs; ENOENT is ignored, EPERM is reported (handler
# headers around rmtree are partially elided).
4142 for clean_dir in clean_dirs:
4144 shutil.rmtree(clean_dir)
4146 if errno.ENOENT == oe.errno:
4148 elif errno.EPERM == oe.errno:
4149 writemsg("%s\n" % oe, noiselevel=-1)
4150 writemsg("Operation Not Permitted: rmtree('%s')\n" % \
4151 clean_dir, noiselevel=-1)
# Local helper: makedirs that tolerates EEXIST and reports EPERM.
4156 def makedirs(dir_path):
4158 os.makedirs(dir_path)
4160 if errno.EEXIST == oe.errno:
4162 elif errno.EPERM == oe.errno:
4163 writemsg("%s\n" % oe, noiselevel=-1)
4164 writemsg("Operation Not Permitted: makedirs('%s')\n" % \
4165 dir_path, noiselevel=-1)
4171 mysettings["PKG_LOGDIR"] = os.path.join(mysettings["T"], "logging")
# Parent dirs of PORTAGE_BUILDDIR get portage-group ownership so the
# unprivileged build user can traverse into them.
4173 mydirs = [os.path.dirname(mysettings["PORTAGE_BUILDDIR"])]
4174 mydirs.append(os.path.dirname(mydirs[-1]))
4177 for mydir in mydirs:
4178 portage.util.ensure_dirs(mydir)
4179 portage.util.apply_secpass_permissions(mydir,
4180 gid=portage_gid, uid=portage_uid, mode=070, mask=0)
4181 for dir_key in ("PORTAGE_BUILDDIR", "HOME", "PKG_LOGDIR", "T"):
4182 """These directories don't necessarily need to be group writable.
4183 However, the setup phase is commonly run as a privileged user prior
4184 to the other phases being run by an unprivileged user. Currently,
4185 we use the portage group to ensure that the unprivleged user still
4186 has write access to these directories in any case."""
4187 portage.util.ensure_dirs(mysettings[dir_key], mode=0775)
4188 portage.util.apply_secpass_permissions(mysettings[dir_key],
4189 uid=portage_uid, gid=portage_gid)
4190 except portage.exception.PermissionDenied, e:
4191 writemsg("Permission Denied: %s\n" % str(e), noiselevel=-1)
4193 except portage.exception.OperationNotPermitted, e:
4194 writemsg("Operation Not Permitted: %s\n" % str(e), noiselevel=-1)
4196 except portage.exception.FileNotFound, e:
4197 writemsg("File Not Found: '%s'\n" % str(e), noiselevel=-1)
# Per-FEATURE cache directory descriptions (the dict literal's opening
# lines and feature keys are elided from this dump).
4202 "basedir_var":"CCACHE_DIR",
4203 "default_dir":os.path.join(mysettings["PORTAGE_TMPDIR"], "ccache"),
4204 "always_recurse":False},
4206 "basedir_var":"CONFCACHE_DIR",
4207 "default_dir":os.path.join(mysettings["PORTAGE_TMPDIR"], "confcache"),
4208 "always_recurse":False},
4210 "basedir_var":"DISTCC_DIR",
4211 "default_dir":os.path.join(mysettings["BUILD_PREFIX"], ".distcc"),
4212 "subdirs":("lock", "state"),
4213 "always_recurse":True}
4218 restrict = mysettings.get("PORTAGE_RESTRICT","").split()
4219 from portage.data import secpass
# droppriv: phases will run as the unprivileged portage user, so cache
# dirs must be group-accessible to it.
4220 droppriv = secpass >= 2 and \
4221 "userpriv" in mysettings.features and \
4222 "userpriv" not in restrict
4223 for myfeature, kwargs in features_dirs.iteritems():
4224 if myfeature in mysettings.features:
4225 basedir = mysettings[kwargs["basedir_var"]]
4227 basedir = kwargs["default_dir"]
4228 mysettings[kwargs["basedir_var"]] = basedir
4230 mydirs = [mysettings[kwargs["basedir_var"]]]
4231 if "subdirs" in kwargs:
4232 for subdir in kwargs["subdirs"]:
4233 mydirs.append(os.path.join(basedir, subdir))
4234 for mydir in mydirs:
4235 modified = portage.util.ensure_dirs(mydir)
4236 # Generally, we only want to apply permissions for
4237 # initial creation. Otherwise, we don't know exactly what
4238 # permissions the user wants, so should leave them as-is.
4239 droppriv_fix = False
4242 if st.st_gid != portage_gid or \
4243 not stat.S_IMODE(st.st_mode) & dirmode:
4245 if modified or kwargs["always_recurse"] or droppriv_fix:
4247 writemsg("Adjusting permissions recursively: '%s'\n" % mydir,
4250 raise # The feature is disabled if a single error
4251 # occurs during permissions adjustment.
4252 if not apply_recursive_permissions(mydir,
4253 gid=portage_gid, dirmode=dirmode, dirmask=modemask,
4254 filemode=filemode, filemask=modemask, onerror=onerror):
4255 raise portage.exception.OperationNotPermitted(
4256 "Failed to apply recursive permissions for the portage group.")
# Any permissions failure disables the feature for this run rather than
# aborting the build.
4257 except portage.exception.PortageException, e:
4258 mysettings.features.remove(myfeature)
4259 mysettings["FEATURES"] = " ".join(mysettings.features)
4260 writemsg("!!! %s\n" % str(e), noiselevel=-1)
4261 writemsg("!!! Failed resetting perms on %s='%s'\n" % \
4262 (kwargs["basedir_var"], basedir), noiselevel=-1)
4263 writemsg("!!! Disabled FEATURES='%s'\n" % myfeature,
# Parse PORTAGE_WORKDIR_MODE as octal; fall back to a default
# workdir_mode (set in elided lines) on KeyError/ValueError.
4269 mode = mysettings["PORTAGE_WORKDIR_MODE"]
4271 parsed_mode = int(mode, 8)
4276 if parsed_mode & 07777 != parsed_mode:
4277 raise ValueError("Invalid file mode: %s" % mode)
4279 workdir_mode = parsed_mode
4281 writemsg("!!! PORTAGE_WORKDIR_MODE is unset, using %s.\n" % oct(workdir_mode))
4282 except ValueError, e:
4284 writemsg("%s\n" % e)
4285 writemsg("!!! Unable to parse PORTAGE_WORKDIR_MODE='%s', using %s.\n" % \
4286 (mysettings["PORTAGE_WORKDIR_MODE"], oct(workdir_mode)))
4287 mysettings["PORTAGE_WORKDIR_MODE"] = oct(workdir_mode)
4289 apply_secpass_permissions(mysettings["WORKDIR"],
4290 uid=portage_uid, gid=portage_gid, mode=workdir_mode)
4291 except portage.exception.FileNotFound:
4292 pass # ebuild.sh will create it
# Logging setup: an empty PORT_LOGDIR is treated as unset; permission
# problems disable logging entirely.
4294 if mysettings.get("PORT_LOGDIR", "") == "":
4295 while "PORT_LOGDIR" in mysettings:
4296 del mysettings["PORT_LOGDIR"]
4297 if "PORT_LOGDIR" in mysettings:
4299 modified = portage.util.ensure_dirs(mysettings["PORT_LOGDIR"])
4301 apply_secpass_permissions(mysettings["PORT_LOGDIR"],
4302 uid=portage_uid, gid=portage_gid, mode=02770)
4303 except portage.exception.PortageException, e:
4304 writemsg("!!! %s\n" % str(e), noiselevel=-1)
4305 writemsg("!!! Permission issues with PORT_LOGDIR='%s'\n" % \
4306 mysettings["PORT_LOGDIR"], noiselevel=-1)
4307 writemsg("!!! Disabling logging.\n", noiselevel=-1)
4308 while "PORT_LOGDIR" in mysettings:
4309 del mysettings["PORT_LOGDIR"]
# The .logid marker file's mtime provides a stable timestamp for the log
# file name across phases of one build.
4310 if "PORT_LOGDIR" in mysettings:
4311 logid_path = os.path.join(mysettings["PORTAGE_BUILDDIR"], ".logid")
4312 if not os.path.exists(logid_path):
4313 f = open(logid_path, "w")
4316 logid_time = time.strftime("%Y%m%d-%H%M%S",
4317 time.gmtime(os.stat(logid_path).st_mtime))
4318 mysettings["PORTAGE_LOG_FILE"] = os.path.join(
4319 mysettings["PORT_LOGDIR"], "%s:%s:%s.log" % \
4320 (mysettings["CATEGORY"], mysettings["PF"], logid_time))
4321 del logid_path, logid_time
4323 # When sesandbox is enabled, only log if PORT_LOGDIR is explicitly
4324 # enabled since it is possible that local SELinux security policies
4325 # do not allow ouput to be piped out of the sesandbox domain.
4326 if not (mysettings.selinux_enabled() and \
4327 "sesandbox" in mysettings.features):
4328 mysettings["PORTAGE_LOG_FILE"] = os.path.join(
4329 mysettings["T"], "build.log")
def _doebuild_exit_status_check(mydo, settings):
	"""
	Check whether the ebuild shell exited abnormally, as indicated by
	the absence of the EBUILD_EXIT_STATUS_FILE that ebuild.sh creates
	on a normal exit.

	@param mydo: the ebuild phase that was run (used in the message)
	@param settings: mapping that provides EBUILD_EXIT_STATUS_FILE
		via get()
	@return: an error string if the shell appeared to exit
		unsuccessfully, None otherwise
	"""
	exit_status_file = settings.get("EBUILD_EXIT_STATUS_FILE")
	if not exit_status_file or \
		os.path.exists(exit_status_file):
		# Either the check is disabled (no file configured) or the
		# status file exists, meaning the shell completed normally.
		return None
	msg = ("The ebuild phase '%s' has exited " % mydo) + \
		"unexpectedly. This type of behavior " + \
		"is known to be triggered " + \
		"by things such as failed variable " + \
		"assignments (bug #190128) or bad substitution " + \
		"errors (bug #200313)."
	return msg
def _doebuild_exit_status_unlink(exit_status_file):
	"""
	Remove the exit status file if it exists.  Double check to make
	sure it really doesn't exist afterwards, and let the second
	unlink raise an OSError if it still does (it shouldn't).

	@param exit_status_file: path of the status file, or a false
		value to do nothing
	"""
	if not exit_status_file:
		return
	try:
		os.unlink(exit_status_file)
	except OSError:
		# Ignore a failed unlink (e.g. the file was already gone);
		# the existence re-check below catches anything serious.
		pass
	if os.path.exists(exit_status_file):
		os.unlink(exit_status_file)
# Module-level state shared by doebuild() for Manifest verification.
# _doebuild_manifest_exempt_depend: counter; while non-zero, the depend
# phase is exempted from Manifest checks (incremented/decremented by
# doebuild() around phases that may trigger aux_get cache generation).
_doebuild_manifest_exempt_depend = 0
# Path of the most recently verified Manifest, used to avoid checking
# the same Manifest several times in a row (e.g. during a regen with an
# empty cache).
_doebuild_manifest_checked = None
# Set of Manifest paths that have already failed verification.
_doebuild_broken_manifests = set()
# NOTE(review): extraction-garbled block.  Every line below carries a stray
# "NNNN " line-number prefix, and the gaps in that numbering show that many
# original lines are missing (try/except headers, return statements, dict
# braces, ...).  The text is preserved byte-identical; it must be
# re-extracted from upstream portage before it can run.  Comments below
# describe intent where it is recoverable from the visible text.
4367 def doebuild(myebuild, mydo, myroot, mysettings, debug=0, listonly=0,
4368 fetchonly=0, cleanup=0, dbkey=None, use_cache=1, fetchall=0, tree=None,
4369 mydbapi=None, vartree=None, prev_mtimes=None):
# The following @param/@type lines are the remnants of an epydoc docstring
# whose triple-quote delimiters were lost in extraction.
4372 Wrapper function that invokes specific ebuild phases through the spawning
4375 @param myebuild: name of the ebuild to invoke the phase on (CPV)
4376 @type myebuild: String
4377 @param mydo: Phase to run
4379 @param myroot: $ROOT (usually '/', see man make.conf)
4380 @type myroot: String
4381 @param mysettings: Portage Configuration
4382 @type mysettings: instance of portage.config
4383 @param debug: Turns on various debug information (eg, debug for spawn)
4384 @type debug: Boolean
4385 @param listonly: Used to wrap fetch(); passed such that fetch only lists files required.
4386 @type listonly: Boolean
4387 @param fetchonly: Used to wrap fetch(); passed such that files are only fetched (no other actions)
4388 @type fetchonly: Boolean
4389 @param cleanup: Passed to prepare_build_dirs (TODO: what does it do?)
4390 @type cleanup: Boolean
4391 @param dbkey: A dict (usually keys and values from the depend phase, such as KEYWORDS, USE, etc..)
4392 @type dbkey: Dict or String
4393 @param use_cache: Enables the cache
4394 @type use_cache: Boolean
4395 @param fetchall: Used to wrap fetch(), fetches all URI's (even ones invalid due to USE conditionals)
4396 @type fetchall: Boolean
4397 @param tree: Which tree to use ('vartree','porttree','bintree', etc..), defaults to 'porttree'
4399 @param mydbapi: a dbapi instance to pass to various functions; this should be a portdbapi instance.
4400 @type mydbapi: portdbapi instance
4401 @param vartree: A instance of vartree; used for aux_get calls, defaults to db[myroot]['vartree']
4402 @type vartree: vartree instance
4403 @param prev_mtimes: A dict of { filename:mtime } keys used by merge() to do config_protection
4404 @type prev_mtimes: dictionary
4410 Most errors have an accompanying error message.
4412 listonly and fetchonly are only really necessary for operations involving 'fetch'
4413 prev_mtimes are only necessary for merge operations.
4414 Other variables may not be strictly required, many have defaults that are set inside of doebuild.
# Default the tree (warning suggests 'porttree' is assumed when unset) and
# set up the phase-dependency map used to collapse targets.
4419 writemsg("Warning: tree not specified to doebuild\n")
4423 # chunked out deps for each phase, so that ebuild binary can use it
4424 # to collapse targets down.
4427 "unpack": ["setup"],
4428 "compile":["unpack"],
4429 "test": ["compile"],
4432 "package":["install"],
4436 mydbapi = db[myroot][tree].dbapi
4438 if vartree is None and mydo in ("merge", "qmerge", "unmerge"):
4439 vartree = db[myroot]["vartree"]
4441 features = mysettings.features
# Validate the requested phase name against the known command list.
4443 validcommands = ["help","clean","prerm","postrm","cleanrm","preinst","postinst",
4444 "config","info","setup","depend","fetch","digest",
4445 "unpack","compile","test","install","rpm","qmerge","merge",
4446 "package","unmerge", "manifest"]
4448 if mydo not in validcommands:
4449 validcommands.sort()
4450 writemsg("!!! doebuild: '%s' is not one of the following valid commands:" % mydo,
4452 for vcount in range(len(validcommands)):
4454 writemsg("\n!!! ", noiselevel=-1)
4455 writemsg(validcommands[vcount].ljust(11), noiselevel=-1)
4456 writemsg("\n", noiselevel=-1)
4459 if not os.path.exists(myebuild):
4460 writemsg("!!! doebuild: %s not found for %s\n" % (myebuild, mydo),
# Strict-mode Manifest verification of the ebuild before executing it;
# results are memoized in the module-level _doebuild_manifest_* globals.
4464 global _doebuild_manifest_exempt_depend
4466 if "strict" in features and \
4467 "digest" not in features and \
4468 tree == "porttree" and \
4469 mydo not in ("digest", "manifest", "help") and \
4470 not _doebuild_manifest_exempt_depend:
4471 # Always verify the ebuild checksums before executing it.
4472 pkgdir = os.path.dirname(myebuild)
4473 manifest_path = os.path.join(pkgdir, "Manifest")
4474 global _doebuild_manifest_checked, _doebuild_broken_manifests
4475 if manifest_path in _doebuild_broken_manifests:
4477 # Avoid checking the same Manifest several times in a row during a
4478 # regen with an empty cache.
4479 if _doebuild_manifest_checked != manifest_path:
4480 if not os.path.exists(manifest_path):
4481 writemsg("!!! Manifest file not found: '%s'\n" % manifest_path,
4483 _doebuild_broken_manifests.add(manifest_path)
4485 mf = Manifest(pkgdir, mysettings["DISTDIR"])
4487 mf.checkTypeHashes("EBUILD")
4488 except portage.exception.FileNotFound, e:
4489 writemsg("!!! A file listed in the Manifest " + \
4490 "could not be found: %s\n" % str(e), noiselevel=-1)
4491 _doebuild_broken_manifests.add(manifest_path)
4493 except portage.exception.DigestException, e:
4494 writemsg("!!! Digest verification failed:\n", noiselevel=-1)
4495 writemsg("!!! %s\n" % e.value[0], noiselevel=-1)
4496 writemsg("!!! Reason: %s\n" % e.value[1], noiselevel=-1)
4497 writemsg("!!! Got: %s\n" % e.value[2], noiselevel=-1)
4498 writemsg("!!! Expected: %s\n" % e.value[3], noiselevel=-1)
4499 _doebuild_broken_manifests.add(manifest_path)
4501 # Make sure that all of the ebuilds are actually listed in the
4503 for f in os.listdir(pkgdir):
4504 if f.endswith(".ebuild") and not mf.hasFile("EBUILD", f):
4505 writemsg("!!! A file is not listed in the " + \
4506 "Manifest: '%s'\n" % os.path.join(pkgdir, f),
4508 _doebuild_broken_manifests.add(manifest_path)
4510 _doebuild_manifest_checked = manifest_path
# Local helper: wraps _doebuild_exit_status_check() and reports any
# abnormal-shell-exit message through elog's eerror.
4512 def exit_status_check(retval):
4513 if retval != os.EX_OK:
4515 msg = _doebuild_exit_status_check(mydo, mysettings)
4518 from textwrap import wrap
4519 from portage.elog.messages import eerror
4520 for l in wrap(msg, 72):
4521 eerror(l, phase=mydo, key=mysettings.mycpv)
4524 # Note: PORTAGE_BIN_PATH may differ from the global
4525 # constant when portage is reinstalling itself.
4526 portage_bin_path = mysettings["PORTAGE_BIN_PATH"]
4527 ebuild_sh_binary = os.path.join(portage_bin_path,
4528 os.path.basename(EBUILD_SH_BINARY))
4529 misc_sh_binary = os.path.join(portage_bin_path,
4530 os.path.basename(MISC_SH_BINARY))
4533 builddir_lock = None
4536 filter_calling_env_state = mysettings._filter_calling_env
4538 if mydo in ("digest", "manifest", "help"):
4539 # Temporarily exempt the depend phase from manifest checks, in case
4540 # aux_get calls trigger cache generation.
4541 _doebuild_manifest_exempt_depend += 1
4543 # If we don't need much space and we don't need a constant location,
4544 # we can temporarily override PORTAGE_TMPDIR with a random temp dir
4545 # so that there's no need for locking and it can be used even if the
4546 # user isn't in the portage group.
4547 if mydo in ("info",):
4548 from tempfile import mkdtemp
4550 tmpdir_orig = mysettings["PORTAGE_TMPDIR"]
4551 mysettings["PORTAGE_TMPDIR"] = tmpdir
4553 doebuild_environment(myebuild, mydo, myroot, mysettings, debug,
# "depend" phase: metadata is extracted by spawning ebuild.sh and either
# piped back through an fd pair into a dict dbkey, or written to a file.
4556 # get possible slot information from the deps file
4557 if mydo == "depend":
4558 writemsg("!!! DEBUG: dbkey: %s\n" % str(dbkey), 2)
4559 droppriv = "userpriv" in mysettings.features
4560 if isinstance(dbkey, dict):
4561 mysettings["dbkey"] = ""
4564 0:sys.stdin.fileno(),
4565 1:sys.stdout.fileno(),
4566 2:sys.stderr.fileno(),
4568 mypids = spawn(_shell_quote(ebuild_sh_binary) + " depend",
4570 fd_pipes=fd_pipes, returnpid=True, droppriv=droppriv)
4571 os.close(pw) # belongs exclusively to the child process now
4575 mybytes.append(os.read(pr, maxbytes))
4579 mybytes = "".join(mybytes)
4581 for k, v in izip(auxdbkeys, mybytes.splitlines()):
4583 retval = os.waitpid(mypids[0], 0)[1]
4584 portage.process.spawned_pids.remove(mypids[0])
4585 # If it got a signal, return the signal that was sent, but
4586 # shift in order to distinguish it from a return value. (just
4587 # like portage.process.spawn() would do).
4589 return (retval & 0xff) << 8
4590 # Otherwise, return its exit code.
4593 mysettings["dbkey"] = dbkey
4595 mysettings["dbkey"] = \
4596 os.path.join(mysettings.depcachedir, "aux_db_key_temp")
4598 return spawn(_shell_quote(ebuild_sh_binary) + " depend",
# Dependency-metadata validation: reject ebuilds with invalid *DEPEND,
# LICENSE/PROVIDE/RESTRICT/SRC_URI or an undefined SLOT, except in the
# exempt phases (clean/cleanrm/help/prerm/postrm).
4602 # Validate dependency metadata here to ensure that ebuilds with invalid
4603 # data are never installed (even via the ebuild command).
4604 invalid_dep_exempt_phases = \
4605 set(["clean", "cleanrm", "help", "prerm", "postrm"])
4606 mycpv = mysettings["CATEGORY"] + "/" + mysettings["PF"]
4607 dep_keys = ["DEPEND", "RDEPEND", "PDEPEND"]
4608 misc_keys = ["LICENSE", "PROVIDE", "RESTRICT", "SRC_URI"]
4609 other_keys = ["SLOT"]
4610 all_keys = dep_keys + misc_keys + other_keys
4611 metadata = dict(izip(all_keys, mydbapi.aux_get(mycpv, all_keys)))
4612 class FakeTree(object):
4613 def __init__(self, mydb):
4615 dep_check_trees = {myroot:{}}
4616 dep_check_trees[myroot]["porttree"] = \
4617 FakeTree(fakedbapi(settings=mysettings))
4618 for dep_type in dep_keys:
4619 mycheck = dep_check(metadata[dep_type], None, mysettings,
4620 myuse="all", myroot=myroot, trees=dep_check_trees)
4622 writemsg("%s: %s\n%s\n" % (
4623 dep_type, metadata[dep_type], mycheck[1]), noiselevel=-1)
4624 if mydo not in invalid_dep_exempt_phases:
4626 del dep_type, mycheck
4629 portage.dep.use_reduce(
4630 portage.dep.paren_reduce(metadata[k]), matchall=True)
4631 except portage.exception.InvalidDependString, e:
4632 writemsg("%s: %s\n%s\n" % (
4633 k, metadata[k], str(e)), noiselevel=-1)
4635 if mydo not in invalid_dep_exempt_phases:
4638 if not metadata["SLOT"]:
4639 writemsg("SLOT is undefined\n", noiselevel=-1)
4640 if mydo not in invalid_dep_exempt_phases:
4642 del mycpv, dep_keys, metadata, misc_keys, FakeTree, dep_check_trees
4644 if "PORTAGE_TMPDIR" not in mysettings or \
4645 not os.path.isdir(mysettings["PORTAGE_TMPDIR"]):
4646 writemsg("The directory specified in your " + \
4647 "PORTAGE_TMPDIR variable, '%s',\n" % \
4648 mysettings.get("PORTAGE_TMPDIR", ""), noiselevel=-1)
4649 writemsg("does not exist. Please create this directory or " + \
4650 "correct your PORTAGE_TMPDIR setting.\n", noiselevel=-1)
# "unmerge" short-circuits to the module-level unmerge() helper.
4653 if mydo == "unmerge":
4654 return unmerge(mysettings["CATEGORY"],
4655 mysettings["PF"], myroot, mysettings, vartree=vartree)
4657 # Build directory creation isn't required for any of these.
4658 have_build_dirs = False
4659 if mydo not in ("clean", "cleanrm", "digest",
4660 "fetch", "help", "manifest"):
4661 mystatus = prepare_build_dirs(myroot, mysettings, cleanup)
4664 have_build_dirs = True
4665 # PORTAGE_LOG_FILE is set above by the prepare_build_dirs() call.
4666 logfile = mysettings.get("PORTAGE_LOG_FILE")
4667 if logfile and not os.access(os.path.dirname(logfile), os.W_OK):
# Restore a saved build environment (environment.bz2 next to the ebuild)
# into ${T}/environment when no environment file exists yet.
4670 env_file = os.path.join(mysettings["T"], "environment")
4674 env_stat = os.stat(env_file)
4676 if e.errno != errno.ENOENT:
4680 saved_env = os.path.join(
4681 os.path.dirname(myebuild), "environment.bz2")
4682 if not os.path.isfile(saved_env):
4686 "bzip2 -dc %s > %s" % \
4687 (_shell_quote(saved_env),
4688 _shell_quote(env_file)))
4690 env_stat = os.stat(env_file)
4692 if e.errno != errno.ENOENT:
4695 if os.WIFEXITED(retval) and \
4696 os.WEXITSTATUS(retval) == os.EX_OK and \
4697 env_stat and env_stat.st_size > 0:
4698 # This is a signal to ebuild.sh, so that it knows to filter
4699 # out things like SANDBOX_{DENY,PREDICT,READ,WRITE} that
4700 # would be preserved between normal phases.
4701 open(env_file + ".raw", "w")
4703 writemsg(("!!! Error extracting saved " + \
4704 "environment: '%s'\n") % \
4705 saved_env, noiselevel=-1)
4709 if e.errno != errno.ENOENT:
4714 mysettings._filter_calling_env = True
# Sanity check: an empty ARCH usually means a broken make.profile symlink.
4716 for var in ("ARCH", ):
4717 value = mysettings.get(var)
4718 if value and value.strip():
4720 msg = ("%s is not set... " % var) + \
4721 ("Are you missing the '%setc/make.profile' symlink? " % \
4722 mysettings["PORTAGE_CONFIGROOT"]) + \
4723 "Is the symlink correct? " + \
4724 "Is your portage tree complete?"
4725 from portage.elog.messages import eerror
4726 from textwrap import wrap
4727 for line in wrap(msg, 70):
4728 eerror(line, phase="setup", key=mysettings.mycpv)
4729 from portage.elog import elog_process
4730 elog_process(mysettings.mycpv, mysettings)
4732 del env_file, env_stat, saved_env
4733 _doebuild_exit_status_unlink(
4734 mysettings.get("EBUILD_EXIT_STATUS_FILE"))
4736 mysettings.pop("EBUILD_EXIT_STATUS_FILE", None)
4738 # if any of these are being called, handle them -- running them out of
4739 # the sandbox -- and stop now.
4740 if mydo in ["clean","cleanrm"]:
4741 return spawn(_shell_quote(ebuild_sh_binary) + " clean", mysettings,
4742 debug=debug, free=1, logfile=None)
4743 elif mydo == "help":
4744 return spawn(_shell_quote(ebuild_sh_binary) + " " + mydo,
4745 mysettings, debug=debug, free=1, logfile=logfile)
4746 elif mydo == "setup":
4747 infodir = os.path.join(
4748 mysettings["PORTAGE_BUILDDIR"], "build-info")
4749 if os.path.isdir(infodir):
4750 """Load USE flags for setup phase of a binary package.
4751 Ideally, the environment.bz2 would be used instead."""
4752 mysettings.load_infodir(infodir)
4754 _shell_quote(ebuild_sh_binary) + " " + mydo, mysettings,
4755 debug=debug, free=1, logfile=logfile)
4756 retval = exit_status_check(retval)
4758 """ Privileged phases may have left files that need to be made
4759 writable to a less privileged user."""
4760 apply_recursive_permissions(mysettings["T"],
4761 uid=portage_uid, gid=portage_gid, dirmode=070, dirmask=0,
4762 filemode=060, filemask=0)
# preinst/postinst run ebuild.sh and then the factored-out post-phase
# tasks from misc_sh_binary.
4764 elif mydo == "preinst":
4765 phase_retval = spawn(
4766 _shell_quote(ebuild_sh_binary) + " " + mydo,
4767 mysettings, debug=debug, free=1, logfile=logfile)
4768 phase_retval = exit_status_check(phase_retval)
4769 if phase_retval == os.EX_OK:
4770 # Post phase logic and tasks that have been factored out of
4771 # ebuild.sh. Call preinst_mask last so that INSTALL_MASK can
4772 # can be used to wipe out any gmon.out files created during
4773 # previous functions (in case any tools were built with -pg
4775 myargs = [_shell_quote(misc_sh_binary),
4777 "preinst_sfperms", "preinst_selinux_labels",
4778 "preinst_suid_scan", "preinst_mask"]
4779 _doebuild_exit_status_unlink(
4780 mysettings.get("EBUILD_EXIT_STATUS_FILE"))
4781 mysettings["EBUILD_PHASE"] = ""
4782 phase_retval = spawn(" ".join(myargs),
4783 mysettings, debug=debug, free=1, logfile=logfile)
4784 phase_retval = exit_status_check(phase_retval)
4785 if phase_retval != os.EX_OK:
4786 writemsg("!!! post preinst failed; exiting.\n",
4789 elif mydo == "postinst":
4790 phase_retval = spawn(
4791 _shell_quote(ebuild_sh_binary) + " " + mydo,
4792 mysettings, debug=debug, free=1, logfile=logfile)
4793 phase_retval = exit_status_check(phase_retval)
4794 if phase_retval == os.EX_OK:
4795 # Post phase logic and tasks that have been factored out of
4797 myargs = [_shell_quote(misc_sh_binary), "postinst_bsdflags"]
4798 _doebuild_exit_status_unlink(
4799 mysettings.get("EBUILD_EXIT_STATUS_FILE"))
4800 mysettings["EBUILD_PHASE"] = ""
4801 phase_retval = spawn(" ".join(myargs),
4802 mysettings, debug=debug, free=1, logfile=logfile)
4803 phase_retval = exit_status_check(phase_retval)
4804 if phase_retval != os.EX_OK:
4805 writemsg("!!! post postinst failed; exiting.\n",
4808 elif mydo in ("prerm", "postrm", "config", "info"):
4809 mysettings.load_infodir(mysettings["O"])
4811 _shell_quote(ebuild_sh_binary) + " " + mydo,
4812 mysettings, debug=debug, free=1, logfile=logfile)
4813 retval = exit_status_check(retval)
# Fetch-list computation (A/AA) and fetching/digesting of distfiles.
4816 mycpv = "/".join((mysettings["CATEGORY"], mysettings["PF"]))
4818 # Make sure we get the correct tree in case there are overlays.
4819 mytree = os.path.realpath(
4820 os.path.dirname(os.path.dirname(mysettings["O"])))
4822 newuris, alist = mydbapi.getfetchlist(
4823 mycpv, mytree=mytree, mysettings=mysettings)
4824 alluris, aalist = mydbapi.getfetchlist(
4825 mycpv, mytree=mytree, all=True, mysettings=mysettings)
4826 except portage.exception.InvalidDependString, e:
4827 writemsg("!!! %s\n" % str(e), noiselevel=-1)
4828 writemsg("!!! Invalid SRC_URI for '%s'.\n" % mycpv, noiselevel=-1)
4831 mysettings["A"] = " ".join(alist)
4832 mysettings["AA"] = " ".join(aalist)
4833 if ("mirror" in features) or fetchall:
4834 fetchme = alluris[:]
4836 elif mydo == "digest":
4837 fetchme = alluris[:]
4839 # Skip files that we already have digests for.
4840 mf = Manifest(mysettings["O"], mysettings["DISTDIR"])
4841 mydigests = mf.getTypeDigests("DIST")
4842 required_hash_types = set()
4843 required_hash_types.add("size")
4844 required_hash_types.add(portage.const.MANIFEST2_REQUIRED_HASH)
4845 for filename, hashes in mydigests.iteritems():
4846 if not required_hash_types.difference(hashes):
4847 checkme = [i for i in checkme if i != filename]
4848 fetchme = [i for i in fetchme \
4849 if os.path.basename(i) != filename]
4850 del filename, hashes
4852 fetchme = newuris[:]
4856 # Files are already checked inside fetch(),
4857 # so do not check them again.
4860 # Only try and fetch the files if we are going to need them ...
4861 # otherwise, if user has FEATURES=noauto and they run `ebuild clean
4862 # unpack compile install`, we will try and fetch 4 times :/
4863 need_distfiles = (mydo in ("fetch", "unpack") or \
4864 mydo not in ("digest", "manifest") and "noauto" not in features)
4865 if need_distfiles and not fetch(
4866 fetchme, mysettings, listonly=listonly, fetchonly=fetchonly):
4868 # Create an elog message for this fetch failure since the
4869 # mod_echo module might push the original message off of the
4870 # top of the terminal and prevent the user from being able to
4872 from portage.elog.messages import eerror
4873 eerror("Fetch failed for '%s'" % mycpv,
4874 phase="unpack", key=mycpv)
4875 from portage.elog import elog_process
4876 elog_process(mysettings.mycpv, mysettings)
4879 if mydo == "fetch" and listonly:
4883 if mydo == "manifest":
4884 return not digestgen(aalist, mysettings, overwrite=1,
4885 manifestonly=1, myportdb=mydbapi)
4886 elif mydo == "digest":
4887 return not digestgen(aalist, mysettings, overwrite=1,
4889 elif "digest" in mysettings.features:
4890 digestgen(aalist, mysettings, overwrite=0, myportdb=mydbapi)
4891 except portage.exception.PermissionDenied, e:
4892 writemsg("!!! %s\n" % str(e), noiselevel=-1)
4893 if mydo in ("digest", "manifest"):
4896 # See above comment about fetching only when needed
4897 if not digestcheck(checkme, mysettings, ("strict" in features),
4898 (mydo not in ["digest","fetch","unpack"] and \
4899 mysettings.get("PORTAGE_CALLER", None) == "ebuild" and \
4900 "noauto" in features)):
# Redirect DISTDIR into the build dir via symlinks so the sandboxed build
# only sees the distfiles it declared.
4906 # remove PORTAGE_ACTUAL_DISTDIR once cvs/svn is supported via SRC_URI
4907 if (mydo != "setup" and "noauto" not in features) or mydo == "unpack":
4908 orig_distdir = mysettings["DISTDIR"]
4909 mysettings["PORTAGE_ACTUAL_DISTDIR"] = orig_distdir
4910 edpath = mysettings["DISTDIR"] = \
4911 os.path.join(mysettings["PORTAGE_BUILDDIR"], "distdir")
4912 if os.path.exists(edpath):
4914 if os.path.isdir(edpath) and not os.path.islink(edpath):
4915 shutil.rmtree(edpath)
4919 print "!!! Failed reseting ebuild distdir path, " + edpath
4922 apply_secpass_permissions(edpath, uid=portage_uid, mode=0755)
4925 os.symlink(os.path.join(orig_distdir, file),
4926 os.path.join(edpath, file))
4928 print "!!! Failed symlinking in '%s' to ebuild distdir" % file
4931 #initial dep checks complete; time to process main commands
# Compute sandbox/privilege flags from FEATURES and PORTAGE_RESTRICT,
# then build the actionmap of phase -> spawn command/args.
4933 restrict = mysettings["PORTAGE_RESTRICT"].split()
4934 nosandbox = (("userpriv" in features) and \
4935 ("usersandbox" not in features) and \
4936 "userpriv" not in restrict and \
4937 "nouserpriv" not in restrict)
4938 if nosandbox and ("userpriv" not in features or \
4939 "userpriv" in restrict or \
4940 "nouserpriv" in restrict):
4941 nosandbox = ("sandbox" not in features and \
4942 "usersandbox" not in features)
4944 sesandbox = mysettings.selinux_enabled() and \
4945 "sesandbox" in mysettings.features
4947 droppriv = "userpriv" in mysettings.features and \
4948 "userpriv" not in restrict
4950 fakeroot = "fakeroot" in mysettings.features
4952 ebuild_sh = _shell_quote(ebuild_sh_binary) + " %s"
4953 misc_sh = _shell_quote(misc_sh_binary) + " dyn_%s"
4955 # args are for the to spawn function
4957 "setup": {"cmd":ebuild_sh, "args":{"droppriv":0, "free":1, "sesandbox":0, "fakeroot":0}},
4958 "unpack": {"cmd":ebuild_sh, "args":{"droppriv":droppriv, "free":0, "sesandbox":sesandbox, "fakeroot":0}},
4959 "compile":{"cmd":ebuild_sh, "args":{"droppriv":droppriv, "free":nosandbox, "sesandbox":sesandbox, "fakeroot":0}},
4960 "test": {"cmd":ebuild_sh, "args":{"droppriv":droppriv, "free":nosandbox, "sesandbox":sesandbox, "fakeroot":0}},
4961 "install":{"cmd":ebuild_sh, "args":{"droppriv":0, "free":0, "sesandbox":sesandbox, "fakeroot":fakeroot}},
4962 "rpm": {"cmd":misc_sh, "args":{"droppriv":0, "free":0, "sesandbox":0, "fakeroot":fakeroot}},
4963 "package":{"cmd":misc_sh, "args":{"droppriv":0, "free":0, "sesandbox":0, "fakeroot":fakeroot}},
4966 # merge the deps in so we have again a 'full' actionmap
4967 # be glad when this can die.
4969 if len(actionmap_deps.get(x, [])):
4970 actionmap[x]["dep"] = ' '.join(actionmap_deps[x])
4972 if mydo in actionmap:
4973 retval = spawnebuild(mydo,
4974 actionmap, mysettings, debug, logfile=logfile)
4975 elif mydo=="qmerge":
4976 # check to ensure install was run. this *only* pops up when users
4977 # forget it and are using ebuild
4978 if not os.path.exists(
4979 os.path.join(mysettings["PORTAGE_BUILDDIR"], ".installed")):
4980 writemsg("!!! mydo=qmerge, but the install phase has not been run\n",
4983 # qmerge is a special phase that implies noclean.
4984 if "noclean" not in mysettings.features:
4985 mysettings.features.append("noclean")
4986 #qmerge is specifically not supposed to do a runtime dep check
4988 mysettings["CATEGORY"], mysettings["PF"], mysettings["D"],
4989 os.path.join(mysettings["PORTAGE_BUILDDIR"], "build-info"),
4990 myroot, mysettings, myebuild=mysettings["EBUILD"], mytree=tree,
4991 mydbapi=mydbapi, vartree=vartree, prev_mtimes=prev_mtimes)
4993 retval = spawnebuild("install", actionmap, mysettings, debug,
4994 alwaysdep=1, logfile=logfile)
4995 retval = exit_status_check(retval)
4996 if retval != os.EX_OK:
4997 # The merge phase handles this already. Callers don't know how
4998 # far this function got, so we have to call elog_process() here
4999 # so that it's only called once.
5000 from portage.elog import elog_process
5001 elog_process(mysettings.mycpv, mysettings)
5002 if retval == os.EX_OK:
5003 retval = merge(mysettings["CATEGORY"], mysettings["PF"],
5004 mysettings["D"], os.path.join(mysettings["PORTAGE_BUILDDIR"],
5005 "build-info"), myroot, mysettings,
5006 myebuild=mysettings["EBUILD"], mytree=tree, mydbapi=mydbapi,
5007 vartree=vartree, prev_mtimes=prev_mtimes)
5009 print "!!! Unknown mydo:",mydo
# Cleanup tail (presumably a finally block upstream -- TODO confirm):
# restore _filter_calling_env, the temporary PORTAGE_TMPDIR, the build
# dir lock, and DISTDIR; decrement the depend-phase Manifest exemption.
5015 mysettings._filter_calling_env = filter_calling_env_state
5017 mysettings["PORTAGE_TMPDIR"] = tmpdir_orig
5018 shutil.rmtree(tmpdir)
5020 portage.locks.unlockdir(builddir_lock)
5022 # Make sure that DISTDIR is restored to it's normal value before we return!
5023 if "PORTAGE_ACTUAL_DISTDIR" in mysettings:
5024 mysettings["DISTDIR"] = mysettings["PORTAGE_ACTUAL_DISTDIR"]
5025 del mysettings["PORTAGE_ACTUAL_DISTDIR"]
5029 if os.stat(logfile).st_size == 0:
5034 if mydo in ("digest", "manifest", "help"):
5035 # If necessary, depend phase has been triggered by aux_get calls
5036 # and the exemption is no longer needed.
5037 _doebuild_manifest_exempt_depend -= 1
def _movefile(src, dest, **kwargs):
	"""Calls movefile and raises a PortageException if an error occurs.

	@param src: source path, forwarded to movefile()
	@param dest: destination path, forwarded to movefile()
	@param kwargs: forwarded to movefile() (newmtime, sstat, mysettings)
	@raise portage.exception.PortageException: when movefile() returns
		None, which is its failure indication
	"""
	if movefile(src, dest, **kwargs) is None:
		raise portage.exception.PortageException(
			"mv '%s' '%s'" % (src, dest))
# NOTE(review): extraction-garbled block.  Each line keeps a stray "NNNN "
# line-number prefix and the numbering gaps show missing try/except frames,
# return statements and assignments.  Preserved byte-identical; must be
# re-extracted from upstream before it can run.  The fallback chain that is
# still visible: same-device rename -> cross-device copy+rename -> /bin/mv
# for special files.
5047 def movefile(src,dest,newmtime=None,sstat=None,mysettings=None):
5048 """moves a file from src to dest, preserving all permissions and attributes; mtime will
5049 be preserved even when moving across filesystems. Returns true on success and false on
5050 failure. Move is atomic."""
5051 #print "movefile("+str(src)+","+str(dest)+","+str(newmtime)+","+str(sstat)+")"
5053 if mysettings is None:
5055 mysettings = settings
5056 selinux_enabled = mysettings.selinux_enabled()
5061 except SystemExit, e:
5063 except Exception, e:
5064 print "!!! Stating source file failed... movefile()"
# Stat the destination; fall back to its parent directory when dest does
# not exist (presumably to learn the destination device -- TODO confirm).
5070 dstat=os.lstat(dest)
5071 except (OSError, IOError):
5072 dstat=os.lstat(os.path.dirname(dest))
# FreeBSD chflags handling: clear file flags on dest and its parent so
# the move can proceed; pflags is restored at the end of the function.
5076 if destexists and dstat.st_flags != 0:
5077 bsd_chflags.lchflags(dest, 0)
5078 # Use normal stat/chflags for the parent since we want to
5079 # follow any symlinks to the real parent directory.
5080 pflags = os.stat(os.path.dirname(dest)).st_flags
5082 bsd_chflags.chflags(os.path.dirname(dest), 0)
5085 if stat.S_ISLNK(dstat[stat.ST_MODE]):
5089 except SystemExit, e:
5091 except Exception, e:
# Symlink source: re-create the link at dest (stripping the ${D} prefix
# from the target), preserve ownership, and return the link's mtime.
5094 if stat.S_ISLNK(sstat[stat.ST_MODE]):
5096 target=os.readlink(src)
5097 if mysettings and mysettings["D"]:
5098 if target.find(mysettings["D"])==0:
5099 target=target[len(mysettings["D"]):]
5100 if destexists and not stat.S_ISDIR(dstat[stat.ST_MODE]):
5103 sid = selinux.get_lsid(src)
5104 selinux.secure_symlink(target,dest,sid)
5106 os.symlink(target,dest)
5107 lchown(dest,sstat[stat.ST_UID],sstat[stat.ST_GID])
5108 # utime() only works on the target of a symlink, so it's not
5109 # possible to perserve mtime on symlinks.
5110 return os.lstat(dest)[stat.ST_MTIME]
5111 except SystemExit, e:
5113 except Exception, e:
5114 print "!!! failed to properly create symlink:"
5115 print "!!!",dest,"->",target
# Same-device (or SELinux) case: attempt an atomic rename.
5120 if sstat[stat.ST_DEV]==dstat[stat.ST_DEV] or selinux_enabled:
5123 ret=selinux.secure_rename(src,dest)
5125 ret=os.rename(src,dest)
5127 except SystemExit, e:
5129 except Exception, e:
5130 if e[0]!=errno.EXDEV:
5131 # Some random error.
5132 print "!!! Failed to move",src,"to",dest
5135 # Invalid cross-device-link 'bind' mounted or actually Cross-Device
# Cross-device regular file: copy to dest+"#new" then rename over dest
# so the visible replacement stays atomic.
5138 if stat.S_ISREG(sstat[stat.ST_MODE]):
5139 try: # For safety copy then move it over.
5141 selinux.secure_copy(src,dest+"#new")
5142 selinux.secure_rename(dest+"#new",dest)
5144 shutil.copyfile(src,dest+"#new")
5145 os.rename(dest+"#new",dest)
5147 except SystemExit, e:
5149 except Exception, e:
5150 print '!!! copy',src,'->',dest,'failed.'
5154 #we don't yet handle special, so we need to fall back to /bin/mv
5156 a=commands.getstatusoutput(MOVE_BINARY+" -c -f "+"'"+src+"' '"+dest+"'")
5158 a=commands.getstatusoutput(MOVE_BINARY+" -f "+"'"+src+"' '"+dest+"'")
5160 print "!!! Failed to move special file:"
5161 print "!!! '"+src+"' to '"+dest+"'"
5163 return None # failure
# After a copy-based move: restore ownership, mode and mtime on dest.
5166 if stat.S_ISLNK(sstat[stat.ST_MODE]):
5167 lchown(dest,sstat[stat.ST_UID],sstat[stat.ST_GID])
5169 os.chown(dest,sstat[stat.ST_UID],sstat[stat.ST_GID])
5170 os.chmod(dest, stat.S_IMODE(sstat[stat.ST_MODE])) # Sticky is reset on chown
5172 except SystemExit, e:
5174 except Exception, e:
5175 print "!!! Failed to chown/chmod/unlink in movefile()"
5181 os.utime(dest,(newmtime,newmtime))
5183 os.utime(dest, (sstat[stat.ST_ATIME], sstat[stat.ST_MTIME]))
5184 newmtime=sstat[stat.ST_MTIME]
5187 # Restore the flags we saved before moving
5189 bsd_chflags.chflags(os.path.dirname(dest), pflags)
def merge(mycat, mypkg, pkgloc, infloc, myroot, mysettings, myebuild=None,
	mytree=None, mydbapi=None, vartree=None, prev_mtimes=None):
	"""Merge an already-built package image into the live filesystem.

	@param mycat: package category
	@param mypkg: package name-version (PF)
	@param pkgloc: image directory to merge from (usually ${D})
	@param infloc: build-info directory for the package
	@param myroot: merge target root ($ROOT)
	@param mysettings: portage configuration instance
	@param myebuild: path to the ebuild, forwarded to dblink.merge()
	@param mytree: tree type ('porttree', 'bintree', ...) for the dblink
	@param mydbapi: dbapi instance forwarded to dblink.merge()
	@param vartree: installed-package tree for the dblink
	@param prev_mtimes: { filename: mtime } map used for config protection
	@return: errno.EACCES when myroot is not writable, otherwise the
		return value of dblink.merge()
	"""
	if not os.access(myroot, os.W_OK):
		# A merge into an unwritable root cannot succeed; fail up front.
		writemsg("Permission denied: access('%s', W_OK)\n" % myroot,
			noiselevel=-1)
		return errno.EACCES
	mylink = dblink(mycat, mypkg, myroot, mysettings, treetype=mytree,
		vartree=vartree)
	return mylink.merge(pkgloc, infloc, myroot, myebuild,
		mydbapi=mydbapi, prev_mtimes=prev_mtimes)
def unmerge(cat, pkg, myroot, mysettings, mytrimworld=1, vartree=None, ldpath_mtimes=None):
	"""Unmerge an installed package from the live filesystem.

	@param cat: package category
	@param pkg: package name-version (PF)
	@param myroot: target root ($ROOT)
	@param mysettings: portage configuration instance
	@param mytrimworld: forwarded to dblink.unmerge() as trimworld
	@param vartree: installed-package tree for the dblink
	@param ldpath_mtimes: forwarded to dblink.unmerge()
	@return: os.EX_OK when the package is not installed or the unmerge
		succeeds, otherwise dblink.unmerge()'s return value
	"""
	mylink = dblink(
		cat, pkg, myroot, mysettings, treetype="vartree", vartree=vartree)
	try:
		mylink.lockdb()
		if mylink.exists():
			retval = mylink.unmerge(trimworld=mytrimworld, cleanup=1,
				ldpath_mtimes=ldpath_mtimes)
			if retval == os.EX_OK:
				# Only drop the vardb entry after a clean unmerge.
				mylink.delete()
			return retval
		return os.EX_OK
	finally:
		# Always release the vardb lock, even on error.
		mylink.unlockdb()
def getCPFromCPV(mycpv):
	"""Calls pkgsplit on a cpv and returns only the cp.

	@param mycpv: a category/package-version string
	@return: the category/package part, with version information removed
	"""
	return pkgsplit(mycpv)[0]
def dep_virtual(mysplit, mysettings):
	"""Does virtual dependency conversion.

	Recursively walks a parsed dependency list and replaces each
	old-style virtual key with the profile's configured provider(s):
	a single provider is substituted in place, while multiple providers
	expand to an || ( ... ) choice -- except for blockers, which expand
	to a plain "and" list since blocking one provider must block all.

	@param mysplit: a paren_reduce()-style nested dependency list
	@param mysettings: configuration object providing getvirtuals()
	@return: a new nested list with old-style virtuals expanded
	"""
	newsplit = []
	myvirtuals = mysettings.getvirtuals()
	for x in mysplit:
		if isinstance(x, list):
			# Recurse into nested groups.
			newsplit.append(dep_virtual(x, mysettings))
		else:
			mykey = dep_getkey(x)
			mychoices = myvirtuals.get(mykey, None)
			if mychoices:
				if len(mychoices) == 1:
					a = x.replace(mykey, mychoices[0])
				else:
					if x[0] == "!":
						# blocker needs "and" not "or"(||).
						a = []
					else:
						a = ['||']
					for y in mychoices:
						a.append(x.replace(mykey, y))
				newsplit.append(a)
			else:
				# Not a configured virtual; keep the atom as-is.
				newsplit.append(x)
	return newsplit
# NOTE(review): extraction-garbled block.  Each line keeps a stray "NNNN "
# line-number prefix and the numbering gaps show missing lines (loop
# headers, newsplit initialization, match_atom construction, ...).
# Preserved byte-identical; must be re-extracted from upstream before it
# can run.
5249 def _expand_new_virtuals(mysplit, edebug, mydbapi, mysettings, myroot="/",
5250 trees=None, **kwargs):
5251 """Recursively expand new-style virtuals so as to collapse one or more
5252 levels of indirection. In dep_zapdeps, new-style virtuals will be assigned
5253 zero cost regardless of whether or not they are currently installed. Virtual
5254 blockers are supported but only when the virtual expands to a single
5255 atom because it wouldn't necessarily make sense to block all the components
5256 of a compound virtual. When more than one new-style virtual is matched,
5257 the matches are sorted from highest to lowest versions and the atom is
5258 expanded to || ( highest match ... lowest match )."""
5260 # According to GLEP 37, RDEPEND is the only dependency type that is valid
5261 # for new-style virtuals. Repoman should enforce this.
5262 dep_keys = ["RDEPEND", "DEPEND", "PDEPEND"]
# compare_pkgs sorts descending (pkgcmp(b, a)), so higher versions first.
5263 def compare_pkgs(a, b):
5264 return pkgcmp(b[1], a[1])
5265 portdb = trees[myroot]["porttree"].dbapi
5266 if kwargs["use_binaries"]:
5267 portdb = trees[myroot]["bintree"].dbapi
5268 myvirtuals = mysettings.getvirtuals()
# Main walk over the dependency list: recurse into nested groups, then
# validate and expand each virtual/* atom.
5273 elif isinstance(x, list):
5274 newsplit.append(_expand_new_virtuals(x, edebug, mydbapi,
5275 mysettings, myroot=myroot, trees=trees, **kwargs))
5277 if portage.dep._dep_check_strict and \
5278 not isvalidatom(x, allow_blockers=True):
5279 raise portage.exception.ParseError(
5280 "invalid atom: '%s'" % x)
5281 mykey = dep_getkey(x)
5282 if not mykey.startswith("virtual/"):
5285 mychoices = myvirtuals.get(mykey, [])
5286 isblocker = x.startswith("!")
# Collect new-style matches (cpv, split-version, dbapi) from the port/bin
# tree and, for binaries, the vartree as well.
5291 for cpv in portdb.match(match_atom):
5292 # only use new-style matches
5293 if cpv.startswith("virtual/"):
5294 pkgs[cpv] = (cpv, catpkgsplit(cpv)[1:], portdb)
5295 if kwargs["use_binaries"] and "vartree" in trees[myroot]:
5296 vardb = trees[myroot]["vartree"].dbapi
5297 for cpv in vardb.match(match_atom):
5298 # only use new-style matches
5299 if cpv.startswith("virtual/"):
5302 pkgs[cpv] = (cpv, catpkgsplit(cpv)[1:], vardb)
5303 if not (pkgs or mychoices):
5304 # This one couldn't be expanded as a new-style virtual. Old-style
5305 # virtuals have already been expanded by dep_virtual, so this one
5306 # is unavailable and dep_zapdeps will identify it as such. The
5307 # atom is not eliminated here since it may still represent a
5308 # dependency that needs to be satisfied.
5311 if not pkgs and len(mychoices) == 1:
5312 newsplit.append(x.replace(mykey, mychoices[0]))
5314 pkgs = pkgs.values()
5315 pkgs.sort(compare_pkgs) # Prefer higher versions.
# For each matched virtual, check its dependency string and splice the
# result (plus an "=cpv" atom pulling in the virtual itself) into the
# expansion; blockers are only allowed for single-atom expansions.
5321 depstring = " ".join(y[2].aux_get(y[0], dep_keys))
5323 print "Virtual Parent: ", y[0]
5324 print "Virtual Depstring:", depstring
5325 mycheck = dep_check(depstring, mydbapi, mysettings, myroot=myroot,
5326 trees=trees, **kwargs)
5328 raise portage.exception.ParseError(
5329 "%s: %s '%s'" % (y[0], mycheck[1], depstring))
5331 virtual_atoms = [atom for atom in mycheck[1] \
5332 if not atom.startswith("!")]
5333 if len(virtual_atoms) == 1:
5334 # It wouldn't make sense to block all the components of a
5335 # compound virtual, so only a single atom block is allowed.
5336 a.append("!" + virtual_atoms[0])
5338 mycheck[1].append("="+y[0]) # pull in the new-style virtual
5339 a.append(mycheck[1])
5340 # Plain old-style virtuals. New-style virtuals are preferred.
5342 a.append(x.replace(mykey, y))
5343 if isblocker and not a:
5344 # Probably a compound virtual. Pass the atom through unprocessed.
def dep_eval(deplist):
    """Evaluate a reduced dependency list.

    Appears to return a truth value (used by dep_zapdeps as
    ``dep_eval(reduced)``): an "||" list needs only one satisfied member,
    a plain list needs all members satisfied. NOTE(review): several
    return statements are elided in this view — confirm exact values.
    """
    if deplist[0]=="||":
        #or list; we just need one "1"
        for x in deplist[1:]:
            # nested sub-lists are evaluated recursively
            if isinstance(x, list):
        #XXX: unless there's no available atoms in the list
        #in which case we need to assume that everything is
        #okay as some ebuilds are relying on an old bug.
        if len(deplist) == 1:
    # non-"||" (AND) branch: iterate every member
    if isinstance(x, list):
def dep_zapdeps(unreduced, reduced, myroot, use_binaries=0, trees=None):
    """Takes an unreduced and reduced deplist and removes satisfied dependencies.
    Returned deplist contains steps that must be taken to satisfy dependencies.

    unreduced -- the structured dependency list (atoms and nested lists)
    reduced   -- parallel structure of satisfied/unsatisfied flags
    myroot    -- key into trees for the target ROOT
    use_binaries -- prefer the bintree dbapi over the porttree dbapi
    trees     -- per-root dictionary of portage trees
    """
    # optional per-root flag stored alongside the trees
    selective = trees[myroot].get("selective", False)
    writemsg("ZapDeps -- %s\n" % (use_binaries), 2)
    # Already satisfied (or nothing to do): the elided body here
    # short-circuits before any choice is made.
    if not reduced or unreduced == ["||"] or dep_eval(reduced):

    # Plain AND list: recurse into sub-lists and collect whatever
    # remains unsatisfied.
    if unreduced[0] != "||":
        for dep, satisfied in izip(unreduced, reduced):
            if isinstance(dep, list):
                unresolved += dep_zapdeps(dep, satisfied, myroot,
                    use_binaries=use_binaries, trees=trees)
                unresolved.append(dep)

    # We're at a ( || atom ... ) type level and need to make a choice
    deps = unreduced[1:]
    satisfieds = reduced[1:]

    # Our preference order is for the first item that:
    # a) contains all unmasked packages with the same key as installed packages
    # b) contains all unmasked packages
    # c) contains masked installed packages
    # d) is the first item

    preferred_any_slot = []
    possible_upgrades = []

    # Alias the trees we'll be checking availability against
    if "vartree" in trees[myroot]:
        vardb = trees[myroot]["vartree"].dbapi
        # one of these two is selected (the branch on use_binaries is
        # elided in this view)
        mydbapi = trees[myroot]["bintree"].dbapi
        mydbapi = trees[myroot]["porttree"].dbapi

    # Sort the deps into preferred (installed) and other
    # with values of [[required_atom], availability]
    for dep, satisfied in izip(deps, satisfieds):
        if isinstance(dep, list):
            atoms = dep_zapdeps(dep, satisfied, myroot,
                use_binaries=use_binaries, trees=trees)
            other.append((atoms, None, False))

        all_available = True
            # first try the preferred (bin/port) dbapi for each atom
            avail_pkg = mydbapi.match(atom)
                avail_pkg = avail_pkg[-1] # highest (ascending order)
                avail_slot = "%s:%s" % (dep_getkey(atom),
                    mydbapi.aux_get(avail_pkg, ["SLOT"])[0])
            # fall back to the installed-package db
            avail_pkg = vardb.match(atom)
                avail_pkg = avail_pkg[-1] # highest (ascending order)
                avail_slot = "%s:%s" % (dep_getkey(atom),
                    vardb.aux_get(avail_pkg, ["SLOT"])[0])
                all_available = False
            # record the best candidate per cp:SLOT key
            versions[avail_slot] = avail_pkg

        this_choice = (atoms, versions, all_available)

        # The "all installed" criterion is not version or slot specific.
        # If any version of a package is installed then we assume that it
        # is preferred over other possible package choices.
        all_installed = True
        for atom in set([dep_getkey(atom) for atom in atoms]):
            # New-style virtuals have zero cost to install.
            if not vardb.match(atom) and not atom.startswith("virtual/"):
                all_installed = False
        all_installed_slots = False
            all_installed_slots = True
            for slot_atom in versions:
                # New-style virtuals have zero cost to install.
                if not vardb.match(slot_atom) and \
                    not slot_atom.startswith("virtual/"):
                    all_installed_slots = False
            # classify this choice into the preference buckets
            if all_installed_slots:
                preferred.append(this_choice)
                preferred_any_slot.append(this_choice)
            possible_upgrades.append(this_choice)
            other.append(this_choice)

    # Compare the "all_installed" choices against the "all_available" choices
    # for possible missed upgrades. The main purpose of this code is to find
    # upgrades of new-style virtuals since _expand_new_virtuals() expands them
    # into || ( highest version ... lowest version ). We want to prefer the
    # highest all_available version of the new-style virtual when there is a
    # lower all_installed version.
    preferred.extend(preferred_any_slot)
    preferred.extend(possible_upgrades)
    possible_upgrades = preferred[1:]
    for possible_upgrade in possible_upgrades:
        atoms, versions, all_available = possible_upgrade
        myslots = set(versions)
        for other_choice in preferred:
            if possible_upgrade is other_choice:
                # possible_upgrade will not be promoted, so move on
            o_atoms, o_versions, o_all_available = other_choice
            intersecting_slots = myslots.intersection(o_versions)
            if not intersecting_slots:
            has_downgrade = False
            for myslot in intersecting_slots:
                myversion = versions[myslot]
                o_version = o_versions[myslot]
                # pkgcmp on the pkgsplit tails compares versions
                difference = pkgcmp(catpkgsplit(myversion)[1:],
                    catpkgsplit(o_version)[1:])
                    has_downgrade = True
            # promote a strict upgrade ahead of the choice it beats
            if has_upgrade and not has_downgrade:
                preferred.remove(possible_upgrade)
                o_index = preferred.index(other_choice)
                preferred.insert(o_index, possible_upgrade)

    # preferred now contains a) and c) from the order above with
    # the masked flag differentiating the two. other contains b)
    # and d) so adding other to preferred will give us a suitable
    # list to iterate over.
    preferred.extend(other)

    # First pass takes only unmasked choices; second pass allows masked.
    for allow_masked in (False, True):
        for atoms, versions, all_available in preferred:
            if all_available or allow_masked:

    assert(False) # This point should not be reachable
def dep_expand(mydep, mydb=None, use_cache=1, settings=None):
    """Expand the cp/cpv portion of a dependency atom via cpv_expand,
    preserving any operator prefix and slot/use postfix around it.
    NOTE(review): the assignment of orig_dep is in lines elided from
    this view.
    """
    # isolate the bare cpv inside the original atom string
    mydep = dep_getcpv(orig_dep)
    myindex = orig_dep.index(mydep)
    # everything before the cpv (e.g. ">=", "!") and after it (e.g. ":slot")
    prefix = orig_dep[:myindex]
    postfix = orig_dep[myindex+len(mydep):]
    return prefix + cpv_expand(
        mydep, mydb=mydb, use_cache=use_cache, settings=settings) + postfix
def dep_check(depstring, mydbapi, mysettings, use="yes", mode=None, myuse=None,
    use_cache=1, use_binaries=0, myroot="/", trees=None):
    """Takes a depend string and parses the condition.

    Returns a two-element list: [0, error-message] on failure, or
    [1, atom-list] with the selected dependency atoms on success.
    """
    edebug = mysettings.get("PORTAGE_DEBUG", None) == "1"
    #check_config_instance(mysettings)
        # default to the global trees when none were passed in
        trees = globals()["db"]
        myusesplit = mysettings["PORTAGE_USE"].split()
        # We've been given useflags to use.
        #print "USE FLAGS PASSED IN."
        #if "bindist" in myusesplit:
        #	print "BINDIST is set!"
        #	print "BINDIST NOT set."
        #we are being run by autouse(), don't consult USE vars yet.
        # WE ALSO CANNOT USE SETTINGS
    #convert parenthesis to sublists
        mysplit = portage.dep.paren_reduce(depstring)
    except portage.exception.InvalidDependString, e:
        useforce.add(mysettings["ARCH"])
        # This masking/forcing is only for repoman. In other cases, relevant
        # masking/forcing should have already been applied via
        # config.regenerate(). Also, binary or installed packages may have
        # been built with flags that are now masked, and it would be
        # inconsistent to mask them now. Additionally, myuse may consist of
        # flags from a parent package that is being merged to a $ROOT that is
        # different from the one that mysettings represents.
        mymasks.update(mysettings.usemask)
        mymasks.update(mysettings.archlist())
        mymasks.discard(mysettings["ARCH"])
        useforce.update(mysettings.useforce)
        useforce.difference_update(mymasks)
        # strip use-conditional groups according to the effective flags
        mysplit = portage.dep.use_reduce(mysplit, uselist=myusesplit,
            masklist=mymasks, matchall=(use=="all"), excludeall=useforce)
    except portage.exception.InvalidDependString, e:
    # Do the || conversions
    mysplit=portage.dep.dep_opconvert(mysplit)
        #dependencies were reduced to nothing
    # Recursively expand new-style virtuals so as to
    # collapse one or more levels of indirection.
        mysplit = _expand_new_virtuals(mysplit, edebug, mydbapi, mysettings,
            use=use, mode=mode, myuse=myuse, use_cache=use_cache,
            use_binaries=use_binaries, myroot=myroot, trees=trees)
    except portage.exception.ParseError, e:
    # reduce the parallel structure to satisfied/unsatisfied flags
    mysplit2=dep_wordreduce(mysplit2,mysettings,mydbapi,mode,use_cache=use_cache)
    if mysplit2 is None:
        return [0,"Invalid token"]

    writemsg("\n\n\n", 1)
    writemsg("mysplit: %s\n" % (mysplit), 1)
    writemsg("mysplit2: %s\n" % (mysplit2), 1)

    # select concrete steps from the (un)satisfied structures
    myzaps = dep_zapdeps(mysplit, mysplit2, myroot,
        use_binaries=use_binaries, trees=trees)
    mylist = flatten(myzaps)
    writemsg("myzaps: %s\n" % (myzaps), 1)
    writemsg("mylist: %s\n" % (mylist), 1)
    writemsg("mydict: %s\n" % (mydict), 1)
    return [1,mydict.keys()]
def dep_wordreduce(mydeplist,mysettings,mydbapi,mode,use_cache=1):
    "Reduces the deplist to ones and zeros"
    # work on a shallow copy so the caller's structure is untouched
    deplist=mydeplist[:]
    for mypos in xrange(len(deplist)):
        if isinstance(deplist[mypos], list):
            # nested sub-lists are reduced recursively
            deplist[mypos]=dep_wordreduce(deplist[mypos],mysettings,mydbapi,mode,use_cache=use_cache)
        elif deplist[mypos]=="||":
            mykey = dep_getkey(deplist[mypos])
            # package.provided entries count as satisfied
            if mysettings and mysettings.pprovideddict.has_key(mykey) and \
                match_from_list(deplist[mypos], mysettings.pprovideddict[mykey]):
            elif mydbapi is None:
                # Assume nothing is satisfied. This forces dep_zapdeps to
                # return all of the deps that have been selected
                # (excluding those satisfied by package.provided).
                deplist[mypos] = False
                # mode-driven match against the dbapi (xmatch supports
                # "minimum-*" query modes)
                x = mydbapi.xmatch(mode, deplist[mypos])
                if mode.startswith("minimum-"):
                    mydep=mydbapi.match(deplist[mypos],use_cache=use_cache)
            # blockers ("!atom") invert the satisfied flag
            if deplist[mypos][0]=="!":
    #encountered invalid string
def cpv_getkey(mycpv):
    """Return the category/package key for a cpv string
    (e.g. "sys-apps/foo-1.0" -> "sys-apps/foo"). NOTE(review): the
    branch handling inputs without a version is elided in this view."""
    myslash=mycpv.split("/")
    # pkgsplit separates the package name from version/revision
    mysplit=pkgsplit(myslash[-1])
    return myslash[0]+"/"+mysplit[0]
def key_expand(mykey, mydb=None, use_cache=1, settings=None):
    """Expand a (possibly category-less) package key into category/package,
    consulting mydb's categories and the virtuals mappings. Falls back to
    "null/<key>" when no category match is found."""
    mysplit=mykey.split("/")
    if settings is None:
        settings = globals()["settings"]
    # virtuals keyed by cp; virts_p keyed by bare package name
    virts = settings.getvirtuals("/")
    virts_p = settings.get_virts_p("/")
        # bare package name: search every category for a match
        if hasattr(mydb, "cp_list"):
            for x in mydb.categories:
                if mydb.cp_list(x+"/"+mykey,use_cache=use_cache):
        if virts_p.has_key(mykey):
            return(virts_p[mykey][0])
        return "null/"+mykey
        # key already has a category: prefer a virtual expansion only
        # when the cp itself has no entries
        if hasattr(mydb, "cp_list"):
            if (not mydb.cp_list(mykey,use_cache=use_cache)) and virts and virts.has_key(mykey):
                return virts[mykey][0]
def cpv_expand(mycpv, mydb=None, use_cache=1, settings=None):
    """Given a string (packagename or virtual) expand it into a valid
    cat/package string. Virtuals use the mydb to determine which provided
    virtual is a valid choice and defaults to the first element when there
    are no installed/available candidates.

    Raises ValueError when a bare package name matches multiple
    categories ambiguously.
    """
    myslash=mycpv.split("/")
    mysplit=pkgsplit(myslash[-1])
    if settings is None:
        settings = globals()["settings"]
    virts = settings.getvirtuals("/")
    virts_p = settings.get_virts_p("/")
        # this is illegal case.
    elif len(myslash)==2:
            mykey=myslash[0]+"/"+mysplit[0]
        # expand an old-style virtual category when the cp has no entries
        if mydb and virts and mykey in virts:
            writemsg("mydb.__class__: %s\n" % (mydb.__class__), 1)
            if hasattr(mydb, "cp_list"):
                if not mydb.cp_list(mykey, use_cache=use_cache):
                    writemsg("virts[%s]: %s\n" % (str(mykey),virts[mykey]), 1)
                    mykey_orig = mykey[:]
                    for vkey in virts[mykey]:
                        # The virtuals file can contain a versioned atom, so
                        # it may be necessary to remove the operator and
                        # version from the atom before it is passed into
                        if mydb.cp_list(dep_getkey(vkey), use_cache=use_cache):
                            writemsg("virts chosen: %s\n" % (mykey), 1)
                    # no provider had entries: default to the first one
                    if mykey == mykey_orig:
                        mykey=virts[mykey][0]
                        writemsg("virts defaulted: %s\n" % (mykey), 1)
            #we only perform virtual expansion if we are passed a dbapi

        #specific cpv, no category, ie. "foo-1.0"
        # search all categories of mydb for the bare name
        if mydb and hasattr(mydb, "categories"):
            for x in mydb.categories:
                if mydb.cp_list(x+"/"+myp,use_cache=use_cache):
                    matches.append(x+"/"+myp)
        if len(matches) > 1:
            virtual_name_collision = False
            if len(matches) == 2:
                    if not x.startswith("virtual/"):
                        # Assume that the non-virtual is desired. This helps
                        # avoid the ValueError for invalid deps that come from
                        # installed packages (during reverse blocker detection,
                        virtual_name_collision = True
            if not virtual_name_collision:
                raise ValueError, matches
        # fall back to the package-name virtuals mapping
        if not mykey and not isinstance(mydb, list):
            if virts_p.has_key(myp):
                mykey=virts_p[myp][0]
            #again, we only perform virtual expansion if we have a dbapi (not a list)
    # reassemble the version, dropping a trivial "-r0" revision
    if mysplit[2]=="r0":
        return mykey+"-"+mysplit[1]
        return mykey+"-"+mysplit[1]+"-"+mysplit[2]
def getmaskingreason(mycpv, metadata=None, settings=None, portdb=None, return_location=False):
    """Scan package.mask files for the comment block that explains why
    mycpv is masked. With return_location=True, returns a
    (comment, filename) tuple.

    Raises ValueError for an invalid cpv.
    """
    from portage.util import grablines
    if settings is None:
        settings = globals()["settings"]
        portdb = globals()["portdb"]
    mysplit = catpkgsplit(mycpv)
        raise ValueError("invalid CPV: %s" % mycpv)
    if metadata is None:
        db_keys = list(portdb._aux_cache_keys)
            metadata = dict(izip(db_keys, portdb.aux_get(mycpv, db_keys)))
    if not portdb.cpv_exists(mycpv):
    if metadata is None:
        # Can't access SLOT due to corruption.
        cpv_slot_list = [mycpv]
        cpv_slot_list = ["%s:%s" % (mycpv, metadata["SLOT"])]
    mycp=mysplit[0]+"/"+mysplit[1]

    # XXX- This is a temporary duplicate of code from the config constructor.
    locations = [os.path.join(settings["PORTDIR"], "profiles")]
    locations.extend(settings.profiles)
    for ov in settings["PORTDIR_OVERLAY"].split():
        profdir = os.path.join(normalize_path(ov), "profiles")
        if os.path.isdir(profdir):
            locations.append(profdir)
    locations.append(os.path.join(settings["PORTAGE_CONFIGROOT"],
        USER_CONFIG_PATH.lstrip(os.path.sep)))
    # pair each location with the lines of its package.mask file
    pmasklists = [(x, grablines(os.path.join(x, "package.mask"), recursive=1)) for x in locations]

    if settings.pmaskdict.has_key(mycp):
        for x in settings.pmaskdict[mycp]:
            if match_from_list(x, cpv_slot_list):
    # walk the raw file lines to recover the comment block preceding
    # the matching mask entry
    for pmask in pmasklists:
        pmask_filename = os.path.join(pmask[0], "package.mask")
        for i in xrange(len(pmask[1])):
            l = pmask[1][i].strip()
                comment_valid = i + 1
                if comment_valid != i:
                    return (comment, pmask_filename)
            elif comment_valid != -1:
                # Apparently this comment applies to multiple masks, so
                # it remains valid until a blank line is encountered.
def getmaskingstatus(mycpv, settings=None, portdb=None):
    """Return a list of strings describing why mycpv is masked
    (e.g. "profile", "package.mask", "<kw> keyword", license names,
    "corruption", "EAPI <x>"). An empty list means not masked.

    Raises ValueError for an invalid cpv.
    """
    if settings is None:
        settings = config(clone=globals()["settings"])
        portdb = globals()["portdb"]
    if not isinstance(mycpv, basestring):
        # emerge passed in a Package instance
        metadata = pkg.metadata
        installed = pkg.installed
    mysplit = catpkgsplit(mycpv)
        raise ValueError("invalid CPV: %s" % mycpv)
    if metadata is None:
        db_keys = list(portdb._aux_cache_keys)
            metadata = dict(izip(db_keys, portdb.aux_get(mycpv, db_keys)))
            if not portdb.cpv_exists(mycpv):
            return ["corruption"]
        # USE-conditional licenses require the effective USE to evaluate
        if "?" in metadata["LICENSE"]:
            settings.setcpv(p, mydb=metadata)
            metadata["USE"] = settings["PORTAGE_USE"]
            metadata["USE"] = ""
    mycp=mysplit[0]+"/"+mysplit[1]

    # profile-level mask checking
    if settings.getProfileMaskAtom(mycpv, metadata):
        rValue.append("profile")

    # package.mask checking
    if settings.getMaskAtom(mycpv, metadata):
        rValue.append("package.mask")

    # keywords and EAPI checking
    eapi = metadata["EAPI"]
    mygroups = metadata["KEYWORDS"]
    licenses = metadata["LICENSE"]
    slot = metadata["SLOT"]
    if eapi.startswith("-"):
    if not eapi_is_supported(eapi):
        return ["EAPI %s" % eapi]
    egroups = settings.configdict["backupenv"].get(
        "ACCEPT_KEYWORDS", "").split()
    mygroups = mygroups.split()
    pgroups = settings["ACCEPT_KEYWORDS"].split()
    myarch = settings["ARCH"]
    if pgroups and myarch not in pgroups:
        """For operating systems other than Linux, ARCH is not necessarily a
        myarch = pgroups[0].lstrip("~")
    # per-package keyword overrides from package.keywords
    cp = dep_getkey(mycpv)
    pkgdict = settings.pkeywordsdict.get(cp)
        cpv_slot_list = ["%s:%s" % (mycpv, metadata["SLOT"])]
        for atom, pkgkeywords in pkgdict.iteritems():
            if match_from_list(atom, cpv_slot_list):
                pgroups.extend(pkgkeywords)
        if matches or egroups:
            pgroups.extend(egroups)
            # incremental handling: "-kw" removes a previously added keyword
            if x.startswith("-"):
                    inc_pgroups.discard(x[1:])
            pgroups = inc_pgroups

    # determine the keyword mask label (kmask) for this arch
    for keyword in pgroups:
        if keyword in mygroups:
        elif gp=="-"+myarch and myarch in pgroups:
        elif gp=="~"+myarch and myarch in pgroups:

    # Assume that the user doesn't want to be bothered about
    # KEYWORDS of packages that are already installed.
    if kmask and not installed:
        rValue.append(kmask+" keyword")

    # license masking
    missing_licenses = settings.getMissingLicenses(mycpv, metadata)
    if missing_licenses:
        allowed_tokens = set(["||", "(", ")"])
        allowed_tokens.update(missing_licenses)
        license_split = licenses.split()
        license_split = [x for x in license_split \
            if x in allowed_tokens]
        msg = license_split[:]
        msg.append("license(s)")
        rValue.append(" ".join(msg))
    except portage.exception.InvalidDependString, e:
        rValue.append("LICENSE: "+str(e))
5990 'DEPEND', 'RDEPEND', 'SLOT', 'SRC_URI',
5991 'RESTRICT', 'HOMEPAGE', 'LICENSE', 'DESCRIPTION',
5992 'KEYWORDS', 'INHERITED', 'IUSE', 'CDEPEND',
5993 'PDEPEND', 'PROVIDE', 'EAPI',
5994 'UNUSED_01', 'UNUSED_02', 'UNUSED_03', 'UNUSED_04',
5995 'UNUSED_05', 'UNUSED_06', 'UNUSED_07',
5997 auxdbkeylen=len(auxdbkeys)
5999 from portage.dbapi import dbapi
6000 from portage.dbapi.virtual import fakedbapi
6001 from portage.dbapi.bintree import bindbapi, binarytree
6002 from portage.dbapi.vartree import vardbapi, vartree, dblink
6003 from portage.dbapi.porttree import close_portdbapi_caches, portdbapi, portagetree
6005 class FetchlistDict(UserDict.DictMixin):
6006 """This provide a mapping interface to retrieve fetch lists. It's used
6007 to allow portage.manifest.Manifest to access fetch lists via a standard
6008 mapping interface rather than use the dbapi directly."""
6009 def __init__(self, pkgdir, settings, mydbapi):
6010 """pkgdir is a directory containing ebuilds and settings is passed into
6011 portdbapi.getfetchlist for __getitem__ calls."""
6012 self.pkgdir = pkgdir
6013 self.cp = os.sep.join(pkgdir.split(os.sep)[-2:])
6014 self.settings = settings
6015 self.mytree = os.path.realpath(os.path.dirname(os.path.dirname(pkgdir)))
6016 self.portdb = mydbapi
6017 def __getitem__(self, pkg_key):
6018 """Returns the complete fetch list for a given package."""
6019 return self.portdb.getfetchlist(pkg_key, mysettings=self.settings,
6020 all=True, mytree=self.mytree)[1]
6021 def __contains__(self):
6022 return pkg_key in self.keys()
6023 def has_key(self, pkg_key):
6024 """Returns true if the given package exists within pkgdir."""
6025 return pkg_key in self
6027 """Returns keys for all packages within pkgdir"""
6028 return self.portdb.cp_list(self.cp, mytree=self.mytree)
def pkgmerge(mytbz2, myroot, mysettings, mydbapi=None, vartree=None, prev_mtimes=None):
    """will merge a .tbz2 file, returning a list of runtime dependencies
    that must be satisfied, or None if there was a merge error. This
    code assumes the package exists."""
    # default to the global trees when the caller supplied none
        mydbapi = db[myroot]["bintree"].dbapi
        vartree = db[myroot]["vartree"]
    if mytbz2[-5:]!=".tbz2":
        print "!!! Not a .tbz2 file"

    builddir_lock = None
    did_merge_phase = False
        """ Don't lock the tbz2 file because the filesytem could be readonly or
        shared by a cluster."""
        #tbz2_lock = portage.locks.lockfile(mytbz2, wantnewlockfile=1)

        mypkg = os.path.basename(mytbz2)[:-5]
        xptbz2 = portage.xpak.tbz2(mytbz2)
        # the xpak info chunk carries the package's CATEGORY
        mycat = xptbz2.getfile("CATEGORY")
            writemsg("!!! CATEGORY info missing from info chunk, aborting...\n",
        mycat = mycat.strip()

        # These are the same directories that would be used at build time.
        builddir = os.path.join(
            mysettings["PORTAGE_TMPDIR"], "portage", mycat, mypkg)
        catdir = os.path.dirname(builddir)
        pkgloc = os.path.join(builddir, "image")
        infloc = os.path.join(builddir, "build-info")
        myebuild = os.path.join(
            infloc, os.path.basename(mytbz2)[:-4] + "ebuild")
        portage.util.ensure_dirs(os.path.dirname(catdir),
            uid=portage_uid, gid=portage_gid, mode=070, mask=0)
        # lock the category dir while creating the build dir beneath it
        catdir_lock = portage.locks.lockdir(catdir)
        portage.util.ensure_dirs(catdir,
            uid=portage_uid, gid=portage_gid, mode=070, mask=0)
        builddir_lock = portage.locks.lockdir(builddir)
            portage.locks.unlockdir(catdir_lock)
            # start from a clean build directory
            shutil.rmtree(builddir)
        except (IOError, OSError), e:
            if e.errno != errno.ENOENT:
        for mydir in (builddir, pkgloc, infloc):
            portage.util.ensure_dirs(mydir, uid=portage_uid,
                gid=portage_gid, mode=0755)
        writemsg_stdout(">>> Extracting info\n")
        xptbz2.unpackinfo(infloc)
        mysettings.setcpv(mycat + "/" + mypkg, mydb=mydbapi)
        # Store the md5sum in the vdb.
        fp = open(os.path.join(infloc, "BINPKGMD5"), "w")
        fp.write(str(portage.checksum.perform_md5(mytbz2))+"\n")

        # This gives bashrc users an opportunity to do various things
        # such as remove binary packages after they're installed.
        mysettings["PORTAGE_BINPKG_FILE"] = mytbz2
        mysettings.backup_changes("PORTAGE_BINPKG_FILE")
        debug = mysettings.get("PORTAGE_DEBUG", "") == "1"

        # Eventually we'd like to pass in the saved ebuild env here.
        retval = doebuild(myebuild, "setup", myroot, mysettings, debug=debug,
            tree="bintree", mydbapi=mydbapi, vartree=vartree)
        if retval != os.EX_OK:
            writemsg("!!! Setup failed: %s\n" % retval, noiselevel=-1)

        writemsg_stdout(">>> Extracting %s\n" % mypkg)
        # unpack the payload image into pkgloc
        retval = portage.process.spawn_bash(
            "bzip2 -dqc -- '%s' | tar -xp -C '%s' -f -" % (mytbz2, pkgloc),
            env=mysettings.environ())
        if retval != os.EX_OK:
            writemsg("!!! Error Extracting '%s'\n" % mytbz2, noiselevel=-1)
        #portage.locks.unlockfile(tbz2_lock)

        mylink = dblink(mycat, mypkg, myroot, mysettings, vartree=vartree,
        retval = mylink.merge(pkgloc, infloc, myroot, myebuild, cleanup=0,
            mydbapi=mydbapi, prev_mtimes=prev_mtimes)
        did_merge_phase = True
        success = retval == os.EX_OK
        # cleanup section (the enclosing finally structure is partly
        # elided in this view)
        mysettings.pop("PORTAGE_BINPKG_FILE", None)
            portage.locks.unlockfile(tbz2_lock)
        if not did_merge_phase:
            # The merge phase handles this already. Callers don't know how
            # far this function got, so we have to call elog_process() here
            # so that it's only called once.
            from portage.elog import elog_process
            elog_process(mycat + "/" + mypkg, mysettings)
            shutil.rmtree(builddir)
        except (IOError, OSError), e:
            if e.errno != errno.ENOENT:
        portage.locks.unlockdir(builddir_lock)
            # Lock catdir for removal if empty.
            catdir_lock = portage.locks.lockdir(catdir)
                    if e.errno not in (errno.ENOENT,
                        errno.ENOTEMPTY, errno.EEXIST):
                portage.locks.unlockdir(catdir_lock)
def deprecated_profile_check():
    """Warn the user when the active profile is marked deprecated.
    Reads the deprecation notice file, prints the suggested replacement
    profile and any upgrade instructions it contains."""
    if not os.access(DEPRECATED_PROFILE_FILE, os.R_OK):
    deprecatedfile = open(DEPRECATED_PROFILE_FILE, "r")
    dcontent = deprecatedfile.readlines()
    deprecatedfile.close()
    # first line names the replacement profile
    newprofile = dcontent[0]
    writemsg(red("\n!!! Your current profile is deprecated and not supported anymore.\n"),
    writemsg(red("!!! Please upgrade to the following profile if possible:\n"),
    writemsg(8*" "+green(newprofile)+"\n", noiselevel=-1)
    # remaining lines, when present, are upgrade instructions
    if len(dcontent) > 1:
        writemsg("To upgrade do the following steps:\n", noiselevel=-1)
        for myline in dcontent[1:]:
            writemsg(myline, noiselevel=-1)
        writemsg("\n\n", noiselevel=-1)
# gets virtual package settings
def getvirtuals(myroot):
    """DEPRECATED module-level wrapper; delegates to the global
    settings object's getvirtuals()."""
    writemsg("--- DEPRECATED call to getvirtual\n")
    return settings.getvirtuals(myroot)
def commit_mtimedb(mydict=None, filename=None):
    """Pickle the mtime database to disk atomically, then apply the
    expected ownership/permissions. Defaults to the global mtimedb and
    its standard file path when arguments are omitted."""
        # nothing to commit when the global mtimedb was never created
        if "mtimedb" not in globals() or mtimedb is None:
    if filename is None:
        filename = mtimedbfile
    mydict["version"] = VERSION
    d = {} # for full backward compat, pickle it as a plain dict object.
        # atomic_ofstream writes to a temp file and renames on close
        f = atomic_ofstream(filename)
        cPickle.dump(d, f, -1)
        portage.util.apply_secpass_permissions(filename, uid=uid, gid=portage_gid, mode=0664)
    except (IOError, OSError), e:
    # body of portageexit() (its def line is elided in this view):
    # shutdown hook that flushes portdbapi caches, skipped inside sandbox
    global uid,portage_gid,portdb,db
    if secpass and not os.environ.has_key("SANDBOX_ACTIVE"):
        close_portdbapi_caches()

# run the exit hook automatically at interpreter shutdown
atexit_register(portageexit)
def _global_updates(trees, prev_mtimes):
    """
    Perform new global updates if they exist in $PORTDIR/profiles/updates/.

    @param trees: A dictionary containing portage trees.
    @param prev_mtimes: A dictionary containing mtimes of files located in
        $PORTDIR/profiles/updates/.
    @type prev_mtimes: dict
    @rtype: None or List
    @return: None if there were no updates, otherwise a list of update commands
        that have been performed.
    """
    # only do this if we're root and not running repoman/ebuild digest
    if secpass < 2 or "SANDBOX_ACTIVE" in os.environ:
    mysettings = trees["/"]["vartree"].settings
    updpath = os.path.join(mysettings["PORTDIR"], "profiles", "updates")

        # fixpackages ignores recorded mtimes and reprocesses everything
        if mysettings["PORTAGE_CALLER"] == "fixpackages":
            update_data = grab_updates(updpath)
            update_data = grab_updates(updpath, prev_mtimes)
    except portage.exception.DirectoryNotFound:
        writemsg("--- 'profiles/updates' is empty or " + \
            "not available. Empty portage tree?\n", noiselevel=1)
    if len(update_data) > 0:
        do_upgrade_packagesmessage = 0
        for mykey, mystat, mycontent in update_data:
            writemsg_stdout("\n\n")
            writemsg_stdout(green("Performing Global Updates: ")+bold(mykey)+"\n")
            writemsg_stdout("(Could take a couple of minutes if you have a lot of binary packages.)\n")
            writemsg_stdout(" "+bold(".")+"='update pass' "+bold("*")+"='binary update' "+bold("@")+"='/var/db move'\n"+" "+bold("s")+"='/var/db SLOT move' "+bold("S")+"='binary SLOT move' "+bold("p")+"='update /etc/portage/package.*'\n")
            valid_updates, errors = parse_updates(mycontent)
            myupd.extend(valid_updates)
            writemsg_stdout(len(valid_updates) * "." + "\n")
            if len(errors) == 0:
                # Update our internal mtime since we
                # processed all of our directives.
                timestamps[mykey] = long(mystat.st_mtime)
                    writemsg("%s\n" % msg, noiselevel=-1)

        # apply moves to /etc/portage/package.* config files
        update_config_files("/",
            mysettings.get("CONFIG_PROTECT","").split(),
            mysettings.get("CONFIG_PROTECT_MASK","").split(),

        trees["/"]["bintree"] = binarytree("/", mysettings["PKGDIR"],
            settings=mysettings)
        vardb = trees["/"]["vartree"].dbapi
        bindb = trees["/"]["bintree"].dbapi
        if not os.access(bindb.bintree.pkgdir, os.W_OK):
        for update_cmd in myupd:
            if update_cmd[0] == "move":
                moves = vardb.move_ent(update_cmd)
                    writemsg_stdout(moves * "@")
                    moves = bindb.move_ent(update_cmd)
                        writemsg_stdout(moves * "%")
            elif update_cmd[0] == "slotmove":
                moves = vardb.move_slot_ent(update_cmd)
                    writemsg_stdout(moves * "s")
                    moves = bindb.move_slot_ent(update_cmd)
                        writemsg_stdout(moves * "S")

        # The above global updates proceed quickly, so they
        # are considered a single mtimedb transaction.
        if len(timestamps) > 0:
            # We do not update the mtime in the mtimedb
            # until after _all_ of the above updates have
            # been processed because the mtimedb will
            # automatically commit when killed by ctrl C.
            for mykey, mtime in timestamps.iteritems():
                prev_mtimes[mykey] = mtime

        # We gotta do the brute force updates for these now.
        if mysettings["PORTAGE_CALLER"] == "fixpackages" or \
        "fixpackages" in mysettings.features:
            def onProgress(maxval, curval):
                writemsg_stdout("*")
            vardb.update_ents(myupd, onProgress=onProgress)
                bindb.update_ents(myupd, onProgress=onProgress)
            do_upgrade_packagesmessage = 1

        # Update progress above is indicated by characters written to stdout so
        # we print a couple new lines here to separate the progress output from
        if do_upgrade_packagesmessage and bindb and \
            writemsg_stdout(" ** Skipping packages. Run 'fixpackages' or set it in FEATURES to fix the")
            writemsg_stdout("\n tbz2's in the packages directory. "+bold("Note: This can take a very long time."))
            writemsg_stdout("\n")
6331 #continue setting up other trees
class MtimeDB(dict):
    """Dict subclass backed by a pickled file; tracks a clean snapshot so
    commit() only rewrites the file when the contents actually changed."""
    def __init__(self, filename):
        self.filename = filename
        self._load(filename)
    def _load(self, filename):
        """Populate self from the pickle at filename, normalizing legacy
        keys and discarding anything not in the known key set."""
            mypickle = cPickle.Unpickler(f)
            # disallow resolving arbitrary globals while unpickling
            mypickle.find_global = None
        except (IOError, OSError, EOFError, cPickle.UnpicklingError):
            # legacy layout stored updates under "old"
            d["updates"] = d["old"]
        d.setdefault("starttime", 0)
        d.setdefault("version", "")
        for k in ("info", "ldpath", "updates"):
        mtimedbkeys = set(("info", "ldpath", "resume", "resume_backup",
            "starttime", "updates", "version"))
            if k not in mtimedbkeys:
                writemsg("Deleting invalid mtimedb key: %s\n" % str(k))
        # snapshot for change detection in commit()
        self._clean_data = copy.deepcopy(d)
        # commit() body (its def line is elided in this view)
        if not self.filename:
        # Only commit if the internal state has changed.
        if d != self._clean_data:
            commit_mtimedb(mydict=d, filename=self.filename)
            self._clean_data = copy.deepcopy(d)
def create_trees(config_root=None, target_root=None, trees=None):
    """Build (or rebuild) the per-root trees dictionary with lazily
    constructed vartree/porttree/bintree/virtuals entries. When ROOT is
    not "/", a second config is created for the "/" root as well."""
        # clean up any existing portdbapi instances
        for myroot in trees:
            portdb = trees[myroot]["porttree"].dbapi
            portdb.close_caches()
            portdbapi.portdbapi_instances.remove(portdb)
            del trees[myroot]["porttree"], myroot, portdb

    settings = config(config_root=config_root, target_root=target_root,
        config_incrementals=portage.const.INCREMENTALS)

    myroots = [(settings["ROOT"], settings)]
    if settings["ROOT"] != "/":
        settings = config(config_root=None, target_root=None,
            config_incrementals=portage.const.INCREMENTALS)
        # When ROOT != "/" we only want overrides from the calling
        # environment to apply to the config that's associated
        # with ROOT != "/", so we wipe out the "backupenv" for the
        # config that is associated with ROOT == "/" and regenerate
        # its incrementals.
        # Preserve backupenv values that are initialized in the config
        # constructor. Also, preserve XARGS since it is set by the
        # portage.data module.
        backupenv_whitelist = settings._environ_whitelist
        backupenv = settings.configdict["backupenv"]
        env_d = settings.configdict["env.d"]
        for k, v in os.environ.iteritems():
            if k in backupenv_whitelist:
                # (condition start elided in this view)
                v == backupenv.get(k):
                backupenv.pop(k, None)
        settings.regenerate()
        myroots.append((settings["ROOT"], settings))

    # register lazy singletons so trees are only built on first access
    for myroot, mysettings in myroots:
        trees[myroot] = portage.util.LazyItemsDict(trees.get(myroot, None))
        trees[myroot].addLazySingleton("virtuals", mysettings.getvirtuals, myroot)
        trees[myroot].addLazySingleton(
            "vartree", vartree, myroot, categories=mysettings.categories,
            settings=mysettings)
        trees[myroot].addLazySingleton("porttree",
            portagetree, myroot, settings=mysettings)
        trees[myroot].addLazySingleton("bintree",
            binarytree, myroot, mysettings["PKGDIR"], settings=mysettings)
6434 # Initialization of legacy globals. No functions/classes below this point
6435 # please! When the above functions and classes become independent of the
6436 # below global variables, it will be possible to make the below code
6437 # conditional on a backward compatibility flag (backward compatibility could
6438 # be disabled via an environment variable, for example). This will enable new
6439 # code that is aware of this flag to import portage without the unnecessary
6440 # overhead (and other issues!) of initializing the legacy globals.
def init_legacy_globals():
	"""Initialize Portage's module-level legacy globals.

	Builds the tree databases via create_trees() and publishes them, plus a
	number of derived convenience values, as module attributes: db, settings,
	root, portdb, mtimedb, archlist, features, groups, pkglines,
	selinux_enabled, thirdpartymirrors, usedefaults, profiledir and the
	deprecated flushmtimedb().  Mutates module state only; returns nothing.
	"""
	global db, settings, root, portdb, selinux_enabled, mtimedbfile, mtimedb, \
		archlist, features, groups, pkglines, thirdpartymirrors, usedefaults, \
		profiledir, flushmtimedb

	# Portage needs to ensure a sane umask for the files it creates.
	# Build the create_trees() keyword arguments from the calling
	# environment, defaulting both roots to "/".
	# NOTE(review): kwargs appears to be initialized just above this loop
	# (not visible in this excerpt) -- confirm against the full file.
	for k, envvar in (("config_root", "PORTAGE_CONFIGROOT"), ("target_root", "ROOT")):
		kwargs[k] = os.environ.get(envvar, "/")

	# Flag consulted elsewhere to suppress work while the legacy globals
	# are still being constructed; removed again once create_trees() returns.
	global _initializing_globals
	_initializing_globals = True
	db = create_trees(**kwargs)
	del _initializing_globals

	settings = db["/"]["vartree"].settings
	portdb = db["/"]["porttree"].dbapi
	# NOTE(review): myroot is presumably bound by an enclosing ROOT != "/"
	# conditional that is not visible in this excerpt -- confirm against
	# the full file before relying on this rebinding.
	settings = db[myroot]["vartree"].settings
	portdb = db[myroot]["porttree"].dbapi

	root = settings["ROOT"]

	# Persistent mtime database used to validate caches across runs.
	mtimedbfile = os.path.join("/", CACHE_PATH.lstrip(os.path.sep), "mtimedb")
	mtimedb = MtimeDB(mtimedbfile)

	# ========================================================================
	# These attributes should not be used
	# within Portage under any circumstances.
	# ========================================================================
	archlist = settings.archlist()
	features = settings.features
	groups = settings["ACCEPT_KEYWORDS"].split()
	pkglines = settings.packages
	selinux_enabled = settings.selinux_enabled()
	thirdpartymirrors = settings.thirdpartymirrors()
	usedefaults = settings.use_defs
	if os.path.isdir(PROFILE_PATH):
		profiledir = PROFILE_PATH
	def flushmtimedb(record):
		# Deprecated no-op shim kept only for backward compatibility.
		writemsg("portage.flushmtimedb() is DEPRECATED\n")
	# ========================================================================
	# These attributes should not be used
	# within Portage under any circumstances.
	# ========================================================================
# The PORTAGE_LEGACY_GLOBALS environment variable is reserved for internal
# use within Portage.  External use of this variable is unsupported because
# it is experimental and its behavior is likely to change.
if os.environ.get("PORTAGE_LEGACY_GLOBALS") is None:
	init_legacy_globals()
6506 # ============================================================================
6507 # ============================================================================