1 # portage.py -- core Portage functionality
2 # Copyright 1998-2004 Gentoo Foundation
3 # Distributed under the terms of the GNU General Public License v2
7 VERSION="$Rev$"[6:-2] + "-svn"
9 # ===========================================================================
10 # START OF IMPORTS -- START OF IMPORTS -- START OF IMPORTS -- START OF IMPORT
11 # ===========================================================================
23 import cPickle as pickle
29 from time import sleep
30 from random import shuffle
32 from itertools import chain, izip
35 except ImportError, e:
36 sys.stderr.write("\n\n")
37 sys.stderr.write("!!! Failed to complete python imports. These are internal modules for\n")
38 sys.stderr.write("!!! python and failure here indicates that you have a problem with python\n")
39 sys.stderr.write("!!! itself and thus portage is not able to continue processing.\n\n")
41 sys.stderr.write("!!! You might consider starting python with verbose flags to see what has\n")
42 sys.stderr.write("!!! gone wrong. Here is the information we got for this exception:\n")
43 sys.stderr.write(" "+str(e)+"\n\n");
47 if platform.system() in ["FreeBSD"]:
# FreeBSD-only shim: replace the bsd_chflags module functions with wrappers
# around the chflags(1) command-line tool.
# NOTE(review): interior lines are missing from this chunk; annotations below
# describe only the visible code.
50 def _chflags(path, flags, opts=""):
# Build and run "chflags <opts> <octal-flags> '<path>'" via the (Python 2)
# commands module; success is a clean exit with status EX_OK.
51 cmd = "chflags %s %o '%s'" % (opts, flags, path)
52 status, output = commands.getstatusoutput(cmd)
53 if os.WIFEXITED(status) and os.WEXITSTATUS(status) == os.EX_OK:
55 # Try to generate an ENOENT error if appropriate.
60 # Make sure the binary exists.
61 if not portage.process.find_binary("chflags"):
62 raise portage.exception.CommandNotFound("chflags")
63 # Now we're not sure exactly why it failed or what
64 # the real errno was, so just report EPERM.
65 e = OSError(errno.EPERM, output)
# -h makes chflags act on a symlink itself rather than its target.
70 def _lchflags(path, flags):
71 return _chflags(path, flags, opts="-h")
# Monkey-patch the module so the rest of portage uses these wrappers.
72 bsd_chflags.chflags = _chflags
73 bsd_chflags.lchflags = _lchflags
76 from portage.cache.cache_errors import CacheError
77 import portage.cvstree
79 import portage.getbinpkg
81 from portage.dep import dep_getcpv, dep_getkey, get_operator, \
82 isjustname, isspecific, isvalidatom, \
83 match_from_list, match_to_list, best_match_to_list
85 # XXX: This needs to get cleaned up.
87 from portage.output import bold, colorize, green, red, yellow
90 from portage.const import VDB_PATH, PRIVATE_PATH, CACHE_PATH, DEPCACHE_PATH, \
91 USER_CONFIG_PATH, MODULES_FILE_PATH, CUSTOM_PROFILE_PATH, PORTAGE_BASE_PATH, \
92 PORTAGE_BIN_PATH, PORTAGE_PYM_PATH, PROFILE_PATH, LOCALE_DATA_PATH, \
93 EBUILD_SH_BINARY, SANDBOX_BINARY, BASH_BINARY, \
94 MOVE_BINARY, PRELINK_BINARY, WORLD_FILE, MAKE_CONF_FILE, MAKE_DEFAULTS_FILE, \
95 DEPRECATED_PROFILE_FILE, USER_VIRTUALS_FILE, EBUILD_SH_ENV_FILE, \
96 INVALID_ENV_FILE, CUSTOM_MIRRORS_FILE, CONFIG_MEMORY_FILE,\
97 INCREMENTALS, EAPI, MISC_SH_BINARY, REPO_NAME_LOC, REPO_NAME_FILE
99 from portage.data import ostype, lchown, userland, secpass, uid, wheelgid, \
100 portage_uid, portage_gid, userpriv_groups
101 from portage.manifest import Manifest
104 from portage.util import atomic_ofstream, apply_secpass_permissions, apply_recursive_permissions, \
105 dump_traceback, getconfig, grabdict, grabdict_package, grabfile, grabfile_package, \
106 map_dictlist_vals, new_protect_filename, normalize_path, \
107 pickle_read, pickle_write, stack_dictlist, stack_dicts, stack_lists, \
108 unique_array, varexpand, writedict, writemsg, writemsg_stdout, write_atomic
109 import portage.exception
111 import portage.process
112 from portage.process import atexit_register, run_exitfuncs
113 from portage.locks import unlockfile,unlockdir,lockfile,lockdir
114 import portage.checksum
115 from portage.checksum import perform_md5,perform_checksum,prelink_capable
116 import portage.eclass_cache
117 from portage.localization import _
118 from portage.update import dep_transform, fixdbentries, grab_updates, \
119 parse_updates, update_config_files, update_dbentries, update_dbentry
121 # Need these functions directly in portage namespace to not break every external tool in existence
122 from portage.versions import best, catpkgsplit, catsplit, pkgcmp, \
123 pkgsplit, vercmp, ververify
125 # endversion and endversion_keys are for backward compatibility only.
126 from portage.versions import endversion_keys
127 from portage.versions import suffix_value as endversion
129 except ImportError, e:
130 sys.stderr.write("\n\n")
131 sys.stderr.write("!!! Failed to complete portage imports. There are internal modules for\n")
132 sys.stderr.write("!!! portage and failure here indicates that you have a problem with your\n")
133 sys.stderr.write("!!! installation of portage. Please try a rescue portage located in the\n")
134 sys.stderr.write("!!! portage tree under '/usr/portage/sys-apps/portage/files/' (default).\n")
135 sys.stderr.write("!!! There is a README.RESCUE file that details the steps required to perform\n")
136 sys.stderr.write("!!! a recovery of portage.\n")
137 sys.stderr.write(" "+str(e)+"\n\n")
142 import portage._selinux as selinux
144 writemsg("!!! SELinux not loaded: %s\n" % str(e), noiselevel=-1)
149 # ===========================================================================
150 # END OF IMPORTS -- END OF IMPORTS -- END OF IMPORTS -- END OF IMPORTS -- END
151 # ===========================================================================
155 modname = ".".join(name.split(".")[:-1])
156 mod = __import__(modname)
157 components = name.split('.')
158 for comp in components[1:]:
159 mod = getattr(mod, comp)
# Look up `key` in a dict-of-dicts `top_dict`, trying the sub-dicts named in
# `key_order` (iteration line is missing from this chunk); FullCopy controls
# whether a deepcopy of the value is returned. Raises KeyError if not found.
# NOTE(review): EmptyOnError/AllowEmpty handling is in lines missing here.
162 def best_from_dict(key, top_dict, key_order, EmptyOnError=1, FullCopy=1, AllowEmpty=1):
164 if x in top_dict and key in top_dict[x]:
# Deep copy so callers cannot mutate the stored configuration value.
166 return copy.deepcopy(top_dict[x][key])
168 return top_dict[x][key]
172 raise KeyError("Key not found in list; '%s'" % key)
175 "this fixes situations where the current directory doesn't exist"
178 except OSError: #dir doesn't exist
183 def abssymlink(symlink):
184 "This reads symlinks, resolving the relative symlinks, and returning the absolute."
185 mylink=os.readlink(symlink)
# NOTE(review): a line is missing here — presumably the check that the link
# target is relative before prefixing the symlink's directory; confirm
# against the full file.
187 mydir=os.path.dirname(symlink)
188 mylink=mydir+"/"+mylink
# normpath collapses any "../" components introduced by the relative target.
189 return os.path.normpath(mylink)
# mtime-cached directory listing. Returns (names, types) where types marks
# each entry (regular file / directory / symlink branches are visible below).
# Results are cached in the module-level `dircache`, keyed by normalized path
# and invalidated when the directory mtime changes.
# NOTE(review): many lines are missing from this chunk; cacheHit/cacheMiss/
# cacheStale bookkeeping and parts of the control flow are not visible.
195 def cacheddir(my_original_path, ignorecvs, ignorelist, EmptyOnError, followSymlinks=True):
196 global cacheHit,cacheMiss,cacheStale
197 mypath = normalize_path(my_original_path)
198 if mypath in dircache:
200 cached_mtime, list, ftype = dircache[mypath]
203 cached_mtime, list, ftype = -1, [], []
205 pathstat = os.stat(mypath)
206 if stat.S_ISDIR(pathstat[stat.ST_MODE]):
207 mtime = pathstat.st_mtime
209 raise portage.exception.DirectoryNotFound(mypath)
210 except EnvironmentError, e:
211 if e.errno == portage.exception.PermissionDenied.errno:
212 raise portage.exception.PermissionDenied(mypath)
217 except portage.exception.PortageException:
221 # Python returns mtime in seconds, so if it was changed in the last few seconds, it could be invalid
222 if mtime != cached_mtime or time.time() - mtime < 4:
223 if mypath in dircache:
226 list = os.listdir(mypath)
227 except EnvironmentError, e:
228 if e.errno != errno.EACCES:
231 raise portage.exception.PermissionDenied(mypath)
# stat follows symlinks; lstat is used when followSymlinks is False.
236 pathstat = os.stat(mypath+"/"+x)
238 pathstat = os.lstat(mypath+"/"+x)
240 if stat.S_ISREG(pathstat[stat.ST_MODE]):
242 elif stat.S_ISDIR(pathstat[stat.ST_MODE]):
244 elif stat.S_ISLNK(pathstat[stat.ST_MODE]):
248 except (IOError, OSError):
250 dircache[mypath] = mtime, list, ftype
# Filter the cached listing: drop CVS-style ".#..." entries when ignorecvs,
# otherwise drop anything named in ignorelist.
254 for x in range(0, len(list)):
255 if(ignorecvs and (len(list[x]) > 2) and (list[x][:2]!=".#")):
256 ret_list.append(list[x])
257 ret_ftype.append(ftype[x])
258 elif (list[x] not in ignorelist):
259 ret_list.append(list[x])
260 ret_ftype.append(ftype[x])
262 writemsg("cacheddirStats: H:%d/M:%d/S:%d\n" % (cacheHit, cacheMiss, cacheStale),10)
263 return ret_list, ret_ftype
# NOTE(review): lines are missing from this chunk (rlist initialization,
# recursion plumbing); comments below cover only the visible code.
265 def listdir(mypath, recursive=False, filesonly=False, ignorecvs=False, ignorelist=[], followSymlinks=True,
266 EmptyOnError=False, dirsonly=False):
268 Portage-specific implementation of os.listdir
270 @param mypath: Path whose contents you wish to list
272 @param recursive: Recursively scan directories contained within mypath
273 @type recursive: Boolean
274 @param filesonly: Only return files, not more directories
275 @type filesonly: Boolean
276 @param ignorecvs: Ignore CVS directories ('CVS','.svn','SCCS')
277 @type ignorecvs: Boolean
278 @param ignorelist: List of filenames/directories to exclude
279 @type ignorelist: List
280 @param followSymlinks: Follow Symlink'd files and directories
281 @type followSymlinks: Boolean
282 @param EmptyOnError: Return [] if an error occurs.
283 @type EmptyOnError: Boolean
284 @param dirsonly: Only return directories.
285 @type dirsonly: Boolean
287 @returns: A list of files and directories (or just files or just directories) or an empty list.
# Delegate the raw (cached) listing to cacheddir.
290 list, ftype = cacheddir(mypath, ignorecvs, ignorelist, EmptyOnError, followSymlinks)
# Fast path: plain listing with no filtering or recursion requested.
297 if not (filesonly or dirsonly or recursive):
# ftype==1 marks directories; recurse into them unless they are VCS dirs.
303 if ftype[x]==1 and not (ignorecvs and os.path.basename(list[x]) in ('CVS','.svn','SCCS')):
304 l,f = cacheddir(mypath+"/"+list[x], ignorecvs, ignorelist, EmptyOnError,
# Prefix child entries with their parent directory name.
308 for y in range(0,len(l)):
309 l[y]=list[x]+"/"+l[y]
315 for x in range(0,len(ftype)):
317 rlist=rlist+[list[x]]
320 for x in range(0, len(ftype)):
322 rlist = rlist + [list[x]]
328 def flatten(mytokens):
329 """this function now turns a [1,[2,3]] list into
330 a [1,2,3] list and returns it."""
# NOTE(review): the newlist initialization, loop header, non-list branch and
# return are in lines missing from this chunk.
333 if isinstance(x, list):
# Recursively flatten nested sub-lists.
334 newlist.extend(flatten(x))
339 #beautiful directed graph object
# NOTE(review): many interior lines of this class are missing from this chunk
# (e.g. __init__ header, __iter__ header, copy() setup); comments describe
# only the visible code.
341 class digraph(object):
343 """Create an empty digraph"""
345 # { node : ( { child : priority } , { parent : priority } ) }
349 def add(self, node, parent, priority=0):
350 """Adds the specified node with the specified parent.
352 If the dep is a soft-dep and the node already has a hard
353 relationship to the parent, the relationship is left as hard."""
# Insert node (and parent, below) into the registry and ordering list if new.
355 if node not in self.nodes:
356 self.nodes[node] = ({}, {}, node)
357 self.order.append(node)
362 if parent not in self.nodes:
363 self.nodes[parent] = ({}, {}, parent)
364 self.order.append(parent)
# Record the edge in both directions, keeping the highest priority seen.
366 if parent in self.nodes[node][1]:
367 if priority > self.nodes[node][1][parent]:
368 self.nodes[node][1][parent] = priority
370 self.nodes[node][1][parent] = priority
372 if node in self.nodes[parent][0]:
373 if priority > self.nodes[parent][0][node]:
374 self.nodes[parent][0][node] = priority
376 self.nodes[parent][0][node] = priority
378 def remove(self, node):
379 """Removes the specified node from the digraph, also removing
380 any ties to other nodes in the digraph. Raises KeyError if the
381 node doesn't exist."""
383 if node not in self.nodes:
# Detach this node from every neighbor's edge maps before dropping it.
386 for parent in self.nodes[node][1]:
387 del self.nodes[parent][0][node]
388 for child in self.nodes[node][0]:
389 del self.nodes[child][1][node]
392 self.order.remove(node)
394 def difference_update(self, t):
396 Remove all given nodes from node_set. This is more efficient
397 than multiple calls to the remove() method.
# Normalize t to something supporting fast membership tests.
399 if isinstance(t, (list, tuple)) or \
400 not hasattr(t, "__contains__"):
403 for node in self.order:
407 for parent in self.nodes[node][1]:
408 del self.nodes[parent][0][node]
409 for child in self.nodes[node][0]:
410 del self.nodes[child][1][node]
414 def remove_edge(self, child, parent):
416 Remove edge in the direction from child to parent. Note that it is
417 possible for a remaining edge to exist in the opposite direction.
418 Any endpoint vertices that become isolated will remain in the graph.
421 # Nothing should be modified when a KeyError is raised.
422 for k in parent, child:
423 if k not in self.nodes:
426 # Make sure the edge exists.
427 if child not in self.nodes[parent][0]:
428 raise KeyError(child)
429 if parent not in self.nodes[child][1]:
430 raise KeyError(parent)
# Remove the edge from both endpoint's maps.
433 del self.nodes[child][1][parent]
434 del self.nodes[parent][0][child]
437 return iter(self.order)
439 def contains(self, node):
440 """Checks if the digraph contains mynode"""
441 return node in self.nodes
443 def get(self, key, default=None):
# Use self as a sentinel to distinguish "missing" from a stored None.
444 node_data = self.nodes.get(key, self)
445 if node_data is self:
450 """Return a list of all nodes in the graph"""
453 def child_nodes(self, node, ignore_priority=None):
454 """Return all children of the specified node"""
455 if ignore_priority is None:
456 return self.nodes[node][0].keys()
# Only include children whose edge priority exceeds the threshold.
458 for child, priority in self.nodes[node][0].iteritems():
459 if priority > ignore_priority:
460 children.append(child)
463 def parent_nodes(self, node):
464 """Return all parents of the specified node"""
465 return self.nodes[node][1].keys()
467 def leaf_nodes(self, ignore_priority=None):
468 """Return all nodes that have no children
470 If ignore_soft_deps is True, soft deps are not counted as
471 children in calculations."""
474 for node in self.order:
476 for child in self.nodes[node][0]:
477 if self.nodes[node][0][child] > ignore_priority:
481 leaf_nodes.append(node)
484 def root_nodes(self, ignore_priority=None):
485 """Return all nodes that have no parents.
487 If ignore_soft_deps is True, soft deps are not counted as
488 parents in calculations."""
491 for node in self.order:
493 for parent in self.nodes[node][1]:
494 if self.nodes[node][1][parent] > ignore_priority:
498 root_nodes.append(node)
502 """Checks if the digraph is empty"""
503 return len(self.nodes) == 0
# Shallow-copy each node's edge dicts so the clone is independent.
508 for k, v in self.nodes.iteritems():
509 clone.nodes[k] = (v[0].copy(), v[1].copy(), v[2])
510 clone.order = self.order[:]
513 # Backward compatibility
516 allzeros = leaf_nodes
518 __contains__ = contains
522 def delnode(self, node):
529 leaf_nodes = self.leaf_nodes()
534 def hasallzeros(self, ignore_priority=None):
535 return len(self.leaf_nodes(ignore_priority=ignore_priority)) == \
# Debug dump of every node and its children (Python 2 print statements).
538 def debug_print(self):
539 for node in self.nodes:
541 if self.nodes[node][0]:
544 print "(no children)"
545 for child in self.nodes[node][0]:
547 print "(%s)" % self.nodes[node][0][child]
549 #parse /etc/env.d and generate /etc/profile.env
# Regenerate the merged environment from ROOT/etc/env.d: rebuilds
# /etc/profile.env, /etc/csh.env, /etc/ld.so.conf and /etc/prelink.conf, and
# runs ldconfig when the library-path configuration changed.
# NOTE(review): many lines are missing from this chunk; comments describe the
# visible code only.
551 def env_update(makelinks=1, target_root=None, prev_mtimes=None, contents=None,
552 env=None, writemsg_level=portage.util.writemsg_level):
# Default to the global settings ROOT and the mtimedb ldpath cache.
553 if target_root is None:
555 target_root = settings["ROOT"]
556 if prev_mtimes is None:
558 prev_mtimes = mtimedb["ldpath"]
561 envd_dir = os.path.join(target_root, "etc", "env.d")
562 portage.util.ensure_dirs(envd_dir, mode=0755)
563 fns = listdir(envd_dir, EmptyOnError=1)
# Only process files named with a two-digit prefix; skip hidden/backup files.
569 if not x[0].isdigit() or not x[1].isdigit():
571 if x.startswith(".") or x.endswith("~") or x.endswith(".bak"):
# Variables merged cumulatively across env.d files, by separator type.
577 space_separated = set(["CONFIG_PROTECT", "CONFIG_PROTECT_MASK"])
578 colon_separated = set(["ADA_INCLUDE_PATH", "ADA_OBJECTS_PATH",
579 "CLASSPATH", "INFODIR", "INFOPATH", "KDEDIRS", "LDPATH", "MANPATH",
580 "PATH", "PKG_CONFIG_PATH", "PRELINK_PATH", "PRELINK_PATH_MASK",
581 "PYTHONPATH", "ROOTPATH"])
# Parse each env.d file; tolerate parse errors and vanished files.
586 file_path = os.path.join(envd_dir, x)
588 myconfig = getconfig(file_path, expand=False)
589 except portage.exception.ParseError, e:
590 writemsg("!!! '%s'\n" % str(e), noiselevel=-1)
594 # broken symlink or file removed by a concurrent process
595 writemsg("!!! File Not Found: '%s'\n" % file_path, noiselevel=-1)
597 config_list.append(myconfig)
# env.d files may declare additional cumulative variables themselves.
598 if "SPACE_SEPARATED" in myconfig:
599 space_separated.update(myconfig["SPACE_SEPARATED"].split())
600 del myconfig["SPACE_SEPARATED"]
601 if "COLON_SEPARATED" in myconfig:
602 colon_separated.update(myconfig["COLON_SEPARATED"].split())
603 del myconfig["COLON_SEPARATED"]
# Merge cumulative variables across all files, preserving first-seen order
# and deduplicating entries.
607 for var in space_separated:
609 for myconfig in config_list:
611 for item in myconfig[var].split():
612 if item and not item in mylist:
614 del myconfig[var] # prepare for env.update(myconfig)
616 env[var] = " ".join(mylist)
617 specials[var] = mylist
619 for var in colon_separated:
621 for myconfig in config_list:
623 for item in myconfig[var].split(":"):
624 if item and not item in mylist:
626 del myconfig[var] # prepare for env.update(myconfig)
628 env[var] = ":".join(mylist)
629 specials[var] = mylist
631 for myconfig in config_list:
632 """Cumulative variables have already been deleted from myconfig so that
633 they won't be overwritten by this dict.update call."""
# Compare the existing ld.so.conf with the new LDPATH list to decide whether
# ldconfig needs to run.
636 ldsoconf_path = os.path.join(target_root, "etc", "ld.so.conf")
638 myld = open(ldsoconf_path)
639 myldlines=myld.readlines()
643 #each line has at least one char (a newline)
647 except (IOError, OSError), e:
648 if e.errno != errno.ENOENT:
652 ld_cache_update=False
654 newld = specials["LDPATH"]
656 #ld.so.conf needs updating and ldconfig needs to be run
657 myfd = atomic_ofstream(ldsoconf_path)
658 myfd.write("# ld.so.conf autogenerated by env-update; make all changes to\n")
659 myfd.write("# contents of /etc/env.d directory\n")
660 for x in specials["LDPATH"]:
665 # Update prelink.conf if we are prelink-enabled
667 newprelink = atomic_ofstream(
668 os.path.join(target_root, "etc", "prelink.conf"))
669 newprelink.write("# prelink.conf autogenerated by env-update; make all changes to\n")
670 newprelink.write("# contents of /etc/env.d directory\n")
672 for x in ["/bin","/sbin","/usr/bin","/usr/sbin","/lib","/usr/lib"]:
673 newprelink.write("-l "+x+"\n");
674 for x in specials["LDPATH"]+specials["PATH"]+specials["PRELINK_PATH"]:
680 for y in specials["PRELINK_PATH_MASK"]:
689 newprelink.write("-h "+x+"\n")
690 for x in specials["PRELINK_PATH_MASK"]:
691 newprelink.write("-b "+x+"\n")
694 # Portage stores mtimes with 1 second granularity but in >=python-2.5 finer
695 # granularity is possible. In order to avoid the potential ambiguity of
696 # mtimes that differ by less than 1 second, sleep here if any of the
697 # directories have been modified during the current second.
698 sleep_for_mtime_granularity = False
699 current_time = long(time.time())
700 mtime_changed = False
# Check whether any library directory's mtime changed since the last run.
702 for lib_dir in portage.util.unique_array(specials["LDPATH"]+['usr/lib','usr/lib64','usr/lib32','lib','lib64','lib32']):
703 x = os.path.join(target_root, lib_dir.lstrip(os.sep))
705 newldpathtime = long(os.stat(x).st_mtime)
706 lib_dirs.add(normalize_path(x))
708 if oe.errno == errno.ENOENT:
713 # ignore this path because it doesn't exist
716 if newldpathtime == current_time:
717 sleep_for_mtime_granularity = True
719 if prev_mtimes[x] == newldpathtime:
722 prev_mtimes[x] = newldpathtime
725 prev_mtimes[x] = newldpathtime
729 ld_cache_update = True
# If a contents mapping was supplied and no mtime change was seen, scan the
# installed files for objects/symlinks living in the library directories.
732 not ld_cache_update and \
733 contents is not None:
734 libdir_contents_changed = False
735 for mypath, mydata in contents.iteritems():
736 if mydata[0] not in ("obj","sym"):
738 head, tail = os.path.split(mypath)
740 libdir_contents_changed = True
742 if not libdir_contents_changed:
# Use a cross-compile ldconfig when CHOST differs from CBUILD.
745 ldconfig = "/sbin/ldconfig"
746 if "CHOST" in env and "CBUILD" in env and \
747 env["CHOST"] != env["CBUILD"]:
748 from portage.process import find_binary
749 ldconfig = find_binary("%s-ldconfig" % env["CHOST"])
751 # Only run ldconfig as needed
752 if (ld_cache_update or makelinks) and ldconfig:
753 # ldconfig has very different behaviour between FreeBSD and Linux
754 if ostype=="Linux" or ostype.lower().endswith("gnu"):
755 # We can't update links if we haven't cleaned other versions first, as
756 # an older package installed ON TOP of a newer version will cause ldconfig
757 # to overwrite the symlinks we just made. -X means no links. After 'clean'
758 # we can safely create links.
759 writemsg_level(">>> Regenerating %setc/ld.so.cache...\n" % \
762 os.system("cd / ; %s -r '%s'" % (ldconfig, target_root))
764 os.system("cd / ; %s -X -r '%s'" % (ldconfig, target_root))
765 elif ostype in ("FreeBSD","DragonFly"):
766 writemsg_level(">>> Regenerating %svar/run/ld-elf.so.hints...\n" % \
768 os.system(("cd / ; %s -elf -i " + \
769 "-f '%svar/run/ld-elf.so.hints' '%setc/ld.so.conf'") % \
770 (ldconfig, target_root, target_root))
772 del specials["LDPATH"]
# Write the generated shell environment files with do-not-edit notices.
774 penvnotice = "# THIS FILE IS AUTOMATICALLY GENERATED BY env-update.\n"
775 penvnotice += "# DO NOT EDIT THIS FILE. CHANGES TO STARTUP PROFILES\n"
776 cenvnotice = penvnotice[:]
777 penvnotice += "# GO INTO /etc/profile NOT /etc/profile.env\n\n"
778 cenvnotice += "# GO INTO /etc/csh.cshrc NOT /etc/csh.env\n\n"
780 #create /etc/profile.env for bash support
781 outfile = atomic_ofstream(os.path.join(target_root, "etc", "profile.env"))
782 outfile.write(penvnotice)
784 env_keys = [ x for x in env if x != "LDPATH" ]
# Values beginning with '$' (but not '${') are emitted with bash $'...'
# quoting so escapes are interpreted.
788 if v.startswith('$') and not v.startswith('${'):
789 outfile.write("export %s=$'%s'\n" % (k, v[1:]))
791 outfile.write("export %s='%s'\n" % (k, v))
794 #create /etc/csh.env for (t)csh support
795 outfile = atomic_ofstream(os.path.join(target_root, "etc", "csh.env"))
796 outfile.write(cenvnotice)
798 outfile.write("setenv %s '%s'\n" % (x, env[x]))
# Wait out the current second so later mtime comparisons are unambiguous.
801 if sleep_for_mtime_granularity:
802 while current_time == long(time.time()):
# NOTE(review): lines are missing from this chunk (try headers, version
# assembly from VERSION/PATCHLEVEL/SUBLEVEL); comments cover visible code.
805 def ExtractKernelVersion(base_dir):
807 Try to figure out what kernel version we are running
808 @param base_dir: Path to sources (usually /usr/src/linux)
809 @type base_dir: string
810 @rtype: tuple( version[string], error[string])
812 1. tuple( version[string], error[string])
813 Either version or error is populated (but never both)
# Read the top of the kernel Makefile; any I/O error is returned, not raised.
817 pathname = os.path.join(base_dir, 'Makefile')
819 f = open(pathname, 'r')
820 except OSError, details:
821 return (None, str(details))
822 except IOError, details:
823 return (None, str(details))
827 lines.append(f.readline())
828 except OSError, details:
829 return (None, str(details))
830 except IOError, details:
831 return (None, str(details))
833 lines = [l.strip() for l in lines]
837 #XXX: The following code relies on the ordering of vars within the Makefile
839 # split on the '=' then remove annoying whitespace
840 items = line.split("=")
841 items = [i.strip() for i in items]
842 if items[0] == 'VERSION' or \
843 items[0] == 'PATCHLEVEL':
846 elif items[0] == 'SUBLEVEL':
# EXTRAVERSION may legitimately be empty; only use it when a value exists.
848 elif items[0] == 'EXTRAVERSION' and \
849 items[-1] != items[0]:
852 # Grab a list of files named localversion* and sort them
853 localversions = os.listdir(base_dir)
# Iterate backwards so entries can be removed while filtering.
854 for x in range(len(localversions)-1,-1,-1):
855 if localversions[x][:12] != "localversion":
859 # Append the contents of each to the version string, stripping ALL whitespace
860 for lv in localversions:
861 version += "".join( " ".join( grabfile( base_dir+ "/" + lv ) ).split() )
863 # Check the .config for a CONFIG_LOCALVERSION and append that too, also stripping whitespace
864 kernelconfig = getconfig(base_dir+"/.config")
865 if kernelconfig and "CONFIG_LOCALVERSION" in kernelconfig:
866 version += "".join(kernelconfig["CONFIG_LOCALVERSION"].split())
868 return (version,None)
# NOTE(review): lines are missing from this chunk (myusevars initialization,
# early-return branches); comments cover only the visible code.
870 def autouse(myvartree, use_cache=1, mysettings=None):
872 autouse returns a list of USE variables auto-enabled to packages being installed
874 @param myvartree: Instance of the vartree class (from /var/db/pkg...)
875 @type myvartree: vartree
876 @param use_cache: read values from cache
877 @type use_cache: Boolean
878 @param mysettings: Instance of config
879 @type mysettings: config
881 @returns: A string containing a list of USE variables that are enabled via use.defaults
# Fall back to the global settings object when no config is supplied.
883 if mysettings is None:
885 mysettings = settings
886 if mysettings.profile_path is None:
# A USE flag is auto-enabled only if every dep listed for it in use.defaults
# matches an installed package.
889 usedefaults = mysettings.use_defs
890 for myuse in usedefaults:
892 for mydep in usedefaults[myuse]:
893 if not myvartree.dep_match(mydep,use_cache=True):
897 myusevars += " "+myuse
def check_config_instance(test):
	"""Validate that *test* is a portage ``config`` object.

	Raises TypeError (with the offending class named in the message) when
	the argument is anything else; returns None on success.
	"""
	if isinstance(test, config):
		return
	raise TypeError("Invalid type for config object: %s (should be %s)" % (test.__class__, config))
904 class config(object):
906 This class encompasses the main portage configuration. Data is pulled from
907 ROOT/PORTDIR/profiles/, from ROOT/etc/make.profile incrementally through all
908 parent profiles as well as from ROOT/PORTAGE_CONFIGROOT/* for user specified
911 Generally if you need data like USE flags, FEATURES, environment variables,
912 virtuals ...etc you look in here.
915 _environ_whitelist = []
917 # Whitelisted variables are always allowed to enter the ebuild
918 # environment. Generally, this only includes special portage
919 # variables. Ebuilds can unset variables that are not whitelisted
920 # and rely on them remaining unset for future phases, without them
921 # leaking back in from various locations (bug #189417). It's very
922 # important to set our special BASH_ENV variable in the ebuild
923 # environment in order to prevent sandbox from sourcing /etc/profile
924 # in it's bashrc (causing major leakage).
925 _environ_whitelist += [
926 "BASH_ENV", "BUILD_PREFIX", "D",
927 "DISTDIR", "DOC_SYMLINKS_DIR", "EBUILD",
928 "EBUILD_EXIT_STATUS_FILE", "EBUILD_FORCE_TEST",
929 "EBUILD_PHASE", "ECLASSDIR", "ECLASS_DEPTH", "EMERGE_FROM",
930 "FEATURES", "FILESDIR", "HOME", "PATH",
932 "PKGUSE", "PKG_LOGDIR", "PKG_TMPDIR",
933 "PORTAGE_ACTUAL_DISTDIR", "PORTAGE_ARCHLIST",
935 "PORTAGE_BINPKG_FILE", "PORTAGE_BINPKG_TAR_OPTS",
936 "PORTAGE_BINPKG_TMPFILE",
938 "PORTAGE_BUILDDIR", "PORTAGE_COLORMAP",
939 "PORTAGE_CONFIGROOT", "PORTAGE_DEBUG", "PORTAGE_DEPCACHEDIR",
940 "PORTAGE_GID", "PORTAGE_INST_GID", "PORTAGE_INST_UID",
942 "PORTAGE_LOG_FILE", "PORTAGE_MASTER_PID",
943 "PORTAGE_PYM_PATH", "PORTAGE_QUIET",
944 "PORTAGE_REPO_NAME", "PORTAGE_RESTRICT",
945 "PORTAGE_TMPDIR", "PORTAGE_UPDATE_ENV",
946 "PORTAGE_VERBOSE", "PORTAGE_WORKDIR_MODE",
947 "PORTDIR", "PORTDIR_OVERLAY", "PREROOTPATH", "PROFILE_PATHS",
948 "ROOT", "ROOTPATH", "STARTDIR", "T", "TMP", "TMPDIR",
949 "USE_EXPAND", "USE_ORDER", "WORKDIR",
953 # user config variables
954 _environ_whitelist += [
955 "DOC_SYMLINKS_DIR", "INSTALL_MASK", "PKG_INSTALL_MASK"
958 _environ_whitelist += [
959 "A", "AA", "CATEGORY", "P", "PF", "PN", "PR", "PV", "PVR"
962 # misc variables inherited from the calling environment
963 _environ_whitelist += [
964 "COLORTERM", "DISPLAY", "EDITOR", "LESS",
965 "LESSOPEN", "LOGNAME", "LS_COLORS", "PAGER",
966 "TERM", "TERMCAP", "USER",
969 # other variables inherited from the calling environment
970 _environ_whitelist += [
971 "CVS_RSH", "ECHANGELOG_USER",
973 "SSH_AGENT_PID", "SSH_AUTH_SOCK",
974 "STY", "WINDOW", "XAUTHORITY",
977 _environ_whitelist = frozenset(_environ_whitelist)
979 _environ_whitelist_re = re.compile(r'^(CCACHE_|DISTCC_).*')
981 # Filter selected variables in the config.environ() method so that
982 # they don't needlessly propagate down into the ebuild environment.
985 # misc variables inherited from the calling environment
987 "INFOPATH", "MANPATH",
990 # variables that break bash
995 # portage config variables and variables set directly by portage
997 "ACCEPT_KEYWORDS", "AUTOCLEAN",
998 "CLEAN_DELAY", "COLLISION_IGNORE", "CONFIG_PROTECT",
999 "CONFIG_PROTECT_MASK", "EMERGE_DEFAULT_OPTS",
1000 "EMERGE_WARNING_DELAY", "FETCHCOMMAND", "FETCHCOMMAND_FTP",
1001 "FETCHCOMMAND_HTTP", "FETCHCOMMAND_SFTP",
1002 "GENTOO_MIRRORS", "NOCONFMEM", "O",
1003 "PORTAGE_BACKGROUND",
1004 "PORTAGE_BINHOST_CHUNKSIZE", "PORTAGE_CALLER",
1005 "PORTAGE_COUNTER_HASH",
1006 "PORTAGE_ECLASS_WARNING_ENABLE", "PORTAGE_ELOG_CLASSES",
1007 "PORTAGE_ELOG_MAILFROM", "PORTAGE_ELOG_MAILSUBJECT",
1008 "PORTAGE_ELOG_MAILURI", "PORTAGE_ELOG_SYSTEM",
1009 "PORTAGE_FETCH_CHECKSUM_TRY_MIRRORS", "PORTAGE_FETCH_RESUME_MIN_SIZE",
1011 "PORTAGE_GPG_KEY", "PORTAGE_IONICE_COMMAND",
1012 "PORTAGE_PACKAGE_EMPTY_ABORT",
1013 "PORTAGE_RO_DISTDIRS",
1014 "PORTAGE_RSYNC_EXTRA_OPTS", "PORTAGE_RSYNC_OPTS",
1015 "PORTAGE_RSYNC_RETRIES", "PORTAGE_USE", "PORT_LOGDIR",
1016 "QUICKPKG_DEFAULT_OPTS",
1017 "RESUMECOMMAND", "RESUMECOMMAND_HTTP", "RESUMECOMMAND_HTTP",
1018 "RESUMECOMMAND_SFTP", "SYNC", "USE_EXPAND_HIDDEN", "USE_ORDER",
1021 _environ_filter = frozenset(_environ_filter)
1023 def __init__(self, clone=None, mycpv=None, config_profile_path=None,
1024 config_incrementals=None, config_root=None, target_root=None,
1027 @param clone: If provided, init will use deepcopy to copy by value the instance.
1028 @type clone: Instance of config class.
1029 @param mycpv: CPV to load up (see setcpv), this is the same as calling init with mycpv=None
1030 and then calling instance.setcpv(mycpv).
1032 @param config_profile_path: Configurable path to the profile (usually PROFILE_PATH from portage.const)
1033 @type config_profile_path: String
1034 @param config_incrementals: List of incremental variables (usually portage.const.INCREMENTALS)
1035 @type config_incrementals: List
1036 @param config_root: path to read local config from (defaults to "/", see PORTAGE_CONFIGROOT)
1037 @type config_root: String
1038 @param target_root: __init__ override of $ROOT env variable.
1039 @type target_root: String
1040 @param local_config: Enables loading of local config (/etc/portage); used most by repoman to
1041 ignore local config (keywording and unmasking)
1042 @type local_config: Boolean
1045 # When initializing the global portage.settings instance, avoid
1046 # raising exceptions whenever possible since exceptions thrown
1047 # from 'import portage' or 'import portage.exceptions' statements
1048 # can practically render the api unusable for api consumers.
1049 tolerant = "_initializing_globals" in globals()
1051 self.already_in_regenerate = 0
1056 self.modifiedkeys = []
1058 self._accept_chost_re = None
1062 self.dirVirtuals = None
1065 # Virtuals obtained from the vartree
1066 self.treeVirtuals = {}
1067 # Virtuals by user specification. Includes negatives.
1068 self.userVirtuals = {}
1069 # Virtual negatives from user specifications.
1070 self.negVirtuals = {}
1071 # Virtuals added by the depgraph via self.setinst().
1072 self._depgraphVirtuals = {}
1074 self.user_profile_dir = None
1075 self.local_config = local_config
1078 self.incrementals = copy.deepcopy(clone.incrementals)
1079 self.profile_path = copy.deepcopy(clone.profile_path)
1080 self.user_profile_dir = copy.deepcopy(clone.user_profile_dir)
1081 self.local_config = copy.deepcopy(clone.local_config)
1083 self.module_priority = copy.deepcopy(clone.module_priority)
1084 self.modules = copy.deepcopy(clone.modules)
1086 self.depcachedir = copy.deepcopy(clone.depcachedir)
1088 self.packages = copy.deepcopy(clone.packages)
1089 self.virtuals = copy.deepcopy(clone.virtuals)
1091 self.dirVirtuals = copy.deepcopy(clone.dirVirtuals)
1092 self.treeVirtuals = copy.deepcopy(clone.treeVirtuals)
1093 self.userVirtuals = copy.deepcopy(clone.userVirtuals)
1094 self.negVirtuals = copy.deepcopy(clone.negVirtuals)
1095 self._depgraphVirtuals = copy.deepcopy(clone._depgraphVirtuals)
1097 self.use_defs = copy.deepcopy(clone.use_defs)
1098 self.usemask = copy.deepcopy(clone.usemask)
1099 self.usemask_list = copy.deepcopy(clone.usemask_list)
1100 self.pusemask_list = copy.deepcopy(clone.pusemask_list)
1101 self.useforce = copy.deepcopy(clone.useforce)
1102 self.useforce_list = copy.deepcopy(clone.useforce_list)
1103 self.puseforce_list = copy.deepcopy(clone.puseforce_list)
1104 self.puse = copy.deepcopy(clone.puse)
1105 self.make_defaults_use = copy.deepcopy(clone.make_defaults_use)
1106 self.pkgprofileuse = copy.deepcopy(clone.pkgprofileuse)
1107 self.mycpv = copy.deepcopy(clone.mycpv)
1109 self.configlist = copy.deepcopy(clone.configlist)
1110 self.lookuplist = self.configlist[:]
1111 self.lookuplist.reverse()
1113 "env.d": self.configlist[0],
1114 "pkginternal": self.configlist[1],
1115 "globals": self.configlist[2],
1116 "defaults": self.configlist[3],
1117 "conf": self.configlist[4],
1118 "pkg": self.configlist[5],
1119 "auto": self.configlist[6],
1120 "backupenv": self.configlist[7],
1121 "env": self.configlist[8] }
1122 self.profiles = copy.deepcopy(clone.profiles)
1123 self.backupenv = self.configdict["backupenv"]
1124 self.pusedict = copy.deepcopy(clone.pusedict)
1125 self.categories = copy.deepcopy(clone.categories)
1126 self.pkeywordsdict = copy.deepcopy(clone.pkeywordsdict)
1127 self.pmaskdict = copy.deepcopy(clone.pmaskdict)
1128 self.punmaskdict = copy.deepcopy(clone.punmaskdict)
1129 self.prevmaskdict = copy.deepcopy(clone.prevmaskdict)
1130 self.pprovideddict = copy.deepcopy(clone.pprovideddict)
1131 self.features = copy.deepcopy(clone.features)
1133 self._accept_license = copy.deepcopy(clone._accept_license)
1134 self._plicensedict = copy.deepcopy(clone._plicensedict)
		def check_var_directory(varname, var):
			# Abort construction if a critical path variable (e.g.
			# PORTAGE_CONFIGROOT, ROOT) does not point at an existing
			# directory.
			if not os.path.isdir(var):
				writemsg(("!!! Error: %s='%s' is not a directory. " + \
					"Please correct this.\n") % (varname, var),
				raise portage.exception.DirectoryNotFound(var)
1144 if config_root is None:
1147 config_root = normalize_path(os.path.abspath(
1148 config_root)).rstrip(os.path.sep) + os.path.sep
1150 check_var_directory("PORTAGE_CONFIGROOT", config_root)
1152 self.depcachedir = DEPCACHE_PATH
1154 if not config_profile_path:
1155 config_profile_path = \
1156 os.path.join(config_root, PROFILE_PATH.lstrip(os.path.sep))
1157 if os.path.isdir(config_profile_path):
1158 self.profile_path = config_profile_path
1160 self.profile_path = None
1162 self.profile_path = config_profile_path[:]
1164 if not config_incrementals:
1165 writemsg("incrementals not specified to class config\n")
1166 self.incrementals = copy.deepcopy(portage.const.INCREMENTALS)
1168 self.incrementals = copy.deepcopy(config_incrementals)
1170 self.module_priority = ["user","default"]
1172 self.modules["user"] = getconfig(
1173 os.path.join(config_root, MODULES_FILE_PATH.lstrip(os.path.sep)))
1174 if self.modules["user"] is None:
1175 self.modules["user"] = {}
1176 self.modules["default"] = {
1177 "portdbapi.metadbmodule": "portage.cache.metadata.database",
1178 "portdbapi.auxdbmodule": "portage.cache.flat_hash.database",
1184 # back up our incremental variables:
1186 # configlist will contain: [ env.d, globals, defaults, conf, pkg, auto, backupenv, env ]
1187 self.configlist.append({})
1188 self.configdict["env.d"] = self.configlist[-1]
1190 self.configlist.append({})
1191 self.configdict["pkginternal"] = self.configlist[-1]
1193 # The symlink might not exist or might not be a symlink.
1194 if self.profile_path is None:
			def addProfile(currentPath):
				# Recursively register a profile directory plus all of its
				# ancestors (named in its "parent" file) in self.profiles.
				# Parents are appended before the child so that child
				# settings stack on top of (override) parent settings.
				parentsFile = os.path.join(currentPath, "parent")
				if os.path.exists(parentsFile):
					parents = grabfile(parentsFile)
					# A "parent" file with no entries is a broken profile.
					raise portage.exception.ParseError(
						"Empty parent file: '%s'" % parentsFile)
					for parentPath in parents:
						# Parent entries are paths relative to the current
						# profile directory.
						parentPath = normalize_path(os.path.join(
							currentPath, parentPath))
						if os.path.exists(parentPath):
							addProfile(parentPath)
						raise portage.exception.ParseError(
							"Parent '%s' not found: '%s'" % \
							(parentPath, parentsFile))
				self.profiles.append(currentPath)
1216 addProfile(os.path.realpath(self.profile_path))
1217 except portage.exception.ParseError, e:
1218 writemsg("!!! Unable to parse profile: '%s'\n" % \
1219 self.profile_path, noiselevel=-1)
1220 writemsg("!!! ParseError: %s\n" % str(e), noiselevel=-1)
1224 custom_prof = os.path.join(
1225 config_root, CUSTOM_PROFILE_PATH.lstrip(os.path.sep))
1226 if os.path.exists(custom_prof):
1227 self.user_profile_dir = custom_prof
1228 self.profiles.append(custom_prof)
1231 self.packages_list = [grabfile_package(os.path.join(x, "packages")) for x in self.profiles]
1232 self.packages = stack_lists(self.packages_list, incremental=1)
1233 del self.packages_list
1234 #self.packages = grab_stacked("packages", self.profiles, grabfile, incremental_lines=1)
1237 self.prevmaskdict={}
1238 for x in self.packages:
1239 mycatpkg=dep_getkey(x)
1240 if mycatpkg not in self.prevmaskdict:
1241 self.prevmaskdict[mycatpkg]=[x]
1243 self.prevmaskdict[mycatpkg].append(x)
1245 # get profile-masked use flags -- INCREMENTAL Child over parent
1246 self.usemask_list = [grabfile(os.path.join(x, "use.mask")) \
1247 for x in self.profiles]
1248 self.usemask = set(stack_lists(
1249 self.usemask_list, incremental=True))
1250 use_defs_lists = [grabdict(os.path.join(x, "use.defaults")) for x in self.profiles]
1251 self.use_defs = stack_dictlist(use_defs_lists, incremental=True)
1254 self.pusemask_list = []
1255 rawpusemask = [grabdict_package(
1256 os.path.join(x, "package.use.mask")) \
1257 for x in self.profiles]
1258 for i in xrange(len(self.profiles)):
1260 for k, v in rawpusemask[i].iteritems():
1261 cpdict.setdefault(dep_getkey(k), {})[k] = v
1262 self.pusemask_list.append(cpdict)
1265 self.pkgprofileuse = []
1266 rawprofileuse = [grabdict_package(
1267 os.path.join(x, "package.use"), juststrings=True) \
1268 for x in self.profiles]
1269 for i in xrange(len(self.profiles)):
1271 for k, v in rawprofileuse[i].iteritems():
1272 cpdict.setdefault(dep_getkey(k), {})[k] = v
1273 self.pkgprofileuse.append(cpdict)
1276 self.useforce_list = [grabfile(os.path.join(x, "use.force")) \
1277 for x in self.profiles]
1278 self.useforce = set(stack_lists(
1279 self.useforce_list, incremental=True))
1281 self.puseforce_list = []
1282 rawpuseforce = [grabdict_package(
1283 os.path.join(x, "package.use.force")) \
1284 for x in self.profiles]
1285 for i in xrange(len(self.profiles)):
1287 for k, v in rawpuseforce[i].iteritems():
1288 cpdict.setdefault(dep_getkey(k), {})[k] = v
1289 self.puseforce_list.append(cpdict)
1292 make_conf = getconfig(
1293 os.path.join(config_root, MAKE_CONF_FILE.lstrip(os.path.sep)),
1294 tolerant=tolerant, allow_sourcing=True)
1295 if make_conf is None:
1298 # Allow ROOT setting to come from make.conf if it's not overridden
1299 # by the constructor argument (from the calling environment).
1300 if target_root is None and "ROOT" in make_conf:
1301 target_root = make_conf["ROOT"]
1302 if not target_root.strip():
1304 if target_root is None:
1307 target_root = normalize_path(os.path.abspath(
1308 target_root)).rstrip(os.path.sep) + os.path.sep
1310 portage.util.ensure_dirs(target_root)
1311 check_var_directory("ROOT", target_root)
1313 # The expand_map is used for variable substitution
1314 # in getconfig() calls, and the getconfig() calls
1315 # update expand_map with the value of each variable
1316 # assignment that occurs. Variable substitution occurs
1317 # in the following order, which corresponds to the
1318 # order of appearance in self.lookuplist:
1325 # Notably absent is "env", since we want to avoid any
1326 # interaction with the calling environment that might
1327 # lead to unexpected results.
1330 env_d = getconfig(os.path.join(target_root, "etc", "profile.env"),
1332 # env_d will be None if profile.env doesn't exist.
1334 self.configdict["env.d"].update(env_d)
1335 expand_map.update(env_d)
1337 # backupenv is used for calculating incremental variables.
1338 self.backupenv = os.environ.copy()
1341 # Remove duplicate values so they don't override updated
1342 # profile.env values later (profile.env is reloaded in each
1343 # call to self.regenerate).
1344 for k, v in env_d.iteritems():
1346 if self.backupenv[k] == v:
1347 del self.backupenv[k]
1352 self.configdict["env"] = self.backupenv.copy()
1354 # make.globals should not be relative to config_root
1355 # because it only contains constants.
1356 for x in (portage.const.GLOBAL_CONFIG_PATH, "/etc"):
1357 self.mygcfg = getconfig(os.path.join(x, "make.globals"),
1362 if self.mygcfg is None:
1365 self.configlist.append(self.mygcfg)
1366 self.configdict["globals"]=self.configlist[-1]
1368 self.make_defaults_use = []
1371 mygcfg_dlists = [getconfig(os.path.join(x, "make.defaults"),
1372 expand=expand_map) for x in self.profiles]
1374 for cfg in mygcfg_dlists:
1376 self.make_defaults_use.append(cfg.get("USE", ""))
1378 self.make_defaults_use.append("")
1379 self.mygcfg = stack_dicts(mygcfg_dlists,
1380 incrementals=portage.const.INCREMENTALS, ignore_none=1)
1381 if self.mygcfg is None:
1383 self.configlist.append(self.mygcfg)
1384 self.configdict["defaults"]=self.configlist[-1]
1386 self.mygcfg = getconfig(
1387 os.path.join(config_root, MAKE_CONF_FILE.lstrip(os.path.sep)),
1388 tolerant=tolerant, allow_sourcing=True, expand=expand_map)
1389 if self.mygcfg is None:
1392 # Don't allow the user to override certain variables in make.conf
1393 profile_only_variables = self.configdict["defaults"].get(
1394 "PROFILE_ONLY_VARIABLES", "").split()
1395 for k in profile_only_variables:
1396 self.mygcfg.pop(k, None)
1398 self.configlist.append(self.mygcfg)
1399 self.configdict["conf"]=self.configlist[-1]
1401 self.configlist.append({})
1402 self.configdict["pkg"]=self.configlist[-1]
1405 self.configlist.append({})
1406 self.configdict["auto"]=self.configlist[-1]
1408 self.configlist.append(self.backupenv) # XXX Why though?
1409 self.configdict["backupenv"]=self.configlist[-1]
1411 # Don't allow the user to override certain variables in the env
1412 for k in profile_only_variables:
1413 self.backupenv.pop(k, None)
1415 self.configlist.append(self.configdict["env"])
1417 # make lookuplist for loading package.*
1418 self.lookuplist=self.configlist[:]
1419 self.lookuplist.reverse()
1421 # Blacklist vars that could interfere with portage internals.
1422 for blacklisted in "CATEGORY", "EBUILD_PHASE", \
1423 "EMERGE_FROM", "PKGUSE", "PORTAGE_CONFIGROOT", \
1424 "PORTAGE_IUSE", "PORTAGE_USE", "ROOT":
1425 for cfg in self.lookuplist:
1426 cfg.pop(blacklisted, None)
1427 del blacklisted, cfg
1429 self["PORTAGE_CONFIGROOT"] = config_root
1430 self.backup_changes("PORTAGE_CONFIGROOT")
1431 self["ROOT"] = target_root
1432 self.backup_changes("ROOT")
1435 self.pkeywordsdict = {}
1436 self._plicensedict = {}
1437 self.punmaskdict = {}
1438 abs_user_config = os.path.join(config_root,
1439 USER_CONFIG_PATH.lstrip(os.path.sep))
1441 # locations for "categories" and "arch.list" files
1442 locations = [os.path.join(self["PORTDIR"], "profiles")]
1443 pmask_locations = [os.path.join(self["PORTDIR"], "profiles")]
1444 pmask_locations.extend(self.profiles)
1446 """ repoman controls PORTDIR_OVERLAY via the environment, so no
1447 special cases are needed here."""
1448 overlay_profiles = []
1449 for ov in self["PORTDIR_OVERLAY"].split():
1450 ov = normalize_path(ov)
1451 profiles_dir = os.path.join(ov, "profiles")
1452 if os.path.isdir(profiles_dir):
1453 overlay_profiles.append(profiles_dir)
1454 locations += overlay_profiles
1456 pmask_locations.extend(overlay_profiles)
1459 locations.append(abs_user_config)
1460 pmask_locations.append(abs_user_config)
1461 pusedict = grabdict_package(
1462 os.path.join(abs_user_config, "package.use"), recursive=1)
1463 for key in pusedict.keys():
1464 cp = dep_getkey(key)
1465 if cp not in self.pusedict:
1466 self.pusedict[cp] = {}
1467 self.pusedict[cp][key] = pusedict[key]
1470 pkgdict = grabdict_package(
1471 os.path.join(abs_user_config, "package.keywords"),
1473 for key in pkgdict.keys():
1474 # default to ~arch if no specific keyword is given
1475 if not pkgdict[key]:
1477 if self.configdict["defaults"] and \
1478 "ACCEPT_KEYWORDS" in self.configdict["defaults"]:
1479 groups = self.configdict["defaults"]["ACCEPT_KEYWORDS"].split()
1482 for keyword in groups:
1483 if not keyword[0] in "~-":
1484 mykeywordlist.append("~"+keyword)
1485 pkgdict[key] = mykeywordlist
1486 cp = dep_getkey(key)
1487 if cp not in self.pkeywordsdict:
1488 self.pkeywordsdict[cp] = {}
1489 self.pkeywordsdict[cp][key] = pkgdict[key]
1492 licdict = grabdict_package(os.path.join(
1493 abs_user_config, "package.license"), recursive=1)
1494 for k, v in licdict.iteritems():
1496 cp_dict = self._plicensedict.get(cp)
1499 self._plicensedict[cp] = cp_dict
1500 cp_dict[k] = self.expandLicenseTokens(v)
1503 pkgunmasklines = grabfile_package(
1504 os.path.join(abs_user_config, "package.unmask"),
1506 for x in pkgunmasklines:
1507 mycatpkg=dep_getkey(x)
1508 if mycatpkg in self.punmaskdict:
1509 self.punmaskdict[mycatpkg].append(x)
1511 self.punmaskdict[mycatpkg]=[x]
1513 #getting categories from an external file now
1514 categories = [grabfile(os.path.join(x, "categories")) for x in locations]
1515 self.categories = stack_lists(categories, incremental=1)
1518 archlist = [grabfile(os.path.join(x, "arch.list")) for x in locations]
1519 archlist = stack_lists(archlist, incremental=1)
1520 self.configdict["conf"]["PORTAGE_ARCHLIST"] = " ".join(archlist)
1524 for x in pmask_locations:
1525 pkgmasklines.append(grabfile_package(
1526 os.path.join(x, "package.mask"), recursive=1))
1527 pkgmasklines = stack_lists(pkgmasklines, incremental=1)
1530 for x in pkgmasklines:
1531 mycatpkg=dep_getkey(x)
1532 if mycatpkg in self.pmaskdict:
1533 self.pmaskdict[mycatpkg].append(x)
1535 self.pmaskdict[mycatpkg]=[x]
1537 pkgprovidedlines = [grabfile(os.path.join(x, "package.provided")) for x in self.profiles]
1538 pkgprovidedlines = stack_lists(pkgprovidedlines, incremental=1)
1539 has_invalid_data = False
1540 for x in range(len(pkgprovidedlines)-1, -1, -1):
1541 myline = pkgprovidedlines[x]
1542 if not isvalidatom("=" + myline):
1543 writemsg("Invalid package name in package.provided:" + \
1544 " %s\n" % myline, noiselevel=-1)
1545 has_invalid_data = True
1546 del pkgprovidedlines[x]
1548 cpvr = catpkgsplit(pkgprovidedlines[x])
1549 if not cpvr or cpvr[0] == "null":
1550 writemsg("Invalid package name in package.provided: "+pkgprovidedlines[x]+"\n",
1552 has_invalid_data = True
1553 del pkgprovidedlines[x]
1555 if cpvr[0] == "virtual":
1556 writemsg("Virtual package in package.provided: %s\n" % \
1557 myline, noiselevel=-1)
1558 has_invalid_data = True
1559 del pkgprovidedlines[x]
1561 if has_invalid_data:
1562 writemsg("See portage(5) for correct package.provided usage.\n",
1564 self.pprovideddict = {}
1565 for x in pkgprovidedlines:
1569 mycatpkg=dep_getkey(x)
1570 if mycatpkg in self.pprovideddict:
1571 self.pprovideddict[mycatpkg].append(x)
1573 self.pprovideddict[mycatpkg]=[x]
1575 # parse licensegroups
1576 self._license_groups = {}
1578 self._license_groups.update(
1579 grabdict(os.path.join(x, "license_groups")))
1581 # reasonable defaults; this is important as without USE_ORDER,
1582 # USE will always be "" (nothing set)!
1583 if "USE_ORDER" not in self:
1584 self.backupenv["USE_ORDER"] = "env:pkg:conf:defaults:pkginternal:env.d"
1586 self["PORTAGE_GID"] = str(portage_gid)
1587 self.backup_changes("PORTAGE_GID")
1589 if self.get("PORTAGE_DEPCACHEDIR", None):
1590 self.depcachedir = self["PORTAGE_DEPCACHEDIR"]
1591 self["PORTAGE_DEPCACHEDIR"] = self.depcachedir
1592 self.backup_changes("PORTAGE_DEPCACHEDIR")
1594 overlays = self.get("PORTDIR_OVERLAY","").split()
1598 ov = normalize_path(ov)
1599 if os.path.isdir(ov):
1602 writemsg("!!! Invalid PORTDIR_OVERLAY" + \
1603 " (not a dir): '%s'\n" % ov, noiselevel=-1)
1604 self["PORTDIR_OVERLAY"] = " ".join(new_ov)
1605 self.backup_changes("PORTDIR_OVERLAY")
1607 if "CBUILD" not in self and "CHOST" in self:
1608 self["CBUILD"] = self["CHOST"]
1609 self.backup_changes("CBUILD")
1611 self["PORTAGE_BIN_PATH"] = PORTAGE_BIN_PATH
1612 self.backup_changes("PORTAGE_BIN_PATH")
1613 self["PORTAGE_PYM_PATH"] = PORTAGE_PYM_PATH
1614 self.backup_changes("PORTAGE_PYM_PATH")
1616 # Expand license groups
1617 # This has to do be done for each config layer before regenerate()
1618 # in order for incremental negation to work properly.
1620 for c in self.configdict.itervalues():
1621 v = c.get("ACCEPT_LICENSE")
1624 v = " ".join(self.expandLicenseTokens(v.split()))
1625 c["ACCEPT_LICENSE"] = v
1628 for var in ("PORTAGE_INST_UID", "PORTAGE_INST_GID"):
1630 self[var] = str(int(self.get(var, "0")))
1632 writemsg(("!!! %s='%s' is not a valid integer. " + \
1633 "Falling back to '0'.\n") % (var, self[var]),
1636 self.backup_changes(var)
1638 # initialize self.features
1641 # ACCEPT_LICENSE support depends on definition of license groups
1642 # in the tree, so it's disabled for now (accept anything).
1643 self._accept_license = set(["*"])
1645 if not portage.process.sandbox_capable and \
1646 ("sandbox" in self.features or "usersandbox" in self.features):
1647 if self.profile_path is not None and \
1648 os.path.realpath(self.profile_path) == \
1649 os.path.realpath(PROFILE_PATH):
1650 """ Don't show this warning when running repoman and the
1651 sandbox feature came from a profile that doesn't belong to
1653 writemsg(colorize("BAD", "!!! Problem with sandbox" + \
1654 " binary. Disabling...\n\n"), noiselevel=-1)
1655 if "sandbox" in self.features:
1656 self.features.remove("sandbox")
1657 if "usersandbox" in self.features:
1658 self.features.remove("usersandbox")
1660 self.features.sort()
1661 self["FEATURES"] = " ".join(self.features)
1662 self.backup_changes("FEATURES")
	def _init_dirs(self):
		"""
		Create a few directories that are critical to portage operation
		"""
		# Nothing can be created when ROOT isn't writable (e.g. an
		# unprivileged user); silently skip in that case.
		if not os.access(self["ROOT"], os.W_OK):

		# Map of path (relative to ROOT) ->
		# (gid, mode, mask, preserve_perms)
			"tmp" : ( -1, 01777, 0, True),
			"var/tmp" : ( -1, 01777, 0, True),
			PRIVATE_PATH : ( portage_gid, 02750, 02, False),
			CACHE_PATH.lstrip(os.path.sep) : (portage_gid, 0755, 02, False)

		for mypath, (gid, mode, modemask, preserve_perms) \
			in dir_mode_map.iteritems():
			mydir = os.path.join(self["ROOT"], mypath)
			if preserve_perms and os.path.isdir(mydir):
				# Only adjust permissions on some directories if
				# they don't exist yet. This gives freedom to the
				# user to adjust permissions to suit their taste.
				portage.util.ensure_dirs(mydir, gid=gid, mode=mode, mask=modemask)
			except portage.exception.PortageException, e:
				# Directory creation is best-effort: warn, don't abort.
				writemsg("!!! Directory initialization failed: '%s'\n" % mydir,
				writemsg("!!! %s\n" % str(e),
	def expandLicenseTokens(self, tokens):
		""" Take a token from ACCEPT_LICENSE or package.license and expand it
		if it's a group token (indicated by @) or just return it if it's not a
		group. If a group is negated then negate all group elements."""
		expanded_tokens = []
		# Each input token may expand to several license names, since
		# groups recurse into their members.
		expanded_tokens.extend(self._expandLicenseToken(x, None))
		return expanded_tokens
	def _expandLicenseToken(self, token, traversed_groups):
		"""Recursively expand one ACCEPT_LICENSE / package.license token.
		Group tokens start with '@'; traversed_groups guards against
		circular group references. A leading '-' negation is propagated
		to every expanded element."""
		if token.startswith("-"):
			license_name = token[1:]
			license_name = token
		# A plain (non-group) license name passes through unchanged.
		if not license_name.startswith("@"):
			rValue.append(token)
		group_name = license_name[1:]
		if not traversed_groups:
			traversed_groups = set()
		license_group = self._license_groups.get(group_name)
		if group_name in traversed_groups:
			# Cycle detected: warn and keep the unexpanded group token.
			writemsg(("Circular license group reference" + \
				" detected in '%s'\n") % group_name, noiselevel=-1)
			rValue.append("@"+group_name)
		traversed_groups.add(group_name)
		for l in license_group:
			if l.startswith("-"):
				# Negated entries are invalid inside a group definition.
				writemsg(("Skipping invalid element %s" + \
					" in license group '%s'\n") % (l, group_name),
			rValue.extend(self._expandLicenseToken(l, traversed_groups))
		# Unknown group: warn and keep the literal token.
		writemsg("Undefined license group '%s'\n" % group_name,
			rValue.append("@"+group_name)
		# Apply a leading '-' negation to every expanded element.
		rValue = ["-" + token for token in rValue]
1746 """Validate miscellaneous settings and display warnings if necessary.
1747 (This code was previously in the global scope of portage.py)"""
1749 groups = self["ACCEPT_KEYWORDS"].split()
1750 archlist = self.archlist()
1752 writemsg("--- 'profiles/arch.list' is empty or " + \
1753 "not available. Empty portage tree?\n", noiselevel=1)
1755 for group in groups:
1756 if group not in archlist and \
1757 not (group.startswith("-") and group[1:] in archlist) and \
1758 group not in ("*", "~*", "**"):
1759 writemsg("!!! INVALID ACCEPT_KEYWORDS: %s\n" % str(group),
1762 abs_profile_path = os.path.join(self["PORTAGE_CONFIGROOT"],
1763 PROFILE_PATH.lstrip(os.path.sep))
1764 if not self.profile_path or (not os.path.islink(abs_profile_path) and \
1765 not os.path.exists(os.path.join(abs_profile_path, "parent")) and \
1766 os.path.exists(os.path.join(self["PORTDIR"], "profiles"))):
1767 writemsg("\a\n\n!!! %s is not a symlink and will probably prevent most merges.\n" % abs_profile_path,
1769 writemsg("!!! It should point into a profile within %s/profiles/\n" % self["PORTDIR"])
1770 writemsg("!!! (You can safely ignore this message when syncing. It's harmless.)\n\n\n")
1772 abs_user_virtuals = os.path.join(self["PORTAGE_CONFIGROOT"],
1773 USER_VIRTUALS_FILE.lstrip(os.path.sep))
1774 if os.path.exists(abs_user_virtuals):
1775 writemsg("\n!!! /etc/portage/virtuals is deprecated in favor of\n")
1776 writemsg("!!! /etc/portage/profile/virtuals. Please move it to\n")
1777 writemsg("!!! this new location.\n\n")
1779 def loadVirtuals(self,root):
1780 """Not currently used by portage."""
1781 writemsg("DEPRECATED: portage.config.loadVirtuals\n")
1782 self.getvirtuals(root)
	def load_best_module(self,property_string):
		# Resolve the module path configured for `property_string` (e.g.
		# "portdbapi.auxdbmodule"), preferring user config over the
		# built-in defaults, then import and return it.
		best_mod = best_from_dict(property_string,self.modules,self.module_priority)
			mod = load_mod(best_mod)
			# Legacy config values may lack the "portage." package prefix
			# for cache modules; retry with it prepended.
			if best_mod.startswith("cache."):
				best_mod = "portage." + best_mod
				mod = load_mod(best_mod)
	def modifying(self):
		# Guard called by every mutating method: refuses modification of
		# a configuration that has been locked.
		raise Exception("Configuration is locked.")
	def backup_changes(self,key=None):
		# Persist the current value of `key` from the "env" layer into
		# self.backupenv so that it survives reset(); a key that is not
		# present in the environment raises KeyError.
		if key and key in self.configdict["env"]:
			self.backupenv[key] = copy.deepcopy(self.configdict["env"][key])
			raise KeyError("No such key defined in environment: %s" % key)
	def reset(self,keeping_pkg=0,use_cache=1):
		"""
		Restore environment from self.backupenv, call self.regenerate()
		@param keeping_pkg: Should we keep the set_cpv() data or delete it.
		@type keeping_pkg: Boolean
		@param use_cache: Should self.regenerate use the cache or not
		@type use_cache: Boolean
		"""
		self.configdict["env"].clear()
		self.configdict["env"].update(self.backupenv)
		self.modifiedkeys = []
		# Drop per-package state and restore the profile-derived USE
		# defaults that setcpv() may have replaced.
		self.configdict["pkg"].clear()
		self.configdict["pkginternal"].clear()
		self.configdict["defaults"]["USE"] = \
			" ".join(self.make_defaults_use)
		# Rebuild use.mask/use.force from the raw profile stacks, which
		# discards any package-specific entries applied by setcpv().
		self.usemask = set(stack_lists(
			self.usemask_list, incremental=True))
		self.useforce = set(stack_lists(
			self.useforce_list, incremental=True))
		self.regenerate(use_cache=use_cache)
	def load_infodir(self,infodir):
		# Load build-time metadata files (CATEGORY, PF, USE, ...) from a
		# package's build-info directory into the "pkg" config layer.
		backup_pkg_metadata = dict(self.configdict["pkg"].iteritems())
		if "pkg" in self.configdict and \
			"CATEGORY" in self.configdict["pkg"]:
			self.configdict["pkg"].clear()
			self.configdict["pkg"]["CATEGORY"] = \
				backup_pkg_metadata["CATEGORY"]
			raise portage.exception.PortageException(
				"No pkg setup for settings instance?")

		found_category_file = False
		if os.path.isdir(infodir):
			if os.path.exists(infodir+"/environment"):
				self.configdict["pkg"]["PORT_ENV_FILE"] = infodir+"/environment"
			# Only all-uppercase file names are candidate metadata vars.
			myre = re.compile('^[A-Z]+$')
			for filename in listdir(infodir,filesonly=1,EmptyOnError=1):
				if filename == "FEATURES":
					# FEATURES from the build host shouldn't be interpreted as
					# FEATURES on the client system.
				if filename == "CATEGORY":
					found_category_file = True
				if myre.match(filename):
					file_path = os.path.join(infodir, filename)
					mydata = open(file_path).read().strip()
					# Skip suspiciously large values, except for USE.
					if len(mydata) < 2048 or filename == "USE":
						if null_byte in mydata:
							writemsg("!!! Null byte found in metadata " + \
								"file: '%s'\n" % file_path, noiselevel=-1)
						if filename == "USE":
							# Prefix with "-*" so the recorded USE fully
							# replaces the profile-derived flags.
							binpkg_flags = "-* " + mydata
							self.configdict["pkg"][filename] = binpkg_flags
							self.configdict["env"][filename] = mydata
							self.configdict["pkg"][filename] = mydata
							self.configdict["env"][filename] = mydata
					except (OSError, IOError):
						writemsg("!!! Unable to read file: %s\n" % infodir+"/"+filename,

			# Missing or corrupt CATEGORY will cause problems for
			# doebuild(), which uses it to infer the cpv. We already
			# know the category, so there's no need to trust this
			# file. Show a warning if the file is missing though,
			# because it's required (especially for binary packages).
			if not found_category_file:
				writemsg("!!! CATEGORY file is missing: %s\n" % \
					os.path.join(infodir, "CATEGORY"), noiselevel=-1)
			self.configdict["pkg"].update(backup_pkg_metadata)

		# Always set known good values for these variables, since
		# corruption of these can cause problems:
		cat, pf = catsplit(self.mycpv)
		self.configdict["pkg"]["CATEGORY"] = cat
		self.configdict["pkg"]["PF"] = pf
	def setcpv(self, mycpv, use_cache=1, mydb=None):
		"""
		Load a particular CPV into the config, this lets us see the
		Default USE flags for a particular ebuild as well as the USE
		flags from package.use.

		@param mycpv: A cpv to load
		@param use_cache: Enables caching
		@type use_cache: Boolean
		@param mydb: a dbapi instance that supports aux_get with the IUSE key.
		@type mydb: dbapi or derivative.
		"""
		if not isinstance(mycpv, basestring):
		# Same package already loaded: nothing to recompute.
		if self.mycpv == mycpv:
		cat, pf = catsplit(mycpv)
		cp = dep_getkey(mycpv)
		cpv_slot = self.mycpv
		env_configdict = self.configdict["env"]
		pkg_configdict = self.configdict["pkg"]
		previous_iuse = pkg_configdict.get("IUSE")
		# Purge stale per-package variables from the env layer so they
		# can't leak between packages.
		for k in ("A", "AA", "CATEGORY", "PKGUSE", "PF", "PORTAGE_USE"):
			env_configdict.pop(k, None)
		pkg_configdict["CATEGORY"] = cat
		pkg_configdict["PF"] = pf
		# mydb may be either a plain metadata mapping or a dbapi.
		if not hasattr(mydb, "aux_get"):
			pkg_configdict.update(mydb)
			aux_keys = [k for k in auxdbkeys \
				if not k.startswith("UNUSED_")]
			for k, v in izip(aux_keys, mydb.aux_get(self.mycpv, aux_keys)):
				pkg_configdict[k] = v
			for k in pkg_configdict:
				env_configdict.pop(k, None)
			slot = pkg_configdict["SLOT"]
			iuse = pkg_configdict["IUSE"]
			# cpv:SLOT form so slot-dependent atoms can match below.
			cpv_slot = "%s:%s" % (self.mycpv, slot)
			# IUSE defaults ("+flag" enables, "-flag" disables) feed the
			# pkginternal layer.
			for x in iuse.split():
				if x.startswith("+"):
					pkginternaluse.append(x[1:])
				elif x.startswith("-"):
					pkginternaluse.append(x)
			pkginternaluse = " ".join(pkginternaluse)
		if pkginternaluse != self.configdict["pkginternal"].get("USE", ""):
			self.configdict["pkginternal"]["USE"] = pkginternaluse
		# Stack each profile's package.use (best-matching atom) on top of
		# that profile's make.defaults USE.
		for i in xrange(len(self.profiles)):
			cpdict = self.pkgprofileuse[i].get(cp, None)
				keys = cpdict.keys()
					bestmatch = best_match_to_list(cpv_slot, keys)
						keys.remove(bestmatch)
						defaults.insert(pos, cpdict[bestmatch])
			if self.make_defaults_use[i]:
				defaults.insert(pos, self.make_defaults_use[i])
		defaults = " ".join(defaults)
		if defaults != self.configdict["defaults"].get("USE",""):
			self.configdict["defaults"]["USE"] = defaults
		# Recompute forced and masked flags with any package-specific
		# entries for this cpv.
		useforce = self._getUseForce(cpv_slot)
		if useforce != self.useforce:
			self.useforce = useforce
		usemask = self._getUseMask(cpv_slot)
		if usemask != self.usemask:
			self.usemask = usemask
		# Apply the user's package.use (best match wins) for this cp.
		cpdict = self.pusedict.get(cp)
			keys = cpdict.keys()
				self.pusekey = best_match_to_list(cpv_slot, keys)
					keys.remove(self.pusekey)
			self.puse = (" ".join(cpdict[self.pusekey])) + " " + self.puse
		if oldpuse != self.puse:
		self.configdict["pkg"]["PKGUSE"] = self.puse[:] # For saving to PUSE file
		self.configdict["pkg"]["USE"] = self.puse[:] # this gets appended to USE
			self.reset(keeping_pkg=1,use_cache=use_cache)

		# If reset() has not been called, it's safe to return
		# early if IUSE has not changed.
		if not has_changed and previous_iuse == iuse:

		# Filter out USE flags that aren't part of IUSE. This has to
		# be done for every setcpv() call since practically every
		# package has different IUSE.
		use = set(self["USE"].split())
		iuse_implicit = self._get_implicit_iuse()
		iuse_implicit.update(x.lstrip("+-") for x in iuse.split())

		# Escape anything except ".*" which is supposed
		# to pass through from _get_implicit_iuse()
		regex = sorted(re.escape(x) for x in iuse_implicit)
		regex = "^(%s)$" % "|".join(regex)
		regex = regex.replace("\\.\\*", ".*")
		self.configdict["pkg"]["PORTAGE_IUSE"] = regex

		ebuild_force_test = self.get("EBUILD_FORCE_TEST") == "1"
		if ebuild_force_test and \
			not hasattr(self, "_ebuild_force_test_msg_shown"):
			# Only announce the forced test once per config instance.
			self._ebuild_force_test_msg_shown = True
			writemsg("Forcing test.\n", noiselevel=-1)
		if "test" in self.features and "test" in iuse_implicit:
			if "test" in self.usemask and not ebuild_force_test:
				# "test" is in IUSE and USE=test is masked, so execution
				# of src_test() probably is not reliable. Therefore,
				# temporarily disable FEATURES=test just for this package.
				self["FEATURES"] = " ".join(x for x in self.features \
				if ebuild_force_test:
					self.usemask.discard("test")

		# Use the calculated USE flags to regenerate the USE_EXPAND flags so
		# that they are consistent. For optimal performance, use slice
		# comparison instead of startswith().
		use_expand = self.get("USE_EXPAND", "").split()
		for var in use_expand:
			prefix = var.lower() + "_"
			prefix_len = len(prefix)
			expand_flags = set([ x[prefix_len:] for x in use \
				if x[:prefix_len] == prefix ])
			var_split = self.get(var, "").split()
			# Preserve the order of var_split because it can matter for things
			var_split = [ x for x in var_split if x in expand_flags ]
			var_split.extend(expand_flags.difference(var_split))
			has_wildcard = "*" in var_split
				var_split = [ x for x in var_split if x != "*" ]
			for x in iuse_implicit:
				if x[:prefix_len] == prefix:
					has_iuse.add(x[prefix_len:])
				# * means to enable everything in IUSE that's not masked
				for x in iuse_implicit:
					if x[:prefix_len] == prefix and x not in self.usemask:
						suffix = x[prefix_len:]
						var_split.append(suffix)
				# If there is a wildcard and no matching flags in IUSE then
				# LINGUAS should be unset so that all .mo files are
			# Make the flags unique and filter them according to IUSE.
			# Also, continue to preserve order for things like LINGUAS
			# and filter any duplicates that variable may contain.
			filtered_var_split = []
			remaining = has_iuse.intersection(var_split)
					filtered_var_split.append(x)
			var_split = filtered_var_split
				self[var] = " ".join(var_split)
			# Don't export empty USE_EXPAND vars unless the user config
			# exports them as empty. This is required for vars such as
			# LINGUAS, where unset and empty have different meanings.
				# ebuild.sh will see this and unset the variable so
				# that things like LINGUAS work properly
				# It's not in IUSE, so just allow the variable content
				# to pass through if it is defined somewhere. This
				# allows packages that support LINGUAS but don't
				# declare it in IUSE to use the variable outside of the
				# USE_EXPAND context.

		# Filtered for the ebuild environment. Store this in a separate
		# attribute since we still want to be able to see global USE
		# settings for things like emerge --info.
		self.configdict["pkg"]["PORTAGE_USE"] = " ".join(sorted(
			x in iuse_implicit))
	def _get_implicit_iuse(self):
		"""
		Some flags are considered to
		be implicit members of IUSE:
		* Flags derived from ARCH
		* Flags derived from USE_EXPAND_HIDDEN variables
		* Masked flags, such as those from {,package}use.mask
		* Forced flags, such as those from {,package}use.force
		* build and bootstrap flags used by bootstrap.sh
		"""
		iuse_implicit = set()
		# Flags derived from ARCH.
		arch = self.configdict["defaults"].get("ARCH")
		iuse_implicit.add(arch)
		iuse_implicit.update(self.get("PORTAGE_ARCHLIST", "").split())

		# Flags derived from USE_EXPAND_HIDDEN variables
		# such as ELIBC, KERNEL, and USERLAND.
		use_expand_hidden = self.get("USE_EXPAND_HIDDEN", "").split()
		for x in use_expand_hidden:
			# The ".*" suffix is deliberately left unescaped so the
			# PORTAGE_IUSE regex built in setcpv() matches any flag with
			# this prefix.
			iuse_implicit.add(x.lower() + "_.*")

		# Flags that have been masked or forced.
		iuse_implicit.update(self.usemask)
		iuse_implicit.update(self.useforce)

		# build and bootstrap flags used by bootstrap.sh
		iuse_implicit.add("build")
		iuse_implicit.add("bootstrap")
		return iuse_implicit
	def _getUseMask(self, pkg):
		# Return the set of masked USE flags for `pkg`, stacking each
		# profile's use.mask with the best package.use.mask match from
		# that profile (later/child profiles override earlier ones).
		cp = getattr(pkg, "cp", None)
			cp = dep_getkey(pkg)
		for i in xrange(len(self.profiles)):
			cpdict = self.pusemask_list[i].get(cp, None)
				keys = cpdict.keys()
					best_match = best_match_to_list(pkg, keys)
						keys.remove(best_match)
						usemask.insert(pos, cpdict[best_match])
			if self.usemask_list[i]:
				usemask.insert(pos, self.usemask_list[i])
		return set(stack_lists(usemask, incremental=True))
	def _getUseForce(self, pkg):
		# Return the set of forced USE flags for `pkg`, stacking each
		# profile's use.force with the best package.use.force match from
		# that profile; mirrors _getUseMask().
		cp = getattr(pkg, "cp", None)
			cp = dep_getkey(pkg)
		for i in xrange(len(self.profiles)):
			cpdict = self.puseforce_list[i].get(cp, None)
				keys = cpdict.keys()
					best_match = best_match_to_list(pkg, keys)
						keys.remove(best_match)
						useforce.insert(pos, cpdict[best_match])
			if self.useforce_list[i]:
				useforce.insert(pos, self.useforce_list[i])
		return set(stack_lists(useforce, incremental=True))
# Look up a package.mask atom matching cpv (keyed by its cp), honoring
# package.unmask cancellations. Matching uses "cpv:SLOT" strings so that
# slot-qualified atoms behave correctly.
# NOTE(review): extraction gaps — the guards for empty mask/unmask dicts,
# the loop-continue, and the return statements are among the dropped lines.
2220 def _getMaskAtom(self, cpv, metadata):
2222 Take a package and return a matching package.mask atom, or None if no
2223 such atom exists or it has been cancelled by package.unmask. PROVIDE
2224 is not checked, so atoms will not be found for old-style virtuals.
2226 @param cpv: The package name
2228 @param metadata: A dictionary of raw package metadata
2229 @type metadata: dict
2231 @return: An matching atom string or None if one is not found.
2234 cp = cpv_getkey(cpv)
2235 mask_atoms = self.pmaskdict.get(cp)
2237 pkg_list = ["%s:%s" % (cpv, metadata["SLOT"])]
2238 unmask_atoms = self.punmaskdict.get(cp)
2239 for x in mask_atoms:
2240 if not match_from_list(x, pkg_list):
2243 for y in unmask_atoms:
2244 if match_from_list(y, pkg_list):
# Find a profile "packages"-style mask atom matching cpv. Atoms may carry a
# leading "*" which is stripped before matching. Returns the atom or None.
# NOTE(review): extraction gaps — the empty-dict guard and the return lines
# are among the dropped original lines; code below kept byte-identical.
2249 def _getProfileMaskAtom(self, cpv, metadata):
2251 Take a package and return a matching profile atom, or None if no
2252 such atom exists. Note that a profile atom may or may not have a "*"
2253 prefix. PROVIDE is not checked, so atoms will not be found for
2256 @param cpv: The package name
2258 @param metadata: A dictionary of raw package metadata
2259 @type metadata: dict
2261 @return: An matching profile atom string or None if one is not found.
2264 cp = cpv_getkey(cpv)
2265 profile_atoms = self.prevmaskdict.get(cp)
2267 pkg_list = ["%s:%s" % (cpv, metadata["SLOT"])]
2268 for x in profile_atoms:
2269 if match_from_list(x.lstrip("*"), pkg_list):
# Compute which KEYWORDS of cpv are not covered by the accepted keyword
# groups (ACCEPT_KEYWORDS plus package.keywords entries). Returns the list
# of unaccepted keywords; empty KEYWORDS yields ["**"] as a sentinel.
# NOTE(review): many interior lines are missing in this extraction — the
# `matches` initialization, the incremental-stacking loop setup, and the
# hasstable/hastesting bookkeeping are among the dropped lines.
2274 def _getMissingKeywords(self, cpv, metadata):
2276 Take a package and return a list of any KEYWORDS that the user may
2277 may need to accept for the given package. If the KEYWORDS are empty
2278 and the the ** keyword has not been accepted, the returned list will
2279 contain ** alone (in order to distiguish from the case of "none
2282 @param cpv: The package name (for package.keywords support)
2284 @param metadata: A dictionary of raw package metadata
2285 @type metadata: dict
2287 @return: A list of KEYWORDS that have not been accepted.
2290 # Hack: Need to check the env directly here as otherwise stacking
2291 # doesn't work properly as negative values are lost in the config
2292 # object (bug #139600)
2293 egroups = self.configdict["backupenv"].get(
2294 "ACCEPT_KEYWORDS", "").split()
2295 mygroups = metadata["KEYWORDS"].split()
2296 # Repoman may modify this attribute as necessary.
2297 pgroups = self["ACCEPT_KEYWORDS"].split()
2299 cp = dep_getkey(cpv)
2300 pkgdict = self.pkeywordsdict.get(cp)
# Slot-qualified match list, same convention as the mask lookups above.
2303 cpv_slot_list = ["%s:%s" % (cpv, metadata["SLOT"])]
2304 for atom, pkgkeywords in pkgdict.iteritems():
2305 if match_from_list(atom, cpv_slot_list):
2307 pgroups.extend(pkgkeywords)
2308 if matches or egroups:
2309 pgroups.extend(egroups)
# Incremental semantics: a leading "-" removes a previously accepted group.
2312 if x.startswith("-"):
2316 inc_pgroups.discard(x[1:])
2319 pgroups = inc_pgroups
2324 if gp == "*" or (gp == "-*" and len(mygroups) == 1):
2325 writemsg(("--- WARNING: Package '%s' uses" + \
2326 " '%s' keyword.\n") % (cpv, gp), noiselevel=-1)
2333 elif gp.startswith("~"):
2335 elif not gp.startswith("-"):
2338 ((hastesting and "~*" in pgroups) or \
2339 (hasstable and "*" in pgroups) or "**" in pgroups):
2345 # If KEYWORDS is empty then we still have to return something
2346 # in order to distiguish from the case of "none missing".
2347 mygroups.append("**")
# Parse the package's LICENSE string (with USE-conditionals resolved) and
# delegate to _getMaskedLicenses() to report licenses not yet accepted.
# NOTE(review): extraction gaps — the "*"-accepted early return and the
# cpdict guard lines are among the dropped original lines.
2351 def _getMissingLicenses(self, cpv, metadata):
2353 Take a LICENSE string and return a list any licenses that the user may
2354 may need to accept for the given package. The returned list will not
2355 contain any licenses that have already been accepted. This method
2356 can throw an InvalidDependString exception.
2358 @param cpv: The package name (for package.license support)
2360 @param metadata: A dictionary of raw package metadata
2361 @type metadata: dict
2363 @return: A list of licenses that have not been accepted.
2365 if "*" in self._accept_license:
2367 acceptable_licenses = self._accept_license
2368 cpdict = self._plicensedict.get(dep_getkey(cpv), None)
# package.license entries widen the acceptable set for matching atoms.
2370 acceptable_licenses = self._accept_license.copy()
2371 cpv_slot = "%s:%s" % (cpv, metadata["SLOT"])
2372 for atom in match_to_list(cpv_slot, cpdict.keys()):
2373 acceptable_licenses.update(cpdict[atom])
2375 license_str = metadata["LICENSE"]
2376 if "?" in license_str:
# USE-conditional licenses ("flag? ( ... )") need the package's USE.
2377 use = metadata["USE"].split()
2381 license_struct = portage.dep.use_reduce(
2382 portage.dep.paren_reduce(license_str), uselist=use)
2383 license_struct = portage.dep.dep_opconvert(license_struct)
2384 return self._getMaskedLicenses(license_struct, acceptable_licenses)
# Recursively walk a dep-opconverted LICENSE structure and collect the
# licenses that are not in acceptable_licenses. For "||" (any-of) groups,
# all masked alternatives are reported since the user's choice is unknown.
# NOTE(review): extraction gaps — the early returns, the `ret` list
# initialization, and the flatten/return tail are among the dropped lines.
2386 def _getMaskedLicenses(self, license_struct, acceptable_licenses):
2387 if not license_struct:
2389 if license_struct[0] == "||":
2391 for element in license_struct[1:]:
2392 if isinstance(element, list):
2394 ret.append(self._getMaskedLicenses(
2395 element, acceptable_licenses))
2399 if element in acceptable_licenses:
2402 # Return all masked licenses, since we don't know which combination
2403 # (if any) the user will decide to unmask.
2407 for element in license_struct:
2408 if isinstance(element, list):
2410 ret.extend(self._getMaskedLicenses(element,
2411 acceptable_licenses))
2413 if element not in acceptable_licenses:
# Lazily compile an anchored regex from ACCEPT_CHOSTS (falling back to
# CHOST, then to match-anything) and test the package's CHOST against it.
# An invalid pattern is reported and replaced by a match-nothing regex.
# NOTE(review): the try/except re.error lines around both re.compile calls
# are among the lines dropped by this extraction.
2417 def _accept_chost(self, pkg):
2419 @return True if pkg CHOST is accepted, False otherwise.
2421 if self._accept_chost_re is None:
2422 accept_chost = self.get("ACCEPT_CHOSTS", "").split()
2423 if not accept_chost:
2424 chost = self.get("CHOST")
2426 accept_chost.append(chost)
2427 if not accept_chost:
# No restriction configured: accept any CHOST.
2428 self._accept_chost_re = re.compile(".*")
2429 elif len(accept_chost) == 1:
2431 self._accept_chost_re = re.compile(r'^%s$' % accept_chost[0])
2433 writemsg("!!! Invalid ACCEPT_CHOSTS value: '%s': %s\n" % \
2434 (accept_chost[0], e), noiselevel=-1)
2435 self._accept_chost_re = re.compile("^$")
2438 self._accept_chost_re = re.compile(
2439 r'^(%s)$' % "|".join(accept_chost))
2441 writemsg("!!! Invalid ACCEPT_CHOSTS value: '%s': %s\n" % \
2442 (" ".join(accept_chost), e), noiselevel=-1)
2443 self._accept_chost_re = re.compile("^$")
2445 return self._accept_chost_re.match(
2446 pkg.metadata.get("CHOST", "")) is not None
# Register the old-style virtuals PROVIDEd by mycpv into the depgraph
# virtuals mapping and recompile self.virtuals. mydbapi may be a dbapi
# instance (aux_get) or a plain metadata dict.
# NOTE(review): extraction gaps — the early return when provides is empty,
# the loop over virts, and several guards are among the dropped lines.
2448 def setinst(self,mycpv,mydbapi):
2449 """This updates the preferences for old-style virtuals,
2450 affecting the behavior of dep_expand() and dep_check()
2451 calls. It can change dbapi.match() behavior since that
2452 calls dep_expand(). However, dbapi instances have
2453 internal match caches that are not invalidated when
2454 preferences are updated here. This can potentially
2455 lead to some inconsistency (relevant to bug #1343)."""
2457 if len(self.virtuals) == 0:
2459 # Grab the virtuals this package provides and add them into the tree virtuals.
2460 if not hasattr(mydbapi, "aux_get"):
2461 provides = mydbapi["PROVIDE"]
2463 provides = mydbapi.aux_get(mycpv, ["PROVIDE"])[0]
2466 if isinstance(mydbapi, portdbapi):
# For the portage tree, USE must be computed via setcpv first.
2467 self.setcpv(mycpv, mydb=mydbapi)
2468 myuse = self["PORTAGE_USE"]
2469 elif not hasattr(mydbapi, "aux_get"):
2470 myuse = mydbapi["USE"]
2472 myuse = mydbapi.aux_get(mycpv, ["USE"])[0]
# Resolve USE-conditional PROVIDE entries into a flat virtual list.
2473 virts = flatten(portage.dep.use_reduce(portage.dep.paren_reduce(provides), uselist=myuse.split()))
2476 cp = dep_getkey(mycpv)
2478 virt = dep_getkey(virt)
2479 providers = self.virtuals.get(virt)
2480 if providers and cp in providers:
2482 providers = self._depgraphVirtuals.get(virt)
2483 if providers is None:
2485 self._depgraphVirtuals[virt] = providers
2486 if cp not in providers:
2487 providers.append(cp)
2491 self.virtuals = self.__getvirtuals_compile()
# Body of config.reload() — the `def` line itself (original line 2493) was
# dropped by this extraction. Re-reads ${ROOT}/etc/profile.env into the
# "env.d" configdict; getconfig() returns None when the file is absent.
2494 """Reload things like /etc/profile.env that can change during runtime."""
2495 env_d_filename = os.path.join(self["ROOT"], "etc", "profile.env")
2496 self.configdict["env.d"].clear()
2497 env_d = getconfig(env_d_filename, expand=False)
2499 # env_d will be None if profile.env doesn't exist.
2500 self.configdict["env.d"].update(env_d)
# Recompute all incremental variables (USE last, since it depends on
# USE_EXPAND), apply use.force/use.mask, and store the results in the
# final configlist entry. Guards against recursive entry via
# self.already_in_regenerate (autouse() can loop back into regenerate).
# NOTE(review): this extraction drops many interior lines (loop setup,
# several conditionals, the "-*" handling, myflags initialization); the
# visible code is kept byte-identical below.
2502 def regenerate(self,useonly=0,use_cache=1):
2505 This involves regenerating valid USE flags, re-expanding USE_EXPAND flags
2506 re-stacking USE flags (-flag and -*), as well as any other INCREMENTAL
2507 variables. This also updates the env.d configdict; useful in case an ebuild
2508 changes the environment.
2510 If FEATURES has already stacked, it is not stacked twice.
2512 @param useonly: Only regenerate USE flags (not any other incrementals)
2513 @type useonly: Boolean
2514 @param use_cache: Enable Caching (only for autouse)
2515 @type use_cache: Boolean
2520 if self.already_in_regenerate:
2521 # XXX: THIS REALLY NEEDS TO GET FIXED. autouse() loops.
2522 writemsg("!!! Looping in regenerate.\n",1)
2525 self.already_in_regenerate = 1
2528 myincrementals=["USE"]
2530 myincrementals = self.incrementals
2531 myincrementals = set(myincrementals)
2532 # If self.features exists, it has already been stacked and may have
2533 # been mutated, so don't stack it again or else any mutations will be
2535 if "FEATURES" in myincrementals and hasattr(self, "features"):
2536 myincrementals.remove("FEATURES")
2538 if "USE" in myincrementals:
2539 # Process USE last because it depends on USE_EXPAND which is also
2541 myincrementals.remove("USE")
2543 for mykey in myincrementals:
2545 mydbs=self.configlist[:-1]
2549 if mykey not in curdb:
2551 #variables are already expanded
2552 mysplit = curdb[mykey].split()
2556 # "-*" is a special "minus" var that means "unset all settings".
2557 # so USE="-* gnome" will have *just* gnome enabled.
2562 # Not legal. People assume too much. Complain.
2563 writemsg(red("USE flags should not start with a '+': %s\n" % x),
2570 if (x[1:] in myflags):
2572 del myflags[myflags.index(x[1:])]
2575 # We got here, so add it now.
2576 if x not in myflags:
2580 #store setting in last element of configlist, the original environment:
2581 if myflags or mykey in self:
2582 self.configlist[-1][mykey] = " ".join(myflags)
2585 # Do the USE calculation last because it depends on USE_EXPAND.
2586 if "auto" in self["USE_ORDER"].split(":"):
2587 self.configdict["auto"]["USE"] = autouse(
2588 vartree(root=self["ROOT"], categories=self.categories,
2590 use_cache=use_cache, mysettings=self)
2592 self.configdict["auto"]["USE"] = ""
2594 use_expand = self.get("USE_EXPAND", "").split()
# Build the USE lookup order (uvlist) from USE_ORDER, lowest priority first.
2597 for x in self["USE_ORDER"].split(":"):
2598 if x in self.configdict:
2599 self.uvlist.append(self.configdict[x])
2600 self.uvlist.reverse()
2602 # For optimal performance, use slice
2603 # comparison instead of startswith().
2605 for curdb in self.uvlist:
2606 cur_use_expand = [x for x in use_expand if x in curdb]
2607 mysplit = curdb.get("USE", "").split()
2608 if not mysplit and not cur_use_expand:
2616 writemsg(colorize("BAD", "USE flags should not start " + \
2617 "with a '+': %s\n" % x), noiselevel=-1)
2623 myflags.discard(x[1:])
2628 for var in cur_use_expand:
2629 var_lower = var.lower()
2630 is_not_incremental = var not in myincrementals
2631 if is_not_incremental:
2632 prefix = var_lower + "_"
2633 prefix_len = len(prefix)
# Non-incremental USE_EXPAND vars fully replace earlier values, so
# strip all previously-expanded flags with this prefix first.
2634 for x in list(myflags):
2635 if x[:prefix_len] == prefix:
2637 for x in curdb[var].split():
2639 if is_not_incremental:
2640 writemsg(colorize("BAD", "Invalid '+' " + \
2641 "operator in non-incremental variable " + \
2642 "'%s': '%s'\n" % (var, x)), noiselevel=-1)
2645 writemsg(colorize("BAD", "Invalid '+' " + \
2646 "operator in incremental variable " + \
2647 "'%s': '%s'\n" % (var, x)), noiselevel=-1)
2650 if is_not_incremental:
2651 writemsg(colorize("BAD", "Invalid '-' " + \
2652 "operator in non-incremental variable " + \
2653 "'%s': '%s'\n" % (var, x)), noiselevel=-1)
2655 myflags.discard(var_lower + "_" + x[1:])
2657 myflags.add(var_lower + "_" + x)
2659 if not hasattr(self, "features"):
2660 self.features = sorted(set(
2661 self.configlist[-1].get("FEATURES","").split()))
2662 self["FEATURES"] = " ".join(self.features)
# use.force wins over use.mask ordering here: forced flags are added
# first, then masked flags are removed from the final set.
2664 myflags.update(self.useforce)
2665 arch = self.configdict["defaults"].get("ARCH")
2669 myflags.difference_update(self.usemask)
2670 self.configlist[-1]["USE"]= " ".join(sorted(myflags))
2672 self.already_in_regenerate = 0
# Build (and presumably cache in self.virts_p — TODO confirm, the guard
# lines are dropped) a mapping from the short virtual name (the part after
# "virtual/") to its provider list. The return line is not visible here.
2674 def get_virts_p(self, myroot=None):
2677 virts = self.getvirtuals()
2680 vkeysplit = x.split("/")
2681 if vkeysplit[1] not in self.virts_p:
2682 self.virts_p[vkeysplit[1]] = virts[x]
# Compute self.virtuals: read each profile's "virtuals" file, validate the
# atoms, stack them incrementally into dirVirtuals, merge in installed-tree
# provides (unless repoman / local_config is off), and compile the result.
# NOTE(review): extraction gaps — the cached-return guard, virtuals_list
# initialization, and the per-atom validation loop header are dropped.
2685 def getvirtuals(self, myroot=None):
2686 """myroot is now ignored because, due to caching, it has always been
2687 broken for all but the first call."""
2688 myroot = self["ROOT"]
2690 return self.virtuals
2693 for x in self.profiles:
2694 virtuals_file = os.path.join(x, "virtuals")
2695 virtuals_dict = grabdict(virtuals_file)
2696 for k in virtuals_dict.keys():
# Keys must be plain category/package atoms (no version/operator).
2697 if not isvalidatom(k) or dep_getkey(k) != k:
2698 writemsg("--- Invalid virtuals atom in %s: %s\n" % \
2699 (virtuals_file, k), noiselevel=-1)
2700 del virtuals_dict[k]
2702 myvalues = virtuals_dict[k]
2705 if x.startswith("-"):
2706 # allow incrementals
2708 if not isvalidatom(myatom):
2709 writemsg("--- Invalid atom in %s: %s\n" % \
2710 (virtuals_file, x), noiselevel=-1)
2713 del virtuals_dict[k]
2715 virtuals_list.append(virtuals_dict)
2717 self.dirVirtuals = stack_dictlist(virtuals_list, incremental=True)
2720 for virt in self.dirVirtuals:
2721 # Preference for virtuals decreases from left to right.
2722 self.dirVirtuals[virt].reverse()
2724 # Repoman does not use user or tree virtuals.
2725 if self.local_config and not self.treeVirtuals:
2726 temp_vartree = vartree(myroot, None,
2727 categories=self.categories, settings=self)
2728 # Reduce the provides into a list by CP.
2729 self.treeVirtuals = map_dictlist_vals(getCPFromCPV,temp_vartree.get_all_provides())
2731 self.virtuals = self.__getvirtuals_compile()
2732 return self.virtuals
# Merge profile (dirVirtuals), installed (treeVirtuals), and depgraph
# virtuals into one provider mapping, preferring providers that are both
# installed and listed in the profile (ptVirtuals).
# NOTE(review): the ptVirtuals initialization and the final return line are
# among the lines dropped by this extraction.
2734 def __getvirtuals_compile(self):
2735 """Stack installed and profile virtuals. Preference for virtuals
2736 decreases from left to right.
2737 Order of preference:
2738 1. installed and in profile
2743 # Virtuals by profile+tree preferences.
2746 for virt, installed_list in self.treeVirtuals.iteritems():
2747 profile_list = self.dirVirtuals.get(virt, None)
2748 if not profile_list:
2750 for cp in installed_list:
2751 if cp in profile_list:
2752 ptVirtuals.setdefault(virt, [])
2753 ptVirtuals[virt].append(cp)
2755 virtuals = stack_dictlist([ptVirtuals, self.treeVirtuals,
2756 self.dirVirtuals, self._depgraphVirtuals])
# Delete mykey from every layered config dict in lookuplist; the per-dict
# deletion lines are dropped by this extraction.
2759 def __delitem__(self,mykey):
2761 for x in self.lookuplist:
# Layered lookup: return the first hit across lookuplist; unlike a normal
# mapping, a miss returns "" instead of raising KeyError (backward compat).
2766 def __getitem__(self,mykey):
2767 for d in self.lookuplist:
2770 return '' # for backward compat, don't raise KeyError
# dict.get() analogue over the layered lookuplist; the hit-return and the
# default-return lines are dropped by this extraction.
2772 def get(self, k, x=None):
2773 for d in self.lookuplist:
# dict.pop() analogue: scans lookuplist in reverse; raises TypeError for
# more than one default argument (message visible below). Interior lines
# (the len(args) guard and the per-dict pop) are dropped by this extraction.
2778 def pop(self, key, *args):
2781 "pop expected at most 2 arguments, got " + \
2782 repr(1 + len(args)))
2784 for d in reversed(self.lookuplist):
# Deprecated Python-2-style membership test; warns and delegates to
# __contains__ (the warnings-category argument line is dropped here).
2792 def has_key(self,mykey):
2793 warnings.warn("portage.config.has_key() is deprecated, "
2794 "use the in operator instead",
2796 return mykey in self
# Membership across all layered dicts; the hit/miss return lines are
# dropped by this extraction.
2798 def __contains__(self, mykey):
2799 """Called to implement membership test operators (in and not in)."""
2800 for d in self.lookuplist:
# dict.setdefault() analogue over lookuplist; almost the entire body
# (original lines 2806-2817 and after 2818) is dropped by this extraction.
2805 def setdefault(self, k, x=None):
2818 for d in self.lookuplist:
# iteritems() generator plus the body of items() (whose `def` line was
# dropped by this extraction); items() materializes iteritems() as a list.
2825 def iteritems(self):
2830 return list(self.iteritems())
# Assignment goes into the transient "env" layer only and records the key
# in modifiedkeys; values must be str (non-str raises ValueError). Settings
# written here are discarded at reset() time, per the docstring.
2832 def __setitem__(self,mykey,myvalue):
2833 "set a value; will be thrown away at reset() time"
2834 if not isinstance(myvalue, str):
2835 raise ValueError("Invalid type being used as a value: '%s': '%s'" % (str(mykey),str(myvalue)))
2837 self.modifiedkeys += [mykey]
2838 self.configdict["env"][mykey]=myvalue
# Body of config.environ() — the `def` line (original 2840) was dropped by
# this extraction. Builds the dict exported to ebuild subprocesses: filters
# _environ_filter keys, applies the whitelist when ${T}/environment exists
# (so only whitelisted vars leak into a restored ebuild env), defaults HOME
# from BUILD_PREFIX, substitutes the IUSE-filtered PORTAGE_USE for USE, and
# backs up ROOTPATH (sandbox's bashrc sources /etc/profile which unsets it).
# NOTE(review): the main key/value loop header and the final return are
# among the lines dropped here.
2841 "return our locally-maintained environment"
2843 environ_filter = self._environ_filter
2845 filter_calling_env = False
2846 temp_dir = self.get("T")
2847 if temp_dir is not None and \
2848 os.path.exists(os.path.join(temp_dir, "environment")):
2849 filter_calling_env = True
2851 environ_whitelist = self._environ_whitelist
2852 env_d = self.configdict["env.d"]
2854 if x in environ_filter:
2857 if not isinstance(myvalue, basestring):
2858 writemsg("!!! Non-string value in config: %s=%s\n" % \
2859 (x, myvalue), noiselevel=-1)
2861 if filter_calling_env and \
2862 x not in environ_whitelist and \
2863 not self._environ_whitelist_re.match(x):
2864 # Do not allow anything to leak into the ebuild
2865 # environment unless it is explicitly whitelisted.
2866 # This ensures that variables unset by the ebuild
2870 if "HOME" not in mydict and "BUILD_PREFIX" in mydict:
2871 writemsg("*** HOME not set. Setting to "+mydict["BUILD_PREFIX"]+"\n")
2872 mydict["HOME"]=mydict["BUILD_PREFIX"][:]
2874 if filter_calling_env:
2875 phase = self.get("EBUILD_PHASE")
2879 whitelist.append("RPMDIR")
2885 # Filtered by IUSE and implicit IUSE.
2886 mydict["USE"] = self.get("PORTAGE_USE", "")
2888 # sandbox's bashrc sources /etc/profile which unsets ROOTPATH,
2889 # so we have to back it up and restore it.
2890 rootpath = mydict.get("ROOTPATH")
2892 mydict["PORTAGE_ROOTPATH"] = rootpath
# Lazily load and cache thirdpartymirrors from PORTDIR and every overlay
# (overlays are inserted first, so they take precedence in the incremental
# stack). Result is memoized on self._thirdpartymirrors.
2896 def thirdpartymirrors(self):
2897 if getattr(self, "_thirdpartymirrors", None) is None:
2898 profileroots = [os.path.join(self["PORTDIR"], "profiles")]
2899 for x in self["PORTDIR_OVERLAY"].split():
2900 profileroots.insert(0, os.path.join(x, "profiles"))
2901 thirdparty_lists = [grabdict(os.path.join(x, "thirdpartymirrors")) for x in profileroots]
2902 self._thirdpartymirrors = stack_dictlist(thirdparty_lists, incremental=True)
2903 return self._thirdpartymirrors
# Body of archlist() (its `def` line, original 2905, was dropped by this
# extraction): every arch in PORTAGE_ARCHLIST plus its "~arch" variant.
2906 return flatten([[myarch, "~" + myarch] \
2907 for myarch in self["PORTAGE_ARCHLIST"].split()])
# Memoized check: SELinux support is on only when USE has "selinux", the
# selinux module imported successfully, and the kernel reports it enabled.
# When disabled, the selinux module is evicted from sys.modules.
# NOTE(review): the try/except around the module check and the noiselevel
# argument line are among the lines dropped by this extraction.
2909 def selinux_enabled(self):
2910 if getattr(self, "_selinux_enabled", None) is None:
2911 self._selinux_enabled = 0
2912 if "selinux" in self["USE"].split():
2913 if "selinux" in globals():
2914 if selinux.is_selinux_enabled() == 1:
2915 self._selinux_enabled = 1
2917 self._selinux_enabled = 0
2919 writemsg("!!! SELinux module not found. Please verify that it was installed.\n",
2921 self._selinux_enabled = 0
2922 if self._selinux_enabled == 0:
2924 del sys.modules["selinux"]
2927 return self._selinux_enabled
# Escape backslash, double-quote, dollar and backquote for safe embedding
# in a double-quoted shell string. The final quoted-return line (original
# 2938) was dropped by this extraction.
2929 def _shell_quote(s):
2931 Quote a string in double-quotes and use backslashes to
2932 escape any backslashes, double-quotes, dollar signs, or
2933 backquotes in the string.
2935 for letter in "\\\"$`":
2937 s = s.replace(letter, "\\" + letter)
# Module-level latch consulted by _create_pty_or_pipe(): once openpty()
# fails, fall back to plain pipes for the rest of the process lifetime.
2940 # In some cases, openpty can be slow when it fails. Therefore,
2941 # stop trying to use it after the first failure.
2942 _disable_openpty = False
# Allocate a pty, falling back to os.pipe() on failure (and latching the
# fallback via the module-level _disable_openpty flag). On success, OPOST
# is cleared on the slave to avoid \n -> \r\n translation, and the terminal
# size is copied from copy_term_size when that fd is a tty.
# NOTE(review): extraction gaps — the got_pty assignments, the termios
# import/guard, and part of the error path are among the dropped lines.
2944 def _create_pty_or_pipe(copy_term_size=None):
2946 Try to create a pty and if then fails then create a normal
2949 @param copy_term_size: If a tty file descriptor is given
2950 then the term size will be copied to the pty.
2951 @type copy_term_size: int
2953 @returns: A tuple of (is_pty, master_fd, slave_fd) where
2954 is_pty is True if a pty was successfully allocated, and
2955 False if a normal pipe was allocated.
2960 global _disable_openpty
2961 if _disable_openpty:
2962 master_fd, slave_fd = os.pipe()
2964 from pty import openpty
2966 master_fd, slave_fd = openpty()
2968 except EnvironmentError, e:
2969 _disable_openpty = True
2970 writemsg("openpty failed: '%s'\n" % str(e),
2973 master_fd, slave_fd = os.pipe()
2976 # Disable post-processing of output since otherwise weird
2977 # things like \n -> \r\n transformations may occur.
2979 mode = termios.tcgetattr(slave_fd)
2980 mode[1] &= ~termios.OPOST
2981 termios.tcsetattr(slave_fd, termios.TCSANOW, mode)
2984 copy_term_size is not None and \
2985 os.isatty(copy_term_size):
2986 from portage.output import get_term_size, set_term_size
2987 rows, columns = get_term_size()
2988 set_term_size(rows, columns, slave_fd)
2990 return (got_pty, master_fd, slave_fd)
# Portage's central subprocess launcher: builds the environment from
# mysettings, optionally drops privileges / enables sandbox, fakeroot or an
# SELinux exec context, wires logfile tee-ing through a pty or pipe, then
# waits for the child and returns its exit status.
# NOTE(review): this extraction drops many interior lines (try/finally
# structure around the SELinux context switch, the returnpid early-return,
# parts of the select/teardown loop, flush calls, mypids initialization);
# the visible code is kept byte-identical below.
2992 # XXX This would be to replace getstatusoutput completely.
2993 # XXX Issue: cannot block execution. Deadlock condition.
2994 def spawn(mystring, mysettings, debug=0, free=0, droppriv=0, sesandbox=0, fakeroot=0, **keywords):
2996 Spawn a subprocess with extra portage-specific options.
2999 Sandbox: Sandbox means the spawned process will be limited in its ability t
3000 read and write files (normally this means it is restricted to ${IMAGE}/)
3001 SElinux Sandbox: Enables sandboxing on SElinux
3002 Reduced Privileges: Drops privilages such that the process runs as portage:portage
3005 Notes: os.system cannot be used because it messes with signal handling. Instead we
3006 use the portage.process spawn* family of functions.
3008 This function waits for the process to terminate.
3010 @param mystring: Command to run
3011 @type mystring: String
3012 @param mysettings: Either a Dict of Key,Value pairs or an instance of portage.config
3013 @type mysettings: Dictionary or config instance
3014 @param debug: Ignored
3015 @type debug: Boolean
3016 @param free: Enable sandboxing for this process
3018 @param droppriv: Drop to portage:portage when running this command
3019 @type droppriv: Boolean
3020 @param sesandbox: Enable SELinux Sandboxing (toggles a context switch)
3021 @type sesandbox: Boolean
3022 @param fakeroot: Run this command with faked root privileges
3023 @type fakeroot: Boolean
3024 @param keywords: Extra options encoded as a dict, to be passed to spawn
3025 @type keywords: Dictionary
3028 1. The return code of the spawned process.
3031 if isinstance(mysettings, dict):
3033 keywords["opt_name"]="[ %s ]" % "portage"
3035 check_config_instance(mysettings)
3036 env=mysettings.environ()
3037 keywords["opt_name"]="[%s]" % mysettings["PF"]
3039 fd_pipes = keywords.get("fd_pipes")
3040 if fd_pipes is None:
3042 0:sys.stdin.fileno(),
3043 1:sys.stdout.fileno(),
3044 2:sys.stderr.fileno(),
3046 # In some cases the above print statements don't flush stdout, so
3047 # it needs to be flushed before allowing a child process to use it
3048 # so that output always shows in the correct order.
3049 stdout_filenos = (sys.stdout.fileno(), sys.stderr.fileno())
3050 for fd in fd_pipes.itervalues():
3051 if fd in stdout_filenos:
3056 # The default policy for the sesandbox domain only allows entry (via exec)
3057 # from shells and from binaries that belong to portage (the number of entry
3058 # points is minimized). The "tee" binary is not among the allowed entry
3059 # points, so it is spawned outside of the sesandbox domain and reads from a
3060 # pseudo-terminal that connects two domains.
3061 logfile = keywords.get("logfile")
3065 fd_pipes_orig = None
3068 del keywords["logfile"]
3069 if 1 not in fd_pipes or 2 not in fd_pipes:
3070 raise ValueError(fd_pipes)
3072 fd_pipes.setdefault(0, sys.stdin.fileno())
3073 fd_pipes_orig = fd_pipes.copy()
3075 got_pty, master_fd, slave_fd = \
3076 _create_pty_or_pipe(copy_term_size=fd_pipes_orig[1])
3078 # We must set non-blocking mode before we close the slave_fd
3079 # since otherwise the fcntl call can fail on FreeBSD (the child
3080 # process might have already exited and closed slave_fd so we
3081 # have to keep it open in order to avoid FreeBSD potentially
3082 # generating an EAGAIN exception).
3084 fcntl.fcntl(master_fd, fcntl.F_SETFL,
3085 fcntl.fcntl(master_fd, fcntl.F_GETFL) | os.O_NONBLOCK)
# The child's stdout/stderr both go to the slave end; the parent reads
# the master end and tees it to the original stdout plus the logfile.
3087 fd_pipes[0] = fd_pipes_orig[0]
3088 fd_pipes[1] = slave_fd
3089 fd_pipes[2] = slave_fd
3090 keywords["fd_pipes"] = fd_pipes
3092 features = mysettings.features
3093 # TODO: Enable fakeroot to be used together with droppriv. The
3094 # fake ownership/permissions will have to be converted to real
3095 # permissions in the merge phase.
3096 fakeroot = fakeroot and uid != 0 and portage.process.fakeroot_capable
3097 if droppriv and not uid and portage_gid and portage_uid:
3098 keywords.update({"uid":portage_uid,"gid":portage_gid,
3099 "groups":userpriv_groups,"umask":002})
3101 free=((droppriv and "usersandbox" not in features) or \
3102 (not droppriv and "sandbox" not in features and \
3103 "usersandbox" not in features and not fakeroot))
# Never nest sandboxes: an active SANDBOX_ACTIVE also forces plain bash.
3105 if free or "SANDBOX_ACTIVE" in os.environ:
3106 keywords["opt_name"] += " bash"
3107 spawn_func = portage.process.spawn_bash
3109 keywords["opt_name"] += " fakeroot"
3110 keywords["fakeroot_state"] = os.path.join(mysettings["T"], "fakeroot.state")
3111 spawn_func = portage.process.spawn_fakeroot
3113 keywords["opt_name"] += " sandbox"
3114 spawn_func = portage.process.spawn_sandbox
3117 con = selinux.getcontext()
3118 con = con.replace(mysettings["PORTAGE_T"],
3119 mysettings["PORTAGE_SANDBOX_T"])
3120 selinux.setexec(con)
3122 returnpid = keywords.get("returnpid")
3123 keywords["returnpid"] = True
3125 mypids.extend(spawn_func(mystring, env=env, **keywords))
3130 selinux.setexec(None)
3136 log_file = open(logfile, 'a')
3137 stdout_file = os.fdopen(os.dup(fd_pipes_orig[1]), 'w')
3138 master_file = os.fdopen(master_fd, 'r')
3139 iwtd = [master_file]
3142 import array, select
3146 events = select.select(iwtd, owtd, ewtd)
3148 # Use non-blocking mode to prevent read
3149 # calls from blocking indefinitely.
3150 buf = array.array('B')
3152 buf.fromfile(f, buffsize)
3158 if f is master_file:
3159 buf.tofile(stdout_file)
3161 buf.tofile(log_file)
3167 retval = os.waitpid(pid, 0)[1]
3168 portage.process.spawned_pids.remove(pid)
3169 if retval != os.EX_OK:
# Convert a raw waitpid status killed-by-signal value into the
# conventional shifted exit-code form.
3171 return (retval & 0xff) << 8
# Keyword-argument pairs applied by _spawn_fetch() when dropping to the
# portage user for userfetch (a trailing umask entry from the original
# source is among the lines dropped by this extraction).
3175 _userpriv_spawn_kwargs = (
3176 ("uid", portage_uid),
3177 ("gid", portage_gid),
3178 ("groups", userpriv_groups),
# Run a fetcher command with fetch-appropriate settings: stderr redirected
# to stdout, privileges dropped for userfetch when running as root, and an
# SELinux fetch context (entered via bash, the allowed entry point) that is
# restored afterwards. Returns portage.process.spawn()'s result.
# NOTE(review): the try/finally structure around the SELinux switch and the
# final return are among the lines dropped by this extraction.
3182 def _spawn_fetch(settings, args, **kwargs):
3184 Spawn a process with appropriate settings for fetching, including
3185 userfetch and selinux support.
3188 global _userpriv_spawn_kwargs
3190 # Redirect all output to stdout since some fetchers like
3191 # wget pollute stderr (if portage detects a problem then it
3192 # can send it's own message to stderr).
3193 if "fd_pipes" not in kwargs:
3195 kwargs["fd_pipes"] = {
3196 0 : sys.stdin.fileno(),
3197 1 : sys.stdout.fileno(),
3198 2 : sys.stdout.fileno(),
3201 if "userfetch" in settings.features and \
3202 os.getuid() == 0 and portage_gid and portage_uid:
3203 kwargs.update(_userpriv_spawn_kwargs)
3207 if settings.selinux_enabled():
3208 con = selinux.getcontext()
3209 con = con.replace(settings["PORTAGE_T"], settings["PORTAGE_FETCH_T"])
3210 selinux.setexec(con)
3211 # bash is an allowed entrypoint, while most binaries are not
3212 if args[0] != BASH_BINARY:
3213 args = [BASH_BINARY, "-c", "exec \"$@\"", args[0]] + args
3215 rval = portage.process.spawn(args,
3216 env=dict(settings.iteritems()), **kwargs)
3219 if settings.selinux_enabled():
3220 selinux.setexec(None)
# Module-level memoization cache and shell snippet used by
# _userpriv_test_write_file(): the snippet touches the target path,
# captures the status, removes the file, and exits with that status.
3224 _userpriv_test_write_file_cache = {}
3225 _userpriv_test_write_cmd_script = "touch %(file_path)s 2>/dev/null ; rval=$? ; " + \
3226 "rm -f %(file_path)s ; exit $rval"
# Probe whether the (dropped-privilege) fetch user can write file_path, by
# spawning the touch/rm shell snippet above; results are memoized per path.
# NOTE(review): the cached-hit return and the final return are among the
# lines dropped by this extraction.
3228 def _userpriv_test_write_file(settings, file_path):
3230 Drop privileges and try to open a file for writing. The file may or
3231 may not exist, and the parent directory is assumed to exist. The file
3232 is removed before returning.
3234 @param settings: A config instance which is passed to _spawn_fetch()
3235 @param file_path: A file path to open and write.
3236 @return: True if write succeeds, False otherwise.
3239 global _userpriv_test_write_file_cache, _userpriv_test_write_cmd_script
3240 rval = _userpriv_test_write_file_cache.get(file_path)
3241 if rval is not None:
3244 args = [BASH_BINARY, "-c", _userpriv_test_write_cmd_script % \
3245 {"file_path" : _shell_quote(file_path)}]
3247 returncode = _spawn_fetch(settings, args)
3249 rval = returncode == os.EX_OK
3250 _userpriv_test_write_file_cache[file_path] = rval
# Move a checksum-failed distfile out of the way: reuse an existing
# "._checksum_failure_." temp file with identical size+md5 if one exists
# (removing the new duplicate), otherwise mkstemp a fresh name and rename
# the file onto it. Always returns the surviving temp filename.
# NOTE(review): extraction gaps — the checksum-lazy-init, the duplicate
# unlink, and some loop-continue lines are among the dropped lines.
3253 def _checksum_failure_temp_file(distdir, basename):
3255 First try to find a duplicate temp file with the same checksum and return
3256 that filename if available. Otherwise, use mkstemp to create a new unique
3257 filename._checksum_failure_.$RANDOM, rename the given file, and return the
3258 new filename. In any case, filename will be renamed or removed before this
3259 function returns a temp filename.
3262 filename = os.path.join(distdir, basename)
3263 size = os.stat(filename).st_size
3265 tempfile_re = re.compile(re.escape(basename) + r'\._checksum_failure_\..*')
3266 for temp_filename in os.listdir(distdir):
3267 if not tempfile_re.match(temp_filename):
3269 temp_filename = os.path.join(distdir, temp_filename)
# Cheap size comparison first; md5 only for size matches.
3271 if size != os.stat(temp_filename).st_size:
3276 temp_checksum = portage.checksum.perform_md5(temp_filename)
3277 except portage.exception.FileNotFound:
3278 # Apparently the temp file disappeared. Let it go.
3280 if checksum is None:
3281 checksum = portage.checksum.perform_md5(filename)
3282 if checksum == temp_checksum:
3284 return temp_filename
3286 from tempfile import mkstemp
3287 fd, temp_filename = mkstemp("", basename + "._checksum_failure_.", distdir)
3289 os.rename(filename, temp_filename)
3290 return temp_filename
# Verify all digests for filename via portage.checksum.verify_all and print
# a diagnostic (reason + got/expected) on mismatch when show_errors is set.
# NOTE(review): the failure guard and the True/False return lines are among
# the lines dropped by this extraction.
3292 def _check_digests(filename, digests, show_errors=1):
3294 Check digests and display a message if an error occurs.
3295 @return True if all digests match, False otherwise.
3297 verified_ok, reason = portage.checksum.verify_all(filename, digests)
3300 writemsg("!!! Previously fetched" + \
3301 " file: '%s'\n" % filename, noiselevel=-1)
3302 writemsg("!!! Reason: %s\n" % reason[0],
3304 writemsg(("!!! Got: %s\n" + \
3305 "!!! Expected: %s\n") % \
3306 (reason[1], reason[2]), noiselevel=-1)
# Stat filename and validate it against the given digests: size-only when
# "size" is the sole digest, zero-byte files always invalid, full digest
# verification otherwise; prints eout progress on success.
# NOTE(review): extraction gaps — the digests-None normalization, the
# OSError branch, and the various return tuples are among the dropped lines.
3310 def _check_distfile(filename, digests, eout, show_errors=1):
3312 @return a tuple of (match, stat_obj) where match is True if filename
3313 matches all given digests (if any) and stat_obj is a stat result, or
3314 None if the file does not exist.
3318 size = digests.get("size")
3319 if size is not None and len(digests) == 1:
3323 st = os.stat(filename)
3325 return (False, None)
3326 if size is not None and size != st.st_size:
3329 if size is not None:
# Size-only verification (parallel-fetch fast path).
3330 eout.ebegin("%s %s ;-)" % (os.path.basename(filename), "size"))
3332 elif st.st_size == 0:
3333 # Zero-byte distfiles are always invalid.
3336 if _check_digests(filename, digests, show_errors=show_errors):
3337 eout.ebegin("%s %s ;-)" % (os.path.basename(filename),
3338 " ".join(sorted(digests))))
# Parser for PORTAGE_FETCH_RESUME_MIN_SIZE values: a number with an
# optional binary-size suffix. The _size_suffix_map dict body (mapping
# suffix letters to bit-shift exponents, used in fetch()) was dropped by
# this extraction — only the opening brace remains.
3344 _fetch_resume_size_re = re.compile('(^[\d]+)([KMGTPEZY]?$)')
3346 _size_suffix_map = {
3358 def fetch(myuris, mysettings, listonly=0, fetchonly=0, locks_in_subdir=".locks",use_locks=1, try_mirrors=1):
3359 "fetch files. Will use digest file if available."
# Parameters -- NOTE(review): inferred from usage visible below; confirm
# against callers (this listing is sampled and some lines are missing):
#   myuris          -- dict of {filename: uri iterable} or a flat list of URIs
#   mysettings      -- portage config object (FEATURES, DISTDIR, RESTRICT, ...)
#   listonly        -- when true, distlocks are not used (see below)
#   fetchonly       -- fetch-only mode flag
#   locks_in_subdir -- subdirectory of DISTDIR holding per-file lock files
#   use_locks       -- whether to take per-file locks while fetching
#   try_mirrors     -- presumably toggles GENTOO_MIRRORS use; not visible here
3364 features = mysettings.features
3365 restrict = mysettings.get("PORTAGE_RESTRICT","").split()
# secpass >= 2 means we have enough privilege to drop to the portage user
# for fetching when the userfetch/userpriv FEATURES are enabled.
3367 from portage.data import secpass
3368 userfetch = secpass >= 2 and "userfetch" in features
3369 userpriv = secpass >= 2 and "userpriv" in features
3371 # 'nomirror' is bad/negative logic. You Restrict mirroring, not no-mirroring.
3372 if "mirror" in restrict or \
3373 "nomirror" in restrict:
3374 if ("mirror" in features) and ("lmirror" not in features):
3375 # lmirror should allow you to bypass mirror restrictions.
3376 # XXX: This is not a good thing, and is temporary at best.
3377 print ">>> \"mirror\" mode desired and \"mirror\" restriction found; skipping fetch."
3380 # Generally, downloading the same file repeatedly from
3381 # every single available mirror is a waste of bandwidth
3382 # and time, so there needs to be a cap.
3383 checksum_failure_max_tries = 5
3384 v = checksum_failure_max_tries
# PORTAGE_FETCH_CHECKSUM_TRY_MIRRORS can override the default cap; fall
# back to the default (with a warning) on non-integer or < 1 values.
3386 v = int(mysettings.get("PORTAGE_FETCH_CHECKSUM_TRY_MIRRORS",
3387 checksum_failure_max_tries))
3388 except (ValueError, OverflowError):
3389 writemsg("!!! Variable PORTAGE_FETCH_CHECKSUM_TRY_MIRRORS" + \
3390 " contains non-integer value: '%s'\n" % \
3391 mysettings["PORTAGE_FETCH_CHECKSUM_TRY_MIRRORS"], noiselevel=-1)
3392 writemsg("!!! Using PORTAGE_FETCH_CHECKSUM_TRY_MIRRORS " + \
3393 "default value: %s\n" % checksum_failure_max_tries,
3395 v = checksum_failure_max_tries
3397 writemsg("!!! Variable PORTAGE_FETCH_CHECKSUM_TRY_MIRRORS" + \
3398 " contains value less than 1: '%s'\n" % v, noiselevel=-1)
3399 writemsg("!!! Using PORTAGE_FETCH_CHECKSUM_TRY_MIRRORS " + \
3400 "default value: %s\n" % checksum_failure_max_tries,
3402 v = checksum_failure_max_tries
3403 checksum_failure_max_tries = v
# Partial downloads smaller than this threshold are deleted and restarted
# from scratch instead of being resumed.
3406 fetch_resume_size_default = "350K"
3407 fetch_resume_size = mysettings.get("PORTAGE_FETCH_RESUME_MIN_SIZE")
3408 if fetch_resume_size is not None:
3409 fetch_resume_size = "".join(fetch_resume_size.split())
3410 if not fetch_resume_size:
3411 # If it's undefined or empty, silently use the default.
3412 fetch_resume_size = fetch_resume_size_default
3413 match = _fetch_resume_size_re.match(fetch_resume_size)
3414 if match is None or \
3415 (match.group(2).upper() not in _size_suffix_map):
3416 writemsg("!!! Variable PORTAGE_FETCH_RESUME_MIN_SIZE" + \
3417 " contains an unrecognized format: '%s'\n" % \
3418 mysettings["PORTAGE_FETCH_RESUME_MIN_SIZE"], noiselevel=-1)
3419 writemsg("!!! Using PORTAGE_FETCH_RESUME_MIN_SIZE " + \
3420 "default value: %s\n" % fetch_resume_size_default,
3422 fetch_resume_size = None
3423 if fetch_resume_size is None:
3424 fetch_resume_size = fetch_resume_size_default
3425 match = _fetch_resume_size_re.match(fetch_resume_size)
# Convert "<number><suffix>" into a byte count via the suffix exponent map.
3426 fetch_resume_size = int(match.group(1)) * \
3427 2 ** _size_suffix_map[match.group(2).upper()]
3429 # Behave like the package has RESTRICT="primaryuri" after a
3430 # couple of checksum failures, to increase the probablility
3431 # of success before checksum_failure_max_tries is reached.
3432 checksum_failure_primaryuri = 2
3433 thirdpartymirrors = mysettings.thirdpartymirrors()
3435 # In the background parallel-fetch process, it's safe to skip checksum
3436 # verification of pre-existing files in $DISTDIR that have the correct
3437 # file size. The parent process will verify their checksums prior to
3440 parallel_fetchonly = "PORTAGE_PARALLEL_FETCHONLY" in mysettings
3441 if parallel_fetchonly:
3444 check_config_instance(mysettings)
3446 custommirrors = grabdict(os.path.join(mysettings["PORTAGE_CONFIGROOT"],
3447 CUSTOM_MIRRORS_FILE.lstrip(os.path.sep)), recursive=1)
# distlocks are disabled in listonly mode or when the feature is off.
3451 if listonly or ("distlocks" not in features):
3455 if "skiprocheck" in features:
3458 if not os.access(mysettings["DISTDIR"],os.W_OK) and fetch_to_ro:
3460 writemsg(red("!!! For fetching to a read-only filesystem, " + \
3461 "locking should be turned off.\n"), noiselevel=-1)
3462 writemsg("!!! This can be done by adding -distlocks to " + \
3463 "FEATURES in /etc/make.conf\n", noiselevel=-1)
3466 # local mirrors are always added
3467 if "local" in custommirrors:
3468 mymirrors += custommirrors["local"]
3470 if "nomirror" in restrict or \
3471 "mirror" in restrict:
3472 # We don't add any mirrors.
3476 mymirrors += [x.rstrip("/") for x in mysettings["GENTOO_MIRRORS"].split() if x]
# Load per-package DIST digests from the Manifest unless explicitly skipped.
3478 skip_manifest = mysettings.get("EBUILD_SKIP_MANIFEST") == "1"
3479 pkgdir = mysettings.get("O")
3480 if not (pkgdir is None or skip_manifest):
3481 mydigests = Manifest(
3482 pkgdir, mysettings["DISTDIR"]).getTypeDigests("DIST")
3484 # no digests because fetch was not called for a specific package
3488 ro_distdirs = [x for x in \
3489 shlex.split(mysettings.get("PORTAGE_RO_DISTDIRS", "")) \
3490 if os.path.isdir(x)]
# Split filesystem ("/"-rooted) mirrors out of the mirror list; these are
# serviced by a local copy rather than by the fetch command.
3493 for x in range(len(mymirrors)-1,-1,-1):
3494 if mymirrors[x] and mymirrors[x][0]=='/':
3495 fsmirrors += [mymirrors[x]]
3498 restrict_fetch = "fetch" in restrict
3499 custom_local_mirrors = custommirrors.get("local", [])
3501 # With fetch restriction, a normal uri may only be fetched from
3502 # custom local mirrors (if available). A mirror:// uri may also
3503 # be fetched from specific mirrors (effectively overriding fetch
3504 # restriction, but only for specific mirrors).
3505 locations = custom_local_mirrors
3507 locations = mymirrors
# Normalize myuris (dict or flat list) into (filename, uri) pairs.
3509 file_uri_tuples = []
3510 if isinstance(myuris, dict):
3511 for myfile, uri_set in myuris.iteritems():
3512 for myuri in uri_set:
3513 file_uri_tuples.append((myfile, myuri))
3515 for myuri in myuris:
3516 file_uri_tuples.append((os.path.basename(myuri), myuri))
# Build filedict: for each file, the ordered list of candidate URIs,
# seeded with the <mirror>/distfiles/<file> locations.
3519 primaryuri_indexes={}
3520 primaryuri_dict = {}
3521 thirdpartymirror_uris = {}
3522 for myfile, myuri in file_uri_tuples:
3523 if myfile not in filedict:
3525 for y in range(0,len(locations)):
3526 filedict[myfile].append(locations[y]+"/distfiles/"+myfile)
3527 if myuri[:9]=="mirror://":
3528 eidx = myuri.find("/", 9)
3530 mirrorname = myuri[9:eidx]
3531 path = myuri[eidx+1:]
3533 # Try user-defined mirrors first
3534 if mirrorname in custommirrors:
3535 for cmirr in custommirrors[mirrorname]:
3536 filedict[myfile].append(
3537 cmirr.rstrip("/") + "/" + path)
3539 # now try the official mirrors
3540 if mirrorname in thirdpartymirrors:
3541 shuffle(thirdpartymirrors[mirrorname])
3543 uris = [locmirr.rstrip("/") + "/" + path \
3544 for locmirr in thirdpartymirrors[mirrorname]]
3545 filedict[myfile].extend(uris)
3546 thirdpartymirror_uris.setdefault(myfile, []).extend(uris)
3548 if not filedict[myfile]:
3549 writemsg("No known mirror by the name: %s\n" % (mirrorname))
3551 writemsg("Invalid mirror definition in SRC_URI:\n", noiselevel=-1)
3552 writemsg(" %s\n" % (myuri), noiselevel=-1)
3555 # Only fetch from specific mirrors is allowed.
3557 if "primaryuri" in restrict:
3558 # Use the source site first.
3559 if myfile in primaryuri_indexes:
3560 primaryuri_indexes[myfile] += 1
3562 primaryuri_indexes[myfile] = 0
3563 filedict[myfile].insert(primaryuri_indexes[myfile], myuri)
3565 filedict[myfile].append(myuri)
3566 primaryuris = primaryuri_dict.get(myfile)
3567 if primaryuris is None:
3569 primaryuri_dict[myfile] = primaryuris
3570 primaryuris.append(myuri)
3572 # Prefer thirdpartymirrors over normal mirrors in cases when
3573 # the file does not yet exist on the normal mirrors.
3574 for myfile, uris in thirdpartymirror_uris.iteritems():
3575 primaryuri_dict.setdefault(myfile, []).extend(uris)
3582 for var_name in ("FETCHCOMMAND", "RESUMECOMMAND"):
3583 if not mysettings.get(var_name, None):
# Ensure DISTDIR (and .locks when distlocks is enabled) exists with
# permissions that let the possibly-unprivileged fetch user write.
3586 if can_fetch and not fetch_to_ro:
3587 global _userpriv_test_write_file_cache
3591 dir_gid = portage_gid
3592 if "FAKED_MODE" in mysettings:
3593 # When inside fakeroot, directories with portage's gid appear
3594 # to have root's gid. Therefore, use root's gid instead of
3595 # portage's gid to avoid spurrious permissions adjustments
3596 # when inside fakeroot.
3599 if "distlocks" in features:
3600 distdir_dirs.append(".locks")
3603 for x in distdir_dirs:
3604 mydir = os.path.join(mysettings["DISTDIR"], x)
3605 write_test_file = os.path.join(
3606 mydir, ".__portage_test_write__")
3613 if st is not None and stat.S_ISDIR(st.st_mode):
3614 if not (userfetch or userpriv):
3616 if _userpriv_test_write_file(mysettings, write_test_file):
3619 _userpriv_test_write_file_cache.pop(write_test_file, None)
3620 if portage.util.ensure_dirs(mydir, gid=dir_gid, mode=dirmode, mask=modemask):
3622 # The directory has just been created
3623 # and therefore it must be empty.
3625 writemsg("Adjusting permissions recursively: '%s'\n" % mydir,
3628 raise # bail out on the first error that occurs during recursion
3629 if not apply_recursive_permissions(mydir,
3630 gid=dir_gid, dirmode=dirmode, dirmask=modemask,
3631 filemode=filemode, filemask=modemask, onerror=onerror):
3632 raise portage.exception.OperationNotPermitted(
3633 "Failed to apply recursive permissions for the portage group.")
3634 except portage.exception.PortageException, e:
3635 if not os.path.isdir(mysettings["DISTDIR"]):
3636 writemsg("!!! %s\n" % str(e), noiselevel=-1)
3637 writemsg("!!! Directory Not Found: DISTDIR='%s'\n" % mysettings["DISTDIR"], noiselevel=-1)
3638 writemsg("!!! Fetching will fail!\n", noiselevel=-1)
3641 not fetch_to_ro and \
3642 not os.access(mysettings["DISTDIR"], os.W_OK):
3643 writemsg("!!! No write access to '%s'\n" % mysettings["DISTDIR"],
# The lock subdirectory itself must be writable for per-file locks to work.
3647 if can_fetch and use_locks and locks_in_subdir:
3648 distlocks_subdir = os.path.join(mysettings["DISTDIR"], locks_in_subdir)
3649 if not os.access(distlocks_subdir, os.W_OK):
3650 writemsg("!!! No write access to write to %s. Aborting.\n" % distlocks_subdir,
3653 del distlocks_subdir
3655 distdir_writable = can_fetch and not fetch_to_ro
3656 failed_files = set()
3657 restrict_fetch_msg = False
# Main loop: try to obtain each requested distfile in turn.
3659 for myfile in filedict:
3663 1 partially downloaded
3664 2 completely downloaded
3668 orig_digests = mydigests.get(myfile, {})
3669 size = orig_digests.get("size")
3671 # Zero-byte distfiles are always invalid, so discard their digests.
3672 del mydigests[myfile]
3673 orig_digests.clear()
3675 pruned_digests = orig_digests
3676 if parallel_fetchonly:
3678 if size is not None:
3679 pruned_digests["size"] = size
3681 myfile_path = os.path.join(mysettings["DISTDIR"], myfile)
3685 writemsg_stdout("\n", noiselevel=-1)
3687 # check if there is enough space in DISTDIR to completely store myfile
3688 # overestimate the filesize so we aren't bitten by FS overhead
3689 if size is not None and hasattr(os, "statvfs"):
3690 vfs_stat = os.statvfs(mysettings["DISTDIR"])
3692 mysize = os.stat(myfile_path).st_size
3694 if e.errno != errno.ENOENT:
# NOTE(review): free space is f_bsize * f_bavail; the remaining-bytes
# comparison below depends on missing context lines -- confirm upstream.
3698 if (size - mysize + vfs_stat.f_bsize) >= \
3699 (vfs_stat.f_bsize * vfs_stat.f_bavail):
3700 writemsg("!!! Insufficient space to store %s in %s\n" % (myfile, mysettings["DISTDIR"]), noiselevel=-1)
# Take a per-file lock so concurrent fetchers don't clobber each other.
3703 if distdir_writable and use_locks:
3705 if not parallel_fetchonly and "parallel-fetch" in features:
3706 waiting_msg = ("Fetching '%s' " + \
3707 "in the background. " + \
3708 "To view fetch progress, run `tail -f " + \
3709 "/var/log/emerge-fetch.log` in another " + \
3710 "terminal.") % myfile
3711 msg_prefix = colorize("GOOD", " * ")
3712 from textwrap import wrap
3713 waiting_msg = "\n".join(msg_prefix + line \
3714 for line in wrap(waiting_msg, 65))
3717 lock_file = os.path.join(mysettings["DISTDIR"],
3718 locks_in_subdir, myfile)
3720 lock_file = myfile_path
3724 lock_kwargs["flags"] = os.O_NONBLOCK
3726 lock_kwargs["waiting_msg"] = waiting_msg
3729 file_lock = portage.locks.lockfile(myfile_path,
3730 wantnewlockfile=1, **lock_kwargs)
3731 except portage.exception.TryAgain:
3732 writemsg((">>> File '%s' is already locked by " + \
3733 "another fetcher. Continuing...\n") % myfile,
# Check any pre-existing copy in DISTDIR against the pruned digests.
3739 eout = portage.output.EOutput()
3740 eout.quiet = mysettings.get("PORTAGE_QUIET") == "1"
3741 match, mystat = _check_distfile(
3742 myfile_path, pruned_digests, eout)
3744 if distdir_writable:
3746 apply_secpass_permissions(myfile_path,
3747 gid=portage_gid, mode=0664, mask=02,
3749 except portage.exception.PortageException, e:
3750 if not os.access(myfile_path, os.R_OK):
3751 writemsg("!!! Failed to adjust permissions:" + \
3752 " %s\n" % str(e), noiselevel=-1)
3756 if distdir_writable and mystat is None:
3757 # Remove broken symlinks if necessary.
3759 os.unlink(myfile_path)
3763 if mystat is not None:
3764 if mystat.st_size == 0:
3765 if distdir_writable:
3767 os.unlink(myfile_path)
3770 elif distdir_writable:
3771 if mystat.st_size < fetch_resume_size and \
3772 mystat.st_size < size:
3773 writemsg((">>> Deleting distfile with size " + \
3774 "%d (smaller than " "PORTAGE_FETCH_RESU" + \
3775 "ME_MIN_SIZE)\n") % mystat.st_size)
3777 os.unlink(myfile_path)
3779 if e.errno != errno.ENOENT:
3782 elif mystat.st_size >= size:
# Wrong-size file at or above the expected size: rename it aside so a
# clean refetch can happen; _checksum_failure_temp_file picks the name.
3784 _checksum_failure_temp_file(
3785 mysettings["DISTDIR"], myfile)
3786 writemsg_stdout("Refetching... " + \
3787 "File renamed to '%s'\n\n" % \
3788 temp_filename, noiselevel=-1)
# Try read-only distdirs: if one holds a verified copy, symlink to it.
3790 if distdir_writable and ro_distdirs:
3791 readonly_file = None
3792 for x in ro_distdirs:
3793 filename = os.path.join(x, myfile)
3794 match, mystat = _check_distfile(
3795 filename, pruned_digests, eout)
3797 readonly_file = filename
3799 if readonly_file is not None:
3801 os.unlink(myfile_path)
3803 if e.errno != errno.ENOENT:
3806 os.symlink(readonly_file, myfile_path)
# Try filesystem mirrors by copying the file into DISTDIR.
3809 if fsmirrors and not os.path.exists(myfile_path) and has_space:
3810 for mydir in fsmirrors:
3811 mirror_file = os.path.join(mydir, myfile)
3813 shutil.copyfile(mirror_file, myfile_path)
3814 writemsg(_("Local mirror has file:" + \
3815 " %(file)s\n" % {"file":myfile}))
3817 except (IOError, OSError), e:
3818 if e.errno != errno.ENOENT:
3823 mystat = os.stat(myfile_path)
3825 if e.errno != errno.ENOENT:
3830 apply_secpass_permissions(
3831 myfile_path, gid=portage_gid, mode=0664, mask=02,
3833 except portage.exception.PortageException, e:
3834 if not os.access(myfile_path, os.R_OK):
3835 writemsg("!!! Failed to adjust permissions:" + \
3836 " %s\n" % str(e), noiselevel=-1)
3838 # If the file is empty then it's obviously invalid. Remove
3839 # the empty file and try to download if possible.
3840 if mystat.st_size == 0:
3841 if distdir_writable:
3843 os.unlink(myfile_path)
3844 except EnvironmentError:
3846 elif myfile not in mydigests:
3847 # We don't have a digest, but the file exists. We must
3848 # assume that it is fully downloaded.
3851 if mystat.st_size < mydigests[myfile]["size"] and \
3853 fetched = 1 # Try to resume this download.
3854 elif parallel_fetchonly and \
3855 mystat.st_size == mydigests[myfile]["size"]:
3856 eout = portage.output.EOutput()
3858 mysettings.get("PORTAGE_QUIET") == "1"
3860 "%s size ;-)" % (myfile, ))
# Full checksum verification of the pre-existing file.
3864 verified_ok, reason = portage.checksum.verify_all(
3865 myfile_path, mydigests[myfile])
3867 writemsg("!!! Previously fetched" + \
3868 " file: '%s'\n" % myfile, noiselevel=-1)
3869 writemsg("!!! Reason: %s\n" % reason[0],
3871 writemsg(("!!! Got: %s\n" + \
3872 "!!! Expected: %s\n") % \
3873 (reason[1], reason[2]), noiselevel=-1)
3874 if reason[0] == "Insufficient data for checksum verification":
3876 if distdir_writable:
3878 _checksum_failure_temp_file(
3879 mysettings["DISTDIR"], myfile)
3880 writemsg_stdout("Refetching... " + \
3881 "File renamed to '%s'\n\n" % \
3882 temp_filename, noiselevel=-1)
3884 eout = portage.output.EOutput()
3886 mysettings.get("PORTAGE_QUIET", None) == "1"
3887 digests = mydigests.get(myfile)
3889 digests = digests.keys()
3892 "%s %s ;-)" % (myfile, " ".join(digests)))
3894 continue # fetch any remaining files
3896 # Create a reversed list since that is optimal for list.pop().
3897 uri_list = filedict[myfile][:]
3899 checksum_failure_count = 0
3900 tried_locations = set()
3902 loc = uri_list.pop()
3903 # Eliminate duplicates here in case we've switched to
3904 # "primaryuri" mode on the fly due to a checksum failure.
3905 if loc in tried_locations:
3907 tried_locations.add(loc)
3909 writemsg_stdout(loc+" ", noiselevel=-1)
3911 # allow different fetchcommands per protocol
3912 protocol = loc[0:loc.find("://")]
3913 if "FETCHCOMMAND_" + protocol.upper() in mysettings:
3914 fetchcommand=mysettings["FETCHCOMMAND_"+protocol.upper()]
3916 fetchcommand=mysettings["FETCHCOMMAND"]
3917 if "RESUMECOMMAND_" + protocol.upper() in mysettings:
3918 resumecommand=mysettings["RESUMECOMMAND_"+protocol.upper()]
3920 resumecommand=mysettings["RESUMECOMMAND"]
3925 writemsg("!!! File %s isn't fetched but unable to get it.\n" % myfile,
3928 writemsg("!!! File %s isn't fully fetched, but unable to complete it\n" % myfile,
3930 for var_name in ("FETCHCOMMAND", "RESUMECOMMAND"):
3931 if not mysettings.get(var_name, None):
3932 writemsg(("!!! %s is unset. It should " + \
3933 "have been defined in /etc/make.globals.\n") \
3934 % var_name, noiselevel=-1)
3939 if fetched != 2 and has_space:
3940 #we either need to resume or start the download
3943 mystat = os.stat(myfile_path)
3945 if e.errno != errno.ENOENT:
3950 if mystat.st_size < fetch_resume_size:
3951 writemsg((">>> Deleting distfile with size " + \
3952 "%d (smaller than " "PORTAGE_FETCH_RESU" + \
3953 "ME_MIN_SIZE)\n") % mystat.st_size)
3955 os.unlink(myfile_path)
3957 if e.errno != errno.ENOENT:
3963 writemsg(">>> Resuming download...\n")
3964 locfetch=resumecommand
3967 locfetch=fetchcommand
# Mask any embedded password before echoing the URI to the user.
3968 writemsg_stdout(">>> Downloading '%s'\n" % \
3969 re.sub(r'//(.+):.+@(.+)/',r'//\1:*password*@\2/', loc))
3971 "DISTDIR": mysettings["DISTDIR"],
# Tokenize the fetch/resume command template and expand its variables.
3975 import shlex, StringIO
3976 lexer = shlex.shlex(StringIO.StringIO(locfetch), posix=True)
3977 lexer.whitespace_split = True
3978 myfetch = [varexpand(x, mydict=variables) for x in lexer]
3982 myret = _spawn_fetch(mysettings, myfetch)
3986 apply_secpass_permissions(myfile_path,
3987 gid=portage_gid, mode=0664, mask=02)
3988 except portage.exception.FileNotFound, e:
3990 except portage.exception.PortageException, e:
3991 if not os.access(myfile_path, os.R_OK):
3992 writemsg("!!! Failed to adjust permissions:" + \
3993 " %s\n" % str(e), noiselevel=-1)
3995 # If the file is empty then it's obviously invalid. Don't
3996 # trust the return value from the fetcher. Remove the
3997 # empty file and try to download again.
3999 if os.stat(myfile_path).st_size == 0:
4000 os.unlink(myfile_path)
4003 except EnvironmentError:
4006 if mydigests is not None and myfile in mydigests:
4008 mystat = os.stat(myfile_path)
4010 if e.errno != errno.ENOENT:
4015 # no exception? file exists. let digestcheck() report
4016 # an appropriately for size or checksum errors
4018 # If the fetcher reported success and the file is
4019 # too small, it's probably because the digest is
4020 # bad (upstream changed the distfile). In this
4021 # case we don't want to attempt to resume. Show a
4022 # digest verification failure to that the user gets
4023 # a clue about what just happened.
4024 if myret != os.EX_OK and \
4025 mystat.st_size < mydigests[myfile]["size"]:
4026 # Fetch failed... Try the next one... Kill 404 files though.
4027 if (mystat[stat.ST_SIZE]<100000) and (len(myfile)>4) and not ((myfile[-5:]==".html") or (myfile[-4:]==".htm")):
4028 html404=re.compile("<title>.*(not found|404).*</title>",re.I|re.M)
4029 if html404.search(open(mysettings["DISTDIR"]+"/"+myfile).read()):
4031 os.unlink(mysettings["DISTDIR"]+"/"+myfile)
4032 writemsg(">>> Deleting invalid distfile. (Improper 404 redirect from server.)\n")
4035 except (IOError, OSError):
4040 # File is the correct size--check the checksums for the fetched
4041 # file NOW, for those users who don't have a stable/continuous
4042 # net connection. This way we have a chance to try to download
4043 # from another mirror...
4044 verified_ok,reason = portage.checksum.verify_all(mysettings["DISTDIR"]+"/"+myfile, mydigests[myfile])
4047 writemsg("!!! Fetched file: "+str(myfile)+" VERIFY FAILED!\n",
4049 writemsg("!!! Reason: "+reason[0]+"\n",
4051 writemsg("!!! Got: %s\n!!! Expected: %s\n" % \
4052 (reason[1], reason[2]), noiselevel=-1)
4053 if reason[0] == "Insufficient data for checksum verification":
4056 _checksum_failure_temp_file(
4057 mysettings["DISTDIR"], myfile)
4058 writemsg_stdout("Refetching... " + \
4059 "File renamed to '%s'\n\n" % \
4060 temp_filename, noiselevel=-1)
4062 checksum_failure_count += 1
4063 if checksum_failure_count == \
4064 checksum_failure_primaryuri:
4065 # Switch to "primaryuri" mode in order
4066 # to increase the probablility of
4069 primaryuri_dict.get(myfile)
4072 reversed(primaryuris))
4073 if checksum_failure_count >= \
4074 checksum_failure_max_tries:
4077 eout = portage.output.EOutput()
4078 eout.quiet = mysettings.get("PORTAGE_QUIET", None) == "1"
4079 digests = mydigests.get(myfile)
4081 eout.ebegin("%s %s ;-)" % \
4082 (myfile, " ".join(sorted(digests))))
4090 elif mydigests!=None:
4091 writemsg("No digest file available and download failed.\n\n",
4094 if use_locks and file_lock:
4095 portage.locks.unlockfile(file_lock)
4098 writemsg_stdout("\n", noiselevel=-1)
# Fetch-restricted packages: emit the manual-download notice once, and run
# pkg_nofetch to let the ebuild tell the user how to obtain the files.
4100 if restrict_fetch and not restrict_fetch_msg:
4101 restrict_fetch_msg = True
4102 msg = ("\n!!! %s/%s" + \
4103 " has fetch restriction turned on.\n" + \
4104 "!!! This probably means that this " + \
4105 "ebuild's files must be downloaded\n" + \
4106 "!!! manually. See the comments in" + \
4107 " the ebuild for more information.\n\n") % \
4108 (mysettings["CATEGORY"], mysettings["PF"])
4109 portage.util.writemsg_level(msg,
4110 level=logging.ERROR, noiselevel=-1)
4111 have_builddir = "PORTAGE_BUILDDIR" in mysettings and \
4112 os.path.isdir(mysettings["PORTAGE_BUILDDIR"])
4113 if not parallel_fetchonly and have_builddir:
4114 # To spawn pkg_nofetch requires PORTAGE_BUILDDIR for
4115 # ensuring sane $PWD (bug #239560) and storing elog
4116 # messages. Therefore, calling code needs to ensure that
4117 # PORTAGE_BUILDDIR is already clean and locked here.
4119 # All the pkg_nofetch goes to stderr since it's considered
4120 # to be an error message.
4122 0 : sys.stdin.fileno(),
4123 1 : sys.stderr.fileno(),
4124 2 : sys.stderr.fileno(),
4127 ebuild_phase = mysettings.get("EBUILD_PHASE")
4129 mysettings["EBUILD_PHASE"] = "nofetch"
4130 spawn(_shell_quote(EBUILD_SH_BINARY) + \
4131 " nofetch", mysettings, fd_pipes=fd_pipes)
# Restore the previous EBUILD_PHASE value (or remove it if it was unset).
4133 if ebuild_phase is None:
4134 mysettings.pop("EBUILD_PHASE", None)
4136 mysettings["EBUILD_PHASE"] = ebuild_phase
4138 elif restrict_fetch:
4142 elif not filedict[myfile]:
4143 writemsg("Warning: No mirrors available for file" + \
4144 " '%s'\n" % (myfile), noiselevel=-1)
4146 writemsg("!!! Couldn't download '%s'. Aborting.\n" % myfile,
4152 failed_files.add(myfile)
4159 def digestgen(myarchives, mysettings, overwrite=1, manifestonly=0, myportdb=None):
4161 Generates a digest file if missing. Assumes all files are available.
4162 DEPRECATED: this now only is a compability wrapper for
4163 portage.manifest.Manifest()
4164 NOTE: manifestonly and overwrite are useless with manifest2 and
4165 are therefore ignored."""
4166 if myportdb is None:
4167 writemsg("Warning: myportdb not specified to digestgen\n")
# Exempt dependency phases from manifest checks while the Manifest is
# being (re)generated; balanced by the decrement at the end.
4170 global _doebuild_manifest_exempt_depend
4172 _doebuild_manifest_exempt_depend += 1
# Map each distfile name to the list of cpvs that reference it.
4174 fetchlist_dict = FetchlistDict(mysettings["O"], mysettings, myportdb)
4175 for cpv in fetchlist_dict:
4177 for myfile in fetchlist_dict[cpv]:
4178 distfiles_map.setdefault(myfile, []).append(cpv)
4179 except portage.exception.InvalidDependString, e:
4180 writemsg("!!! %s\n" % str(e), noiselevel=-1)
4183 mytree = os.path.dirname(os.path.dirname(mysettings["O"]))
4184 manifest1_compat = False
4185 mf = Manifest(mysettings["O"], mysettings["DISTDIR"],
4186 fetchlist_dict=fetchlist_dict, manifest1_compat=manifest1_compat)
4187 # Don't require all hashes since that can trigger excessive
4188 # fetches when sufficient digests already exist. To ease transition
4189 # while Manifest 1 is being removed, only require hashes that will
4190 # exist before and after the transition.
4191 required_hash_types = set()
4192 required_hash_types.add("size")
4193 required_hash_types.add(portage.const.MANIFEST2_REQUIRED_HASH)
4194 dist_hashes = mf.fhashdict.get("DIST", {})
# Collect distfiles whose recorded hashes are absent, incomplete, or
# obviously invalid (recorded size 0).
4195 missing_hashes = set()
4196 for myfile in distfiles_map:
4197 myhashes = dist_hashes.get(myfile)
4199 missing_hashes.add(myfile)
4201 if required_hash_types.difference(myhashes):
4202 missing_hashes.add(myfile)
4204 if myhashes["size"] == 0:
4205 missing_hashes.add(myfile)
# Of those, find the ones not present (or empty) in DISTDIR -- these must
# actually be fetched before their hashes can be computed.
4208 for myfile in missing_hashes:
4210 st = os.stat(os.path.join(mysettings["DISTDIR"], myfile))
4212 if e.errno != errno.ENOENT:
4215 missing_files.append(myfile)
4217 # If the file is empty then it's obviously invalid.
4219 missing_files.append(myfile)
4221 mytree = os.path.realpath(os.path.dirname(
4222 os.path.dirname(mysettings["O"])))
4223 fetch_settings = config(clone=mysettings)
4224 debug = mysettings.get("PORTAGE_DEBUG") == "1"
# Fetch each missing file, setting up the per-cpv ebuild environment so
# RESTRICT settings are honored by fetch().
4225 for myfile in missing_files:
4227 for cpv in distfiles_map[myfile]:
4228 myebuild = os.path.join(mysettings["O"],
4229 catsplit(cpv)[1] + ".ebuild")
4230 # for RESTRICT=fetch, mirror, etc...
4231 doebuild_environment(myebuild, "fetch",
4232 mysettings["ROOT"], fetch_settings,
4234 uri_map = myportdb.getFetchMap(cpv, mytree=mytree)
4235 myuris = {myfile:uri_map[myfile]}
4236 fetch_settings["A"] = myfile # for use by pkg_nofetch()
4237 if fetch(myuris, fetch_settings):
4241 writemsg(("!!! File %s doesn't exist, can't update " + \
4242 "Manifest\n") % myfile, noiselevel=-1)
4244 writemsg_stdout(">>> Creating Manifest for %s\n" % mysettings["O"])
4246 mf.create(requiredDistfiles=myarchives,
4247 assumeDistHashesSometimes=True,
4248 assumeDistHashesAlways=(
4249 "assume-digests" in mysettings.features))
4250 except portage.exception.FileNotFound, e:
4251 writemsg(("!!! File %s doesn't exist, can't update " + \
4252 "Manifest\n") % e, noiselevel=-1)
4254 except portage.exception.PortagePackageException, e:
4255 writemsg(("!!! %s\n") % (e,), noiselevel=-1)
4258 mf.write(sign=False)
4259 except portage.exception.PermissionDenied, e:
4260 writemsg("!!! Permission Denied: %s\n" % (e,), noiselevel=-1)
# Without assume-digests, report any DIST entries whose hashes had to be
# assumed because the file is not present in DISTDIR.
4262 if "assume-digests" not in mysettings.features:
4263 distlist = mf.fhashdict.get("DIST", {}).keys()
4266 for filename in distlist:
4267 if not os.path.exists(
4268 os.path.join(mysettings["DISTDIR"], filename)):
4269 auto_assumed.append(filename)
4271 mytree = os.path.realpath(
4272 os.path.dirname(os.path.dirname(mysettings["O"])))
4273 cp = os.path.sep.join(mysettings["O"].split(os.path.sep)[-2:])
4274 pkgs = myportdb.cp_list(cp, mytree=mytree)
4276 writemsg_stdout(" digest.assumed" + portage.output.colorize("WARN",
4277 str(len(auto_assumed)).rjust(18)) + "\n")
4278 for pkg_key in pkgs:
4279 fetchlist = myportdb.getFetchMap(pkg_key, mytree=mytree)
4280 pv = pkg_key.split("/")[1]
4281 for filename in auto_assumed:
4282 if filename in fetchlist:
4284 " %s::%s\n" % (pv, filename))
# Matching decrement for the exemption taken above.
4287 _doebuild_manifest_exempt_depend -= 1
4289 def digestParseFile(myfilename, mysettings=None):
4290 """(filename) -- Parses a given file for entries matching:
4291 <checksumkey> <checksum_hex_string> <filename> <filesize>
4292 Ignores lines that don't start with a valid checksum identifier
4293 and returns a dict with the filenames as keys and {checksumkey:checksum}
4295 DEPRECATED: this function is now only a compability wrapper for
4296 portage.manifest.Manifest()."""
# Derive the package directory from the given path: it is either a
# files/digest-* file or the package Manifest itself.
4298 mysplit = myfilename.split(os.sep)
4299 if mysplit[-2] == "files" and mysplit[-1].startswith("digest-"):
4300 pkgdir = os.sep + os.sep.join(mysplit[:-2]).strip(os.sep)
4301 elif mysplit[-1] == "Manifest":
4302 pkgdir = os.sep + os.sep.join(mysplit[:-1]).strip(os.sep)
4304 if mysettings is None:
4306 mysettings = config(clone=settings)
# Delegate the actual digest parsing to Manifest.
4308 return Manifest(pkgdir, mysettings["DISTDIR"]).getDigests()
4310 def digestcheck(myfiles, mysettings, strict=0, justmanifest=0):
4311 """Verifies checksums. Assumes all files have been downloaded.
4312 DEPRECATED: this is now only a compability wrapper for
4313 portage.manifest.Manifest()."""
4314 if mysettings.get("EBUILD_SKIP_MANIFEST") == "1":
4316 pkgdir = mysettings["O"]
4317 manifest_path = os.path.join(pkgdir, "Manifest")
4318 if not os.path.exists(manifest_path):
4319 writemsg("!!! Manifest file not found: '%s'\n" % manifest_path,
4325 mf = Manifest(pkgdir, mysettings["DISTDIR"])
# A Manifest with no entries of any type is treated as an error.
4326 manifest_empty = True
4327 for d in mf.fhashdict.itervalues():
4329 manifest_empty = False
4332 writemsg("!!! Manifest is empty: '%s'\n" % manifest_path,
4338 eout = portage.output.EOutput()
4339 eout.quiet = mysettings.get("PORTAGE_QUIET", None) == "1"
# In strict mode (outside parallel-fetch), also verify ebuild/aux/misc
# file hashes in addition to the requested distfiles.
4341 if strict and "PORTAGE_PARALLEL_FETCHONLY" not in mysettings:
4342 eout.ebegin("checking ebuild checksums ;-)")
4343 mf.checkTypeHashes("EBUILD")
4345 eout.ebegin("checking auxfile checksums ;-)")
4346 mf.checkTypeHashes("AUX")
4348 eout.ebegin("checking miscfile checksums ;-)")
4349 mf.checkTypeHashes("MISC", ignoreMissingFiles=True)
4352 eout.ebegin("checking %s ;-)" % f)
4353 mf.checkFileHashes(mf.findFile(f), f)
4357 writemsg("\n!!! Missing digest for %s\n" % str(e), noiselevel=-1)
4359 except portage.exception.FileNotFound, e:
4361 writemsg("\n!!! A file listed in the Manifest could not be found: %s\n" % str(e),
4364 except portage.exception.DigestException, e:
# e.value is (filename, hash name, got, expected) per the output below.
4366 writemsg("\n!!! Digest verification failed:\n", noiselevel=-1)
4367 writemsg("!!! %s\n" % e.value[0], noiselevel=-1)
4368 writemsg("!!! Reason: %s\n" % e.value[1], noiselevel=-1)
4369 writemsg("!!! Got: %s\n" % e.value[2], noiselevel=-1)
4370 writemsg("!!! Expected: %s\n" % e.value[3], noiselevel=-1)
4372 # Make sure that all of the ebuilds are actually listed in the Manifest.
4373 for f in os.listdir(pkgdir):
4374 if f.endswith(".ebuild") and not mf.hasFile("EBUILD", f):
4375 writemsg("!!! A file is not listed in the Manifest: '%s'\n" % \
4376 os.path.join(pkgdir, f), noiselevel=-1)
4379 """ epatch will just grab all the patches out of a directory, so we have to
4380 make sure there aren't any foreign files that it might grab."""
4381 filesdir = os.path.join(pkgdir, "files")
4382 for parent, dirs, files in os.walk(filesdir):
# Skip hidden directories/files and CVS metadata while walking files/.
4384 if d.startswith(".") or d == "CVS":
4387 if f.startswith("."):
4389 f = os.path.join(parent, f)[len(filesdir) + 1:]
4390 file_type = mf.findFile(f)
4391 if file_type != "AUX" and not f.startswith("digest-"):
4392 writemsg("!!! A file is not listed in the Manifest: '%s'\n" % \
4393 os.path.join(filesdir, f), noiselevel=-1)
4398 # parse actionmap to spawn ebuild with the appropriate args
4399 def spawnebuild(mydo, actionmap, mysettings, debug, alwaysdep=0,
4400 logfile=None, fd_pipes=None, returnpid=False):
# Recursively run the phase's "dep" prerequisite first, unless the caller
# asked for the child pid back or "noauto" disables it.
4401 if not returnpid and \
4402 (alwaysdep or "noauto" not in mysettings.features):
4403 # process dependency first
4404 if "dep" in actionmap[mydo]:
4405 retval = spawnebuild(actionmap[mydo]["dep"], actionmap,
4406 mysettings, debug, alwaysdep=alwaysdep, logfile=logfile,
4407 fd_pipes=fd_pipes, returnpid=returnpid)
# EAPIs predating the configure/prepare phases skip those phases.
4411 eapi = mysettings["EAPI"]
4413 if mydo == "configure" and eapi in ("0", "1", "2_pre1"):
4416 if mydo == "prepare" and eapi in ("0", "1", "2_pre1", "2_pre2"):
4419 kwargs = actionmap[mydo]["args"]
4420 mysettings["EBUILD_PHASE"] = mydo
4421 _doebuild_exit_status_unlink(
4422 mysettings.get("EBUILD_EXIT_STATUS_FILE"))
# Spawn the phase command; the %-template gets the phase name substituted.
4425 phase_retval = spawn(actionmap[mydo]["cmd"] % mydo,
4426 mysettings, debug=debug, logfile=logfile,
4427 fd_pipes=fd_pipes, returnpid=returnpid, **kwargs)
4429 mysettings["EBUILD_PHASE"] = ""
# Surface any exit-status-file message to the user via elog.
4433 msg = _doebuild_exit_status_check(mydo, mysettings)
4436 from textwrap import wrap
4437 from portage.elog.messages import eerror
4438 for l in wrap(msg, 72):
4439 eerror(l, phase=mydo, key=mysettings.mycpv)
# Post-phase fixups: hand $T back to the portage user; after install,
# scan the build log and run install-time QA checks.
4441 _post_phase_userpriv_perms(mysettings)
4442 if mydo == "install":
4443 _check_build_log(mysettings)
4444 if phase_retval == os.EX_OK:
4445 phase_retval = _post_src_install_checks(mysettings)
4448 _post_phase_cmds = {
# Maps an ebuild phase name to the misc_sh helper commands that run after
# it (consumed by _spawn_misc_sh via _post_src_install_checks).
4452 "install_symlink_html_docs"],
4457 "preinst_selinux_labels",
4458 "preinst_suid_scan",
4462 "postinst_bsdflags"]
4465 def _post_phase_userpriv_perms(mysettings):
4466 if "userpriv" in mysettings.features and secpass >= 2:
4467 """ Privileged phases may have left files that need to be made
4468 writable to a less privileged user."""
# Recursively hand ownership of $T to the portage user/group so that
# later unprivileged phases can write there.
4469 apply_recursive_permissions(mysettings["T"],
4470 uid=portage_uid, gid=portage_gid, dirmode=070, dirmask=0,
4471 filemode=060, filemask=0)
4473 def _post_src_install_checks(mysettings):
# Fix up ownership in the image directory, then run the install-phase QA
# commands via misc_sh; a failing QA check is reported and propagated.
4474 _post_src_install_uid_fix(mysettings)
4475 global _post_phase_cmds
4476 retval = _spawn_misc_sh(mysettings, _post_phase_cmds["install"])
4477 if retval != os.EX_OK:
4478 writemsg("!!! install_qa_check failed; exiting.\n",
4482 def _check_build_log(mysettings, out=None):
4484 Search the content of $PORTAGE_LOG_FILE if it exists
4485 and generate the following QA Notices when appropriate:
4487 * Automake "maintainer mode"
4489 * Unrecognized configure options
4491 logfile = mysettings.get("PORTAGE_LOG_FILE")
4495 f = open(logfile, 'rb')
4496 except EnvironmentError:
# Accumulator lists and compiled patterns, one pair per notice category.
4499 am_maintainer_mode = []
4500 bash_command_not_found = []
4501 bash_command_not_found_re = re.compile(
4502 r'(.*): line (\d*): (.*): command not found$')
4503 command_not_found_exclude_re = re.compile(r'/configure: line ')
4504 helper_missing_file = []
4505 helper_missing_file_re = re.compile(
4506 r'^!!! (do|new).*: .* does not exist$')
4508 configure_opts_warn = []
4509 configure_opts_warn_re = re.compile(
4510 r'^configure: WARNING: Unrecognized options: .*')
4511 am_maintainer_mode_re = re.compile(r'.*/missing --run .*')
4512 am_maintainer_mode_exclude_re = \
4513 re.compile(r'.*/missing --run (autoheader|makeinfo)')
4515 make_jobserver_re = \
4516 re.compile(r'g?make\[\d+\]: warning: jobserver unavailable:')
# Single pass over the log, collecting matching lines per category.
4521 if am_maintainer_mode_re.search(line) is not None and \
4522 am_maintainer_mode_exclude_re.search(line) is None:
4523 am_maintainer_mode.append(line.rstrip("\n"))
4525 if bash_command_not_found_re.match(line) is not None and \
4526 command_not_found_exclude_re.search(line) is None:
4527 bash_command_not_found.append(line.rstrip("\n"))
4529 if helper_missing_file_re.match(line) is not None:
4530 helper_missing_file.append(line.rstrip("\n"))
4532 if configure_opts_warn_re.match(line) is not None:
4533 configure_opts_warn.append(line.rstrip("\n"))
4535 if make_jobserver_re.match(line) is not None:
4536 make_jobserver.append(line.rstrip("\n"))
# Report each non-empty category as a QA warning through elog.
4541 from portage.elog.messages import eqawarn
4542 def _eqawarn(lines):
4544 eqawarn(line, phase="install", key=mysettings.mycpv, out=out)
4545 from textwrap import wrap
4548 if am_maintainer_mode:
4549 msg = ["QA Notice: Automake \"maintainer mode\" detected:"]
4551 msg.extend("\t" + line for line in am_maintainer_mode)
4554 "If you patch Makefile.am, " + \
4555 "configure.in, or configure.ac then you " + \
4556 "should use autotools.eclass and " + \
4557 "eautomake or eautoreconf. Exceptions " + \
4558 "are limited to system packages " + \
4559 "for which it is impossible to run " + \
4560 "autotools during stage building. " + \
4561 "See http://www.gentoo.org/p" + \
4562 "roj/en/qa/autofailure.xml for more information.",
4566 if bash_command_not_found:
4567 msg = ["QA Notice: command not found:"]
4569 msg.extend("\t" + line for line in bash_command_not_found)
4572 if helper_missing_file:
4573 msg = ["QA Notice: file does not exist:"]
# line[4:] strips the leading "!!! " prefix from the helper message.
4575 msg.extend("\t" + line[4:] for line in helper_missing_file)
4578 if configure_opts_warn:
4579 msg = ["QA Notice: Unrecognized configure options:"]
4581 msg.extend("\t" + line for line in configure_opts_warn)
4585 msg = ["QA Notice: make jobserver unavailable:"]
4587 msg.extend("\t" + line for line in make_jobserver)
def _post_src_install_uid_fix(mysettings):
	"""
	Files in $D with user and group bits that match the "portage"
	user or group are automatically mapped to PORTAGE_INST_UID and
	PORTAGE_INST_GID if necessary. The chown system call may clear
	S_ISUID and S_ISGID bits, so those bits are restored if
	"""
	inst_uid = int(mysettings["PORTAGE_INST_UID"])
	inst_gid = int(mysettings["PORTAGE_INST_GID"])
	# Walk the whole image directory, checking directories and files alike.
	for parent, dirs, files in os.walk(mysettings["D"]):
		for fname in chain(dirs, files):
			fpath = os.path.join(parent, fname)
			# lstat so symlinks themselves are examined, not their targets.
			mystat = os.lstat(fpath)
			if mystat.st_uid != portage_uid and \
				mystat.st_gid != portage_gid:
			if mystat.st_uid == portage_uid:
			if mystat.st_gid == portage_gid:
			# stat_cached avoids a redundant stat inside the helper;
			# mode is re-applied to restore bits chown may have cleared.
			apply_secpass_permissions(fpath, uid=myuid, gid=mygid,
				mode=mystat.st_mode, stat_cached=mystat,
def _post_pkg_preinst_cmd(mysettings):
	"""
	Post phase logic and tasks that have been factored out of
	ebuild.sh. Call preinst_mask last so that INSTALL_MASK can
	can be used to wipe out any gmon.out files created during
	previous functions (in case any tools were built with -pg
	"""
	# Use the configured PORTAGE_BIN_PATH rather than the compiled-in
	# constant (they differ while portage reinstalls itself).
	portage_bin_path = mysettings["PORTAGE_BIN_PATH"]
	misc_sh_binary = os.path.join(portage_bin_path,
		os.path.basename(MISC_SH_BINARY))
	# Clear the phase so misc-functions.sh does not inherit a stale one.
	mysettings["EBUILD_PHASE"] = ""
	global _post_phase_cmds
	# Build the argv: quoted script path followed by the preinst helpers.
	myargs = [_shell_quote(misc_sh_binary)] + _post_phase_cmds["preinst"]
def _post_pkg_postinst_cmd(mysettings):
	"""
	Post phase logic and tasks that have been factored out of
	"""
	# Mirrors _post_pkg_preinst_cmd, but for the postinst helper list.
	portage_bin_path = mysettings["PORTAGE_BIN_PATH"]
	misc_sh_binary = os.path.join(portage_bin_path,
		os.path.basename(MISC_SH_BINARY))
	# Clear the phase so misc-functions.sh does not inherit a stale one.
	mysettings["EBUILD_PHASE"] = ""
	global _post_phase_cmds
	myargs = [_shell_quote(misc_sh_binary)] + _post_phase_cmds["postinst"]
def _spawn_misc_sh(mysettings, commands, **kwargs):
	"""
	Run misc-functions.sh with the given list of helper functions.

	@param mysettings: the ebuild config
	@type mysettings: config
	@param commands: a list of function names to call in misc-functions.sh
	@type commands: list
	@returns: the return value from the spawn() call
	"""
	# Note: PORTAGE_BIN_PATH may differ from the global
	# constant when portage is reinstalling itself.
	portage_bin_path = mysettings["PORTAGE_BIN_PATH"]
	misc_sh_binary = os.path.join(portage_bin_path,
		os.path.basename(MISC_SH_BINARY))
	mycommand = " ".join([_shell_quote(misc_sh_binary)] + commands)
	# Remove any stale exit-status marker so a fresh one indicates
	# whether the shell completed cleanly.
	_doebuild_exit_status_unlink(
		mysettings.get("EBUILD_EXIT_STATUS_FILE"))
	debug = mysettings.get("PORTAGE_DEBUG") == "1"
	logfile = mysettings.get("PORTAGE_LOG_FILE")
	mydo = mysettings["EBUILD_PHASE"]
	rval = spawn(mycommand, mysettings, debug=debug,
		logfile=logfile, **kwargs)
	# Detect premature shell death and surface it via elog.
	msg = _doebuild_exit_status_check(mydo, mysettings)
	from textwrap import wrap
	from portage.elog.messages import eerror
	for l in wrap(msg, 72):
		eerror(l, phase=mydo, key=mysettings.mycpv)
4687 _deprecated_eapis = frozenset(["2_pre3", "2_pre2", "2_pre1"])
4689 def _eapi_is_deprecated(eapi):
4690 return eapi in _deprecated_eapis
def eapi_is_supported(eapi):
	# Normalize: metadata may hand us non-string or padded values.
	eapi = str(eapi).strip()
	# Deprecated EAPIs are still treated as usable here.
	if _eapi_is_deprecated(eapi):
	# NOTE(review): this comparison appears to depend on a numeric
	# conversion performed in lines not visible in this hunk -- as a
	# plain string comparison it would misorder multi-digit EAPIs.
	# Confirm against the full function before changing.
	return eapi <= portage.const.EAPI
def doebuild_environment(myebuild, mydo, myroot, mysettings, debug, use_cache, mydbapi):
	"""
	Populate mysettings with the environment variables an ebuild phase
	needs (P, PN, PV, PF, WORKDIR, D, T, PORTAGE_BUILDDIR, ...), derived
	from the ebuild path, the phase name and the dbapi.
	"""
	ebuild_path = os.path.abspath(myebuild)
	pkg_dir = os.path.dirname(ebuild_path)
	# Prefer the CATEGORY recorded in package metadata; fall back to the
	# grandparent directory name of the ebuild.
	if "CATEGORY" in mysettings.configdict["pkg"]:
		cat = mysettings.configdict["pkg"]["CATEGORY"]
		cat = os.path.basename(normalize_path(os.path.join(pkg_dir, "..")))
	# Strip the trailing ".ebuild" (7 characters) to get ${PF}.
	mypv = os.path.basename(ebuild_path)[:-7]
	mycpv = cat+"/"+mypv
	mysplit = pkgsplit(mypv, silent=0)
		raise portage.exception.IncorrectParameter(
			"Invalid ebuild path: '%s'" % myebuild)

	# Make a backup of PORTAGE_TMPDIR prior to calling config.reset()
	# so that the caller can override it.
	tmpdir = mysettings["PORTAGE_TMPDIR"]

	if mydo != "depend" and mycpv != mysettings.mycpv:
		"""For performance reasons, setcpv only triggers reset when it
		detects a package-specific change in config. For the ebuild
		environment, a reset call is forced in order to ensure that the
		latest env.d variables are used."""
		mysettings.reset(use_cache=use_cache)
		mysettings.setcpv(mycpv, use_cache=use_cache, mydb=mydbapi)

	# config.reset() might have reverted a change made by the caller,
	# so restore it to it's original value.
	mysettings["PORTAGE_TMPDIR"] = tmpdir

	mysettings.pop("EBUILD_PHASE", None) # remove from backupenv
	mysettings["EBUILD_PHASE"] = mydo

	mysettings["PORTAGE_MASTER_PID"] = str(os.getpid())

	# We are disabling user-specific bashrc files.
	mysettings["BASH_ENV"] = INVALID_ENV_FILE

	if debug: # Otherwise it overrides emerge's settings.
		# We have no other way to set debug... debug can't be passed in
		# due to how it's coded... Don't overwrite this so we can use it.
		mysettings["PORTAGE_DEBUG"] = "1"

	mysettings["ROOT"] = myroot
	mysettings["STARTDIR"] = getcwd()

	mysettings["PORTAGE_REPO_NAME"] = ""
	# bindbapi has no getRepositories() method
	if mydbapi and hasattr(mydbapi, "getRepositories"):
		# do we have a origin repository name for the current package
		repopath = os.sep.join(pkg_dir.split(os.path.sep)[:-2])
		for reponame in mydbapi.getRepositories():
			if mydbapi.getRepositoryPath(reponame) == repopath:
				mysettings["PORTAGE_REPO_NAME"] = reponame

	mysettings["EBUILD"] = ebuild_path
	mysettings["O"] = pkg_dir
	mysettings.configdict["pkg"]["CATEGORY"] = cat
	mysettings["FILESDIR"] = pkg_dir+"/files"
	mysettings["PF"] = mypv

	# Canonicalize tree paths for the sandbox.
	mysettings["PORTDIR"] = os.path.realpath(mysettings["PORTDIR"])
	mysettings["DISTDIR"] = os.path.realpath(mysettings["DISTDIR"])
	mysettings["RPMDIR"] = os.path.realpath(mysettings["RPMDIR"])

	mysettings["ECLASSDIR"] = mysettings["PORTDIR"]+"/eclass"
	mysettings["SANDBOX_LOG"] = mycpv.replace("/", "_-_")

	mysettings["PROFILE_PATHS"] = "\n".join(mysettings.profiles)
	# Standard name components from pkgsplit: (PN, PV, PR).
	mysettings["P"] = mysplit[0]+"-"+mysplit[1]
	mysettings["PN"] = mysplit[0]
	mysettings["PV"] = mysplit[1]
	mysettings["PR"] = mysplit[2]

	if portage.util.noiselimit < 0:
		mysettings["PORTAGE_QUIET"] = "1"

	if mydo != "depend":
		# Metadata vars such as EAPI and RESTRICT are
		# set by the above config.setcpv() call.
		eapi = mysettings["EAPI"]
		if not eapi_is_supported(eapi):
			# can't do anything with this.
			raise portage.exception.UnsupportedAPIException(mycpv, eapi)
			# Evaluate RESTRICT with the active USE flags.
			mysettings["PORTAGE_RESTRICT"] = " ".join(flatten(
				portage.dep.use_reduce(portage.dep.paren_reduce(
				mysettings["RESTRICT"]),
				uselist=mysettings["PORTAGE_USE"].split())))
		except portage.exception.InvalidDependString:
			# RESTRICT is validated again inside doebuild, so let this go
			mysettings["PORTAGE_RESTRICT"] = ""

	# PVR omits the revision suffix for -r0.
	if mysplit[2] == "r0":
		mysettings["PVR"]=mysplit[1]
		mysettings["PVR"]=mysplit[1]+"-"+mysplit[2]

	if "PATH" in mysettings:
		mysplit=mysettings["PATH"].split(":")
	# Note: PORTAGE_BIN_PATH may differ from the global constant
	# when portage is reinstalling itself.
	portage_bin_path = mysettings["PORTAGE_BIN_PATH"]
	if portage_bin_path not in mysplit:
		mysettings["PATH"] = portage_bin_path + ":" + mysettings["PATH"]

	# Sandbox needs cannonical paths.
	mysettings["PORTAGE_TMPDIR"] = os.path.realpath(
		mysettings["PORTAGE_TMPDIR"])
	mysettings["BUILD_PREFIX"] = mysettings["PORTAGE_TMPDIR"]+"/portage"
	mysettings["PKG_TMPDIR"] = mysettings["PORTAGE_TMPDIR"]+"/binpkgs"

	# Package {pre,post}inst and {pre,post}rm may overlap, so they must have separate
	# locations in order to prevent interference.
	if mydo in ("unmerge", "prerm", "postrm", "cleanrm"):
		mysettings["PORTAGE_BUILDDIR"] = os.path.join(
			mysettings["PKG_TMPDIR"],
			mysettings["CATEGORY"], mysettings["PF"])
		mysettings["PORTAGE_BUILDDIR"] = os.path.join(
			mysettings["BUILD_PREFIX"],
			mysettings["CATEGORY"], mysettings["PF"])

	mysettings["HOME"] = os.path.join(mysettings["PORTAGE_BUILDDIR"], "homedir")
	mysettings["WORKDIR"] = os.path.join(mysettings["PORTAGE_BUILDDIR"], "work")
	mysettings["D"] = os.path.join(mysettings["PORTAGE_BUILDDIR"], "image") + os.sep
	mysettings["T"] = os.path.join(mysettings["PORTAGE_BUILDDIR"], "temp")

	mysettings["PORTAGE_BASHRC"] = os.path.join(
		mysettings["PORTAGE_CONFIGROOT"], EBUILD_SH_ENV_FILE.lstrip(os.path.sep))
	# Marker file created by ebuild.sh on clean exit; checked afterwards
	# by _doebuild_exit_status_check().
	mysettings["EBUILD_EXIT_STATUS_FILE"] = os.path.join(
		mysettings["PORTAGE_BUILDDIR"], ".exit_status")

	#set up KV variable -- DEP SPEEDUP :: Don't waste time. Keep var persistent.
	if mydo != "depend" and "KV" not in mysettings:
		mykv,err1=ExtractKernelVersion(os.path.join(myroot, "usr/src/linux"))
			# Regular source tree
			mysettings["KV"]=mykv
		mysettings.backup_changes("KV")

	# Allow color.map to control colors associated with einfo, ewarn, etc...
	for c in ("GOOD", "WARN", "BAD", "HILITE", "BRACKET"):
		mycolors.append("%s=$'%s'" % (c, portage.output.codes[c]))
	mysettings["PORTAGE_COLORMAP"] = "\n".join(mycolors)
def prepare_build_dirs(myroot, mysettings, cleanup):
	"""
	Create (and optionally clean) the temporary build directory tree
	for the current package, applying portage user/group permissions.
	"""
	clean_dirs = [mysettings["HOME"]]

	# We enable cleanup when we want to make sure old cruft (such as the old
	# environment) doesn't interfere with the current phase.
	clean_dirs.append(mysettings["T"])

	for clean_dir in clean_dirs:
		shutil.rmtree(clean_dir)
		# A missing directory is fine; permission errors are reported
		# rather than raised.
		if errno.ENOENT == oe.errno:
		elif errno.EPERM == oe.errno:
			writemsg("%s\n" % oe, noiselevel=-1)
			writemsg("Operation Not Permitted: rmtree('%s')\n" % \
				clean_dir, noiselevel=-1)

	def makedirs(dir_path):
		# Local helper: create dir_path, tolerating an already-existing
		# directory and reporting permission problems instead of raising.
		os.makedirs(dir_path)
		if errno.EEXIST == oe.errno:
		elif errno.EPERM == oe.errno:
			writemsg("%s\n" % oe, noiselevel=-1)
			writemsg("Operation Not Permitted: makedirs('%s')\n" % \
				dir_path, noiselevel=-1)

	mysettings["PKG_LOGDIR"] = os.path.join(mysettings["T"], "logging")

	# The two parent directories of PORTAGE_BUILDDIR (category dir and
	# the build prefix), deepest first.
	mydirs = [os.path.dirname(mysettings["PORTAGE_BUILDDIR"])]
	mydirs.append(os.path.dirname(mydirs[-1]))

	for mydir in mydirs:
		portage.util.ensure_dirs(mydir)
		portage.util.apply_secpass_permissions(mydir,
			gid=portage_gid, uid=portage_uid, mode=070, mask=0)
	for dir_key in ("PORTAGE_BUILDDIR", "HOME", "PKG_LOGDIR", "T"):
		"""These directories don't necessarily need to be group writable.
		However, the setup phase is commonly run as a privileged user prior
		to the other phases being run by an unprivileged user. Currently,
		we use the portage group to ensure that the unprivleged user still
		has write access to these directories in any case."""
		portage.util.ensure_dirs(mysettings[dir_key], mode=0775)
		portage.util.apply_secpass_permissions(mysettings[dir_key],
			uid=portage_uid, gid=portage_gid)
	except portage.exception.PermissionDenied, e:
		writemsg("Permission Denied: %s\n" % str(e), noiselevel=-1)
	except portage.exception.OperationNotPermitted, e:
		writemsg("Operation Not Permitted: %s\n" % str(e), noiselevel=-1)
	except portage.exception.FileNotFound, e:
		writemsg("File Not Found: '%s'\n" % str(e), noiselevel=-1)

	_prepare_workdir(mysettings)
	_prepare_features_dirs(mysettings)
def _adjust_perms_msg(settings, msg):
	# Emit a permissions-adjustment warning to the console, and also
	# append it to PORTAGE_LOG_FILE when running in the background.
	writemsg(msg, noiselevel=-1)

	background = settings.get("PORTAGE_BACKGROUND") == "1"
	log_path = settings.get("PORTAGE_LOG_FILE")

	if background and log_path is not None:
		log_file = open(log_path, 'a')
	# Close the log file if one was opened above.
	if log_file is not None:
def _prepare_features_dirs(mysettings):
	# Per-feature directory specs: "basedir_var" names the config variable
	# holding the base directory, "default_dir" is used when that variable
	# is unset, optional "subdirs" are created inside the base, and
	# "always_recurse" forces recursive permission adjustment even when
	# the directory already existed.
		"basedir_var":"CCACHE_DIR",
		"default_dir":os.path.join(mysettings["PORTAGE_TMPDIR"], "ccache"),
		"always_recurse":False},
		"basedir_var":"DISTCC_DIR",
		"default_dir":os.path.join(mysettings["BUILD_PREFIX"], ".distcc"),
		"subdirs":("lock", "state"),
		"always_recurse":True}
	restrict = mysettings.get("PORTAGE_RESTRICT","").split()
	from portage.data import secpass
	# Will the build drop privileges to the portage user?
	droppriv = secpass >= 2 and \
		"userpriv" in mysettings.features and \
		"userpriv" not in restrict
	for myfeature, kwargs in features_dirs.iteritems():
		if myfeature in mysettings.features:
			basedir = mysettings[kwargs["basedir_var"]]
				basedir = kwargs["default_dir"]
				mysettings[kwargs["basedir_var"]] = basedir
			mydirs = [mysettings[kwargs["basedir_var"]]]
			if "subdirs" in kwargs:
				for subdir in kwargs["subdirs"]:
					mydirs.append(os.path.join(basedir, subdir))
			for mydir in mydirs:
				modified = portage.util.ensure_dirs(mydir)
				# Generally, we only want to apply permissions for
				# initial creation.  Otherwise, we don't know exactly what
				# permissions the user wants, so should leave them as-is.
				droppriv_fix = False
					# Directory group/mode does not match what userpriv
					# builds require.
					if st.st_gid != portage_gid or \
						not dirmode == (stat.S_IMODE(st.st_mode) & dirmode):
					if not droppriv_fix:
						# Check permissions of files in the directory.
						for filename in os.listdir(mydir):
							subdir_st = os.lstat(
								os.path.join(mydir, filename))
							if subdir_st.st_gid != portage_gid or \
								((stat.S_ISDIR(subdir_st.st_mode) and \
								not dirmode == (stat.S_IMODE(subdir_st.st_mode) & dirmode))):
					_adjust_perms_msg(mysettings,
						colorize("WARN", " * ") + \
						"Adjusting permissions " + \
						"for FEATURES=userpriv: '%s'\n" % mydir)
					_adjust_perms_msg(mysettings,
						colorize("WARN", " * ") + \
						"Adjusting permissions " + \
						"for FEATURES=%s: '%s'\n" % (myfeature, mydir))
				if modified or kwargs["always_recurse"] or droppriv_fix:
						raise # The feature is disabled if a single error
							# occurs during permissions adjustment.
					if not apply_recursive_permissions(mydir,
						gid=portage_gid, dirmode=dirmode, dirmask=modemask,
						filemode=filemode, filemask=modemask, onerror=onerror):
						raise portage.exception.OperationNotPermitted(
							"Failed to apply recursive permissions for the portage group.")
		except portage.exception.PortageException, e:
			# Any failure disables the feature for this run and reports why.
			mysettings.features.remove(myfeature)
			mysettings["FEATURES"] = " ".join(mysettings.features)
			writemsg("!!! %s\n" % str(e), noiselevel=-1)
			writemsg("!!! Failed resetting perms on %s='%s'\n" % \
				(kwargs["basedir_var"], basedir), noiselevel=-1)
			writemsg("!!! Disabled FEATURES='%s'\n" % myfeature,
def _prepare_workdir(mysettings):
	"""
	Apply PORTAGE_WORKDIR_MODE to WORKDIR and set up build logging
	(PORT_LOGDIR / PORTAGE_LOG_FILE).
	"""
	mode = mysettings["PORTAGE_WORKDIR_MODE"]
	# PORTAGE_WORKDIR_MODE is an octal string; validate it strictly.
	parsed_mode = int(mode, 8)
	if parsed_mode & 07777 != parsed_mode:
		raise ValueError("Invalid file mode: %s" % mode)
	workdir_mode = parsed_mode
	writemsg("!!! PORTAGE_WORKDIR_MODE is unset, using %s.\n" % oct(workdir_mode))
	except ValueError, e:
		# Fall back to the default mode and tell the user why.
		writemsg("%s\n" % e)
		writemsg("!!! Unable to parse PORTAGE_WORKDIR_MODE='%s', using %s.\n" % \
			(mysettings["PORTAGE_WORKDIR_MODE"], oct(workdir_mode)))
	# Store the normalized octal form back into the config.
	mysettings["PORTAGE_WORKDIR_MODE"] = oct(workdir_mode)
	apply_secpass_permissions(mysettings["WORKDIR"],
		uid=portage_uid, gid=portage_gid, mode=workdir_mode)
	except portage.exception.FileNotFound:
		pass # ebuild.sh will create it

	# An empty PORT_LOGDIR means logging to PORT_LOGDIR is disabled.
	if mysettings.get("PORT_LOGDIR", "") == "":
		while "PORT_LOGDIR" in mysettings:
			del mysettings["PORT_LOGDIR"]
	if "PORT_LOGDIR" in mysettings:
		modified = portage.util.ensure_dirs(mysettings["PORT_LOGDIR"])
		apply_secpass_permissions(mysettings["PORT_LOGDIR"],
			uid=portage_uid, gid=portage_gid, mode=02770)
		except portage.exception.PortageException, e:
			# Unusable log directory: disable logging instead of failing.
			writemsg("!!! %s\n" % str(e), noiselevel=-1)
			writemsg("!!! Permission issues with PORT_LOGDIR='%s'\n" % \
				mysettings["PORT_LOGDIR"], noiselevel=-1)
			writemsg("!!! Disabling logging.\n", noiselevel=-1)
			while "PORT_LOGDIR" in mysettings:
				del mysettings["PORT_LOGDIR"]
	if "PORT_LOGDIR" in mysettings and \
		os.access(mysettings["PORT_LOGDIR"], os.W_OK):
		# The .logid file's mtime gives every build of this package a
		# stable timestamp for its log file name.
		logid_path = os.path.join(mysettings["PORTAGE_BUILDDIR"], ".logid")
		if not os.path.exists(logid_path):
			f = open(logid_path, "w")
		logid_time = time.strftime("%Y%m%d-%H%M%S",
			time.gmtime(os.stat(logid_path).st_mtime))
		mysettings["PORTAGE_LOG_FILE"] = os.path.join(
			mysettings["PORT_LOGDIR"], "%s:%s:%s.log" % \
			(mysettings["CATEGORY"], mysettings["PF"], logid_time))
		del logid_path, logid_time
	# When sesandbox is enabled, only log if PORT_LOGDIR is explicitly
	# enabled since it is possible that local SELinux security policies
	# do not allow ouput to be piped out of the sesandbox domain.
	if not (mysettings.selinux_enabled() and \
		"sesandbox" in mysettings.features):
		mysettings["PORTAGE_LOG_FILE"] = os.path.join(
			mysettings["T"], "build.log")
def _doebuild_exit_status_check(mydo, settings):
	"""
	Returns an error string if the shell appeared
	to exit unsuccessfully, None otherwise.
	"""
	exit_status_file = settings.get("EBUILD_EXIT_STATUS_FILE")
	# The marker file is created by the shell on clean exit, so its
	# presence means everything is fine (and no file configured means
	# there is nothing to check).
	if not exit_status_file or \
		os.path.exists(exit_status_file):
	msg = ("The ebuild phase '%s' has exited " % mydo) + \
		"unexpectedly. This type of behavior " + \
		"is known to be triggered " + \
		"by things such as failed variable " + \
		"assignments (bug #190128) or bad substitution " + \
		"errors (bug #200313)."
def _doebuild_exit_status_check_and_log(settings, mydo, retval):
	# Convenience wrapper: on failure, run the premature-exit check and
	# route any resulting message through elog as errors.
	if retval != os.EX_OK:
		msg = _doebuild_exit_status_check(mydo, settings)
		from textwrap import wrap
		from portage.elog.messages import eerror
		for l in wrap(msg, 72):
			eerror(l, phase=mydo, key=settings.mycpv)
def _doebuild_exit_status_unlink(exit_status_file):
	"""
	Double check to make sure it really doesn't exist
	and raise an OSError if it still does (it shouldn't).
	OSError if necessary.
	"""
	# Nothing to do when no exit status file is configured.
	if not exit_status_file:
	os.unlink(exit_status_file)
	# Second attempt is deliberately unguarded so a persistent file
	# surfaces as an OSError.
	if os.path.exists(exit_status_file):
		os.unlink(exit_status_file)
# Module-level state shared by doebuild() invocations.
# Non-zero while Manifest-exempt work (digest/manifest/help handling) is
# in progress -- see the increment inside doebuild().
_doebuild_manifest_exempt_depend = 0
# Most recently verified Manifest object, reused across consecutive
# doebuild() calls for the same package directory.
_doebuild_manifest_cache = None
# Ebuilds and Manifests that already failed verification, cached so the
# same error is not reported repeatedly.
_doebuild_broken_ebuilds = set()
_doebuild_broken_manifests = set()
5159 def doebuild(myebuild, mydo, myroot, mysettings, debug=0, listonly=0,
5160 fetchonly=0, cleanup=0, dbkey=None, use_cache=1, fetchall=0, tree=None,
5161 mydbapi=None, vartree=None, prev_mtimes=None,
5162 fd_pipes=None, returnpid=False):
5165 Wrapper function that invokes specific ebuild phases through the spawning
5168 @param myebuild: name of the ebuild to invoke the phase on (CPV)
5169 @type myebuild: String
5170 @param mydo: Phase to run
5172 @param myroot: $ROOT (usually '/', see man make.conf)
5173 @type myroot: String
5174 @param mysettings: Portage Configuration
5175 @type mysettings: instance of portage.config
5176 @param debug: Turns on various debug information (eg, debug for spawn)
5177 @type debug: Boolean
5178 @param listonly: Used to wrap fetch(); passed such that fetch only lists files required.
5179 @type listonly: Boolean
5180 @param fetchonly: Used to wrap fetch(); passed such that files are only fetched (no other actions)
5181 @type fetchonly: Boolean
5182 @param cleanup: Passed to prepare_build_dirs (TODO: what does it do?)
5183 @type cleanup: Boolean
5184 @param dbkey: A dict (usually keys and values from the depend phase, such as KEYWORDS, USE, etc..)
5185 @type dbkey: Dict or String
5186 @param use_cache: Enables the cache
5187 @type use_cache: Boolean
5188 @param fetchall: Used to wrap fetch(), fetches all URI's (even ones invalid due to USE conditionals)
5189 @type fetchall: Boolean
5190 @param tree: Which tree to use ('vartree','porttree','bintree', etc..), defaults to 'porttree'
5192 @param mydbapi: a dbapi instance to pass to various functions; this should be a portdbapi instance.
5193 @type mydbapi: portdbapi instance
5194 @param vartree: A instance of vartree; used for aux_get calls, defaults to db[myroot]['vartree']
5195 @type vartree: vartree instance
5196 @param prev_mtimes: A dict of { filename:mtime } keys used by merge() to do config_protection
5197 @type prev_mtimes: dictionary
5203 Most errors have an accompanying error message.
5205 listonly and fetchonly are only really necessary for operations involving 'fetch'
5206 prev_mtimes are only necessary for merge operations.
5207 Other variables may not be strictly required, many have defaults that are set inside of doebuild.
5212 writemsg("Warning: tree not specified to doebuild\n")
5216 # chunked out deps for each phase, so that ebuild binary can use it
5217 # to collapse targets down.
5220 "unpack": ["setup"],
5221 "prepare": ["unpack"],
5222 "configure": ["prepare"],
5223 "compile":["configure"],
5224 "test": ["compile"],
5227 "package":["install"],
5231 mydbapi = db[myroot][tree].dbapi
5233 if vartree is None and mydo in ("merge", "qmerge", "unmerge"):
5234 vartree = db[myroot]["vartree"]
5236 features = mysettings.features
5237 noauto = "noauto" in features
5238 from portage.data import secpass
5240 clean_phases = ("clean", "cleanrm")
5241 validcommands = ["help","clean","prerm","postrm","cleanrm","preinst","postinst",
5242 "config", "info", "setup", "depend",
5243 "fetch", "fetchall", "digest",
5244 "unpack", "prepare", "configure", "compile", "test",
5245 "install", "rpm", "qmerge", "merge",
5246 "package","unmerge", "manifest"]
5248 if mydo not in validcommands:
5249 validcommands.sort()
5250 writemsg("!!! doebuild: '%s' is not one of the following valid commands:" % mydo,
5252 for vcount in range(len(validcommands)):
5254 writemsg("\n!!! ", noiselevel=-1)
5255 writemsg(validcommands[vcount].ljust(11), noiselevel=-1)
5256 writemsg("\n", noiselevel=-1)
5259 if mydo == "fetchall":
5263 if mydo not in clean_phases and not os.path.exists(myebuild):
5264 writemsg("!!! doebuild: %s not found for %s\n" % (myebuild, mydo),
5268 global _doebuild_manifest_exempt_depend
5270 if "strict" in features and \
5271 "digest" not in features and \
5272 tree == "porttree" and \
5273 mydo not in ("digest", "manifest", "help") and \
5274 not _doebuild_manifest_exempt_depend:
5275 # Always verify the ebuild checksums before executing it.
5276 global _doebuild_manifest_cache, _doebuild_broken_ebuilds, \
5277 _doebuild_broken_ebuilds
5279 if myebuild in _doebuild_broken_ebuilds:
5282 pkgdir = os.path.dirname(myebuild)
5283 manifest_path = os.path.join(pkgdir, "Manifest")
5285 # Avoid checking the same Manifest several times in a row during a
5286 # regen with an empty cache.
5287 if _doebuild_manifest_cache is None or \
5288 _doebuild_manifest_cache.getFullname() != manifest_path:
5289 _doebuild_manifest_cache = None
5290 if not os.path.exists(manifest_path):
5291 out = portage.output.EOutput()
5292 out.eerror("Manifest not found for '%s'" % (myebuild,))
5293 _doebuild_broken_ebuilds.add(myebuild)
5295 mf = Manifest(pkgdir, mysettings["DISTDIR"])
5298 mf = _doebuild_manifest_cache
5301 mf.checkFileHashes("EBUILD", os.path.basename(myebuild))
5303 out = portage.output.EOutput()
5304 out.eerror("Missing digest for '%s'" % (myebuild,))
5305 _doebuild_broken_ebuilds.add(myebuild)
5307 except portage.exception.FileNotFound:
5308 out = portage.output.EOutput()
5309 out.eerror("A file listed in the Manifest " + \
5310 "could not be found: '%s'" % (myebuild,))
5311 _doebuild_broken_ebuilds.add(myebuild)
5313 except portage.exception.DigestException, e:
5314 out = portage.output.EOutput()
5315 out.eerror("Digest verification failed:")
5316 out.eerror("%s" % e.value[0])
5317 out.eerror("Reason: %s" % e.value[1])
5318 out.eerror("Got: %s" % e.value[2])
5319 out.eerror("Expected: %s" % e.value[3])
5320 _doebuild_broken_ebuilds.add(myebuild)
5323 if mf.getFullname() in _doebuild_broken_manifests:
5326 if mf is not _doebuild_manifest_cache:
5328 # Make sure that all of the ebuilds are
5329 # actually listed in the Manifest.
5330 for f in os.listdir(pkgdir):
5331 if f.endswith(".ebuild") and not mf.hasFile("EBUILD", f):
5332 f = os.path.join(pkgdir, f)
5333 if f not in _doebuild_broken_ebuilds:
5334 out = portage.output.EOutput()
5335 out.eerror("A file is not listed in the " + \
5336 "Manifest: '%s'" % (f,))
5337 _doebuild_broken_manifests.add(manifest_path)
5340 # Only cache it if the above stray files test succeeds.
5341 _doebuild_manifest_cache = mf
5343 def exit_status_check(retval):
5344 if retval != os.EX_OK:
5346 msg = _doebuild_exit_status_check(mydo, mysettings)
5349 from textwrap import wrap
5350 from portage.elog.messages import eerror
5351 for l in wrap(msg, 72):
5352 eerror(l, phase=mydo, key=mysettings.mycpv)
5355 # Note: PORTAGE_BIN_PATH may differ from the global
5356 # constant when portage is reinstalling itself.
5357 portage_bin_path = mysettings["PORTAGE_BIN_PATH"]
5358 ebuild_sh_binary = os.path.join(portage_bin_path,
5359 os.path.basename(EBUILD_SH_BINARY))
5360 misc_sh_binary = os.path.join(portage_bin_path,
5361 os.path.basename(MISC_SH_BINARY))
5364 builddir_lock = None
5369 if mydo in ("digest", "manifest", "help"):
5370 # Temporarily exempt the depend phase from manifest checks, in case
5371 # aux_get calls trigger cache generation.
5372 _doebuild_manifest_exempt_depend += 1
5374 # If we don't need much space and we don't need a constant location,
5375 # we can temporarily override PORTAGE_TMPDIR with a random temp dir
5376 # so that there's no need for locking and it can be used even if the
5377 # user isn't in the portage group.
5378 if mydo in ("info",):
5379 from tempfile import mkdtemp
5381 tmpdir_orig = mysettings["PORTAGE_TMPDIR"]
5382 mysettings["PORTAGE_TMPDIR"] = tmpdir
5384 doebuild_environment(myebuild, mydo, myroot, mysettings, debug,
5387 if mydo in clean_phases:
5388 retval = spawn(_shell_quote(ebuild_sh_binary) + " clean",
5389 mysettings, debug=debug, fd_pipes=fd_pipes, free=1,
5390 logfile=None, returnpid=returnpid)
5393 # get possible slot information from the deps file
5394 if mydo == "depend":
5395 writemsg("!!! DEBUG: dbkey: %s\n" % str(dbkey), 2)
5396 droppriv = "userpriv" in mysettings.features
5398 mypids = spawn(_shell_quote(ebuild_sh_binary) + " depend",
5399 mysettings, fd_pipes=fd_pipes, returnpid=True,
5402 elif isinstance(dbkey, dict):
5403 mysettings["dbkey"] = ""
5406 0:sys.stdin.fileno(),
5407 1:sys.stdout.fileno(),
5408 2:sys.stderr.fileno(),
5410 mypids = spawn(_shell_quote(ebuild_sh_binary) + " depend",
5412 fd_pipes=fd_pipes, returnpid=True, droppriv=droppriv)
5413 os.close(pw) # belongs exclusively to the child process now
5417 mybytes.append(os.read(pr, maxbytes))
5421 mybytes = "".join(mybytes)
5423 for k, v in izip(auxdbkeys, mybytes.splitlines()):
5425 retval = os.waitpid(mypids[0], 0)[1]
5426 portage.process.spawned_pids.remove(mypids[0])
5427 # If it got a signal, return the signal that was sent, but
5428 # shift in order to distinguish it from a return value. (just
5429 # like portage.process.spawn() would do).
5431 return (retval & 0xff) << 8
5432 # Otherwise, return its exit code.
5435 mysettings["dbkey"] = dbkey
5437 mysettings["dbkey"] = \
5438 os.path.join(mysettings.depcachedir, "aux_db_key_temp")
5440 return spawn(_shell_quote(ebuild_sh_binary) + " depend",
5444 # Validate dependency metadata here to ensure that ebuilds with invalid
5445 # data are never installed via the ebuild command. Don't bother when
5446 # returnpid == True since there's no need to do this every time emerge
5449 rval = _validate_deps(mysettings, myroot, mydo, mydbapi)
5450 if rval != os.EX_OK:
5453 if "PORTAGE_TMPDIR" not in mysettings or \
5454 not os.path.isdir(mysettings["PORTAGE_TMPDIR"]):
5455 writemsg("The directory specified in your " + \
5456 "PORTAGE_TMPDIR variable, '%s',\n" % \
5457 mysettings.get("PORTAGE_TMPDIR", ""), noiselevel=-1)
5458 writemsg("does not exist. Please create this directory or " + \
5459 "correct your PORTAGE_TMPDIR setting.\n", noiselevel=-1)
5462 # as some people use a separate PORTAGE_TMPDIR mount
5463 # we prefer that as the checks below would otherwise be pointless
5465 if os.path.exists(os.path.join(mysettings["PORTAGE_TMPDIR"], "portage")):
5466 checkdir = os.path.join(mysettings["PORTAGE_TMPDIR"], "portage")
5468 checkdir = mysettings["PORTAGE_TMPDIR"]
5470 if not os.access(checkdir, os.W_OK):
5471 writemsg("%s is not writable.\n" % checkdir + \
5472 "Likely cause is that you've mounted it as readonly.\n" \
5476 from tempfile import NamedTemporaryFile
5477 fd = NamedTemporaryFile(prefix="exectest-", dir=checkdir)
5478 os.chmod(fd.name, 0755)
5479 if not os.access(fd.name, os.X_OK):
5480 writemsg("Can not execute files in %s\n" % checkdir + \
5481 "Likely cause is that you've mounted it with one of the\n" + \
5482 "following mount options: 'noexec', 'user', 'users'\n\n" + \
5483 "Please make sure that portage can execute files in this directory.\n" \
5490 if mydo == "unmerge":
5491 return unmerge(mysettings["CATEGORY"],
5492 mysettings["PF"], myroot, mysettings, vartree=vartree)
5494 # Build directory creation isn't required for any of these.
5495 have_build_dirs = False
5496 if not mydo in ("digest", "help", "manifest"):
5497 mystatus = prepare_build_dirs(myroot, mysettings, cleanup)
5500 have_build_dirs = True
5502 # emerge handles logging externally
5504 # PORTAGE_LOG_FILE is set by the
5505 # above prepare_build_dirs() call.
5506 logfile = mysettings.get("PORTAGE_LOG_FILE")
5509 env_file = os.path.join(mysettings["T"], "environment")
5513 env_stat = os.stat(env_file)
5515 if e.errno != errno.ENOENT:
5519 saved_env = os.path.join(
5520 os.path.dirname(myebuild), "environment.bz2")
5521 if not os.path.isfile(saved_env):
5525 "bzip2 -dc %s > %s" % \
5526 (_shell_quote(saved_env),
5527 _shell_quote(env_file)))
5529 env_stat = os.stat(env_file)
5531 if e.errno != errno.ENOENT:
5534 if os.WIFEXITED(retval) and \
5535 os.WEXITSTATUS(retval) == os.EX_OK and \
5536 env_stat and env_stat.st_size > 0:
5537 # This is a signal to ebuild.sh, so that it knows to filter
5538 # out things like SANDBOX_{DENY,PREDICT,READ,WRITE} that
5539 # would be preserved between normal phases.
5540 open(env_file + ".raw", "w")
5542 writemsg(("!!! Error extracting saved " + \
5543 "environment: '%s'\n") % \
5544 saved_env, noiselevel=-1)
5548 if e.errno != errno.ENOENT:
5555 for var in ("ARCH", ):
5556 value = mysettings.get(var)
5557 if value and value.strip():
5559 msg = ("%s is not set... " % var) + \
5560 ("Are you missing the '%setc/make.profile' symlink? " % \
5561 mysettings["PORTAGE_CONFIGROOT"]) + \
5562 "Is the symlink correct? " + \
5563 "Is your portage tree complete?"
5564 from portage.elog.messages import eerror
5565 from textwrap import wrap
5566 for line in wrap(msg, 70):
5567 eerror(line, phase="setup", key=mysettings.mycpv)
5568 from portage.elog import elog_process
5569 elog_process(mysettings.mycpv, mysettings)
5571 del env_file, env_stat, saved_env
5572 _doebuild_exit_status_unlink(
5573 mysettings.get("EBUILD_EXIT_STATUS_FILE"))
5575 mysettings.pop("EBUILD_EXIT_STATUS_FILE", None)
5577 # if any of these are being called, handle them -- running them out of
5578 # the sandbox -- and stop now.
5580 return spawn(_shell_quote(ebuild_sh_binary) + " " + mydo,
5581 mysettings, debug=debug, free=1, logfile=logfile)
5582 elif mydo == "setup":
5584 _shell_quote(ebuild_sh_binary) + " " + mydo, mysettings,
5585 debug=debug, free=1, logfile=logfile, fd_pipes=fd_pipes,
5586 returnpid=returnpid)
5589 retval = exit_status_check(retval)
5591 """ Privileged phases may have left files that need to be made
5592 writable to a less privileged user."""
5593 apply_recursive_permissions(mysettings["T"],
5594 uid=portage_uid, gid=portage_gid, dirmode=070, dirmask=0,
5595 filemode=060, filemask=0)
5597 elif mydo == "preinst":
5598 phase_retval = spawn(
5599 _shell_quote(ebuild_sh_binary) + " " + mydo,
5600 mysettings, debug=debug, free=1, logfile=logfile,
5601 fd_pipes=fd_pipes, returnpid=returnpid)
5606 phase_retval = exit_status_check(phase_retval)
5607 if phase_retval == os.EX_OK:
5608 _doebuild_exit_status_unlink(
5609 mysettings.get("EBUILD_EXIT_STATUS_FILE"))
5610 mysettings.pop("EBUILD_PHASE", None)
5611 phase_retval = spawn(
5612 " ".join(_post_pkg_preinst_cmd(mysettings)),
5613 mysettings, debug=debug, free=1, logfile=logfile)
5614 phase_retval = exit_status_check(phase_retval)
5615 if phase_retval != os.EX_OK:
5616 writemsg("!!! post preinst failed; exiting.\n",
5619 elif mydo == "postinst":
5620 phase_retval = spawn(
5621 _shell_quote(ebuild_sh_binary) + " " + mydo,
5622 mysettings, debug=debug, free=1, logfile=logfile,
5623 fd_pipes=fd_pipes, returnpid=returnpid)
5628 phase_retval = exit_status_check(phase_retval)
5629 if phase_retval == os.EX_OK:
5630 _doebuild_exit_status_unlink(
5631 mysettings.get("EBUILD_EXIT_STATUS_FILE"))
5632 mysettings.pop("EBUILD_PHASE", None)
5633 phase_retval = spawn(" ".join(_post_pkg_postinst_cmd(mysettings)),
5634 mysettings, debug=debug, free=1, logfile=logfile)
5635 phase_retval = exit_status_check(phase_retval)
5636 if phase_retval != os.EX_OK:
5637 writemsg("!!! post postinst failed; exiting.\n",
5640 elif mydo in ("prerm", "postrm", "config", "info"):
5642 _shell_quote(ebuild_sh_binary) + " " + mydo,
5643 mysettings, debug=debug, free=1, logfile=logfile,
5644 fd_pipes=fd_pipes, returnpid=returnpid)
5649 retval = exit_status_check(retval)
5652 mycpv = "/".join((mysettings["CATEGORY"], mysettings["PF"]))
5654 emerge_skip_distfiles = returnpid
5655 # Only try and fetch the files if we are going to need them ...
5656 # otherwise, if user has FEATURES=noauto and they run `ebuild clean
5657 # unpack compile install`, we will try and fetch 4 times :/
5658 need_distfiles = not emerge_skip_distfiles and \
5659 (mydo in ("fetch", "unpack") or \
5660 mydo not in ("digest", "manifest") and "noauto" not in features)
5661 alist = mysettings.configdict["pkg"].get("A")
5662 aalist = mysettings.configdict["pkg"].get("AA")
5663 if need_distfiles or alist is None or aalist is None:
5664 # Make sure we get the correct tree in case there are overlays.
5665 mytree = os.path.realpath(
5666 os.path.dirname(os.path.dirname(mysettings["O"])))
5667 useflags = mysettings["PORTAGE_USE"].split()
5669 alist = mydbapi.getFetchMap(mycpv, useflags=useflags,
5671 aalist = mydbapi.getFetchMap(mycpv, mytree=mytree)
5672 except portage.exception.InvalidDependString, e:
5673 writemsg("!!! %s\n" % str(e), noiselevel=-1)
5674 writemsg("!!! Invalid SRC_URI for '%s'.\n" % mycpv,
5678 mysettings.configdict["pkg"]["A"] = " ".join(alist)
5679 mysettings.configdict["pkg"]["AA"] = " ".join(aalist)
5681 alist = set(alist.split())
5682 aalist = set(aalist.split())
5683 if ("mirror" in features) or fetchall:
5691 # Files are already checked inside fetch(),
5692 # so do not check them again.
5696 if not emerge_skip_distfiles and \
5697 need_distfiles and not fetch(
5698 fetchme, mysettings, listonly=listonly, fetchonly=fetchonly):
5701 if mydo == "fetch" and listonly:
5705 if mydo == "manifest":
5706 return not digestgen(aalist, mysettings, overwrite=1,
5707 manifestonly=1, myportdb=mydbapi)
5708 elif mydo == "digest":
5709 return not digestgen(aalist, mysettings, overwrite=1,
5711 elif "digest" in mysettings.features:
5712 digestgen(aalist, mysettings, overwrite=0, myportdb=mydbapi)
5713 except portage.exception.PermissionDenied, e:
5714 writemsg("!!! Permission Denied: %s\n" % (e,), noiselevel=-1)
5715 if mydo in ("digest", "manifest"):
5718 # See above comment about fetching only when needed
5719 if not emerge_skip_distfiles and \
5720 not digestcheck(checkme, mysettings, "strict" in features):
5726 # remove PORTAGE_ACTUAL_DISTDIR once cvs/svn is supported via SRC_URI
5727 if (mydo != "setup" and "noauto" not in features) or mydo == "unpack":
5728 orig_distdir = mysettings["DISTDIR"]
5729 mysettings["PORTAGE_ACTUAL_DISTDIR"] = orig_distdir
5730 edpath = mysettings["DISTDIR"] = \
5731 os.path.join(mysettings["PORTAGE_BUILDDIR"], "distdir")
5732 portage.util.ensure_dirs(edpath, gid=portage_gid, mode=0755)
5734 # Remove any unexpected files or directories.
5735 for x in os.listdir(edpath):
5736 symlink_path = os.path.join(edpath, x)
5737 st = os.lstat(symlink_path)
5738 if x in alist and stat.S_ISLNK(st.st_mode):
5740 if stat.S_ISDIR(st.st_mode):
5741 shutil.rmtree(symlink_path)
5743 os.unlink(symlink_path)
5745 # Check for existing symlinks and recreate if necessary.
5747 symlink_path = os.path.join(edpath, x)
5748 target = os.path.join(orig_distdir, x)
5750 link_target = os.readlink(symlink_path)
5752 os.symlink(target, symlink_path)
5754 if link_target != target:
5755 os.unlink(symlink_path)
5756 os.symlink(target, symlink_path)
5758 #initial dep checks complete; time to process main commands
5760 restrict = mysettings["PORTAGE_RESTRICT"].split()
5761 nosandbox = (("userpriv" in features) and \
5762 ("usersandbox" not in features) and \
5763 "userpriv" not in restrict and \
5764 "nouserpriv" not in restrict)
5765 if nosandbox and ("userpriv" not in features or \
5766 "userpriv" in restrict or \
5767 "nouserpriv" in restrict):
5768 nosandbox = ("sandbox" not in features and \
5769 "usersandbox" not in features)
5771 sesandbox = mysettings.selinux_enabled() and \
5772 "sesandbox" in mysettings.features
5774 droppriv = "userpriv" in mysettings.features and \
5775 "userpriv" not in restrict and \
5778 fakeroot = "fakeroot" in mysettings.features
5780 ebuild_sh = _shell_quote(ebuild_sh_binary) + " %s"
5781 misc_sh = _shell_quote(misc_sh_binary) + " dyn_%s"
5783 # args are for the to spawn function
5785 "setup": {"cmd":ebuild_sh, "args":{"droppriv":0, "free":1, "sesandbox":0, "fakeroot":0}},
5786 "unpack": {"cmd":ebuild_sh, "args":{"droppriv":droppriv, "free":0, "sesandbox":sesandbox, "fakeroot":0}},
5787 "prepare": {"cmd":ebuild_sh, "args":{"droppriv":droppriv, "free":0, "sesandbox":sesandbox, "fakeroot":0}},
5788 "configure":{"cmd":ebuild_sh, "args":{"droppriv":droppriv, "free":nosandbox, "sesandbox":sesandbox, "fakeroot":0}},
5789 "compile": {"cmd":ebuild_sh, "args":{"droppriv":droppriv, "free":nosandbox, "sesandbox":sesandbox, "fakeroot":0}},
5790 "test": {"cmd":ebuild_sh, "args":{"droppriv":droppriv, "free":nosandbox, "sesandbox":sesandbox, "fakeroot":0}},
5791 "install": {"cmd":ebuild_sh, "args":{"droppriv":0, "free":0, "sesandbox":sesandbox, "fakeroot":fakeroot}},
5792 "rpm": {"cmd":misc_sh, "args":{"droppriv":0, "free":0, "sesandbox":0, "fakeroot":fakeroot}},
5793 "package": {"cmd":misc_sh, "args":{"droppriv":0, "free":0, "sesandbox":0, "fakeroot":fakeroot}},
5796 # merge the deps in so we have again a 'full' actionmap
5797 # be glad when this can die.
5799 if len(actionmap_deps.get(x, [])):
5800 actionmap[x]["dep"] = ' '.join(actionmap_deps[x])
5802 if mydo in actionmap:
5803 if mydo == "package":
5804 # Make sure the package directory exists before executing
5805 # this phase. This can raise PermissionDenied if
5806 # the current user doesn't have write access to $PKGDIR.
5807 parent_dir = os.path.join(mysettings["PKGDIR"],
5808 mysettings["CATEGORY"])
5809 portage.util.ensure_dirs(parent_dir)
5810 if not os.access(parent_dir, os.W_OK):
5811 raise portage.exception.PermissionDenied(
5812 "access('%s', os.W_OK)" % parent_dir)
5813 retval = spawnebuild(mydo,
5814 actionmap, mysettings, debug, logfile=logfile,
5815 fd_pipes=fd_pipes, returnpid=returnpid)
5816 elif mydo=="qmerge":
5817 # check to ensure install was run. this *only* pops up when users
5818 # forget it and are using ebuild
5819 if not os.path.exists(
5820 os.path.join(mysettings["PORTAGE_BUILDDIR"], ".installed")):
5821 writemsg("!!! mydo=qmerge, but the install phase has not been run\n",
5824 # qmerge is a special phase that implies noclean.
5825 if "noclean" not in mysettings.features:
5826 mysettings.features.append("noclean")
5827 #qmerge is specifically not supposed to do a runtime dep check
5829 mysettings["CATEGORY"], mysettings["PF"], mysettings["D"],
5830 os.path.join(mysettings["PORTAGE_BUILDDIR"], "build-info"),
5831 myroot, mysettings, myebuild=mysettings["EBUILD"], mytree=tree,
5832 mydbapi=mydbapi, vartree=vartree, prev_mtimes=prev_mtimes)
5834 retval = spawnebuild("install", actionmap, mysettings, debug,
5835 alwaysdep=1, logfile=logfile, fd_pipes=fd_pipes,
5836 returnpid=returnpid)
5837 retval = exit_status_check(retval)
5838 if retval != os.EX_OK:
5839 # The merge phase handles this already. Callers don't know how
5840 # far this function got, so we have to call elog_process() here
5841 # so that it's only called once.
5842 from portage.elog import elog_process
5843 elog_process(mysettings.mycpv, mysettings)
5844 if retval == os.EX_OK:
5845 retval = merge(mysettings["CATEGORY"], mysettings["PF"],
5846 mysettings["D"], os.path.join(mysettings["PORTAGE_BUILDDIR"],
5847 "build-info"), myroot, mysettings,
5848 myebuild=mysettings["EBUILD"], mytree=tree, mydbapi=mydbapi,
5849 vartree=vartree, prev_mtimes=prev_mtimes)
5851 print "!!! Unknown mydo:",mydo
5859 mysettings["PORTAGE_TMPDIR"] = tmpdir_orig
5860 shutil.rmtree(tmpdir)
5862 portage.locks.unlockdir(builddir_lock)
5864 # Make sure that DISTDIR is restored to it's normal value before we return!
5865 if "PORTAGE_ACTUAL_DISTDIR" in mysettings:
5866 mysettings["DISTDIR"] = mysettings["PORTAGE_ACTUAL_DISTDIR"]
5867 del mysettings["PORTAGE_ACTUAL_DISTDIR"]
5871 if os.stat(logfile).st_size == 0:
5876 if mydo in ("digest", "manifest", "help"):
5877 # If necessary, depend phase has been triggered by aux_get calls
5878 # and the exemption is no longer needed.
5879 _doebuild_manifest_exempt_depend -= 1
def _validate_deps(mysettings, myroot, mydo, mydbapi):
	"""Validate the dependency and misc metadata of the current package
	(mysettings.mycpv) as fetched from mydbapi, logging any problems.
	Phases in invalid_dep_exempt_phases are exempt from a failure result
	even when the metadata is invalid."""
	invalid_dep_exempt_phases = \
		set(["clean", "cleanrm", "help", "prerm", "postrm"])
	dep_keys = ["DEPEND", "RDEPEND", "PDEPEND"]
	misc_keys = ["LICENSE", "PROPERTIES", "PROVIDE", "RESTRICT", "SRC_URI"]
	other_keys = ["SLOT"]
	all_keys = dep_keys + misc_keys + other_keys
	metadata = dict(izip(all_keys,
		mydbapi.aux_get(mysettings.mycpv, all_keys)))
	# Minimal stand-in for a tree object; dep_check() only needs a
	# "porttree" entry exposing a dbapi-like attribute.
	class FakeTree(object):
		def __init__(self, mydb):
	dep_check_trees = {myroot:{}}
	dep_check_trees[myroot]["porttree"] = \
		FakeTree(fakedbapi(settings=mysettings))
	# Syntax-check each *DEPEND variable with all USE flags enabled.
	for dep_type in dep_keys:
		mycheck = dep_check(metadata[dep_type], None, mysettings,
			myuse="all", myroot=myroot, trees=dep_check_trees)
			msgs.append("  %s: %s\n    %s\n" % (
				dep_type, metadata[dep_type], mycheck[1]))
			portage.dep.use_reduce(
				portage.dep.paren_reduce(metadata[k]), matchall=True)
		except portage.exception.InvalidDependString, e:
			msgs.append("  %s: %s\n    %s\n" % (
				k, metadata[k], str(e)))
	if not metadata["SLOT"]:
		msgs.append("  SLOT is undefined\n")
		portage.util.writemsg_level("Error(s) in metadata for '%s':\n" % \
			(mysettings.mycpv,), level=logging.ERROR, noiselevel=-1)
			portage.util.writemsg_level(x,
				level=logging.ERROR, noiselevel=-1)
		if mydo not in invalid_dep_exempt_phases:
def _movefile(src, dest, **kwargs):
	"""Wrapper for movefile() that converts a failure (None return value)
	into a raised PortageException instead of a silent error return."""
	moved = movefile(src, dest, **kwargs)
	if moved is None:
		raise portage.exception.PortageException(
			"mv '%s' '%s'" % (src, dest))
def movefile(src,dest,newmtime=None,sstat=None,mysettings=None):
	"""moves a file from src to dest, preserving all permissions and attributes; mtime will
	be preserved even when moving across filesystems.  Returns the resulting mtime on
	success and None on failure.  Move is atomic."""
	#print "movefile("+str(src)+","+str(dest)+","+str(newmtime)+","+str(sstat)+")"
	if mysettings is None:
		mysettings = settings
	selinux_enabled = mysettings.selinux_enabled()
	except SystemExit, e:
	except Exception, e:
		print "!!! Stating source file failed... movefile()"
		dstat=os.lstat(dest)
	except (OSError, IOError):
		# Destination does not exist; stat its parent directory instead.
		dstat=os.lstat(os.path.dirname(dest))
		# Clear any BSD file flags on dest and its parent before moving;
		# the parent's flags are saved (pflags) and restored at the end.
		if destexists and dstat.st_flags != 0:
			bsd_chflags.lchflags(dest, 0)
		# Use normal stat/chflags for the parent since we want to
		# follow any symlinks to the real parent directory.
		pflags = os.stat(os.path.dirname(dest)).st_flags
			bsd_chflags.chflags(os.path.dirname(dest), 0)
		if stat.S_ISLNK(dstat[stat.ST_MODE]):
			except SystemExit, e:
			except Exception, e:
	if stat.S_ISLNK(sstat[stat.ST_MODE]):
		# Source is a symlink: recreate it at dest rather than copying.
			target=os.readlink(src)
			if mysettings and mysettings["D"]:
				# Strip the install-image prefix ($D) from the link target.
				if target.find(mysettings["D"])==0:
					target=target[len(mysettings["D"]):]
			if destexists and not stat.S_ISDIR(dstat[stat.ST_MODE]):
				sid = selinux.get_lsid(src)
				selinux.secure_symlink(target,dest,sid)
				os.symlink(target,dest)
			lchown(dest,sstat[stat.ST_UID],sstat[stat.ST_GID])
			# utime() only works on the target of a symlink, so it's not
			# possible to preserve mtime on symlinks.
			return os.lstat(dest)[stat.ST_MTIME]
		except SystemExit, e:
		except Exception, e:
			print "!!! failed to properly create symlink:"
			print "!!!",dest,"->",target
	if sstat[stat.ST_DEV]==dstat[stat.ST_DEV] or selinux_enabled:
		# Same device (or selinux in use): try an atomic rename first.
				ret=selinux.secure_rename(src,dest)
				ret=os.rename(src,dest)
		except SystemExit, e:
		except Exception, e:
			if e[0]!=errno.EXDEV:
				# Some random error.
				print "!!! Failed to move",src,"to",dest
			# Invalid cross-device-link 'bind' mounted or actually Cross-Device
		if stat.S_ISREG(sstat[stat.ST_MODE]):
			try: # For safety copy then move it over.
					selinux.secure_copy(src,dest+"#new")
					selinux.secure_rename(dest+"#new",dest)
					shutil.copyfile(src,dest+"#new")
					os.rename(dest+"#new",dest)
			except SystemExit, e:
			except Exception, e:
				print '!!! copy',src,'->',dest,'failed.'
			#we don't yet handle special, so we need to fall back to /bin/mv
				a=commands.getstatusoutput(MOVE_BINARY+" -c -f "+"'"+src+"' '"+dest+"'")
				a=commands.getstatusoutput(MOVE_BINARY+" -f "+"'"+src+"' '"+dest+"'")
				print "!!! Failed to move special file:"
				print "!!! '"+src+"' to '"+dest+"'"
				return None # failure
				if stat.S_ISLNK(sstat[stat.ST_MODE]):
					lchown(dest,sstat[stat.ST_UID],sstat[stat.ST_GID])
					os.chown(dest,sstat[stat.ST_UID],sstat[stat.ST_GID])
				os.chmod(dest, stat.S_IMODE(sstat[stat.ST_MODE])) # Sticky is reset on chown
		except SystemExit, e:
		except Exception, e:
			print "!!! Failed to chown/chmod/unlink in movefile()"
		if newmtime is not None:
			os.utime(dest, (newmtime, newmtime))
			os.utime(dest, (sstat.st_atime, sstat.st_mtime))
			newmtime = long(sstat.st_mtime)
		# The utime can fail here with EPERM even though the move succeeded.
		# Instead of failing, use stat to return the mtime if possible.
			newmtime = long(os.stat(dest).st_mtime)
			writemsg("!!! Failed to stat in movefile()\n", noiselevel=-1)
			writemsg("!!! %s\n" % dest, noiselevel=-1)
			writemsg("!!! %s\n" % str(e), noiselevel=-1)
		# Restore the flags we saved before moving
			bsd_chflags.chflags(os.path.dirname(dest), pflags)
def merge(mycat, mypkg, pkgloc, infloc, myroot, mysettings, myebuild=None,
	mytree=None, mydbapi=None, vartree=None, prev_mtimes=None, blockers=None,
	"""Merge a package image (pkgloc, with metadata in infloc) into myroot
	by delegating to dblink.merge().  Requires write access to myroot."""
	if not os.access(myroot, os.W_OK):
		writemsg("Permission denied: access('%s', W_OK)\n" % myroot,
	mylink = dblink(mycat, mypkg, myroot, mysettings, treetype=mytree,
		vartree=vartree, blockers=blockers, scheduler=scheduler)
	return mylink.merge(pkgloc, infloc, myroot, myebuild,
		mydbapi=mydbapi, prev_mtimes=prev_mtimes)
def unmerge(cat, pkg, myroot, mysettings, mytrimworld=1, vartree=None,
	ldpath_mtimes=None, scheduler=None):
	"""Unmerge an installed package from myroot by delegating to
	dblink.unmerge()."""
	mylink = dblink(cat, pkg, myroot, mysettings, treetype="vartree",
		vartree=vartree, scheduler=scheduler)
	vartree = mylink.vartree
		retval = mylink.unmerge(trimworld=mytrimworld, cleanup=1,
			ldpath_mtimes=ldpath_mtimes)
		if retval == os.EX_OK:
def getCPFromCPV(mycpv):
	"""Return just the cat/pkg (cp) part of a cpv, as computed by pkgsplit."""
	split_result = pkgsplit(mycpv)
	return split_result[0]
def dep_virtual(mysplit, mysettings):
	"""Does virtual dependency conversion: substitute virtual keys in the
	(possibly nested) dependency list with the provider choices obtained
	from mysettings.getvirtuals()."""
	myvirtuals = mysettings.getvirtuals()
		if isinstance(x, list):
			# Recurse into nested dependency groups.
			newsplit.append(dep_virtual(x, mysettings))
			mychoices = myvirtuals.get(mykey, None)
				if len(mychoices) == 1:
					# Single provider: substitute it directly.
					a = x.replace(mykey, mychoices[0])
					# blocker needs "and" not "or(||)".
					a.append(x.replace(mykey, y))
def _expand_new_virtuals(mysplit, edebug, mydbapi, mysettings, myroot="/",
	trees=None, use_mask=None, use_force=None, **kwargs):
	"""Recursively expand new-style virtuals so as to collapse one or more
	levels of indirection.  In dep_zapdeps, new-style virtuals will be assigned
	zero cost regardless of whether or not they are currently installed.  Virtual
	blockers are supported but only when the virtual expands to a single
	atom because it wouldn't necessarily make sense to block all the components
	of a compound virtual.  When more than one new-style virtual is matched,
	the matches are sorted from highest to lowest versions and the atom is
	expanded to || ( highest match ... lowest match )."""
	# According to GLEP 37, RDEPEND is the only dependency type that is valid
	# for new-style virtuals.  Repoman should enforce this.
	dep_keys = ["RDEPEND", "DEPEND", "PDEPEND"]
	portdb = trees[myroot]["porttree"].dbapi
	repoman = isinstance(mydbapi, portdbapi)
	if kwargs["use_binaries"]:
		portdb = trees[myroot]["bintree"].dbapi
	myvirtuals = mysettings.getvirtuals()
	myuse = kwargs["myuse"]
		elif isinstance(x, list):
			# Recurse into nested dependency groups.
			newsplit.append(_expand_new_virtuals(x, edebug, mydbapi,
				mysettings, myroot=myroot, trees=trees, use_mask=use_mask,
				use_force=use_force, **kwargs))
		if not isinstance(x, portage.dep.Atom):
				x = portage.dep.Atom(x)
			except portage.exception.InvalidAtom:
				if portage.dep._dep_check_strict:
					raise portage.exception.ParseError(
						"invalid atom: '%s'" % x)
		# For repoman, evaluate USE conditionals against the QA
		# mask/force sets rather than a concrete USE configuration.
		if repoman and x.use and x.use.conditional:
			evaluated_atom = portage.dep.remove_slot(x)
				evaluated_atom += ":%s" % x.slot
			evaluated_atom += str(x.use._eval_qa_conditionals(
				use_mask, use_force))
			x = portage.dep.Atom(evaluated_atom)
		if not repoman and \
			myuse is not None and isinstance(x, portage.dep.Atom) and x.use:
			if x.use.conditional:
				evaluated_atom = portage.dep.remove_slot(x)
					evaluated_atom += ":%s" % x.slot
				evaluated_atom += str(x.use.evaluate_conditionals(myuse))
				x = portage.dep.Atom(evaluated_atom)
		mykey = dep_getkey(x)
		if not mykey.startswith("virtual/"):
		mychoices = myvirtuals.get(mykey, [])
		isblocker = x.startswith("!")
			# Virtual blockers are no longer expanded here since
			# the un-expanded virtual atom is more useful for
			# maintaining a cache of blocker atoms.
		matches = portdb.match(match_atom)
		# Use descending order to prefer higher versions.
			# only use new-style matches
			if cpv.startswith("virtual/"):
				pkgs.append((cpv, catpkgsplit(cpv)[1:], portdb))
		if not (pkgs or mychoices):
			# This one couldn't be expanded as a new-style virtual.  Old-style
			# virtuals have already been expanded by dep_virtual, so this one
			# is unavailable and dep_zapdeps will identify it as such.  The
			# atom is not eliminated here since it may still represent a
			# dependency that needs to be satisfied.
		if not pkgs and len(mychoices) == 1:
			newsplit.append(x.replace(mykey, mychoices[0]))
			cpv, pv_split, db = y
			depstring = " ".join(db.aux_get(cpv, dep_keys))
			pkg_kwargs = kwargs.copy()
			if isinstance(db, portdbapi):
				use_split = db.aux_get(cpv, ["USE"])[0].split()
				pkg_kwargs["myuse"] = use_split
				print "Virtual Parent:   ", y[0]
				print "Virtual Depstring:", depstring
			mycheck = dep_check(depstring, mydbapi, mysettings, myroot=myroot,
				trees=trees, **pkg_kwargs)
				raise portage.exception.ParseError(
					"%s: %s '%s'" % (y[0], mycheck[1], depstring))
				virtual_atoms = [atom for atom in mycheck[1] \
					if not atom.startswith("!")]
				if len(virtual_atoms) == 1:
					# It wouldn't make sense to block all the components of a
					# compound virtual, so only a single atom block is allowed.
					a.append("!" + virtual_atoms[0])
				mycheck[1].append("="+y[0]) # pull in the new-style virtual
				a.append(mycheck[1])
		# Plain old-style virtuals.  New-style virtuals are preferred.
				a.append(x.replace(mykey, y))
		if isblocker and not a:
			# Probably a compound virtual.  Pass the atom through unprocessed.
def dep_eval(deplist):
	"""Evaluate a reduced dependency list (nested lists of satisfied
	flags), returning 1 when satisfied and 0 otherwise."""
	if deplist[0]=="||":
		#or list; we just need one "1"
		for x in deplist[1:]:
			if isinstance(x, list):
		#XXX: unless there's no available atoms in the list
		#in which case we need to assume that everything is
		#okay as some ebuilds are relying on an old bug.
		if len(deplist) == 1:
			if isinstance(x, list):
def dep_zapdeps(unreduced, reduced, myroot, use_binaries=0, trees=None):
	"""Takes an unreduced and reduced deplist and removes satisfied dependencies.
	Returned deplist contains steps that must be taken to satisfy dependencies."""
	writemsg("ZapDeps -- %s\n" % (use_binaries), 2)
	if not reduced or unreduced == ["||"] or dep_eval(reduced):
	if unreduced[0] != "||":
		# Not a choice node: recurse and collect the unsatisfied deps.
		for dep, satisfied in izip(unreduced, reduced):
			if isinstance(dep, list):
				unresolved += dep_zapdeps(dep, satisfied, myroot,
					use_binaries=use_binaries, trees=trees)
				unresolved.append(dep)
	# We're at a ( || atom ... ) type level and need to make a choice
	deps = unreduced[1:]
	satisfieds = reduced[1:]
	# Our preference order is for an the first item that:
	# a) contains all unmasked packages with the same key as installed packages
	# b) contains all unmasked packages
	# c) contains masked installed packages
	# d) is the first item
	preferred_not_installed = []
	preferred_any_slot = []
	possible_upgrades = []
	# Alias the trees we'll be checking availability against
	parent = trees[myroot].get("parent")
	graph_db = trees[myroot].get("graph_db")
	if "vartree" in trees[myroot]:
		vardb = trees[myroot]["vartree"].dbapi
		mydbapi = trees[myroot]["bintree"].dbapi
		mydbapi = trees[myroot]["porttree"].dbapi
	# Sort the deps into preferred (installed) and other
	# with values of [[required_atom], availablility]
	for dep, satisfied in izip(deps, satisfieds):
		if isinstance(dep, list):
			atoms = dep_zapdeps(dep, satisfied, myroot,
				use_binaries=use_binaries, trees=trees)
			other.append((atoms, None, False))
		all_available = True
			avail_pkg = mydbapi.match(atom)
				avail_pkg = avail_pkg[-1] # highest (ascending order)
				avail_slot = "%s:%s" % (dep_getkey(atom),
					mydbapi.aux_get(avail_pkg, ["SLOT"])[0])
				all_available = False
			versions[avail_slot] = avail_pkg
		this_choice = (atoms, versions, all_available)
			# The "all installed" criterion is not version or slot specific.
			# If any version of a package is installed then we assume that it
			# is preferred over other possible packages choices.
			all_installed = True
			for atom in set([dep_getkey(atom) for atom in atoms]):
				# New-style virtuals have zero cost to install.
				if not vardb.match(atom) and not atom.startswith("virtual/"):
					all_installed = False
			all_installed_slots = False
				all_installed_slots = True
				for slot_atom in versions:
					# New-style virtuals have zero cost to install.
					if not vardb.match(slot_atom) and \
						not slot_atom.startswith("virtual/"):
						all_installed_slots = False
				if all_installed_slots:
					preferred.append(this_choice)
					preferred_any_slot.append(this_choice)
			elif graph_db is None:
				possible_upgrades.append(this_choice)
				for slot_atom in versions:
					# New-style virtuals have zero cost to install.
					if not graph_db.match(slot_atom) and \
						not slot_atom.startswith("virtual/"):
						all_in_graph = False
						preferred_not_installed.append(this_choice)
						# Check if the atom would result in a direct circular
						# dependency and try to avoid that if it seems likely
						# to be unresolvable.
						cpv_slot_list = [parent]
						circular_atom = None
							if vardb.match(atom):
								# If the atom is satisfied by an installed
								# version then it's not a circular dep.
							if dep_getkey(atom) != parent.cp:
							if match_from_list(atom, cpv_slot_list):
								circular_atom = atom
						if circular_atom is None:
							preferred_not_installed.append(this_choice)
							other.append(this_choice)
					possible_upgrades.append(this_choice)
			other.append(this_choice)
	# Compare the "all_installed" choices against the "all_available" choices
	# for possible missed upgrades.  The main purpose of this code is to find
	# upgrades of new-style virtuals since _expand_new_virtuals() expands them
	# into || ( highest version ... lowest version ).  We want to prefer the
	# highest all_available version of the new-style virtual when there is a
	# lower all_installed version.
	preferred.extend(preferred_not_installed)
	preferred.extend(preferred_any_slot)
	preferred.extend(possible_upgrades)
	possible_upgrades = preferred[1:]
	for possible_upgrade in possible_upgrades:
		atoms, versions, all_available = possible_upgrade
		myslots = set(versions)
		for other_choice in preferred:
			if possible_upgrade is other_choice:
				# possible_upgrade will not be promoted, so move on
			o_atoms, o_versions, o_all_available = other_choice
			intersecting_slots = myslots.intersection(o_versions)
			if not intersecting_slots:
			has_downgrade = False
			for myslot in intersecting_slots:
				myversion = versions[myslot]
				o_version = o_versions[myslot]
				difference = pkgcmp(catpkgsplit(myversion)[1:],
					catpkgsplit(o_version)[1:])
						has_downgrade = True
			if has_upgrade and not has_downgrade:
				# Promote this choice ahead of the one it upgrades.
				preferred.remove(possible_upgrade)
				o_index = preferred.index(other_choice)
				preferred.insert(o_index, possible_upgrade)
	# preferred now contains a) and c) from the order above with
	# the masked flag differentiating the two.  other contains b)
	# and d) so adding other to preferred will give us a suitable
	# list to iterate over.
	preferred.extend(other)
	for allow_masked in (False, True):
		for atoms, versions, all_available in preferred:
			if all_available or allow_masked:
	assert(False) # This point should not be reachable
def dep_expand(mydep, mydb=None, use_cache=1, settings=None):
	"""Expand the category of a dependency atom via cpv_expand(), preserving
	any operator prefix and version/slot postfix, and return the result as
	a portage.dep.Atom."""
	mydep = dep_getcpv(orig_dep)
	myindex = orig_dep.index(mydep)
	prefix = orig_dep[:myindex]
	postfix = orig_dep[myindex+len(mydep):]
	expanded = cpv_expand(mydep, mydb=mydb,
		use_cache=use_cache, settings=settings)
		return portage.dep.Atom(prefix + expanded + postfix)
	except portage.exception.InvalidAtom:
		# Missing '=' prefix is allowed for backward compatibility.
		if not isvalidatom("=" + prefix + expanded + postfix):
		return portage.dep.Atom("=" + prefix + expanded + postfix)
def dep_check(depstring, mydbapi, mysettings, use="yes", mode=None, myuse=None,
	use_cache=1, use_binaries=0, myroot="/", trees=None):
	"""Takes a depend string and parses the condition.  Returns a
	[status, result] pair: [0, message] on parse failure, otherwise
	[1, atom_list]."""
	edebug = mysettings.get("PORTAGE_DEBUG", None) == "1"
	#check_config_instance(mysettings)
		trees = globals()["db"]
			myusesplit = mysettings["PORTAGE_USE"].split()
			# We've been given useflags to use.
			#print "USE FLAGS PASSED IN."
			#if "bindist" in myusesplit:
			#	print "BINDIST is set!"
			#	print "BINDIST NOT set."
		#we are being run by autouse(), don't consult USE vars yet.
		# WE ALSO CANNOT USE SETTINGS
	#convert parenthesis to sublists
		mysplit = portage.dep.paren_reduce(depstring)
	except portage.exception.InvalidDependString, e:
	useforce.add(mysettings["ARCH"])
		# This masking/forcing is only for repoman.  In other cases, relevant
		# masking/forcing should have already been applied via
		# config.regenerate().  Also, binary or installed packages may have
		# been built with flags that are now masked, and it would be
		# inconsistent to mask them now.  Additionally, myuse may consist of
		# flags from a parent package that is being merged to a $ROOT that is
		# different from the one that mysettings represents.
		mymasks.update(mysettings.usemask)
		mymasks.update(mysettings.archlist())
		mymasks.discard(mysettings["ARCH"])
		useforce.update(mysettings.useforce)
		useforce.difference_update(mymasks)
		mysplit = portage.dep.use_reduce(mysplit, uselist=myusesplit,
			masklist=mymasks, matchall=(use=="all"), excludeall=useforce)
	except portage.exception.InvalidDependString, e:
	# Do the || conversions
	mysplit=portage.dep.dep_opconvert(mysplit)
		#dependencies were reduced to nothing
	# Recursively expand new-style virtuals so as to
	# collapse one or more levels of indirection.
		mysplit = _expand_new_virtuals(mysplit, edebug, mydbapi, mysettings,
			use=use, mode=mode, myuse=myuse,
			use_force=useforce, use_mask=mymasks, use_cache=use_cache,
			use_binaries=use_binaries, myroot=myroot, trees=trees)
	except portage.exception.ParseError, e:
	mysplit2=dep_wordreduce(mysplit2,mysettings,mydbapi,mode,use_cache=use_cache)
	if mysplit2 is None:
		return [0,"Invalid token"]
	writemsg("\n\n\n", 1)
	writemsg("mysplit:  %s\n" % (mysplit), 1)
	writemsg("mysplit2: %s\n" % (mysplit2), 1)
	myzaps = dep_zapdeps(mysplit, mysplit2, myroot,
		use_binaries=use_binaries, trees=trees)
	mylist = flatten(myzaps)
	writemsg("myzaps:   %s\n" % (myzaps), 1)
	writemsg("mylist:   %s\n" % (mylist), 1)
	writemsg("mydict:   %s\n" % (mydict), 1)
	return [1,mydict.keys()]
def dep_wordreduce(mydeplist,mysettings,mydbapi,mode,use_cache=1):
	"Reduces the deplist to ones and zeros"
	deplist=mydeplist[:]
	for mypos, token in enumerate(deplist):
		if isinstance(deplist[mypos], list):
			# Recurse into nested dependency groups.
			deplist[mypos]=dep_wordreduce(deplist[mypos],mysettings,mydbapi,mode,use_cache=use_cache)
		elif deplist[mypos]=="||":
		elif token[:1] == "!":
			deplist[mypos] = False
			mykey = dep_getkey(deplist[mypos])
			if mysettings and mykey in mysettings.pprovideddict and \
				match_from_list(deplist[mypos], mysettings.pprovideddict[mykey]):
			elif mydbapi is None:
				# Assume nothing is satisfied.  This forces dep_zapdeps to
				# return all of the deps that have been selected
				# (excluding those satisfied by package.provided).
				deplist[mypos] = False
					x = mydbapi.xmatch(mode, deplist[mypos])
					if mode.startswith("minimum-"):
					mydep=mydbapi.match(deplist[mypos],use_cache=use_cache)
				if deplist[mypos][0]=="!":
					#encountered invalid string
def cpv_getkey(mycpv):
	# Return the "category/package" key for a cpv string such as
	# "cat/pkg-1.0" by stripping the version via pkgsplit().
	myslash=mycpv.split("/")
	mysplit=pkgsplit(myslash[-1])
	# NOTE(review): lines handling the no-category / unsplittable cases
	# are elided in this listing.
	return myslash[0]+"/"+mysplit[0]
def key_expand(mykey, mydb=None, use_cache=1, settings=None):
	# Expand a package key (possibly category-less, possibly a virtual)
	# into a "cat/pkg" key, consulting mydb's categories and the
	# virtuals mappings from settings.
	# NOTE(review): several lines are elided in this listing; indentation
	# is reconstructed and elisions are marked inline.
	mysplit=mykey.split("/")
	if settings is None:
		settings = globals()["settings"]
	virts = settings.getvirtuals("/")
	virts_p = settings.get_virts_p("/")
	# (branch header for a category-less key is elided here)
	if hasattr(mydb, "cp_list"):
		for x in mydb.categories:
			if mydb.cp_list(x+"/"+mykey,use_cache=use_cache):
				# (return of the matched "cat/pkg" elided here)
		if mykey in virts_p:
			return(virts_p[mykey][0])
		return "null/"+mykey
	# (branch for an already-qualified key is partly elided here)
	if hasattr(mydb, "cp_list"):
		if not mydb.cp_list(mykey, use_cache=use_cache) and \
			virts and mykey in virts:
			return virts[mykey][0]
	# (fall-through return of mykey elided here)
def cpv_expand(mycpv, mydb=None, use_cache=1, settings=None):
	"""Given a string (packagename or virtual) expand it into a valid
	cat/package string. Virtuals use the mydb to determine which provided
	virtual is a valid choice and defaults to the first element when there
	are no installed/available candidates.

	NOTE(review): this listing elides several lines; indentation below is
	reconstructed and elisions are marked where evident."""
	myslash=mycpv.split("/")
	mysplit=pkgsplit(myslash[-1])
	if settings is None:
		settings = globals()["settings"]
	virts = settings.getvirtuals("/")
	virts_p = settings.get_virts_p("/")
	# (the "len(myslash) > 2" branch header is elided here)
		# this is illegal case.
	elif len(myslash)==2:
		# (guard on mysplit elided here)
			mykey=myslash[0]+"/"+mysplit[0]
		# (else-branch assignment elided here)
		if mydb and virts and mykey in virts:
			writemsg("mydb.__class__: %s\n" % (mydb.__class__), 1)
			if hasattr(mydb, "cp_list"):
				if not mydb.cp_list(mykey, use_cache=use_cache):
					writemsg("virts[%s]: %s\n" % (str(mykey),virts[mykey]), 1)
					mykey_orig = mykey[:]
					for vkey in virts[mykey]:
						# The virtuals file can contain a versioned atom, so
						# it may be necessary to remove the operator and
						# version from the atom before it is passed into
						# cp_list() (remainder of comment elided here).
						if mydb.cp_list(dep_getkey(vkey), use_cache=use_cache):
							# (assignment of the chosen vkey elided here)
							writemsg("virts chosen: %s\n" % (mykey), 1)
					# No provider found: default to the first virtuals entry.
					if mykey == mykey_orig:
						mykey=virts[mykey][0]
						writemsg("virts defaulted: %s\n" % (mykey), 1)
			#we only perform virtual expansion if we are passed a dbapi
	# (else: branch header elided here)
		#specific cpv, no category, ie. "foo-1.0"
		# (derivation of myp and the matches list is elided here)
		if mydb and hasattr(mydb, "categories"):
			for x in mydb.categories:
				if mydb.cp_list(x+"/"+myp,use_cache=use_cache):
					matches.append(x+"/"+myp)
		if len(matches) > 1:
			virtual_name_collision = False
			if len(matches) == 2:
				# (loop over matches elided here)
					if not x.startswith("virtual/"):
						# Assume that the non-virtual is desired. This helps
						# avoid the ValueError for invalid deps that come from
						# installed packages (during reverse blocker detection,
						# for example). (remainder of branch elided here)
					virtual_name_collision = True
			if not virtual_name_collision:
				# AmbiguousPackageName inherits from ValueError,
				# for backward compatibility with calling code
				# that already handles ValueError.
				raise portage.exception.AmbiguousPackageName(matches)
		# (single-match handling elided here)
		if not mykey and not isinstance(mydb, list):
			# (virts_p membership test elided here)
				mykey=virts_p[myp][0]
			#again, we only perform virtual expansion if we have a dbapi (not a list)
		# ("null/" fallback elided here)
	if mysplit[2]=="r0":
		# Drop the "-r0" revision suffix for a canonical cpv.
		return mykey+"-"+mysplit[1]
	# (else: elided here)
		return mykey+"-"+mysplit[1]+"-"+mysplit[2]
def getmaskingreason(mycpv, metadata=None, settings=None, portdb=None, return_location=False):
	"""Scan every known package.mask file for the entry masking mycpv and
	return the comment block above it (plus the file path when
	return_location is true).
	NOTE(review): this listing elides several lines; indentation below is
	reconstructed and elisions are marked where evident."""
	from portage.util import grablines
	if settings is None:
		settings = globals()["settings"]
	# (the "if portdb is None:" guard is elided here)
		portdb = globals()["portdb"]
	mysplit = catpkgsplit(mycpv)
	# (the "if not mysplit:" guard is elided here)
		raise ValueError("invalid CPV: %s" % mycpv)
	if metadata is None:
		db_keys = list(portdb._aux_cache_keys)
		# (try: elided here)
			metadata = dict(izip(db_keys, portdb.aux_get(mycpv, db_keys)))
		# (except KeyError: elided here)
			if not portdb.cpv_exists(mycpv):
				# (missing-package handling elided here)
	if metadata is None:
		# Can't access SLOT due to corruption.
		cpv_slot_list = [mycpv]
	# (else: elided here)
		cpv_slot_list = ["%s:%s" % (mycpv, metadata["SLOT"])]
	mycp=mysplit[0]+"/"+mysplit[1]

	# XXX- This is a temporary duplicate of code from the config constructor.
	locations = [os.path.join(settings["PORTDIR"], "profiles")]
	locations.extend(settings.profiles)
	for ov in settings["PORTDIR_OVERLAY"].split():
		profdir = os.path.join(normalize_path(ov), "profiles")
		if os.path.isdir(profdir):
			locations.append(profdir)
	locations.append(os.path.join(settings["PORTAGE_CONFIGROOT"],
		USER_CONFIG_PATH.lstrip(os.path.sep)))
	# Pair each location with the lines of its package.mask file.
	pmasklists = [(x, grablines(os.path.join(x, "package.mask"), recursive=1)) for x in locations]

	if mycp in settings.pmaskdict:
		for x in settings.pmaskdict[mycp]:
			if match_from_list(x, cpv_slot_list):
				# (comment-accumulation setup elided here)
				for pmask in pmasklists:
					pmask_filename = os.path.join(pmask[0], "package.mask")
					for i in xrange(len(pmask[1])):
						l = pmask[1][i].strip()
						# (blank/comment-line handling elided here)
							comment_valid = i + 1
						# (the atom-match test is elided here)
							if comment_valid != i:
								# (comment extraction elided here)
								return (comment, pmask_filename)
							# (non-location return elided here)
							elif comment_valid != -1:
								# Apparently this comment applies to multiple masks, so
								# it remains valid until a blank line is encountered.
def getmaskingstatus(mycpv, settings=None, portdb=None):
	"""Return a list of reasons why mycpv is masked (profile, package.mask,
	EAPI, keywords, licenses); an empty list means it is not masked.
	NOTE(review): this listing elides several lines; indentation below is
	reconstructed and elisions are marked where evident."""
	if settings is None:
		# Clone so the setcpv() call below can't mutate the caller's config.
		settings = config(clone=globals()["settings"])
	# (the "if portdb is None:" guard is elided here)
		portdb = globals()["portdb"]
	# (metadata / installed defaults elided here)
	if not isinstance(mycpv, basestring):
		# emerge passed in a Package instance
		# (extraction of pkg and its cpv is elided here)
		metadata = pkg.metadata
		installed = pkg.installed
	mysplit = catpkgsplit(mycpv)
	# (the "if not mysplit:" guard is elided here)
		raise ValueError("invalid CPV: %s" % mycpv)
	if metadata is None:
		db_keys = list(portdb._aux_cache_keys)
		# (try: elided here)
			metadata = dict(izip(db_keys, portdb.aux_get(mycpv, db_keys)))
		# (except KeyError: elided here)
			if not portdb.cpv_exists(mycpv):
				# (missing-package handling elided here)
			return ["corruption"]
	if "?" in metadata["LICENSE"]:
		# USE-conditional licenses require USE to be calculated first.
		settings.setcpv(mycpv, mydb=metadata)
		metadata["USE"] = settings["PORTAGE_USE"]
	# (else: elided here)
		metadata["USE"] = ""
	mycp=mysplit[0]+"/"+mysplit[1]

	# (rValue initialization / profile-checking header elided here)
	if settings._getProfileMaskAtom(mycpv, metadata):
		rValue.append("profile")

	# package.mask checking
	if settings._getMaskAtom(mycpv, metadata):
		rValue.append("package.mask")

	# keywords checking
	eapi = metadata["EAPI"]
	mygroups = metadata["KEYWORDS"]
	licenses = metadata["LICENSE"]
	slot = metadata["SLOT"]
	if eapi.startswith("-"):
		# (stripping of the leading "-" is elided here)
	if not eapi_is_supported(eapi):
		return ["EAPI %s" % eapi]
	elif _eapi_is_deprecated(eapi) and not installed:
		return ["EAPI %s" % eapi]
	egroups = settings.configdict["backupenv"].get(
		"ACCEPT_KEYWORDS", "").split()
	mygroups = mygroups.split()
	pgroups = settings["ACCEPT_KEYWORDS"].split()
	myarch = settings["ARCH"]
	if pgroups and myarch not in pgroups:
		"""For operating systems other than Linux, ARCH is not necessarily a
		valid keyword."""
		# (remainder of this note is elided in this listing)
		myarch = pgroups[0].lstrip("~")

	cp = dep_getkey(mycpv)
	pkgdict = settings.pkeywordsdict.get(cp)
	# (matches flag and "if pkgdict:" guard are elided here)
		cpv_slot_list = ["%s:%s" % (mycpv, metadata["SLOT"])]
		for atom, pkgkeywords in pkgdict.iteritems():
			if match_from_list(atom, cpv_slot_list):
				# (matches = True elided here)
				pgroups.extend(pkgkeywords)
		if matches or egroups:
			pgroups.extend(egroups)
			# (incremental-expansion loop header elided here)
				if x.startswith("-"):
					# ("-*" reset case elided here)
					inc_pgroups.discard(x[1:])
				# (plain-add case elided here)
			pgroups = inc_pgroups
			# (cleanup elided here)

	# (kmask initialization elided here)
	for keyword in pgroups:
		if keyword in mygroups:
			# (kmask reset / break elided here)
	# (fallback scan over mygroups is partly elided here)
		elif gp=="-"+myarch and myarch in pgroups:
			# (kmask assignment elided here)
		elif gp=="~"+myarch and myarch in pgroups:
			# (kmask assignment elided here)

	# licenses checking (the try: is elided here)
		missing_licenses = settings._getMissingLicenses(mycpv, metadata)
		if missing_licenses:
			allowed_tokens = set(["||", "(", ")"])
			allowed_tokens.update(missing_licenses)
			license_split = licenses.split()
			# Keep only structure tokens plus the missing licenses.
			license_split = [x for x in license_split \
				if x in allowed_tokens]
			msg = license_split[:]
			msg.append("license(s)")
			rValue.append(" ".join(msg))
	except portage.exception.InvalidDependString, e:
		rValue.append("LICENSE: "+str(e))

	# Only show KEYWORDS masks for installed packages
	# if they're not masked for any other reason.
	if kmask and (not installed or not rValue):
		rValue.append(kmask+" keyword")
	# (return rValue elided here)
# NOTE(review): the opening "auxdbkeys=(" and the closing paren of this
# tuple are elided in this listing. The UNUSED_* placeholder names
# suggest the ordering is positional (part of the cache record format);
# confirm before reordering.
'DEPEND', 'RDEPEND', 'SLOT', 'SRC_URI',
'RESTRICT', 'HOMEPAGE', 'LICENSE', 'DESCRIPTION',
'KEYWORDS', 'INHERITED', 'IUSE', 'CDEPEND',
'PDEPEND', 'PROVIDE', 'EAPI',
'PROPERTIES', 'UNUSED_06', 'UNUSED_05', 'UNUSED_04',
'UNUSED_03', 'UNUSED_02', 'UNUSED_01',
# Number of keys in an auxdb record.
auxdbkeylen=len(auxdbkeys)
6981 from portage.dbapi import dbapi
6982 from portage.dbapi.virtual import fakedbapi
6983 from portage.dbapi.bintree import bindbapi, binarytree
6984 from portage.dbapi.vartree import vardbapi, vartree, dblink
6985 from portage.dbapi.porttree import close_portdbapi_caches, portdbapi, portagetree
class FetchlistDict(UserDict.DictMixin):
	"""This provides a mapping interface to retrieve fetch lists. It's used
	to allow portage.manifest.Manifest to access fetch lists via a standard
	mapping interface rather than use the dbapi directly."""
	def __init__(self, pkgdir, settings, mydbapi):
		"""pkgdir is a directory containing ebuilds and settings is passed into
		portdbapi.getfetchlist for __getitem__ calls."""
		self.pkgdir = pkgdir
		# "category/package" derived from the final two path components.
		self.cp = os.sep.join(pkgdir.split(os.sep)[-2:])
		self.settings = settings
		# Repository root: two directories above pkgdir.
		self.mytree = os.path.realpath(os.path.dirname(os.path.dirname(pkgdir)))
		self.portdb = mydbapi
	def __getitem__(self, pkg_key):
		"""Returns the complete fetch list for a given package."""
		return self.portdb.getFetchMap(pkg_key, mytree=self.mytree).keys()
	def __contains__(self, cpv):
		return cpv in self.keys()
	def has_key(self, pkg_key):
		"""Returns true if the given package exists within pkgdir."""
		return pkg_key in self
	# NOTE(review): the "def keys(self):" header is elided in this listing.
		"""Returns keys for all packages within pkgdir"""
		return self.portdb.cp_list(self.cp, mytree=self.mytree)
def pkgmerge(mytbz2, myroot, mysettings, mydbapi=None,
	vartree=None, prev_mtimes=None, blockers=None):
	"""will merge a .tbz2 file, returning a list of runtime dependencies
	that must be satisfied, or None if there was a merge error. This
	code assumes the package exists.

	NOTE(review): this listing elides several lines; indentation below is
	reconstructed and elisions are marked where evident."""
	# (default resolution for mydbapi is partly elided here)
		mydbapi = db[myroot]["bintree"].dbapi
	# (the "if vartree is None:" guard is elided here)
		vartree = db[myroot]["vartree"]
	if mytbz2[-5:]!=".tbz2":
		print "!!! Not a .tbz2 file"
		# (error return elided here)
	did_merge_phase = False
	""" Don't lock the tbz2 file because the filesystem could be readonly or
	shared by a cluster."""
	#tbz2_lock = portage.locks.lockfile(mytbz2, wantnewlockfile=1)
	mypkg = os.path.basename(mytbz2)[:-5]
	xptbz2 = portage.xpak.tbz2(mytbz2)
	mycat = xptbz2.getfile("CATEGORY")
	# (missing-CATEGORY guard elided here)
		writemsg("!!! CATEGORY info missing from info chunk, aborting...\n",
		# (noiselevel continuation and error return elided here)
	mycat = mycat.strip()

	# These are the same directories that would be used at build time.
	builddir = os.path.join(
		mysettings["PORTAGE_TMPDIR"], "portage", mycat, mypkg)
	catdir = os.path.dirname(builddir)
	pkgloc = os.path.join(builddir, "image")
	infloc = os.path.join(builddir, "build-info")
	myebuild = os.path.join(
		infloc, os.path.basename(mytbz2)[:-4] + "ebuild")
	portage.util.ensure_dirs(os.path.dirname(catdir),
		uid=portage_uid, gid=portage_gid, mode=070, mask=0)
	catdir_lock = portage.locks.lockdir(catdir)
	portage.util.ensure_dirs(catdir,
		uid=portage_uid, gid=portage_gid, mode=070, mask=0)
	# (try: elided here — rmtree may legitimately fail with ENOENT)
		shutil.rmtree(builddir)
	except (IOError, OSError), e:
		if e.errno != errno.ENOENT:
			# (re-raise elided here)
	for mydir in (builddir, pkgloc, infloc):
		portage.util.ensure_dirs(mydir, uid=portage_uid,
			gid=portage_gid, mode=0755)
	writemsg_stdout(">>> Extracting info\n")
	xptbz2.unpackinfo(infloc)
	mysettings.setcpv(mycat + "/" + mypkg, mydb=mydbapi)
	# Store the md5sum in the vdb.
	fp = open(os.path.join(infloc, "BINPKGMD5"), "w")
	fp.write(str(portage.checksum.perform_md5(mytbz2))+"\n")
	# (fp.close() elided here)

	# This gives bashrc users an opportunity to do various things
	# such as remove binary packages after they're installed.
	mysettings["PORTAGE_BINPKG_FILE"] = mytbz2
	mysettings.backup_changes("PORTAGE_BINPKG_FILE")
	debug = mysettings.get("PORTAGE_DEBUG", "") == "1"

	# Eventually we'd like to pass in the saved ebuild env here.
	retval = doebuild(myebuild, "setup", myroot, mysettings, debug=debug,
		tree="bintree", mydbapi=mydbapi, vartree=vartree)
	if retval != os.EX_OK:
		writemsg("!!! Setup failed: %s\n" % retval, noiselevel=-1)
		# (error return elided here)

	writemsg_stdout(">>> Extracting %s\n" % mypkg)
	retval = portage.process.spawn_bash(
		"bzip2 -dqc -- '%s' | tar -xp -C '%s' -f -" % (mytbz2, pkgloc),
		env=mysettings.environ())
	if retval != os.EX_OK:
		writemsg("!!! Error Extracting '%s'\n" % mytbz2, noiselevel=-1)
		# (error return elided here)
	#portage.locks.unlockfile(tbz2_lock)
	# (tbz2_lock reset elided here)

	mylink = dblink(mycat, mypkg, myroot, mysettings, vartree=vartree,
		treetype="bintree", blockers=blockers)
	retval = mylink.merge(pkgloc, infloc, myroot, myebuild, cleanup=0,
		mydbapi=mydbapi, prev_mtimes=prev_mtimes)
	did_merge_phase = True
	success = retval == os.EX_OK
	# (return / finally: boundary elided here)
	mysettings.pop("PORTAGE_BINPKG_FILE", None)
	# (lock guard elided here)
		portage.locks.unlockfile(tbz2_lock)
	if not did_merge_phase:
		# The merge phase handles this already. Callers don't know how
		# far this function got, so we have to call elog_process() here
		# so that it's only called once.
		from portage.elog import elog_process
		elog_process(mycat + "/" + mypkg, mysettings)
	# (success-cleanup try: elided here)
		shutil.rmtree(builddir)
	except (IOError, OSError), e:
		if e.errno != errno.ENOENT:
			# (re-raise elided here)
def deprecated_profile_check():
	"""Warn (via writemsg) when the active profile is flagged as
	deprecated, printing the suggested replacement profile and any
	upgrade steps found in the deprecation file.
	NOTE(review): a few lines are elided in this listing."""
	if not os.access(DEPRECATED_PROFILE_FILE, os.R_OK):
		# (early return elided here)
	deprecatedfile = open(DEPRECATED_PROFILE_FILE, "r")
	dcontent = deprecatedfile.readlines()
	deprecatedfile.close()
	writemsg(red("\n!!! Your current profile is deprecated and not supported anymore.\n"),
		# (noiselevel continuation elided here)
	writemsg(red("!!! Please refer to the Gentoo Upgrading Guide.\n"),
		# (noiselevel continuation elided here)
	# The first line of the file names the replacement profile.
	newprofile = dcontent[0]
	writemsg(red("!!! Please upgrade to the following profile if possible:\n"),
		# (noiselevel continuation elided here)
	writemsg(8*" "+green(newprofile)+"\n", noiselevel=-1)
	if len(dcontent) > 1:
		# Remaining lines are free-form upgrade instructions.
		writemsg("To upgrade do the following steps:\n", noiselevel=-1)
		for myline in dcontent[1:]:
			writemsg(myline, noiselevel=-1)
		writemsg("\n\n", noiselevel=-1)
# gets virtual package settings
def getvirtuals(myroot):
	# DEPRECATED shim: delegates to the global settings object.
	# NOTE(review): one line (presumably "global settings") is elided here.
	writemsg("--- DEPRECATED call to getvirtual\n")
	return settings.getvirtuals(myroot)
def commit_mtimedb(mydict=None, filename=None):
	# Serialize the global mtimedb (or an explicit mydict) to filename
	# with pickle, then apply secpass permissions to the result.
	# NOTE(review): several lines are elided in this listing.
	# (the "if mydict is None:" guard is elided here)
		if "mtimedb" not in globals() or mtimedb is None:
			# (early return elided here)
	if filename is None:
		# (global lookup elided here)
		filename = mtimedbfile
	mydict["version"] = VERSION
	d = {} # for full backward compat, pickle it as a plain dict object.
	# (d.update(mydict) and the try: are elided here)
		f = atomic_ofstream(filename)
		# Protocol -1: newest available pickle protocol.
		pickle.dump(d, f, -1)
		# (f.close() elided here)
		portage.util.apply_secpass_permissions(filename, uid=uid, gid=portage_gid, mode=0664)
	except (IOError, OSError), e:
		# (error handling elided here)
# NOTE(review): the "def portageexit():" header line is elided in this
# listing; the following three lines are that function's body.
global uid,portage_gid,portdb,db
if secpass and os.environ.get("SANDBOX_ON") != "1":
	# Flush portdbapi caches before interpreter shutdown, but not when
	# running inside the sandbox.
	close_portdbapi_caches()

# Register the handler so it runs automatically at exit.
atexit_register(portageexit)
def _global_updates(trees, prev_mtimes):
	"""
	Perform new global updates if they exist in $PORTDIR/profiles/updates/.

	@param trees: A dictionary containing portage trees.
	@type trees: dict
	@param prev_mtimes: A dictionary containing mtimes of files located in
		$PORTDIR/profiles/updates/.
	@type prev_mtimes: dict
	@rtype: None or List
	@return: None if there were no updates, otherwise a list of update
		commands that have been performed.

	NOTE(review): this listing elides several lines; indentation below is
	reconstructed and elisions are marked where evident.
	"""
	# only do this if we're root and not running repoman/ebuild digest
	if secpass < 2 or "SANDBOX_ACTIVE" in os.environ:
		# (early return elided here)
	mysettings = trees["/"]["vartree"].settings
	updpath = os.path.join(mysettings["PORTDIR"], "profiles", "updates")

	# (try: elided here)
		if mysettings["PORTAGE_CALLER"] == "fixpackages":
			# fixpackages ignores the mtime cache and reprocesses everything.
			update_data = grab_updates(updpath)
		# (else: elided here)
			update_data = grab_updates(updpath, prev_mtimes)
	except portage.exception.DirectoryNotFound:
		writemsg("--- 'profiles/updates' is empty or " + \
			"not available. Empty portage tree?\n", noiselevel=1)
		# (return elided here)
	if len(update_data) > 0:
		do_upgrade_packagesmessage = 0
		# (myupd / timestamps initialization elided here)
		for mykey, mystat, mycontent in update_data:
			writemsg_stdout("\n\n")
			writemsg_stdout(green("Performing Global Updates: ")+bold(mykey)+"\n")
			writemsg_stdout("(Could take a couple of minutes if you have a lot of binary packages.)\n")
			# NOTE(review): confirm this legend matches the characters
			# actually emitted below (a "%" is printed for binary moves).
			writemsg_stdout(" "+bold(".")+"='update pass' "+bold("*")+"='binary update' "+bold("@")+"='/var/db move'\n"+" "+bold("s")+"='/var/db SLOT move' "+bold("S")+"='binary SLOT move' "+bold("p")+"='update /etc/portage/package.*'\n")
			valid_updates, errors = parse_updates(mycontent)
			myupd.extend(valid_updates)
			writemsg_stdout(len(valid_updates) * "." + "\n")
			if len(errors) == 0:
				# Update our internal mtime since we
				# processed all of our directives.
				timestamps[mykey] = long(mystat.st_mtime)
			# (error-reporting loop header elided here)
				writemsg("%s\n" % msg, noiselevel=-1)

		# Apply the update commands to the world file.
		world_file = os.path.join(root, WORLD_FILE)
		world_list = grabfile(world_file)
		world_modified = False
		for update_cmd in myupd:
			for pos, atom in enumerate(world_list):
				new_atom = update_dbentry(update_cmd, atom)
				if atom != new_atom:
					world_list[pos] = new_atom
					world_modified = True
		# (the "if world_modified:" guard is elided here)
			write_atomic(world_file,
				"".join("%s\n" % (x,) for x in world_list))

		update_config_files("/",
			mysettings.get("CONFIG_PROTECT","").split(),
			mysettings.get("CONFIG_PROTECT_MASK","").split(),
			# (final argument elided here)

		trees["/"]["bintree"] = binarytree("/", mysettings["PKGDIR"],
			settings=mysettings)
		vardb = trees["/"]["vartree"].dbapi
		bindb = trees["/"]["bintree"].dbapi
		if not os.access(bindb.bintree.pkgdir, os.W_OK):
			# (bindb disabled when pkgdir is read-only; elided here)
		for update_cmd in myupd:
			if update_cmd[0] == "move":
				moves = vardb.move_ent(update_cmd)
				# (the "if moves:" guard is elided here)
					writemsg_stdout(moves * "@")
				# (the "if bindb:" guard is elided here)
					moves = bindb.move_ent(update_cmd)
					writemsg_stdout(moves * "%")
			elif update_cmd[0] == "slotmove":
				moves = vardb.move_slot_ent(update_cmd)
				# (guards elided here)
					writemsg_stdout(moves * "s")
					moves = bindb.move_slot_ent(update_cmd)
					writemsg_stdout(moves * "S")

		# The above global updates proceed quickly, so they
		# are considered a single mtimedb transaction.
		if len(timestamps) > 0:
			# We do not update the mtime in the mtimedb
			# until after _all_ of the above updates have
			# been processed because the mtimedb will
			# automatically commit when killed by ctrl C.
			for mykey, mtime in timestamps.iteritems():
				prev_mtimes[mykey] = mtime

		# We gotta do the brute force updates for these now.
		if mysettings["PORTAGE_CALLER"] == "fixpackages" or \
			"fixpackages" in mysettings.features:
			def onProgress(maxval, curval):
				writemsg_stdout("*")
			vardb.update_ents(myupd, onProgress=onProgress)
			# (the "if bindb:" guard is elided here)
				bindb.update_ents(myupd, onProgress=onProgress)
		# (else: elided here)
			do_upgrade_packagesmessage = 1

		# Update progress above is indicated by characters written to stdout so
		# we print a couple new lines here to separate the progress output from
		# what follows. (remainder elided here)
		if do_upgrade_packagesmessage and bindb and \
			# (bintree-population test elided here)
			writemsg_stdout(" ** Skipping packages. Run 'fixpackages' or set it in FEATURES to fix the")
			writemsg_stdout("\n tbz2's in the packages directory. "+bold("Note: This can take a very long time."))
			writemsg_stdout("\n")
7310 #continue setting up other trees
class MtimeDB(dict):
	# A dict subclass persisted to disk with pickle; holds portage's
	# mtime/resume state. commit() only writes when the contents differ
	# from the last-loaded/last-committed snapshot (_clean_data).
	# NOTE(review): several lines (including some method headers and the
	# open() call in _load) are elided in this listing.
	def __init__(self, filename):
		# filename: path the database is loaded from and committed to.
		self.filename = filename
		self._load(filename)

	def _load(self, filename):
		# (file open and try: are elided here)
			mypickle = pickle.Unpickler(f)
			# Prevent the pickle stream from resolving arbitrary globals
			# (i.e. from instantiating arbitrary classes).
			mypickle.find_global = None
		except (IOError, OSError, EOFError, pickle.UnpicklingError), e:
			if isinstance(e, pickle.UnpicklingError):
				writemsg("!!! Error loading '%s': %s\n" % \
					(filename, str(e)), noiselevel=-1)
		# (legacy-format migration guard elided here)
			d["updates"] = d["old"]
		d.setdefault("starttime", 0)
		d.setdefault("version", "")
		for k in ("info", "ldpath", "updates"):
			# (per-key defaulting elided here)
		mtimedbkeys = set(("info", "ldpath", "resume", "resume_backup",
			"starttime", "updates", "version"))
		# (iteration over existing keys elided here)
			if k not in mtimedbkeys:
				writemsg("Deleting invalid mtimedb key: %s\n" % str(k))
		# Keep a pristine copy so commit() can detect changes.
		self._clean_data = copy.deepcopy(d)

	# (the "def commit(self):" header is elided in this listing)
		if not self.filename:
			# (early return elided here)
		# Only commit if the internal state has changed.
		if d != self._clean_data:
			commit_mtimedb(mydict=d, filename=self.filename)
			self._clean_data = copy.deepcopy(d)
def create_trees(config_root=None, target_root=None, trees=None):
	# Build (or refresh) the per-ROOT trees mapping with lazily created
	# vartree/porttree/bintree/virtuals entries for "/" and, when
	# ROOT != "/", for the target root as well.
	# NOTE(review): several lines are elided in this listing; indentation
	# is reconstructed and elisions are marked inline.
	# (handling of a pre-existing trees argument is elided here)
		# clean up any existing portdbapi instances
		for myroot in trees:
			portdb = trees[myroot]["porttree"].dbapi
			portdb.close_caches()
			portdbapi.portdbapi_instances.remove(portdb)
			del trees[myroot]["porttree"], myroot, portdb

	settings = config(config_root=config_root, target_root=target_root,
		config_incrementals=portage.const.INCREMENTALS)
	# (intervening lines elided here)

	myroots = [(settings["ROOT"], settings)]
	if settings["ROOT"] != "/":
		settings = config(config_root=None, target_root="/",
			config_incrementals=portage.const.INCREMENTALS)
		# When ROOT != "/" we only want overrides from the calling
		# environment to apply to the config that's associated
		# with ROOT != "/", so we wipe out the "backupenv" for the
		# config that is associated with ROOT == "/" and regenerate
		# its incrementals.
		# Preserve backupenv values that are initialized in the config
		# constructor. Also, preserve XARGS since it is set by the
		# portage.data module.
		# (part of the whitelist setup is elided here)
		backupenv_whitelist = settings._environ_whitelist
		backupenv = settings.configdict["backupenv"]
		env_d = settings.configdict["env.d"]
		for k, v in os.environ.iteritems():
			if k in backupenv_whitelist:
				# (continue elided here)
			# (start of the drop condition is elided here)
				v == backupenv.get(k):
				backupenv.pop(k, None)
		settings.regenerate()
		# (intervening lines elided here)
		myroots.append((settings["ROOT"], settings))

	for myroot, mysettings in myroots:
		# Lazy singletons: each tree is constructed on first access.
		trees[myroot] = portage.util.LazyItemsDict(trees.get(myroot, None))
		trees[myroot].addLazySingleton("virtuals", mysettings.getvirtuals, myroot)
		trees[myroot].addLazySingleton(
			"vartree", vartree, myroot, categories=mysettings.categories,
			settings=mysettings)
		trees[myroot].addLazySingleton("porttree",
			portagetree, myroot, settings=mysettings)
		trees[myroot].addLazySingleton("bintree",
			binarytree, myroot, mysettings["PKGDIR"], settings=mysettings)
	# (return of trees elided here)
class _LegacyGlobalProxy(portage.util.ObjectProxy):
	"""Proxy for a deprecated module-level global.

	Instances of these serve as proxies to global variables
	that are initialized on demand.
	"""

	def __init__(self, name):
		portage.util.ObjectProxy.__init__(self)
		# Bypass the proxy's own attribute interception.
		object.__setattr__(self, '_name', name)

	def _get_target(self):
		# Resolving the target forces the legacy globals into existence,
		# then looks the real value up by the stored name.
		init_legacy_globals()
		return globals()[object.__getattribute__(self, '_name')]
class _PortdbProxy(portage.util.ObjectProxy):
	"""
	The portdb is initialized separately from the rest
	of the variables, since sometimes the other variables
	are needed while the portdb is not.
	"""

	def _get_target(self):
		# Make sure the legacy globals exist, then create the real
		# portdb exactly once and cache it in the module global.
		init_legacy_globals()
		global db, portdb, root, _portdb_initialized
		if not _portdb_initialized:
			portdb = db[root]["porttree"].dbapi
			_portdb_initialized = True
		# NOTE(review): the "return portdb" line is elided in this listing.
class _MtimedbProxy(portage.util.ObjectProxy):
	"""Lazily construct the global mtimedb on first access.

	The mtimedb is independent from the portdb and other globals.
	"""

	def __init__(self, name):
		portage.util.ObjectProxy.__init__(self)
		# Bypass the proxy's own attribute interception.
		object.__setattr__(self, '_name', name)

	def _get_target(self):
		global mtimedb, mtimedbfile, _mtimedb_initialized
		if not _mtimedb_initialized:
			# First access: build the database from its on-disk path.
			path = os.path.join("/",
				CACHE_PATH.lstrip(os.path.sep), "mtimedb")
			mtimedbfile = path
			mtimedb = MtimeDB(path)
			_mtimedb_initialized = True
		return globals()[object.__getattribute__(self, '_name')]
# Names of the deprecated module-level globals that are exposed through
# lazy proxies (see _LegacyGlobalProxy and _disable_legacy_globals).
_legacy_global_var_names = ("archlist", "db", "features",
	"groups", "mtimedb", "mtimedbfile", "pkglines",
	"portdb", "profiledir", "root", "selinux_enabled",
	"settings", "thirdpartymirrors", "usedefaults")
def _disable_legacy_globals():
	"""
	This deletes the ObjectProxy instances that are used
	for lazy initialization of legacy global variables.
	The purpose of deleting them is to prevent new code
	from referencing these deprecated variables.
	"""
	# The "global" declaration the original carried was unnecessary:
	# _legacy_global_var_names is only read here, never rebound.
	for k in _legacy_global_var_names:
		# pop() with a default tolerates names that were never installed.
		globals().pop(k, None)
7481 # Initialization of legacy globals. No functions/classes below this point
7482 # please! When the above functions and classes become independent of the
7483 # below global variables, it will be possible to make the below code
7484 # conditional on a backward compatibility flag (backward compatibility could
7485 # be disabled via an environment variable, for example). This will enable new
7486 # code that is aware of this flag to import portage without the unnecessary
7487 # overhead (and other issues!) of initializing the legacy globals.
def init_legacy_globals():
	"""Initialize the deprecated module-level globals (db, settings,
	root, portdb, etc.) on demand; idempotent via _globals_initialized.
	NOTE(review): several lines are elided in this listing; indentation
	is reconstructed and elisions are marked inline."""
	global _globals_initialized
	if _globals_initialized:
		# (return elided here)
	_globals_initialized = True

	global db, settings, root, portdb, selinux_enabled, mtimedbfile, mtimedb, \
		archlist, features, groups, pkglines, thirdpartymirrors, usedefaults, \
		profiledir, flushmtimedb

	# Portage needs to ensure a sane umask for the files it creates.
	# (umask call and kwargs initialization elided here)
	for k, envvar in (("config_root", "PORTAGE_CONFIGROOT"), ("target_root", "ROOT")):
		kwargs[k] = os.environ.get(envvar, "/")

	# Flag re-entrant tree creation while the globals are being built.
	global _initializing_globals
	_initializing_globals = True
	db = create_trees(**kwargs)
	del _initializing_globals

	settings = db["/"]["vartree"].settings
	# (loop selecting a non-"/" ROOT's settings is partly elided here)
		settings = db[myroot]["vartree"].settings
	root = settings["ROOT"]

	# ========================================================================
	# These attributes should not be used
	# within Portage under any circumstances.
	# ========================================================================
	archlist = settings.archlist()
	features = settings.features
	groups = settings["ACCEPT_KEYWORDS"].split()
	pkglines = settings.packages
	selinux_enabled = settings.selinux_enabled()
	thirdpartymirrors = settings.thirdpartymirrors()
	usedefaults = settings.use_defs
	if os.path.isdir(PROFILE_PATH):
		profiledir = PROFILE_PATH
	def flushmtimedb(record):
		# Deprecated no-op retained for backward compatibility.
		writemsg("portage.flushmtimedb() is DEPRECATED\n")
	# ========================================================================
	# These attributes should not be used
	# within Portage under any circumstances.
	# ========================================================================
# Lazy-initialization state for the legacy globals: each proxy below
# resolves to the real value on first use (see the proxy classes above).
_mtimedb_initialized = False
mtimedb = _MtimedbProxy("mtimedb")
mtimedbfile = _MtimedbProxy("mtimedbfile")

_portdb_initialized = False
portdb = _PortdbProxy()

_globals_initialized = False

# Install proxies for the remaining legacy globals.
# NOTE(review): the tail of this name tuple (and the "):"
# closing the for statement) is elided in this listing.
for k in ("db", "settings", "root", "selinux_enabled",
	"archlist", "features", "groups",
	"pkglines", "thirdpartymirrors", "usedefaults", "profiledir",
	globals()[k] = _LegacyGlobalProxy(k)
7564 # ============================================================================
7565 # ============================================================================