1 # portage.py -- core Portage functionality
2 # Copyright 1998-2004 Gentoo Foundation
3 # Distributed under the terms of the GNU General Public License v2
7 VERSION="$Rev$"[6:-2] + "-svn"
9 # ===========================================================================
10 # START OF IMPORTS -- START OF IMPORTS -- START OF IMPORTS -- START OF IMPORT
11 # ===========================================================================
23 import cPickle as pickle
29 from time import sleep
30 from random import shuffle
32 from itertools import chain, izip
35 except ImportError, e:
36 sys.stderr.write("\n\n")
37 sys.stderr.write("!!! Failed to complete python imports. These are internal modules for\n")
38 sys.stderr.write("!!! python and failure here indicates that you have a problem with python\n")
39 sys.stderr.write("!!! itself and thus portage is not able to continue processing.\n\n")
41 sys.stderr.write("!!! You might consider starting python with verbose flags to see what has\n")
42 sys.stderr.write("!!! gone wrong. Here is the information we got for this exception:\n")
43 sys.stderr.write(" "+str(e)+"\n\n");
47 if platform.system() in ["FreeBSD"]:
50 def _chflags(path, flags, opts=""):
51 cmd = "chflags %s %o '%s'" % (opts, flags, path)
52 status, output = commands.getstatusoutput(cmd)
53 if os.WIFEXITED(status) and os.WEXITSTATUS(status) == os.EX_OK:
55 # Try to generate an ENOENT error if appropriate.
60 # Make sure the binary exists.
61 if not portage.process.find_binary("chflags"):
62 raise portage.exception.CommandNotFound("chflags")
63 # Now we're not sure exactly why it failed or what
64 # the real errno was, so just report EPERM.
65 e = OSError(errno.EPERM, output)
70 def _lchflags(path, flags):
71 return _chflags(path, flags, opts="-h")
72 bsd_chflags.chflags = _chflags
73 bsd_chflags.lchflags = _lchflags
76 from portage.cache.cache_errors import CacheError
77 import portage.cvstree
79 import portage.getbinpkg
81 from portage.dep import dep_getcpv, dep_getkey, get_operator, \
82 isjustname, isspecific, isvalidatom, \
83 match_from_list, match_to_list, best_match_to_list
85 # XXX: This needs to get cleaned up.
87 from portage.output import bold, colorize, green, red, yellow
90 from portage.const import VDB_PATH, PRIVATE_PATH, CACHE_PATH, DEPCACHE_PATH, \
91 USER_CONFIG_PATH, MODULES_FILE_PATH, CUSTOM_PROFILE_PATH, PORTAGE_BASE_PATH, \
92 PORTAGE_BIN_PATH, PORTAGE_PYM_PATH, PROFILE_PATH, LOCALE_DATA_PATH, \
93 EBUILD_SH_BINARY, SANDBOX_BINARY, BASH_BINARY, \
94 MOVE_BINARY, PRELINK_BINARY, WORLD_FILE, MAKE_CONF_FILE, MAKE_DEFAULTS_FILE, \
95 DEPRECATED_PROFILE_FILE, USER_VIRTUALS_FILE, EBUILD_SH_ENV_FILE, \
96 INVALID_ENV_FILE, CUSTOM_MIRRORS_FILE, CONFIG_MEMORY_FILE,\
97 INCREMENTALS, EAPI, MISC_SH_BINARY, REPO_NAME_LOC, REPO_NAME_FILE
99 from portage.data import ostype, lchown, userland, secpass, uid, wheelgid, \
100 portage_uid, portage_gid, userpriv_groups
101 from portage.manifest import Manifest
104 from portage.util import atomic_ofstream, apply_secpass_permissions, apply_recursive_permissions, \
105 dump_traceback, getconfig, grabdict, grabdict_package, grabfile, grabfile_package, \
106 map_dictlist_vals, new_protect_filename, normalize_path, \
107 pickle_read, pickle_write, stack_dictlist, stack_dicts, stack_lists, \
108 unique_array, varexpand, writedict, writemsg, writemsg_stdout, write_atomic
109 import portage.exception
111 import portage.process
112 from portage.process import atexit_register, run_exitfuncs
113 from portage.locks import unlockfile,unlockdir,lockfile,lockdir
114 import portage.checksum
115 from portage.checksum import perform_md5,perform_checksum,prelink_capable
116 import portage.eclass_cache
117 from portage.localization import _
118 from portage.update import dep_transform, fixdbentries, grab_updates, \
119 parse_updates, update_config_files, update_dbentries, update_dbentry
121 # Need these functions directly in portage namespace to not break every external tool in existence
122 from portage.versions import best, catpkgsplit, catsplit, pkgcmp, \
123 pkgsplit, vercmp, ververify
125 # endversion and endversion_keys are for backward compatibility only.
126 from portage.versions import endversion_keys
127 from portage.versions import suffix_value as endversion
129 except ImportError, e:
130 sys.stderr.write("\n\n")
131 sys.stderr.write("!!! Failed to complete portage imports. There are internal modules for\n")
132 sys.stderr.write("!!! portage and failure here indicates that you have a problem with your\n")
133 sys.stderr.write("!!! installation of portage. Please try a rescue portage located in the\n")
134 sys.stderr.write("!!! portage tree under '/usr/portage/sys-apps/portage/files/' (default).\n")
135 sys.stderr.write("!!! There is a README.RESCUE file that details the steps required to perform\n")
136 sys.stderr.write("!!! a recovery of portage.\n")
137 sys.stderr.write(" "+str(e)+"\n\n")
142 import portage._selinux as selinux
144 writemsg("!!! SELinux not loaded: %s\n" % str(e), noiselevel=-1)
149 # ===========================================================================
150 # END OF IMPORTS -- END OF IMPORTS -- END OF IMPORTS -- END OF IMPORTS -- END
151 # ===========================================================================
155 modname = ".".join(name.split(".")[:-1])
156 mod = __import__(modname)
157 components = name.split('.')
158 for comp in components[1:]:
159 mod = getattr(mod, comp)
162 def best_from_dict(key, top_dict, key_order, EmptyOnError=1, FullCopy=1, AllowEmpty=1):
164 if x in top_dict and key in top_dict[x]:
166 return copy.deepcopy(top_dict[x][key])
168 return top_dict[x][key]
172 raise KeyError("Key not found in list; '%s'" % key)
175 "this fixes situations where the current directory doesn't exist"
178 except OSError: #dir doesn't exist
183 def abssymlink(symlink):
184 "This reads symlinks, resolving the relative symlinks, and returning the absolute."
185 mylink=os.readlink(symlink)
187 mydir=os.path.dirname(symlink)
188 mylink=mydir+"/"+mylink
189 return os.path.normpath(mylink)
195 def cacheddir(my_original_path, ignorecvs, ignorelist, EmptyOnError, followSymlinks=True):
196 global cacheHit,cacheMiss,cacheStale
197 mypath = normalize_path(my_original_path)
198 if mypath in dircache:
200 cached_mtime, list, ftype = dircache[mypath]
203 cached_mtime, list, ftype = -1, [], []
205 pathstat = os.stat(mypath)
206 if stat.S_ISDIR(pathstat[stat.ST_MODE]):
207 mtime = pathstat.st_mtime
209 raise portage.exception.DirectoryNotFound(mypath)
210 except EnvironmentError, e:
211 if e.errno == portage.exception.PermissionDenied.errno:
212 raise portage.exception.PermissionDenied(mypath)
217 except portage.exception.PortageException:
	# Python returns mtime in seconds, so if it was changed in the last few seconds, it could be invalid
222 if mtime != cached_mtime or time.time() - mtime < 4:
223 if mypath in dircache:
226 list = os.listdir(mypath)
227 except EnvironmentError, e:
228 if e.errno != errno.EACCES:
231 raise portage.exception.PermissionDenied(mypath)
236 pathstat = os.stat(mypath+"/"+x)
238 pathstat = os.lstat(mypath+"/"+x)
240 if stat.S_ISREG(pathstat[stat.ST_MODE]):
242 elif stat.S_ISDIR(pathstat[stat.ST_MODE]):
244 elif stat.S_ISLNK(pathstat[stat.ST_MODE]):
248 except (IOError, OSError):
250 dircache[mypath] = mtime, list, ftype
254 for x in range(0, len(list)):
255 if(ignorecvs and (len(list[x]) > 2) and (list[x][:2]!=".#")):
256 ret_list.append(list[x])
257 ret_ftype.append(ftype[x])
258 elif (list[x] not in ignorelist):
259 ret_list.append(list[x])
260 ret_ftype.append(ftype[x])
262 writemsg("cacheddirStats: H:%d/M:%d/S:%d\n" % (cacheHit, cacheMiss, cacheStale),10)
263 return ret_list, ret_ftype
265 def listdir(mypath, recursive=False, filesonly=False, ignorecvs=False, ignorelist=[], followSymlinks=True,
266 EmptyOnError=False, dirsonly=False):
268 Portage-specific implementation of os.listdir
270 @param mypath: Path whose contents you wish to list
272 @param recursive: Recursively scan directories contained within mypath
273 @type recursive: Boolean
	@param filesonly: Only return files, not more directories
275 @type filesonly: Boolean
276 @param ignorecvs: Ignore CVS directories ('CVS','.svn','SCCS')
277 @type ignorecvs: Boolean
278 @param ignorelist: List of filenames/directories to exclude
279 @type ignorelist: List
280 @param followSymlinks: Follow Symlink'd files and directories
281 @type followSymlinks: Boolean
282 @param EmptyOnError: Return [] if an error occurs.
283 @type EmptyOnError: Boolean
284 @param dirsonly: Only return directories.
285 @type dirsonly: Boolean
287 @returns: A list of files and directories (or just files or just directories) or an empty list.
290 list, ftype = cacheddir(mypath, ignorecvs, ignorelist, EmptyOnError, followSymlinks)
297 if not (filesonly or dirsonly or recursive):
303 if ftype[x]==1 and not (ignorecvs and os.path.basename(list[x]) in ('CVS','.svn','SCCS')):
304 l,f = cacheddir(mypath+"/"+list[x], ignorecvs, ignorelist, EmptyOnError,
308 for y in range(0,len(l)):
309 l[y]=list[x]+"/"+l[y]
315 for x in range(0,len(ftype)):
317 rlist=rlist+[list[x]]
320 for x in range(0, len(ftype)):
322 rlist = rlist + [list[x]]
328 def flatten(mytokens):
329 """this function now turns a [1,[2,3]] list into
330 a [1,2,3] list and returns it."""
333 if isinstance(x, list):
334 newlist.extend(flatten(x))
339 #beautiful directed graph object
341 class digraph(object):
343 """Create an empty digraph"""
345 # { node : ( { child : priority } , { parent : priority } ) }
349 def add(self, node, parent, priority=0):
350 """Adds the specified node with the specified parent.
352 If the dep is a soft-dep and the node already has a hard
353 relationship to the parent, the relationship is left as hard."""
355 if node not in self.nodes:
356 self.nodes[node] = ({}, {}, node)
357 self.order.append(node)
362 if parent not in self.nodes:
363 self.nodes[parent] = ({}, {}, parent)
364 self.order.append(parent)
366 if parent in self.nodes[node][1]:
367 if priority > self.nodes[node][1][parent]:
368 self.nodes[node][1][parent] = priority
370 self.nodes[node][1][parent] = priority
372 if node in self.nodes[parent][0]:
373 if priority > self.nodes[parent][0][node]:
374 self.nodes[parent][0][node] = priority
376 self.nodes[parent][0][node] = priority
378 def remove(self, node):
379 """Removes the specified node from the digraph, also removing
		any ties to other nodes in the digraph. Raises KeyError if the
381 node doesn't exist."""
383 if node not in self.nodes:
386 for parent in self.nodes[node][1]:
387 del self.nodes[parent][0][node]
388 for child in self.nodes[node][0]:
389 del self.nodes[child][1][node]
392 self.order.remove(node)
394 def difference_update(self, t):
396 Remove all given nodes from node_set. This is more efficient
397 than multiple calls to the remove() method.
399 if isinstance(t, (list, tuple)) or \
400 not hasattr(t, "__contains__"):
403 for node in self.order:
407 for parent in self.nodes[node][1]:
408 del self.nodes[parent][0][node]
409 for child in self.nodes[node][0]:
410 del self.nodes[child][1][node]
414 def remove_edge(self, child, parent):
416 Remove edge in the direction from child to parent. Note that it is
417 possible for a remaining edge to exist in the opposite direction.
418 Any endpoint vertices that become isolated will remain in the graph.
421 # Nothing should be modified when a KeyError is raised.
422 for k in parent, child:
423 if k not in self.nodes:
426 # Make sure the edge exists.
427 if child not in self.nodes[parent][0]:
428 raise KeyError(child)
429 if parent not in self.nodes[child][1]:
430 raise KeyError(parent)
433 del self.nodes[child][1][parent]
434 del self.nodes[parent][0][child]
437 return iter(self.order)
	def contains(self, node):
		"""Return True if the given node is present in the digraph."""
		return node in self.nodes
443 def get(self, key, default=None):
444 node_data = self.nodes.get(key, self)
445 if node_data is self:
450 """Return a list of all nodes in the graph"""
453 def child_nodes(self, node, ignore_priority=None):
454 """Return all children of the specified node"""
455 if ignore_priority is None:
456 return self.nodes[node][0].keys()
458 for child, priority in self.nodes[node][0].iteritems():
459 if priority > ignore_priority:
460 children.append(child)
	def parent_nodes(self, node):
		"""Return all parents of the specified node.

		Raises KeyError if the node is not in the graph.
		"""
		return self.nodes[node][1].keys()
467 def leaf_nodes(self, ignore_priority=None):
468 """Return all nodes that have no children
470 If ignore_soft_deps is True, soft deps are not counted as
471 children in calculations."""
474 for node in self.order:
476 for child in self.nodes[node][0]:
477 if self.nodes[node][0][child] > ignore_priority:
481 leaf_nodes.append(node)
484 def root_nodes(self, ignore_priority=None):
485 """Return all nodes that have no parents.
487 If ignore_soft_deps is True, soft deps are not counted as
488 parents in calculations."""
491 for node in self.order:
493 for parent in self.nodes[node][1]:
494 if self.nodes[node][1][parent] > ignore_priority:
498 root_nodes.append(node)
502 """Checks if the digraph is empty"""
503 return len(self.nodes) == 0
508 for k, v in self.nodes.iteritems():
509 clone.nodes[k] = (v[0].copy(), v[1].copy(), v[2])
510 clone.order = self.order[:]
513 # Backward compatibility
516 allzeros = leaf_nodes
518 __contains__ = contains
522 def delnode(self, node):
529 leaf_nodes = self.leaf_nodes()
534 def hasallzeros(self, ignore_priority=None):
535 return len(self.leaf_nodes(ignore_priority=ignore_priority)) == \
538 def debug_print(self):
539 for node in self.nodes:
541 if self.nodes[node][0]:
544 print "(no children)"
545 for child in self.nodes[node][0]:
547 print "(%s)" % self.nodes[node][0][child]
549 #parse /etc/env.d and generate /etc/profile.env
551 def env_update(makelinks=1, target_root=None, prev_mtimes=None, contents=None,
552 env=None, writemsg_level=portage.util.writemsg_level):
553 if target_root is None:
555 target_root = settings["ROOT"]
556 if prev_mtimes is None:
558 prev_mtimes = mtimedb["ldpath"]
561 envd_dir = os.path.join(target_root, "etc", "env.d")
562 portage.util.ensure_dirs(envd_dir, mode=0755)
563 fns = listdir(envd_dir, EmptyOnError=1)
569 if not x[0].isdigit() or not x[1].isdigit():
571 if x.startswith(".") or x.endswith("~") or x.endswith(".bak"):
577 space_separated = set(["CONFIG_PROTECT", "CONFIG_PROTECT_MASK"])
578 colon_separated = set(["ADA_INCLUDE_PATH", "ADA_OBJECTS_PATH",
579 "CLASSPATH", "INFODIR", "INFOPATH", "KDEDIRS", "LDPATH", "MANPATH",
580 "PATH", "PKG_CONFIG_PATH", "PRELINK_PATH", "PRELINK_PATH_MASK",
581 "PYTHONPATH", "ROOTPATH"])
586 file_path = os.path.join(envd_dir, x)
588 myconfig = getconfig(file_path, expand=False)
589 except portage.exception.ParseError, e:
590 writemsg("!!! '%s'\n" % str(e), noiselevel=-1)
594 # broken symlink or file removed by a concurrent process
595 writemsg("!!! File Not Found: '%s'\n" % file_path, noiselevel=-1)
597 config_list.append(myconfig)
598 if "SPACE_SEPARATED" in myconfig:
599 space_separated.update(myconfig["SPACE_SEPARATED"].split())
600 del myconfig["SPACE_SEPARATED"]
601 if "COLON_SEPARATED" in myconfig:
602 colon_separated.update(myconfig["COLON_SEPARATED"].split())
603 del myconfig["COLON_SEPARATED"]
607 for var in space_separated:
609 for myconfig in config_list:
611 for item in myconfig[var].split():
612 if item and not item in mylist:
614 del myconfig[var] # prepare for env.update(myconfig)
616 env[var] = " ".join(mylist)
617 specials[var] = mylist
619 for var in colon_separated:
621 for myconfig in config_list:
623 for item in myconfig[var].split(":"):
624 if item and not item in mylist:
626 del myconfig[var] # prepare for env.update(myconfig)
628 env[var] = ":".join(mylist)
629 specials[var] = mylist
631 for myconfig in config_list:
632 """Cumulative variables have already been deleted from myconfig so that
633 they won't be overwritten by this dict.update call."""
636 ldsoconf_path = os.path.join(target_root, "etc", "ld.so.conf")
638 myld = open(ldsoconf_path)
639 myldlines=myld.readlines()
643 #each line has at least one char (a newline)
647 except (IOError, OSError), e:
648 if e.errno != errno.ENOENT:
652 ld_cache_update=False
654 newld = specials["LDPATH"]
656 #ld.so.conf needs updating and ldconfig needs to be run
657 myfd = atomic_ofstream(ldsoconf_path)
658 myfd.write("# ld.so.conf autogenerated by env-update; make all changes to\n")
659 myfd.write("# contents of /etc/env.d directory\n")
660 for x in specials["LDPATH"]:
665 # Update prelink.conf if we are prelink-enabled
667 newprelink = atomic_ofstream(
668 os.path.join(target_root, "etc", "prelink.conf"))
669 newprelink.write("# prelink.conf autogenerated by env-update; make all changes to\n")
670 newprelink.write("# contents of /etc/env.d directory\n")
672 for x in ["/bin","/sbin","/usr/bin","/usr/sbin","/lib","/usr/lib"]:
673 newprelink.write("-l "+x+"\n");
674 for x in specials["LDPATH"]+specials["PATH"]+specials["PRELINK_PATH"]:
680 for y in specials["PRELINK_PATH_MASK"]:
689 newprelink.write("-h "+x+"\n")
690 for x in specials["PRELINK_PATH_MASK"]:
691 newprelink.write("-b "+x+"\n")
694 # Portage stores mtimes with 1 second granularity but in >=python-2.5 finer
695 # granularity is possible. In order to avoid the potential ambiguity of
696 # mtimes that differ by less than 1 second, sleep here if any of the
697 # directories have been modified during the current second.
698 sleep_for_mtime_granularity = False
699 current_time = long(time.time())
700 mtime_changed = False
702 for lib_dir in portage.util.unique_array(specials["LDPATH"]+['usr/lib','usr/lib64','usr/lib32','lib','lib64','lib32']):
703 x = os.path.join(target_root, lib_dir.lstrip(os.sep))
705 newldpathtime = long(os.stat(x).st_mtime)
706 lib_dirs.add(normalize_path(x))
708 if oe.errno == errno.ENOENT:
713 # ignore this path because it doesn't exist
716 if newldpathtime == current_time:
717 sleep_for_mtime_granularity = True
719 if prev_mtimes[x] == newldpathtime:
722 prev_mtimes[x] = newldpathtime
725 prev_mtimes[x] = newldpathtime
729 ld_cache_update = True
732 not ld_cache_update and \
733 contents is not None:
734 libdir_contents_changed = False
735 for mypath, mydata in contents.iteritems():
736 if mydata[0] not in ("obj","sym"):
738 head, tail = os.path.split(mypath)
740 libdir_contents_changed = True
742 if not libdir_contents_changed:
745 ldconfig = "/sbin/ldconfig"
746 if "CHOST" in env and "CBUILD" in env and \
747 env["CHOST"] != env["CBUILD"]:
748 from portage.process import find_binary
749 ldconfig = find_binary("%s-ldconfig" % env["CHOST"])
751 # Only run ldconfig as needed
752 if (ld_cache_update or makelinks) and ldconfig:
753 # ldconfig has very different behaviour between FreeBSD and Linux
754 if ostype=="Linux" or ostype.lower().endswith("gnu"):
755 # We can't update links if we haven't cleaned other versions first, as
756 # an older package installed ON TOP of a newer version will cause ldconfig
757 # to overwrite the symlinks we just made. -X means no links. After 'clean'
758 # we can safely create links.
759 writemsg_level(">>> Regenerating %setc/ld.so.cache...\n" % \
762 os.system("cd / ; %s -r '%s'" % (ldconfig, target_root))
764 os.system("cd / ; %s -X -r '%s'" % (ldconfig, target_root))
765 elif ostype in ("FreeBSD","DragonFly"):
766 writemsg_level(">>> Regenerating %svar/run/ld-elf.so.hints...\n" % \
768 os.system(("cd / ; %s -elf -i " + \
769 "-f '%svar/run/ld-elf.so.hints' '%setc/ld.so.conf'") % \
770 (ldconfig, target_root, target_root))
772 del specials["LDPATH"]
774 penvnotice = "# THIS FILE IS AUTOMATICALLY GENERATED BY env-update.\n"
775 penvnotice += "# DO NOT EDIT THIS FILE. CHANGES TO STARTUP PROFILES\n"
776 cenvnotice = penvnotice[:]
777 penvnotice += "# GO INTO /etc/profile NOT /etc/profile.env\n\n"
778 cenvnotice += "# GO INTO /etc/csh.cshrc NOT /etc/csh.env\n\n"
780 #create /etc/profile.env for bash support
781 outfile = atomic_ofstream(os.path.join(target_root, "etc", "profile.env"))
782 outfile.write(penvnotice)
784 env_keys = [ x for x in env if x != "LDPATH" ]
788 if v.startswith('$') and not v.startswith('${'):
789 outfile.write("export %s=$'%s'\n" % (k, v[1:]))
791 outfile.write("export %s='%s'\n" % (k, v))
794 #create /etc/csh.env for (t)csh support
795 outfile = atomic_ofstream(os.path.join(target_root, "etc", "csh.env"))
796 outfile.write(cenvnotice)
798 outfile.write("setenv %s '%s'\n" % (x, env[x]))
801 if sleep_for_mtime_granularity:
802 while current_time == long(time.time()):
805 def ExtractKernelVersion(base_dir):
807 Try to figure out what kernel version we are running
808 @param base_dir: Path to sources (usually /usr/src/linux)
809 @type base_dir: string
810 @rtype: tuple( version[string], error[string])
812 1. tuple( version[string], error[string])
813 Either version or error is populated (but never both)
817 pathname = os.path.join(base_dir, 'Makefile')
819 f = open(pathname, 'r')
820 except OSError, details:
821 return (None, str(details))
822 except IOError, details:
823 return (None, str(details))
827 lines.append(f.readline())
828 except OSError, details:
829 return (None, str(details))
830 except IOError, details:
831 return (None, str(details))
833 lines = [l.strip() for l in lines]
837 #XXX: The following code relies on the ordering of vars within the Makefile
839 # split on the '=' then remove annoying whitespace
840 items = line.split("=")
841 items = [i.strip() for i in items]
842 if items[0] == 'VERSION' or \
843 items[0] == 'PATCHLEVEL':
846 elif items[0] == 'SUBLEVEL':
848 elif items[0] == 'EXTRAVERSION' and \
849 items[-1] != items[0]:
852 # Grab a list of files named localversion* and sort them
853 localversions = os.listdir(base_dir)
854 for x in range(len(localversions)-1,-1,-1):
855 if localversions[x][:12] != "localversion":
859 # Append the contents of each to the version string, stripping ALL whitespace
860 for lv in localversions:
861 version += "".join( " ".join( grabfile( base_dir+ "/" + lv ) ).split() )
863 # Check the .config for a CONFIG_LOCALVERSION and append that too, also stripping whitespace
864 kernelconfig = getconfig(base_dir+"/.config")
865 if kernelconfig and "CONFIG_LOCALVERSION" in kernelconfig:
866 version += "".join(kernelconfig["CONFIG_LOCALVERSION"].split())
868 return (version,None)
870 def autouse(myvartree, use_cache=1, mysettings=None):
	autouse returns a list of USE variables auto-enabled to packages being installed
874 @param myvartree: Instance of the vartree class (from /var/db/pkg...)
875 @type myvartree: vartree
876 @param use_cache: read values from cache
877 @type use_cache: Boolean
878 @param mysettings: Instance of config
879 @type mysettings: config
881 @returns: A string containing a list of USE variables that are enabled via use.defaults
883 if mysettings is None:
885 mysettings = settings
886 if mysettings.profile_path is None:
889 usedefaults = mysettings.use_defs
890 for myuse in usedefaults:
892 for mydep in usedefaults[myuse]:
893 if not myvartree.dep_match(mydep,use_cache=True):
897 myusevars += " "+myuse
def check_config_instance(test):
	"""Validate that *test* is a config instance; raise TypeError otherwise."""
	if isinstance(test, config):
		return
	raise TypeError("Invalid type for config object: %s (should be %s)" % (test.__class__, config))
904 class config(object):
906 This class encompasses the main portage configuration. Data is pulled from
907 ROOT/PORTDIR/profiles/, from ROOT/etc/make.profile incrementally through all
908 parent profiles as well as from ROOT/PORTAGE_CONFIGROOT/* for user specified
911 Generally if you need data like USE flags, FEATURES, environment variables,
912 virtuals ...etc you look in here.
915 _environ_whitelist = []
917 # Whitelisted variables are always allowed to enter the ebuild
918 # environment. Generally, this only includes special portage
919 # variables. Ebuilds can unset variables that are not whitelisted
920 # and rely on them remaining unset for future phases, without them
921 # leaking back in from various locations (bug #189417). It's very
922 # important to set our special BASH_ENV variable in the ebuild
923 # environment in order to prevent sandbox from sourcing /etc/profile
924 # in it's bashrc (causing major leakage).
925 _environ_whitelist += [
926 "BASH_ENV", "BUILD_PREFIX", "D",
927 "DISTDIR", "DOC_SYMLINKS_DIR", "EBUILD",
928 "EBUILD_EXIT_STATUS_FILE", "EBUILD_FORCE_TEST",
929 "EBUILD_PHASE", "ECLASSDIR", "ECLASS_DEPTH", "EMERGE_FROM",
930 "FEATURES", "FILESDIR", "HOME", "PATH",
932 "PKGUSE", "PKG_LOGDIR", "PKG_TMPDIR",
933 "PORTAGE_ACTUAL_DISTDIR", "PORTAGE_ARCHLIST",
935 "PORTAGE_BINPKG_FILE", "PORTAGE_BINPKG_TAR_OPTS",
936 "PORTAGE_BINPKG_TMPFILE",
938 "PORTAGE_BUILDDIR", "PORTAGE_COLORMAP",
939 "PORTAGE_CONFIGROOT", "PORTAGE_DEBUG", "PORTAGE_DEPCACHEDIR",
940 "PORTAGE_GID", "PORTAGE_INST_GID", "PORTAGE_INST_UID",
942 "PORTAGE_LOG_FILE", "PORTAGE_MASTER_PID",
943 "PORTAGE_PYM_PATH", "PORTAGE_QUIET",
944 "PORTAGE_REPO_NAME", "PORTAGE_RESTRICT",
945 "PORTAGE_TMPDIR", "PORTAGE_UPDATE_ENV",
946 "PORTAGE_VERBOSE", "PORTAGE_WORKDIR_MODE",
947 "PORTDIR", "PORTDIR_OVERLAY", "PREROOTPATH", "PROFILE_PATHS",
948 "ROOT", "ROOTPATH", "STARTDIR", "T", "TMP", "TMPDIR",
949 "USE_EXPAND", "USE_ORDER", "WORKDIR",
953 # user config variables
954 _environ_whitelist += [
955 "DOC_SYMLINKS_DIR", "INSTALL_MASK", "PKG_INSTALL_MASK"
958 _environ_whitelist += [
959 "A", "AA", "CATEGORY", "P", "PF", "PN", "PR", "PV", "PVR"
962 # misc variables inherited from the calling environment
963 _environ_whitelist += [
964 "COLORTERM", "DISPLAY", "EDITOR", "LESS",
965 "LESSOPEN", "LOGNAME", "LS_COLORS", "PAGER",
966 "TERM", "TERMCAP", "USER",
969 # other variables inherited from the calling environment
970 _environ_whitelist += [
971 "CVS_RSH", "ECHANGELOG_USER",
973 "SSH_AGENT_PID", "SSH_AUTH_SOCK",
974 "STY", "WINDOW", "XAUTHORITY",
977 _environ_whitelist = frozenset(_environ_whitelist)
979 _environ_whitelist_re = re.compile(r'^(CCACHE_|DISTCC_).*')
981 # Filter selected variables in the config.environ() method so that
982 # they don't needlessly propagate down into the ebuild environment.
985 # misc variables inherited from the calling environment
987 "INFOPATH", "MANPATH",
990 # variables that break bash
995 # portage config variables and variables set directly by portage
997 "ACCEPT_KEYWORDS", "AUTOCLEAN",
998 "CLEAN_DELAY", "COLLISION_IGNORE", "CONFIG_PROTECT",
999 "CONFIG_PROTECT_MASK", "EMERGE_DEFAULT_OPTS",
1000 "EMERGE_WARNING_DELAY", "FETCHCOMMAND", "FETCHCOMMAND_FTP",
1001 "FETCHCOMMAND_HTTP", "FETCHCOMMAND_SFTP",
1002 "GENTOO_MIRRORS", "NOCONFMEM", "O",
1003 "PORTAGE_BACKGROUND",
1004 "PORTAGE_BINHOST_CHUNKSIZE", "PORTAGE_CALLER",
1005 "PORTAGE_COUNTER_HASH",
1006 "PORTAGE_ECLASS_WARNING_ENABLE", "PORTAGE_ELOG_CLASSES",
1007 "PORTAGE_ELOG_MAILFROM", "PORTAGE_ELOG_MAILSUBJECT",
1008 "PORTAGE_ELOG_MAILURI", "PORTAGE_ELOG_SYSTEM",
1009 "PORTAGE_FETCH_CHECKSUM_TRY_MIRRORS", "PORTAGE_FETCH_RESUME_MIN_SIZE",
1011 "PORTAGE_GPG_KEY", "PORTAGE_IONICE_COMMAND",
1012 "PORTAGE_PACKAGE_EMPTY_ABORT",
1013 "PORTAGE_RO_DISTDIRS",
1014 "PORTAGE_RSYNC_EXTRA_OPTS", "PORTAGE_RSYNC_OPTS",
1015 "PORTAGE_RSYNC_RETRIES", "PORTAGE_USE", "PORT_LOGDIR",
1016 "QUICKPKG_DEFAULT_OPTS",
1017 "RESUMECOMMAND", "RESUMECOMMAND_HTTP", "RESUMECOMMAND_HTTP",
1018 "RESUMECOMMAND_SFTP", "SYNC", "USE_EXPAND_HIDDEN", "USE_ORDER",
1021 _environ_filter = frozenset(_environ_filter)
1023 def __init__(self, clone=None, mycpv=None, config_profile_path=None,
1024 config_incrementals=None, config_root=None, target_root=None,
1027 @param clone: If provided, init will use deepcopy to copy by value the instance.
1028 @type clone: Instance of config class.
1029 @param mycpv: CPV to load up (see setcpv), this is the same as calling init with mycpv=None
1030 and then calling instance.setcpv(mycpv).
1032 @param config_profile_path: Configurable path to the profile (usually PROFILE_PATH from portage.const)
1033 @type config_profile_path: String
1034 @param config_incrementals: List of incremental variables (usually portage.const.INCREMENTALS)
1035 @type config_incrementals: List
1036 @param config_root: path to read local config from (defaults to "/", see PORTAGE_CONFIGROOT)
1037 @type config_root: String
1038 @param target_root: __init__ override of $ROOT env variable.
1039 @type target_root: String
1040 @param local_config: Enables loading of local config (/etc/portage); used most by repoman to
1041 ignore local config (keywording and unmasking)
1042 @type local_config: Boolean
1045 # When initializing the global portage.settings instance, avoid
1046 # raising exceptions whenever possible since exceptions thrown
1047 # from 'import portage' or 'import portage.exceptions' statements
1048 # can practically render the api unusable for api consumers.
1049 tolerant = "_initializing_globals" in globals()
1051 self.already_in_regenerate = 0
1056 self.modifiedkeys = []
1058 self._accept_chost_re = None
1062 self.dirVirtuals = None
1065 # Virtuals obtained from the vartree
1066 self.treeVirtuals = {}
1067 # Virtuals by user specification. Includes negatives.
1068 self.userVirtuals = {}
1069 # Virtual negatives from user specifications.
1070 self.negVirtuals = {}
1071 # Virtuals added by the depgraph via self.setinst().
1072 self._depgraphVirtuals = {}
1074 self.user_profile_dir = None
1075 self.local_config = local_config
1078 self.incrementals = copy.deepcopy(clone.incrementals)
1079 self.profile_path = copy.deepcopy(clone.profile_path)
1080 self.user_profile_dir = copy.deepcopy(clone.user_profile_dir)
1081 self.local_config = copy.deepcopy(clone.local_config)
1083 self.module_priority = copy.deepcopy(clone.module_priority)
1084 self.modules = copy.deepcopy(clone.modules)
1086 self.depcachedir = copy.deepcopy(clone.depcachedir)
1088 self.packages = copy.deepcopy(clone.packages)
1089 self.virtuals = copy.deepcopy(clone.virtuals)
1091 self.dirVirtuals = copy.deepcopy(clone.dirVirtuals)
1092 self.treeVirtuals = copy.deepcopy(clone.treeVirtuals)
1093 self.userVirtuals = copy.deepcopy(clone.userVirtuals)
1094 self.negVirtuals = copy.deepcopy(clone.negVirtuals)
1095 self._depgraphVirtuals = copy.deepcopy(clone._depgraphVirtuals)
1097 self.use_defs = copy.deepcopy(clone.use_defs)
1098 self.usemask = copy.deepcopy(clone.usemask)
1099 self.usemask_list = copy.deepcopy(clone.usemask_list)
1100 self.pusemask_list = copy.deepcopy(clone.pusemask_list)
1101 self.useforce = copy.deepcopy(clone.useforce)
1102 self.useforce_list = copy.deepcopy(clone.useforce_list)
1103 self.puseforce_list = copy.deepcopy(clone.puseforce_list)
1104 self.puse = copy.deepcopy(clone.puse)
1105 self.make_defaults_use = copy.deepcopy(clone.make_defaults_use)
1106 self.pkgprofileuse = copy.deepcopy(clone.pkgprofileuse)
1107 self.mycpv = copy.deepcopy(clone.mycpv)
1109 self.configlist = copy.deepcopy(clone.configlist)
1110 self.lookuplist = self.configlist[:]
1111 self.lookuplist.reverse()
1113 "env.d": self.configlist[0],
1114 "pkginternal": self.configlist[1],
1115 "globals": self.configlist[2],
1116 "defaults": self.configlist[3],
1117 "conf": self.configlist[4],
1118 "pkg": self.configlist[5],
1119 "auto": self.configlist[6],
1120 "backupenv": self.configlist[7],
1121 "env": self.configlist[8] }
1122 self.profiles = copy.deepcopy(clone.profiles)
1123 self.backupenv = self.configdict["backupenv"]
1124 self.pusedict = copy.deepcopy(clone.pusedict)
1125 self.categories = copy.deepcopy(clone.categories)
1126 self.pkeywordsdict = copy.deepcopy(clone.pkeywordsdict)
1127 self.pmaskdict = copy.deepcopy(clone.pmaskdict)
1128 self.punmaskdict = copy.deepcopy(clone.punmaskdict)
1129 self.prevmaskdict = copy.deepcopy(clone.prevmaskdict)
1130 self.pprovideddict = copy.deepcopy(clone.pprovideddict)
1131 self.features = copy.deepcopy(clone.features)
1133 self._accept_license = copy.deepcopy(clone._accept_license)
1134 self._plicensedict = copy.deepcopy(clone._plicensedict)
1137 def check_var_directory(varname, var):
1138 if not os.path.isdir(var):
1139 writemsg(("!!! Error: %s='%s' is not a directory. " + \
1140 "Please correct this.\n") % (varname, var),
1142 raise portage.exception.DirectoryNotFound(var)
1144 if config_root is None:
1147 config_root = normalize_path(os.path.abspath(
1148 config_root)).rstrip(os.path.sep) + os.path.sep
1150 check_var_directory("PORTAGE_CONFIGROOT", config_root)
1152 self.depcachedir = DEPCACHE_PATH
1154 if not config_profile_path:
1155 config_profile_path = \
1156 os.path.join(config_root, PROFILE_PATH.lstrip(os.path.sep))
1157 if os.path.isdir(config_profile_path):
1158 self.profile_path = config_profile_path
1160 self.profile_path = None
1162 self.profile_path = config_profile_path[:]
1164 if not config_incrementals:
1165 writemsg("incrementals not specified to class config\n")
1166 self.incrementals = copy.deepcopy(portage.const.INCREMENTALS)
1168 self.incrementals = copy.deepcopy(config_incrementals)
1170 self.module_priority = ["user","default"]
1172 self.modules["user"] = getconfig(
1173 os.path.join(config_root, MODULES_FILE_PATH.lstrip(os.path.sep)))
1174 if self.modules["user"] is None:
1175 self.modules["user"] = {}
1176 self.modules["default"] = {
1177 "portdbapi.metadbmodule": "portage.cache.metadata.database",
1178 "portdbapi.auxdbmodule": "portage.cache.flat_hash.database",
1184 # back up our incremental variables:
1186 # configlist will contain: [ env.d, globals, defaults, conf, pkg, auto, backupenv, env ]
1187 self.configlist.append({})
1188 self.configdict["env.d"] = self.configlist[-1]
1190 self.configlist.append({})
1191 self.configdict["pkginternal"] = self.configlist[-1]
1193 # The symlink might not exist or might not be a symlink.
1194 if self.profile_path is None:
1198 def addProfile(currentPath):
1199 parentsFile = os.path.join(currentPath, "parent")
1200 eapi_file = os.path.join(currentPath, "eapi")
1202 eapi = open(eapi_file).readline().strip()
1206 if not eapi_is_supported(eapi):
1207 raise portage.exception.ParseError(
1208 "Profile contains unsupported " + \
1209 "EAPI '%s': '%s'" % \
1210 (eapi, os.path.realpath(eapi_file),))
1211 if os.path.exists(parentsFile):
1212 parents = grabfile(parentsFile)
1214 raise portage.exception.ParseError(
1215 "Empty parent file: '%s'" % parentsFile)
1216 for parentPath in parents:
1217 parentPath = normalize_path(os.path.join(
1218 currentPath, parentPath))
1219 if os.path.exists(parentPath):
1220 addProfile(parentPath)
1222 raise portage.exception.ParseError(
1223 "Parent '%s' not found: '%s'" % \
1224 (parentPath, parentsFile))
1225 self.profiles.append(currentPath)
1227 addProfile(os.path.realpath(self.profile_path))
1228 except portage.exception.ParseError, e:
1229 writemsg("!!! Unable to parse profile: '%s'\n" % \
1230 self.profile_path, noiselevel=-1)
1231 writemsg("!!! ParseError: %s\n" % str(e), noiselevel=-1)
1234 if local_config and self.profiles:
1235 custom_prof = os.path.join(
1236 config_root, CUSTOM_PROFILE_PATH.lstrip(os.path.sep))
1237 if os.path.exists(custom_prof):
1238 self.user_profile_dir = custom_prof
1239 self.profiles.append(custom_prof)
1242 self.packages_list = [grabfile_package(os.path.join(x, "packages")) for x in self.profiles]
1243 self.packages = stack_lists(self.packages_list, incremental=1)
1244 del self.packages_list
1245 #self.packages = grab_stacked("packages", self.profiles, grabfile, incremental_lines=1)
1248 self.prevmaskdict={}
1249 for x in self.packages:
1250 mycatpkg=dep_getkey(x)
1251 if mycatpkg not in self.prevmaskdict:
1252 self.prevmaskdict[mycatpkg]=[x]
1254 self.prevmaskdict[mycatpkg].append(x)
1256 # get profile-masked use flags -- INCREMENTAL Child over parent
1257 self.usemask_list = [grabfile(os.path.join(x, "use.mask")) \
1258 for x in self.profiles]
1259 self.usemask = set(stack_lists(
1260 self.usemask_list, incremental=True))
1261 use_defs_lists = [grabdict(os.path.join(x, "use.defaults")) for x in self.profiles]
1262 self.use_defs = stack_dictlist(use_defs_lists, incremental=True)
1265 self.pusemask_list = []
1266 rawpusemask = [grabdict_package(
1267 os.path.join(x, "package.use.mask")) \
1268 for x in self.profiles]
1269 for i in xrange(len(self.profiles)):
1271 for k, v in rawpusemask[i].iteritems():
1272 cpdict.setdefault(dep_getkey(k), {})[k] = v
1273 self.pusemask_list.append(cpdict)
1276 self.pkgprofileuse = []
1277 rawprofileuse = [grabdict_package(
1278 os.path.join(x, "package.use"), juststrings=True) \
1279 for x in self.profiles]
1280 for i in xrange(len(self.profiles)):
1282 for k, v in rawprofileuse[i].iteritems():
1283 cpdict.setdefault(dep_getkey(k), {})[k] = v
1284 self.pkgprofileuse.append(cpdict)
1287 self.useforce_list = [grabfile(os.path.join(x, "use.force")) \
1288 for x in self.profiles]
1289 self.useforce = set(stack_lists(
1290 self.useforce_list, incremental=True))
1292 self.puseforce_list = []
1293 rawpuseforce = [grabdict_package(
1294 os.path.join(x, "package.use.force")) \
1295 for x in self.profiles]
1296 for i in xrange(len(self.profiles)):
1298 for k, v in rawpuseforce[i].iteritems():
1299 cpdict.setdefault(dep_getkey(k), {})[k] = v
1300 self.puseforce_list.append(cpdict)
1303 make_conf = getconfig(
1304 os.path.join(config_root, MAKE_CONF_FILE.lstrip(os.path.sep)),
1305 tolerant=tolerant, allow_sourcing=True)
1306 if make_conf is None:
1309 # Allow ROOT setting to come from make.conf if it's not overridden
1310 # by the constructor argument (from the calling environment).
1311 if target_root is None and "ROOT" in make_conf:
1312 target_root = make_conf["ROOT"]
1313 if not target_root.strip():
1315 if target_root is None:
1318 target_root = normalize_path(os.path.abspath(
1319 target_root)).rstrip(os.path.sep) + os.path.sep
1321 portage.util.ensure_dirs(target_root)
1322 check_var_directory("ROOT", target_root)
1324 # The expand_map is used for variable substitution
1325 # in getconfig() calls, and the getconfig() calls
1326 # update expand_map with the value of each variable
1327 # assignment that occurs. Variable substitution occurs
1328 # in the following order, which corresponds to the
1329 # order of appearance in self.lookuplist:
1336 # Notably absent is "env", since we want to avoid any
1337 # interaction with the calling environment that might
1338 # lead to unexpected results.
1341 env_d = getconfig(os.path.join(target_root, "etc", "profile.env"),
1343 # env_d will be None if profile.env doesn't exist.
1345 self.configdict["env.d"].update(env_d)
1346 expand_map.update(env_d)
1348 # backupenv is used for calculating incremental variables.
1349 self.backupenv = os.environ.copy()
1352 # Remove duplicate values so they don't override updated
1353 # profile.env values later (profile.env is reloaded in each
1354 # call to self.regenerate).
1355 for k, v in env_d.iteritems():
1357 if self.backupenv[k] == v:
1358 del self.backupenv[k]
1363 self.configdict["env"] = self.backupenv.copy()
1365 # make.globals should not be relative to config_root
1366 # because it only contains constants.
1367 for x in (portage.const.GLOBAL_CONFIG_PATH, "/etc"):
1368 self.mygcfg = getconfig(os.path.join(x, "make.globals"),
1373 if self.mygcfg is None:
1376 self.configlist.append(self.mygcfg)
1377 self.configdict["globals"]=self.configlist[-1]
1379 self.make_defaults_use = []
1382 mygcfg_dlists = [getconfig(os.path.join(x, "make.defaults"),
1383 expand=expand_map) for x in self.profiles]
1385 for cfg in mygcfg_dlists:
1387 self.make_defaults_use.append(cfg.get("USE", ""))
1389 self.make_defaults_use.append("")
1390 self.mygcfg = stack_dicts(mygcfg_dlists,
1391 incrementals=portage.const.INCREMENTALS, ignore_none=1)
1392 if self.mygcfg is None:
1394 self.configlist.append(self.mygcfg)
1395 self.configdict["defaults"]=self.configlist[-1]
1397 self.mygcfg = getconfig(
1398 os.path.join(config_root, MAKE_CONF_FILE.lstrip(os.path.sep)),
1399 tolerant=tolerant, allow_sourcing=True, expand=expand_map)
1400 if self.mygcfg is None:
1403 # Don't allow the user to override certain variables in make.conf
1404 profile_only_variables = self.configdict["defaults"].get(
1405 "PROFILE_ONLY_VARIABLES", "").split()
1406 for k in profile_only_variables:
1407 self.mygcfg.pop(k, None)
1409 self.configlist.append(self.mygcfg)
1410 self.configdict["conf"]=self.configlist[-1]
1412 self.configlist.append({})
1413 self.configdict["pkg"]=self.configlist[-1]
1416 self.configlist.append({})
1417 self.configdict["auto"]=self.configlist[-1]
1419 self.configlist.append(self.backupenv) # XXX Why though?
1420 self.configdict["backupenv"]=self.configlist[-1]
1422 # Don't allow the user to override certain variables in the env
1423 for k in profile_only_variables:
1424 self.backupenv.pop(k, None)
1426 self.configlist.append(self.configdict["env"])
1428 # make lookuplist for loading package.*
1429 self.lookuplist=self.configlist[:]
1430 self.lookuplist.reverse()
1432 # Blacklist vars that could interfere with portage internals.
1433 for blacklisted in "CATEGORY", "EBUILD_PHASE", \
1434 "EMERGE_FROM", "PKGUSE", "PORTAGE_CONFIGROOT", \
1435 "PORTAGE_IUSE", "PORTAGE_USE", "ROOT":
1436 for cfg in self.lookuplist:
1437 cfg.pop(blacklisted, None)
1438 del blacklisted, cfg
1440 self["PORTAGE_CONFIGROOT"] = config_root
1441 self.backup_changes("PORTAGE_CONFIGROOT")
1442 self["ROOT"] = target_root
1443 self.backup_changes("ROOT")
1446 self.pkeywordsdict = {}
1447 self._plicensedict = {}
1448 self.punmaskdict = {}
1449 abs_user_config = os.path.join(config_root,
1450 USER_CONFIG_PATH.lstrip(os.path.sep))
1452 # locations for "categories" and "arch.list" files
1453 locations = [os.path.join(self["PORTDIR"], "profiles")]
1454 pmask_locations = [os.path.join(self["PORTDIR"], "profiles")]
1455 pmask_locations.extend(self.profiles)
1457 """ repoman controls PORTDIR_OVERLAY via the environment, so no
1458 special cases are needed here."""
1459 overlay_profiles = []
1460 for ov in self["PORTDIR_OVERLAY"].split():
1461 ov = normalize_path(ov)
1462 profiles_dir = os.path.join(ov, "profiles")
1463 if os.path.isdir(profiles_dir):
1464 overlay_profiles.append(profiles_dir)
1465 locations += overlay_profiles
1467 pmask_locations.extend(overlay_profiles)
1470 locations.append(abs_user_config)
1471 pmask_locations.append(abs_user_config)
1472 pusedict = grabdict_package(
1473 os.path.join(abs_user_config, "package.use"), recursive=1)
1474 for key in pusedict.keys():
1475 cp = dep_getkey(key)
1476 if cp not in self.pusedict:
1477 self.pusedict[cp] = {}
1478 self.pusedict[cp][key] = pusedict[key]
1481 pkgdict = grabdict_package(
1482 os.path.join(abs_user_config, "package.keywords"),
1484 for key in pkgdict.keys():
1485 # default to ~arch if no specific keyword is given
1486 if not pkgdict[key]:
1488 if self.configdict["defaults"] and \
1489 "ACCEPT_KEYWORDS" in self.configdict["defaults"]:
1490 groups = self.configdict["defaults"]["ACCEPT_KEYWORDS"].split()
1493 for keyword in groups:
1494 if not keyword[0] in "~-":
1495 mykeywordlist.append("~"+keyword)
1496 pkgdict[key] = mykeywordlist
1497 cp = dep_getkey(key)
1498 if cp not in self.pkeywordsdict:
1499 self.pkeywordsdict[cp] = {}
1500 self.pkeywordsdict[cp][key] = pkgdict[key]
1503 licdict = grabdict_package(os.path.join(
1504 abs_user_config, "package.license"), recursive=1)
1505 for k, v in licdict.iteritems():
1507 cp_dict = self._plicensedict.get(cp)
1510 self._plicensedict[cp] = cp_dict
1511 cp_dict[k] = self.expandLicenseTokens(v)
1514 pkgunmasklines = grabfile_package(
1515 os.path.join(abs_user_config, "package.unmask"),
1517 for x in pkgunmasklines:
1518 mycatpkg=dep_getkey(x)
1519 if mycatpkg in self.punmaskdict:
1520 self.punmaskdict[mycatpkg].append(x)
1522 self.punmaskdict[mycatpkg]=[x]
1524 #getting categories from an external file now
1525 categories = [grabfile(os.path.join(x, "categories")) for x in locations]
1526 self.categories = stack_lists(categories, incremental=1)
1529 archlist = [grabfile(os.path.join(x, "arch.list")) for x in locations]
1530 archlist = stack_lists(archlist, incremental=1)
1531 self.configdict["conf"]["PORTAGE_ARCHLIST"] = " ".join(archlist)
1535 for x in pmask_locations:
1536 pkgmasklines.append(grabfile_package(
1537 os.path.join(x, "package.mask"), recursive=1))
1538 pkgmasklines = stack_lists(pkgmasklines, incremental=1)
1541 for x in pkgmasklines:
1542 mycatpkg=dep_getkey(x)
1543 if mycatpkg in self.pmaskdict:
1544 self.pmaskdict[mycatpkg].append(x)
1546 self.pmaskdict[mycatpkg]=[x]
1548 pkgprovidedlines = [grabfile(os.path.join(x, "package.provided")) for x in self.profiles]
1549 pkgprovidedlines = stack_lists(pkgprovidedlines, incremental=1)
1550 has_invalid_data = False
1551 for x in range(len(pkgprovidedlines)-1, -1, -1):
1552 myline = pkgprovidedlines[x]
1553 if not isvalidatom("=" + myline):
1554 writemsg("Invalid package name in package.provided:" + \
1555 " %s\n" % myline, noiselevel=-1)
1556 has_invalid_data = True
1557 del pkgprovidedlines[x]
1559 cpvr = catpkgsplit(pkgprovidedlines[x])
1560 if not cpvr or cpvr[0] == "null":
1561 writemsg("Invalid package name in package.provided: "+pkgprovidedlines[x]+"\n",
1563 has_invalid_data = True
1564 del pkgprovidedlines[x]
1566 if cpvr[0] == "virtual":
1567 writemsg("Virtual package in package.provided: %s\n" % \
1568 myline, noiselevel=-1)
1569 has_invalid_data = True
1570 del pkgprovidedlines[x]
1572 if has_invalid_data:
1573 writemsg("See portage(5) for correct package.provided usage.\n",
1575 self.pprovideddict = {}
1576 for x in pkgprovidedlines:
1580 mycatpkg=dep_getkey(x)
1581 if mycatpkg in self.pprovideddict:
1582 self.pprovideddict[mycatpkg].append(x)
1584 self.pprovideddict[mycatpkg]=[x]
1586 # parse licensegroups
1587 self._license_groups = {}
1589 self._license_groups.update(
1590 grabdict(os.path.join(x, "license_groups")))
1592 # reasonable defaults; this is important as without USE_ORDER,
1593 # USE will always be "" (nothing set)!
1594 if "USE_ORDER" not in self:
1595 self.backupenv["USE_ORDER"] = "env:pkg:conf:defaults:pkginternal:env.d"
1597 self["PORTAGE_GID"] = str(portage_gid)
1598 self.backup_changes("PORTAGE_GID")
1600 if self.get("PORTAGE_DEPCACHEDIR", None):
1601 self.depcachedir = self["PORTAGE_DEPCACHEDIR"]
1602 self["PORTAGE_DEPCACHEDIR"] = self.depcachedir
1603 self.backup_changes("PORTAGE_DEPCACHEDIR")
1605 overlays = self.get("PORTDIR_OVERLAY","").split()
1609 ov = normalize_path(ov)
1610 if os.path.isdir(ov):
1613 writemsg("!!! Invalid PORTDIR_OVERLAY" + \
1614 " (not a dir): '%s'\n" % ov, noiselevel=-1)
1615 self["PORTDIR_OVERLAY"] = " ".join(new_ov)
1616 self.backup_changes("PORTDIR_OVERLAY")
1618 if "CBUILD" not in self and "CHOST" in self:
1619 self["CBUILD"] = self["CHOST"]
1620 self.backup_changes("CBUILD")
1622 self["PORTAGE_BIN_PATH"] = PORTAGE_BIN_PATH
1623 self.backup_changes("PORTAGE_BIN_PATH")
1624 self["PORTAGE_PYM_PATH"] = PORTAGE_PYM_PATH
1625 self.backup_changes("PORTAGE_PYM_PATH")
1627 # Expand license groups
1628 # This has to do be done for each config layer before regenerate()
1629 # in order for incremental negation to work properly.
1631 for c in self.configdict.itervalues():
1632 v = c.get("ACCEPT_LICENSE")
1635 v = " ".join(self.expandLicenseTokens(v.split()))
1636 c["ACCEPT_LICENSE"] = v
1639 for var in ("PORTAGE_INST_UID", "PORTAGE_INST_GID"):
1641 self[var] = str(int(self.get(var, "0")))
1643 writemsg(("!!! %s='%s' is not a valid integer. " + \
1644 "Falling back to '0'.\n") % (var, self[var]),
1647 self.backup_changes(var)
1649 # initialize self.features
1652 # ACCEPT_LICENSE support depends on definition of license groups
1653 # in the tree, so it's disabled for now (accept anything).
1654 self._accept_license = set(["*"])
1656 if not portage.process.sandbox_capable and \
1657 ("sandbox" in self.features or "usersandbox" in self.features):
1658 if self.profile_path is not None and \
1659 os.path.realpath(self.profile_path) == \
1660 os.path.realpath(PROFILE_PATH):
1661 """ Don't show this warning when running repoman and the
1662 sandbox feature came from a profile that doesn't belong to
1664 writemsg(colorize("BAD", "!!! Problem with sandbox" + \
1665 " binary. Disabling...\n\n"), noiselevel=-1)
1666 if "sandbox" in self.features:
1667 self.features.remove("sandbox")
1668 if "usersandbox" in self.features:
1669 self.features.remove("usersandbox")
1671 self.features.sort()
1672 self["FEATURES"] = " ".join(self.features)
1673 self.backup_changes("FEATURES")
# Create a small set of directories that portage needs at runtime under
# ROOT, applying per-directory gid/mode/umask from the table below.
# Runs only when ROOT is writable; failures are reported, not fatal.
1680 def _init_dirs(self):
1682 Create a few directories that are critical to portage operation
# Bail out when ROOT is not writable (e.g. unprivileged invocation) —
# nothing below could succeed anyway.
1684 if not os.access(self["ROOT"], os.W_OK):
# Table: relative path -> (gid, mode, umask, preserve_perms).
# preserve_perms=True means existing dirs are left untouched.
1687 # gid, mode, mask, preserve_perms
1689 "tmp" : ( -1, 01777, 0, True),
1690 "var/tmp" : ( -1, 01777, 0, True),
1691 PRIVATE_PATH : ( portage_gid, 02750, 02, False),
1692 CACHE_PATH.lstrip(os.path.sep) : (portage_gid, 0755, 02, False)
1695 for mypath, (gid, mode, modemask, preserve_perms) \
1696 in dir_mode_map.iteritems():
1697 mydir = os.path.join(self["ROOT"], mypath)
1698 if preserve_perms and os.path.isdir(mydir):
1699 # Only adjust permissions on some directories if
1700 # they don't exist yet. This gives freedom to the
1701 # user to adjust permissions to suit their taste.
1704 portage.util.ensure_dirs(mydir, gid=gid, mode=mode, mask=modemask)
# Directory creation is best-effort: warn and continue with the rest.
1705 except portage.exception.PortageException, e:
1706 writemsg("!!! Directory initialization failed: '%s'\n" % mydir,
1708 writemsg("!!! %s\n" % str(e),
# Public wrapper over _expandLicenseToken: expands every token from
# ACCEPT_LICENSE / package.license, flattening @group references.
1711 def expandLicenseTokens(self, tokens):
1712 """ Take a token from ACCEPT_LICENSE or package.license and expand it
1713 if it's a group token (indicated by @) or just return it if it's not a
1714 group. If a group is negated then negate all group elements."""
1715 expanded_tokens = []
# Per-token expansion is delegated; the second argument (None) means no
# groups have been traversed yet — recursion tracking starts fresh.
1717 expanded_tokens.extend(self._expandLicenseToken(x, None))
1718 return expanded_tokens
# Recursive worker for expandLicenseTokens.  A leading "-" negates the
# token; "@name" is a group reference resolved via self._license_groups.
# traversed_groups guards against circular group references.
1720 def _expandLicenseToken(self, token, traversed_groups):
# Strip a leading "-" so the bare license/group name can be inspected;
# the negation is re-applied to the expanded result at the end.
1723 if token.startswith("-"):
1725 license_name = token[1:]
1727 license_name = token
# Plain (non-@) tokens pass through unchanged.
1728 if not license_name.startswith("@"):
1729 rValue.append(token)
1731 group_name = license_name[1:]
1732 if not traversed_groups:
1733 traversed_groups = set()
1734 license_group = self._license_groups.get(group_name)
# A group we've already descended into means a cycle: warn and keep the
# literal "@name" token instead of recursing forever.
1735 if group_name in traversed_groups:
1736 writemsg(("Circular license group reference" + \
1737 " detected in '%s'\n") % group_name, noiselevel=-1)
1738 rValue.append("@"+group_name)
1740 traversed_groups.add(group_name)
1741 for l in license_group:
# Negated entries inside a group definition are invalid; skip them.
1742 if l.startswith("-"):
1743 writemsg(("Skipping invalid element %s" + \
1744 " in license group '%s'\n") % (l, group_name),
1747 rValue.extend(self._expandLicenseToken(l, traversed_groups))
# Undefined group: warn and preserve the literal "@name" token.
1749 writemsg("Undefined license group '%s'\n" % group_name,
1751 rValue.append("@"+group_name)
# Re-apply the original "-" negation to every expanded member.
1753 rValue = ["-" + token for token in rValue]
1757 """Validate miscellaneous settings and display warnings if necessary.
1758 (This code was previously in the global scope of portage.py)"""
# Check every ACCEPT_KEYWORDS token against profiles/arch.list; tokens
# like "*", "~*", "**" and negations of known arches are allowed.
1760 groups = self["ACCEPT_KEYWORDS"].split()
1761 archlist = self.archlist()
1763 writemsg("--- 'profiles/arch.list' is empty or " + \
1764 "not available. Empty portage tree?\n", noiselevel=1)
1766 for group in groups:
1767 if group not in archlist and \
1768 not (group.startswith("-") and group[1:] in archlist) and \
1769 group not in ("*", "~*", "**"):
1770 writemsg("!!! INVALID ACCEPT_KEYWORDS: %s\n" % str(group),
# Warn when the profile path is not the expected symlink into
# $PORTDIR/profiles (and is not a directory profile with a "parent"
# file) — merges will likely fail in that state.
1773 abs_profile_path = os.path.join(self["PORTAGE_CONFIGROOT"],
1774 PROFILE_PATH.lstrip(os.path.sep))
1775 if not self.profile_path or (not os.path.islink(abs_profile_path) and \
1776 not os.path.exists(os.path.join(abs_profile_path, "parent")) and \
1777 os.path.exists(os.path.join(self["PORTDIR"], "profiles"))):
1778 writemsg("\a\n\n!!! %s is not a symlink and will probably prevent most merges.\n" % abs_profile_path,
1780 writemsg("!!! It should point into a profile within %s/profiles/\n" % self["PORTDIR"])
1781 writemsg("!!! (You can safely ignore this message when syncing. It's harmless.)\n\n\n")
# Nag about the deprecated /etc/portage/virtuals location.
1783 abs_user_virtuals = os.path.join(self["PORTAGE_CONFIGROOT"],
1784 USER_VIRTUALS_FILE.lstrip(os.path.sep))
1785 if os.path.exists(abs_user_virtuals):
1786 writemsg("\n!!! /etc/portage/virtuals is deprecated in favor of\n")
1787 writemsg("!!! /etc/portage/profile/virtuals. Please move it to\n")
1788 writemsg("!!! this new location.\n\n")
# Deprecated shim kept for API compatibility: warns, then delegates to
# getvirtuals(root) and discards the result.
1790 def loadVirtuals(self,root):
1791 """Not currently used by portage."""
1792 writemsg("DEPRECATED: portage.config.loadVirtuals\n")
1793 self.getvirtuals(root)
# Resolve a module implementation for property_string using self.modules
# with self.module_priority ("user" before "default"), then import it.
1795 def load_best_module(self,property_string):
1796 best_mod = best_from_dict(property_string,self.modules,self.module_priority)
1799 mod = load_mod(best_mod)
# Backward compatibility: legacy "cache.*" module names are retried
# under the "portage." package namespace — TODO confirm against the
# elided exception handler around these lines.
1801 if best_mod.startswith("cache."):
1802 best_mod = "portage." + best_mod
1804 mod = load_mod(best_mod)
# Guard called before any mutation of this config; raises when the
# instance has been locked (the lock test itself is on an elided line).
1817 def modifying(self):
1819 raise Exception("Configuration is locked.")
# Persist the current value of `key` from the "env" layer into
# self.backupenv so it survives subsequent reset() calls.
# Raises KeyError when the key is absent (or key is falsy).
1821 def backup_changes(self,key=None):
1823 if key and key in self.configdict["env"]:
1824 self.backupenv[key] = copy.deepcopy(self.configdict["env"][key])
1826 raise KeyError("No such key defined in environment: %s" % key)
1828 def reset(self,keeping_pkg=0,use_cache=1):
1830 Restore environment from self.backupenv, call self.regenerate()
1831 @param keeping_pkg: Should we keep the set_cpv() data or delete it.
1832 @type keeping_pkg: Boolean
1833 @param use_cache: Should self.regenerate use the cache or not
1834 @type use_cache: Boolean
# Replace the mutable "env" layer with a copy of the pristine backup.
1838 self.configdict["env"].clear()
1839 self.configdict["env"].update(self.backupenv)
1841 self.modifiedkeys = []
# Unless keeping_pkg was requested, drop per-package state and restore
# profile-level USE defaults, use.mask and use.force from the stacked
# per-profile lists.
1845 self.configdict["pkg"].clear()
1846 self.configdict["pkginternal"].clear()
1847 self.configdict["defaults"]["USE"] = \
1848 " ".join(self.make_defaults_use)
1849 self.usemask = set(stack_lists(
1850 self.usemask_list, incremental=True))
1851 self.useforce = set(stack_lists(
1852 self.useforce_list, incremental=True))
1853 self.regenerate(use_cache=use_cache)
# Load build-time metadata files from an installed package's infodir
# (e.g. /var/db/pkg/<cat>/<pf>) into configdict["pkg"] / ["env"].
1855 def load_infodir(self,infodir):
# Keep a snapshot of the current pkg layer so known-good values can be
# restored after (re)loading from disk.
1857 backup_pkg_metadata = dict(self.configdict["pkg"].iteritems())
1858 if "pkg" in self.configdict and \
1859 "CATEGORY" in self.configdict["pkg"]:
1860 self.configdict["pkg"].clear()
1861 self.configdict["pkg"]["CATEGORY"] = \
1862 backup_pkg_metadata["CATEGORY"]
1864 raise portage.exception.PortageException(
1865 "No pkg setup for settings instance?")
1868 found_category_file = False
1869 if os.path.isdir(infodir):
1870 if os.path.exists(infodir+"/environment"):
1871 self.configdict["pkg"]["PORT_ENV_FILE"] = infodir+"/environment"
# Only all-uppercase filenames are treated as metadata variables.
1873 myre = re.compile('^[A-Z]+$')
1875 for filename in listdir(infodir,filesonly=1,EmptyOnError=1):
1876 if filename == "FEATURES":
1877 # FEATURES from the build host shouldn't be interpreted as
1878 # FEATURES on the client system.
1880 if filename == "CATEGORY":
1881 found_category_file = True
1883 if myre.match(filename):
1885 file_path = os.path.join(infodir, filename)
1886 mydata = open(file_path).read().strip()
# Size guard: only small files (or USE, always) are imported; files
# containing null bytes are rejected as corrupt.
1887 if len(mydata) < 2048 or filename == "USE":
1888 if null_byte in mydata:
1889 writemsg("!!! Null byte found in metadata " + \
1890 "file: '%s'\n" % file_path, noiselevel=-1)
# The recorded USE of a binary package replaces (not augments) the
# current USE, hence the "-* " prefix in the pkg layer.
1892 if filename == "USE":
1893 binpkg_flags = "-* " + mydata
1894 self.configdict["pkg"][filename] = binpkg_flags
1895 self.configdict["env"][filename] = mydata
1897 self.configdict["pkg"][filename] = mydata
1898 self.configdict["env"][filename] = mydata
# Unreadable files are skipped with a warning; best-effort load.
1899 except (OSError, IOError):
1900 writemsg("!!! Unable to read file: %s\n" % infodir+"/"+filename,
1905 # Missing or corrupt CATEGORY will cause problems for
1906 # doebuild(), which uses it to infer the cpv. We already
1907 # know the category, so there's no need to trust this
1908 # file. Show a warning if the file is missing though,
1909 # because it's required (especially for binary packages).
1910 if not found_category_file:
1911 writemsg("!!! CATEGORY file is missing: %s\n" % \
1912 os.path.join(infodir, "CATEGORY"), noiselevel=-1)
1913 self.configdict["pkg"].update(backup_pkg_metadata)
1916 # Always set known good values for these variables, since
1917 # corruption of these can cause problems:
1918 cat, pf = catsplit(self.mycpv)
1919 self.configdict["pkg"]["CATEGORY"] = cat
1920 self.configdict["pkg"]["PF"] = pf
1924 def setcpv(self, mycpv, use_cache=1, mydb=None):
1926 Load a particular CPV into the config, this lets us see the
1927 Default USE flags for a particular ebuild as well as the USE
1928 flags from package.use.
1930 @param mycpv: A cpv to load
1932 @param use_cache: Enables caching
1933 @type use_cache: Boolean
1934 @param mydb: a dbapi instance that supports aux_get with the IUSE key.
1935 @type mydb: dbapi or derivative.
1942 if not isinstance(mycpv, basestring):
# Fast path: nothing to do when the same cpv is already loaded.
1947 if self.mycpv == mycpv:
1951 cat, pf = catsplit(mycpv)
1952 cp = dep_getkey(mycpv)
1953 cpv_slot = self.mycpv
# --- Pull package metadata into the "pkg" layer -------------------
1956 env_configdict = self.configdict["env"]
1957 pkg_configdict = self.configdict["pkg"]
1958 previous_iuse = pkg_configdict.get("IUSE")
# Purge stale per-package variables from the env layer so they can't
# shadow the freshly loaded values.
1959 for k in ("A", "AA", "CATEGORY", "PKGUSE", "PF", "PORTAGE_USE"):
1960 env_configdict.pop(k, None)
1961 pkg_configdict["CATEGORY"] = cat
1962 pkg_configdict["PF"] = pf
# mydb may be either a plain metadata dict or a dbapi-like object;
# only the latter supports aux_get.
1964 if not hasattr(mydb, "aux_get"):
1965 pkg_configdict.update(mydb)
1967 aux_keys = [k for k in auxdbkeys \
1968 if not k.startswith("UNUSED_")]
1969 for k, v in izip(aux_keys, mydb.aux_get(self.mycpv, aux_keys)):
1970 pkg_configdict[k] = v
1971 for k in pkg_configdict:
1973 env_configdict.pop(k, None)
1974 slot = pkg_configdict["SLOT"]
1975 iuse = pkg_configdict["IUSE"]
# cpv:SLOT form is what the package.* atom matching expects.
1977 cpv_slot = "%s:%s" % (self.mycpv, slot)
# --- IUSE defaults ("+flag"/"-flag") feed the pkginternal layer ---
1981 for x in iuse.split():
1982 if x.startswith("+"):
1983 pkginternaluse.append(x[1:])
1984 elif x.startswith("-"):
1985 pkginternaluse.append(x)
1986 pkginternaluse = " ".join(pkginternaluse)
1987 if pkginternaluse != self.configdict["pkginternal"].get("USE", ""):
1988 self.configdict["pkginternal"]["USE"] = pkginternaluse
# --- Per-profile package.use + make.defaults USE, in profile order.
# For each profile the best-matching package.use atom (if any) is
# inserted after that profile's make.defaults USE.
1992 for i in xrange(len(self.profiles)):
1993 cpdict = self.pkgprofileuse[i].get(cp, None)
1995 keys = cpdict.keys()
1997 bestmatch = best_match_to_list(cpv_slot, keys)
1999 keys.remove(bestmatch)
2000 defaults.insert(pos, cpdict[bestmatch])
2004 if self.make_defaults_use[i]:
2005 defaults.insert(pos, self.make_defaults_use[i])
2007 defaults = " ".join(defaults)
2008 if defaults != self.configdict["defaults"].get("USE",""):
2009 self.configdict["defaults"]["USE"] = defaults
# --- Recompute forced/masked USE for this specific cpv:slot ------
2012 useforce = self._getUseForce(cpv_slot)
2013 if useforce != self.useforce:
2014 self.useforce = useforce
2017 usemask = self._getUseMask(cpv_slot)
2018 if usemask != self.usemask:
2019 self.usemask = usemask
# --- User package.use: best-matching atom's flags appended to puse.
2023 cpdict = self.pusedict.get(cp)
2025 keys = cpdict.keys()
2027 self.pusekey = best_match_to_list(cpv_slot, keys)
2029 keys.remove(self.pusekey)
2030 self.puse = (" ".join(cpdict[self.pusekey])) + " " + self.puse
2034 if oldpuse != self.puse:
2036 self.configdict["pkg"]["PKGUSE"] = self.puse[:] # For saving to PUSE file
2037 self.configdict["pkg"]["USE"] = self.puse[:] # this gets appended to USE
# Any change above requires a reset (keeping the pkg layer) so that
# the incremental variables are recomputed from scratch.
2040 self.reset(keeping_pkg=1,use_cache=use_cache)
2042 # If reset() has not been called, it's safe to return
2043 # early if IUSE has not changed.
2044 if not has_changed and previous_iuse == iuse:
2047 # Filter out USE flags that aren't part of IUSE. This has to
2048 # be done for every setcpv() call since practically every
2049 # package has different IUSE.
2050 use = set(self["USE"].split())
2051 iuse_implicit = self._get_implicit_iuse()
2052 iuse_implicit.update(x.lstrip("+-") for x in iuse.split())
2054 # Escape anything except ".*" which is supposed
2055 # to pass through from _get_implicit_iuse()
2056 regex = sorted(re.escape(x) for x in iuse_implicit)
2057 regex = "^(%s)$" % "|".join(regex)
2058 regex = regex.replace("\\.\\*", ".*")
2059 self.configdict["pkg"]["PORTAGE_IUSE"] = regex
# --- FEATURES=test vs. masked USE=test handling -------------------
2061 ebuild_force_test = self.get("EBUILD_FORCE_TEST") == "1"
2062 if ebuild_force_test and \
2063 not hasattr(self, "_ebuild_force_test_msg_shown"):
2064 self._ebuild_force_test_msg_shown = True
2065 writemsg("Forcing test.\n", noiselevel=-1)
2066 if "test" in self.features and "test" in iuse_implicit:
2067 if "test" in self.usemask and not ebuild_force_test:
2068 # "test" is in IUSE and USE=test is masked, so execution
2069 # of src_test() probably is not reliable. Therefore,
2070 # temporarily disable FEATURES=test just for this package.
2071 self["FEATURES"] = " ".join(x for x in self.features \
2076 if ebuild_force_test:
2077 self.usemask.discard("test")
2079 # Use the calculated USE flags to regenerate the USE_EXPAND flags so
2080 # that they are consistent. For optimal performance, use slice
2081 # comparison instead of startswith().
2082 use_expand = self.get("USE_EXPAND", "").split()
2083 for var in use_expand:
2084 prefix = var.lower() + "_"
2085 prefix_len = len(prefix)
# Flags currently enabled for this USE_EXPAND variable.
2086 expand_flags = set([ x[prefix_len:] for x in use \
2087 if x[:prefix_len] == prefix ])
2088 var_split = self.get(var, "").split()
2089 # Preserve the order of var_split because it can matter for things
2091 var_split = [ x for x in var_split if x in expand_flags ]
2092 var_split.extend(expand_flags.difference(var_split))
2093 has_wildcard = "*" in var_split
2095 var_split = [ x for x in var_split if x != "*" ]
2097 for x in iuse_implicit:
2098 if x[:prefix_len] == prefix:
2099 has_iuse.add(x[prefix_len:])
2101 # * means to enable everything in IUSE that's not masked
2103 for x in iuse_implicit:
2104 if x[:prefix_len] == prefix and x not in self.usemask:
2105 suffix = x[prefix_len:]
2106 var_split.append(suffix)
2109 # If there is a wildcard and no matching flags in IUSE then
2110 # LINGUAS should be unset so that all .mo files are
2113 # Make the flags unique and filter them according to IUSE.
2114 # Also, continue to preserve order for things like LINGUAS
2115 # and filter any duplicates that variable may contain.
2116 filtered_var_split = []
2117 remaining = has_iuse.intersection(var_split)
2121 filtered_var_split.append(x)
2122 var_split = filtered_var_split
2125 self[var] = " ".join(var_split)
2127 # Don't export empty USE_EXPAND vars unless the user config
2128 # exports them as empty. This is required for vars such as
2129 # LINGUAS, where unset and empty have different meanings.
2131 # ebuild.sh will see this and unset the variable so
2132 # that things like LINGUAS work properly
2138 # It's not in IUSE, so just allow the variable content
2139 # to pass through if it is defined somewhere. This
2140 # allows packages that support LINGUAS but don't
2141 # declare it in IUSE to use the variable outside of the
2142 # USE_EXPAND context.
2145 # Filtered for the ebuild environment. Store this in a separate
2146 # attribute since we still want to be able to see global USE
2147 # settings for things like emerge --info.
2149 self.configdict["pkg"]["PORTAGE_USE"] = " ".join(sorted(
2151 x in iuse_implicit))
# Build the set of flags that count as members of IUSE even when an
# ebuild does not declare them (see the inline docstring text below).
# Entries from USE_EXPAND_HIDDEN are regex fragments ("prefix_.*"),
# which setcpv() deliberately lets through un-escaped.
2153 def _get_implicit_iuse(self):
2155 Some flags are considered to
2156 be implicit members of IUSE:
2157 * Flags derived from ARCH
2158 * Flags derived from USE_EXPAND_HIDDEN variables
2159 * Masked flags, such as those from {,package}use.mask
2160 * Forced flags, such as those from {,package}use.force
2161 * build and bootstrap flags used by bootstrap.sh
2163 iuse_implicit = set()
2164 # Flags derived from ARCH.
2165 arch = self.configdict["defaults"].get("ARCH")
2167 iuse_implicit.add(arch)
2168 iuse_implicit.update(self.get("PORTAGE_ARCHLIST", "").split())
2170 # Flags derived from USE_EXPAND_HIDDEN variables
2171 # such as ELIBC, KERNEL, and USERLAND.
2172 use_expand_hidden = self.get("USE_EXPAND_HIDDEN", "").split()
2173 for x in use_expand_hidden:
2174 iuse_implicit.add(x.lower() + "_.*")
2176 # Flags that have been masked or forced.
2177 iuse_implicit.update(self.usemask)
2178 iuse_implicit.update(self.useforce)
2180 # build and bootstrap flags used by bootstrap.sh
2181 iuse_implicit.add("build")
2182 iuse_implicit.add("bootstrap")
2183 return iuse_implicit
# Compute the effective masked-USE set for `pkg` (a cpv:slot string or
# an object with a .cp attribute): for each profile, the best-matching
# package.use.mask entry is layered over that profile's use.mask, then
# everything is stacked incrementally (later profiles override).
2185 def _getUseMask(self, pkg):
2186 cp = getattr(pkg, "cp", None)
2188 cp = dep_getkey(pkg)
2191 for i in xrange(len(self.profiles)):
2192 cpdict = self.pusemask_list[i].get(cp, None)
2194 keys = cpdict.keys()
2196 best_match = best_match_to_list(pkg, keys)
2198 keys.remove(best_match)
2199 usemask.insert(pos, cpdict[best_match])
2203 if self.usemask_list[i]:
2204 usemask.insert(pos, self.usemask_list[i])
2206 return set(stack_lists(usemask, incremental=True))
# Mirror image of _getUseMask() for forced flags: stack per-profile
# package.use.force entries (most specific atom match first) with the
# profile-wide use.force lists and return the resulting set.
# NOTE(review): as with _getUseMask, the accumulator/`pos`
# initialization and several guard lines are elided from this dump.
2208 def _getUseForce(self, pkg):
2209 cp = getattr(pkg, "cp", None)
2211 cp = dep_getkey(pkg)
2214 for i in xrange(len(self.profiles)):
2215 cpdict = self.puseforce_list[i].get(cp, None)
2217 keys = cpdict.keys()
2219 best_match = best_match_to_list(pkg, keys)
2221 keys.remove(best_match)
2222 useforce.insert(pos, cpdict[best_match])
2226 if self.useforce_list[i]:
2227 useforce.insert(pos, self.useforce_list[i])
2229 return set(stack_lists(useforce, incremental=True))
# Return the package.mask atom that masks `cpv`, or None when no atom
# matches or a package.unmask atom cancels the mask.  Matching is done
# against "cpv:SLOT" so slot-qualified atoms work.
# NOTE(review): the early-return for an empty mask_atoms dict, the
# `continue`, the unmask loop guard, and the final `return x`/`return
# None` lines are elided from this dump.
2231 def _getMaskAtom(self, cpv, metadata):
2233 Take a package and return a matching package.mask atom, or None if no
2234 such atom exists or it has been cancelled by package.unmask. PROVIDE
2235 is not checked, so atoms will not be found for old-style virtuals.
2237 @param cpv: The package name
2239 @param metadata: A dictionary of raw package metadata
2240 @type metadata: dict
2242 @return: An matching atom string or None if one is not found.
2245 cp = cpv_getkey(cpv)
2246 mask_atoms = self.pmaskdict.get(cp)
2248 pkg_list = ["%s:%s" % (cpv, metadata["SLOT"])]
2249 unmask_atoms = self.punmaskdict.get(cp)
2250 for x in mask_atoms:
2251 if not match_from_list(x, pkg_list):
# A matching unmask atom cancels the mask for this package.
2254 for y in unmask_atoms:
2255 if match_from_list(y, pkg_list):
# Return the profile "packages"-style atom (possibly "*"-prefixed)
# that masks `cpv`, or None.  The "*" prefix is stripped only for
# matching purposes; the original atom is what gets returned.
# NOTE(review): the empty-dict early return and the final return
# statements are elided from this dump.
2260 def _getProfileMaskAtom(self, cpv, metadata):
2262 Take a package and return a matching profile atom, or None if no
2263 such atom exists. Note that a profile atom may or may not have a "*"
2264 prefix. PROVIDE is not checked, so atoms will not be found for
2267 @param cpv: The package name
2269 @param metadata: A dictionary of raw package metadata
2270 @type metadata: dict
2272 @return: An matching profile atom string or None if one is not found.
2275 cp = cpv_getkey(cpv)
2276 profile_atoms = self.prevmaskdict.get(cp)
2278 pkg_list = ["%s:%s" % (cpv, metadata["SLOT"])]
2279 for x in profile_atoms:
2280 if match_from_list(x.lstrip("*"), pkg_list):
# Compute the list of KEYWORDS the user would still need to accept for
# `cpv`.  Combines ACCEPT_KEYWORDS from the raw environment (backupenv)
# with the stacked config value and any matching package.keywords
# entries, then checks the package's KEYWORDS against that set.
# NOTE(review): many interior lines are elided (the `matches` flag
# setup, the incremental "-" handling loop around 2321-2330, the
# stable/testing bookkeeping around 2335-2355, and the final returns),
# so the exact control flow between the visible statements cannot be
# confirmed from this dump.
2285 def _getMissingKeywords(self, cpv, metadata):
2287 Take a package and return a list of any KEYWORDS that the user may
2288 may need to accept for the given package. If the KEYWORDS are empty
2289 and the the ** keyword has not been accepted, the returned list will
2290 contain ** alone (in order to distiguish from the case of "none
2293 @param cpv: The package name (for package.keywords support)
2295 @param metadata: A dictionary of raw package metadata
2296 @type metadata: dict
2298 @return: A list of KEYWORDS that have not been accepted.
2301 # Hack: Need to check the env directly here as otherwise stacking
2302 # doesn't work properly as negative values are lost in the config
2303 # object (bug #139600)
2304 egroups = self.configdict["backupenv"].get(
2305 "ACCEPT_KEYWORDS", "").split()
2306 mygroups = metadata["KEYWORDS"].split()
2307 # Repoman may modify this attribute as necessary.
2308 pgroups = self["ACCEPT_KEYWORDS"].split()
2310 cp = dep_getkey(cpv)
2311 pkgdict = self.pkeywordsdict.get(cp)
# Per-package ACCEPT_KEYWORDS additions from package.keywords,
# matched against "cpv:SLOT".
2314 cpv_slot_list = ["%s:%s" % (cpv, metadata["SLOT"])]
2315 for atom, pkgkeywords in pkgdict.iteritems():
2316 if match_from_list(atom, cpv_slot_list):
2318 pgroups.extend(pkgkeywords)
# Only apply incremental stacking when something was added, so plain
# profile defaults pass through untouched.
2319 if matches or egroups:
2320 pgroups.extend(egroups)
2323 if x.startswith("-"):
# "-kw" removes a previously accepted keyword.
2327 inc_pgroups.discard(x[1:])
2330 pgroups = inc_pgroups
2335 if gp == "*" or (gp == "-*" and len(mygroups) == 1):
2336 writemsg(("--- WARNING: Package '%s' uses" + \
2337 " '%s' keyword.\n") % (cpv, gp), noiselevel=-1)
2344 elif gp.startswith("~"):
2346 elif not gp.startswith("-"):
# "**" accepts anything; "~*"/"*" accept any testing/stable keyword.
2349 ((hastesting and "~*" in pgroups) or \
2350 (hasstable and "*" in pgroups) or "**" in pgroups):
2356 # If KEYWORDS is empty then we still have to return something
2357 # in order to distiguish from the case of "none missing".
2358 mygroups.append("**")
# Compute the list of licenses the user has not yet accepted for `cpv`.
# Starts from the global ACCEPT_LICENSE set, overlays any matching
# package.license entries, then reduces the package's LICENSE string
# (honoring USE conditionals and ||-groups) via _getMaskedLicenses().
# Can raise InvalidDependString from the dep parsing helpers.
# NOTE(review): the "*" early return body, the `cpdict` guard, and a
# non-conditional fast path around 2389-2391 are elided from this dump.
2362 def _getMissingLicenses(self, cpv, metadata):
2364 Take a LICENSE string and return a list any licenses that the user may
2365 may need to accept for the given package. The returned list will not
2366 contain any licenses that have already been accepted. This method
2367 can throw an InvalidDependString exception.
2369 @param cpv: The package name (for package.license support)
2371 @param metadata: A dictionary of raw package metadata
2372 @type metadata: dict
2374 @return: A list of licenses that have not been accepted.
# "*" in ACCEPT_LICENSE accepts everything.
2376 if "*" in self._accept_license:
2378 acceptable_licenses = self._accept_license
2379 cpdict = self._plicensedict.get(dep_getkey(cpv), None)
# Copy before updating so the shared global set is not mutated.
2381 acceptable_licenses = self._accept_license.copy()
2382 cpv_slot = "%s:%s" % (cpv, metadata["SLOT"])
2383 for atom in match_to_list(cpv_slot, cpdict.keys()):
2384 acceptable_licenses.update(cpdict[atom])
2386 license_str = metadata["LICENSE"]
# USE conditionals ("flag? ( ... )") require the package USE state.
2387 if "?" in license_str:
2388 use = metadata["USE"].split()
2392 license_struct = portage.dep.use_reduce(
2393 portage.dep.paren_reduce(license_str), uselist=use)
2394 license_struct = portage.dep.dep_opconvert(license_struct)
2395 return self._getMaskedLicenses(license_struct, acceptable_licenses)
# Recursive helper for _getMissingLicenses(): walk a dep_opconvert'ed
# LICENSE structure and collect the licenses that are not in
# `acceptable_licenses`.  For a "||" group, if any alternative is
# acceptable nothing is missing; otherwise all masked alternatives are
# reported since we cannot know which the user will unmask.
# NOTE(review): several `return` lines and loop/guard lines are elided
# from this dump (e.g. 2399, 2401, 2404, 2407-2412, 2415-2417).
2397 def _getMaskedLicenses(self, license_struct, acceptable_licenses):
2398 if not license_struct:
2400 if license_struct[0] == "||":
2402 for element in license_struct[1:]:
2403 if isinstance(element, list):
# Nested group: recurse and keep its masked-license list.
2405 ret.append(self._getMaskedLicenses(
2406 element, acceptable_licenses))
2410 if element in acceptable_licenses:
2413 # Return all masked licenses, since we don't know which combination
2414 # (if any) the user will decide to unmask.
# AND semantics: every element must be acceptable.
2418 for element in license_struct:
2419 if isinstance(element, list):
2421 ret.extend(self._getMaskedLicenses(element,
2422 acceptable_licenses))
2424 if element not in acceptable_licenses:
# Decide whether a package's CHOST is accepted, by matching it against
# a lazily-compiled regex built from ACCEPT_CHOSTS (falling back to
# CHOST, and to ".*" when neither is set).  On an invalid pattern a
# warning is printed and a never-matching regex ("^$") is cached.
# NOTE(review): the try/except headers around the re.compile calls are
# elided from this dump; `e` below is presumably the caught exception.
2428 def _accept_chost(self, pkg):
2430 @return True if pkg CHOST is accepted, False otherwise.
# Compile and cache the matcher on first use.
2432 if self._accept_chost_re is None:
2433 accept_chost = self.get("ACCEPT_CHOSTS", "").split()
2434 if not accept_chost:
2435 chost = self.get("CHOST")
2437 accept_chost.append(chost)
2438 if not accept_chost:
# Nothing configured: accept any CHOST.
2439 self._accept_chost_re = re.compile(".*")
2440 elif len(accept_chost) == 1:
2442 self._accept_chost_re = re.compile(r'^%s$' % accept_chost[0])
2444 writemsg("!!! Invalid ACCEPT_CHOSTS value: '%s': %s\n" % \
2445 (accept_chost[0], e), noiselevel=-1)
2446 self._accept_chost_re = re.compile("^$")
# Multiple values: match any of them via alternation.
2449 self._accept_chost_re = re.compile(
2450 r'^(%s)$' % "|".join(accept_chost))
2452 writemsg("!!! Invalid ACCEPT_CHOSTS value: '%s': %s\n" % \
2453 (" ".join(accept_chost), e), noiselevel=-1)
2454 self._accept_chost_re = re.compile("^$")
2456 return self._accept_chost_re.match(
2457 pkg.metadata.get("CHOST", "")) is not None
2459 def setinst(self,mycpv,mydbapi):
2460 """This updates the preferences for old-style virtuals,
2461 affecting the behavior of dep_expand() and dep_check()
2462 calls. It can change dbapi.match() behavior since that
2463 calls dep_expand(). However, dbapi instances have
2464 internal match caches that are not invalidated when
2465 preferences are updated here. This can potentially
2466 lead to some inconsistency (relevant to bug #1343)."""
# Ensure the virtuals mapping has been computed at least once.
2468 if len(self.virtuals) == 0:
2470 # Grab the virtuals this package provides and add them into the tree virtuals.
# `mydbapi` may be a dbapi instance (has aux_get) or a plain
# metadata dict; support both.
2471 if not hasattr(mydbapi, "aux_get"):
2472 provides = mydbapi["PROVIDE"]
2474 provides = mydbapi.aux_get(mycpv, ["PROVIDE"])[0]
# USE flags are needed to evaluate conditionals in PROVIDE.
2477 if isinstance(mydbapi, portdbapi):
2478 self.setcpv(mycpv, mydb=mydbapi)
2479 myuse = self["PORTAGE_USE"]
2480 elif not hasattr(mydbapi, "aux_get"):
2481 myuse = mydbapi["USE"]
2483 myuse = mydbapi.aux_get(mycpv, ["USE"])[0]
2484 virts = flatten(portage.dep.use_reduce(portage.dep.paren_reduce(provides), uselist=myuse.split()))
2487 cp = dep_getkey(mycpv)
# NOTE(review): the loop header over `virts` (around 2488) is
# elided from this dump; `virt` below is presumably the loop var.
2489 virt = dep_getkey(virt)
2490 providers = self.virtuals.get(virt)
# Already a known provider: nothing to record.
2491 if providers and cp in providers:
2493 providers = self._depgraphVirtuals.get(virt)
2494 if providers is None:
2496 self._depgraphVirtuals[virt] = providers
2497 if cp not in providers:
2498 providers.append(cp)
# Recompute the stacked virtuals now that preferences changed.
2502 self.virtuals = self.__getvirtuals_compile()
# NOTE(review): the `def` header of this method (original line 2504,
# presumably `def reload(self):`) is elided from this dump; only the
# body is visible.  It re-reads ${ROOT}/etc/profile.env into the
# "env.d" config layer.
2505 """Reload things like /etc/profile.env that can change during runtime."""
2506 env_d_filename = os.path.join(self["ROOT"], "etc", "profile.env")
2507 self.configdict["env.d"].clear()
# expand=False: values are stored verbatim, no shell expansion.
2508 env_d = getconfig(env_d_filename, expand=False)
2510 # env_d will be None if profile.env doesn't exist.
2511 self.configdict["env.d"].update(env_d)
# Recompute all incremental variables (USE last, since it depends on
# USE_EXPAND), stacking each one across the configdict layers in
# USE_ORDER, then apply use.force/use.mask and store the result in the
# last configlist element.  Guarded against re-entrancy via
# self.already_in_regenerate.
# NOTE(review): this dump elides a large number of interior lines
# (loop headers, else branches, the myflags initialisation, the
# USE_EXPAND "+"/"-" branch scaffolding around 2645-2668, and more),
# so the comments below describe only what the visible lines show.
2513 def regenerate(self,useonly=0,use_cache=1):
2516 This involves regenerating valid USE flags, re-expanding USE_EXPAND flags
2517 re-stacking USE flags (-flag and -*), as well as any other INCREMENTAL
2518 variables. This also updates the env.d configdict; useful in case an ebuild
2519 changes the environment.
2521 If FEATURES has already stacked, it is not stacked twice.
2523 @param useonly: Only regenerate USE flags (not any other incrementals)
2524 @type useonly: Boolean
2525 @param use_cache: Enable Caching (only for autouse)
2526 @type use_cache: Boolean
# Re-entrancy guard: autouse() can call back into regenerate().
2531 if self.already_in_regenerate:
2532 # XXX: THIS REALLY NEEDS TO GET FIXED. autouse() loops.
2533 writemsg("!!! Looping in regenerate.\n",1)
2536 self.already_in_regenerate = 1
2539 myincrementals=["USE"]
2541 myincrementals = self.incrementals
2542 myincrementals = set(myincrementals)
2543 # If self.features exists, it has already been stacked and may have
2544 # been mutated, so don't stack it again or else any mutations will be
2546 if "FEATURES" in myincrementals and hasattr(self, "features"):
2547 myincrementals.remove("FEATURES")
2549 if "USE" in myincrementals:
2550 # Process USE last because it depends on USE_EXPAND which is also
2552 myincrementals.remove("USE")
# First pass: stack every incremental except USE across the config
# layers (all but the last configlist element).
2554 for mykey in myincrementals:
2556 mydbs=self.configlist[:-1]
2560 if mykey not in curdb:
2562 #variables are already expanded
2563 mysplit = curdb[mykey].split()
2567 # "-*" is a special "minus" var that means "unset all settings".
2568 # so USE="-* gnome" will have *just* gnome enabled.
2573 # Not legal. People assume too much. Complain.
2574 writemsg(red("USE flags should not start with a '+': %s\n" % x),
# Incremental "-flag": remove a previously added flag.
2581 if (x[1:] in myflags):
2583 del myflags[myflags.index(x[1:])]
2586 # We got here, so add it now.
2587 if x not in myflags:
2591 #store setting in last element of configlist, the original environment:
2592 if myflags or mykey in self:
2593 self.configlist[-1][mykey] = " ".join(myflags)
2596 # Do the USE calculation last because it depends on USE_EXPAND.
2597 if "auto" in self["USE_ORDER"].split(":"):
2598 self.configdict["auto"]["USE"] = autouse(
2599 vartree(root=self["ROOT"], categories=self.categories,
2601 use_cache=use_cache, mysettings=self)
2603 self.configdict["auto"]["USE"] = ""
2605 use_expand = self.get("USE_EXPAND", "").split()
# Build the lookup order for USE: reversed USE_ORDER so the highest
# priority layer is processed last.
2608 for x in self["USE_ORDER"].split(":"):
2609 if x in self.configdict:
2610 self.uvlist.append(self.configdict[x])
2611 self.uvlist.reverse()
2613 # For optimal performance, use slice
2614 # comparison instead of startswith().
2616 for curdb in self.uvlist:
2617 cur_use_expand = [x for x in use_expand if x in curdb]
2618 mysplit = curdb.get("USE", "").split()
2619 if not mysplit and not cur_use_expand:
2627 writemsg(colorize("BAD", "USE flags should not start " + \
2628 "with a '+': %s\n" % x), noiselevel=-1)
2634 myflags.discard(x[1:])
# Merge USE_EXPAND variables (e.g. LINGUAS) into USE as
# "<var>_<value>" flags.
2639 for var in cur_use_expand:
2640 var_lower = var.lower()
2641 is_not_incremental = var not in myincrementals
2642 if is_not_incremental:
# Non-incremental: later layers fully replace earlier
# values, so drop all existing flags with this prefix.
2643 prefix = var_lower + "_"
2644 prefix_len = len(prefix)
2645 for x in list(myflags):
2646 if x[:prefix_len] == prefix:
2648 for x in curdb[var].split():
2650 if is_not_incremental:
2651 writemsg(colorize("BAD", "Invalid '+' " + \
2652 "operator in non-incremental variable " + \
2653 "'%s': '%s'\n" % (var, x)), noiselevel=-1)
2656 writemsg(colorize("BAD", "Invalid '+' " + \
2657 "operator in incremental variable " + \
2658 "'%s': '%s'\n" % (var, x)), noiselevel=-1)
2661 if is_not_incremental:
2662 writemsg(colorize("BAD", "Invalid '-' " + \
2663 "operator in non-incremental variable " + \
2664 "'%s': '%s'\n" % (var, x)), noiselevel=-1)
2666 myflags.discard(var_lower + "_" + x[1:])
2668 myflags.add(var_lower + "_" + x)
# FEATURES is stacked once and cached as a sorted list.
2670 if not hasattr(self, "features"):
2671 self.features = sorted(set(
2672 self.configlist[-1].get("FEATURES","").split()))
2673 self["FEATURES"] = " ".join(self.features)
# Forced flags always on, masked flags always off (mask wins).
2675 myflags.update(self.useforce)
2676 arch = self.configdict["defaults"].get("ARCH")
2680 myflags.difference_update(self.usemask)
2681 self.configlist[-1]["USE"]= " ".join(sorted(myflags))
2683 self.already_in_regenerate = 0
# Return a mapping from bare virtual package names (the part after
# "virtual/") to their provider lists, derived from getvirtuals().
# NOTE(review): the caching check, the loop header over `virts`, and
# the return statement are elided from this dump; `myroot` appears to
# be accepted for backward compatibility only.
2685 def get_virts_p(self, myroot=None):
2688 virts = self.getvirtuals()
# Keep only the package-name half of "virtual/<name>" as the key;
# first occurrence wins.
2691 vkeysplit = x.split("/")
2692 if vkeysplit[1] not in self.virts_p:
2693 self.virts_p[vkeysplit[1]] = virts[x]
# Build (and cache in self.virtuals) the stacked old-style virtuals
# mapping: profile "virtuals" files are grabbed and validated, stacked
# incrementally, then combined with installed-tree provides via
# __getvirtuals_compile().
# NOTE(review): several guards are elided from this dump (the cache
# check before 2701, the loop headers over `myvalues`, and the
# `myatom` assignment around 2718).
2696 def getvirtuals(self, myroot=None):
2697 """myroot is now ignored because, due to caching, it has always been
2698 broken for all but the first call."""
2699 myroot = self["ROOT"]
2701 return self.virtuals
# Read and validate each profile's "virtuals" file.
2704 for x in self.profiles:
2705 virtuals_file = os.path.join(x, "virtuals")
2706 virtuals_dict = grabdict(virtuals_file)
2707 for k in virtuals_dict.keys():
# Keys must be plain cp atoms (no version/operator parts).
2708 if not isvalidatom(k) or dep_getkey(k) != k:
2709 writemsg("--- Invalid virtuals atom in %s: %s\n" % \
2710 (virtuals_file, k), noiselevel=-1)
2711 del virtuals_dict[k]
2713 myvalues = virtuals_dict[k]
2716 if x.startswith("-"):
2717 # allow incrementals
2719 if not isvalidatom(myatom):
2720 writemsg("--- Invalid atom in %s: %s\n" % \
2721 (virtuals_file, x), noiselevel=-1)
# An entry left empty after validation is dropped entirely.
2724 del virtuals_dict[k]
2726 virtuals_list.append(virtuals_dict)
2728 self.dirVirtuals = stack_dictlist(virtuals_list, incremental=True)
2731 for virt in self.dirVirtuals:
2732 # Preference for virtuals decreases from left to right.
2733 self.dirVirtuals[virt].reverse()
2735 # Repoman does not use user or tree virtuals.
2736 if self.local_config and not self.treeVirtuals:
# A temporary vartree is used just to harvest installed PROVIDEs.
2737 temp_vartree = vartree(myroot, None,
2738 categories=self.categories, settings=self)
2739 # Reduce the provides into a list by CP.
2740 self.treeVirtuals = map_dictlist_vals(getCPFromCPV,temp_vartree.get_all_provides())
2742 self.virtuals = self.__getvirtuals_compile()
2743 return self.virtuals
# Combine the separate virtuals sources into one mapping.  Providers
# that are both installed and listed in a profile get top preference,
# followed by installed-only, profile-only, and depgraph-injected
# providers (see setinst()).
# NOTE(review): the ptVirtuals initialization, a `continue` for
# virtuals with no profile entry, and the final return are elided
# from this dump.
2745 def __getvirtuals_compile(self):
2746 """Stack installed and profile virtuals. Preference for virtuals
2747 decreases from left to right.
2748 Order of preference:
2749 1. installed and in profile
2754 # Virtuals by profile+tree preferences.
2757 for virt, installed_list in self.treeVirtuals.iteritems():
2758 profile_list = self.dirVirtuals.get(virt, None)
2759 if not profile_list:
2761 for cp in installed_list:
# Installed AND in the profile: highest preference bucket.
2762 if cp in profile_list:
2763 ptVirtuals.setdefault(virt, [])
2764 ptVirtuals[virt].append(cp)
2766 virtuals = stack_dictlist([ptVirtuals, self.treeVirtuals,
2767 self.dirVirtuals, self._depgraphVirtuals])
# Delete `mykey` from every dict layer in the lookup list.
# NOTE(review): the loop body (the actual `del`) is elided from this
# dump.
2770 def __delitem__(self,mykey):
2772 for x in self.lookuplist:
# Look `mykey` up in each layer in priority order; missing keys yield
# "" instead of raising, as noted below.
# NOTE(review): the hit-branch return inside the loop is elided from
# this dump.
2777 def __getitem__(self,mykey):
2778 for d in self.lookuplist:
2781 return '' # for backward compat, don't raise KeyError
# dict.get() analogue over the layered lookup list; `x` is the
# default returned on a miss.
# NOTE(review): the loop body and final return are elided from this
# dump.
2783 def get(self, k, x=None):
2784 for d in self.lookuplist:
# dict.pop() analogue: remove and return `key` from the lowest-priority
# layer that holds it, enforcing the standard at-most-one-default
# signature.
# NOTE(review): the argument-count check header and the loop body are
# elided from this dump.
2789 def pop(self, key, *args):
2792 "pop expected at most 2 arguments, got " + \
2793 repr(1 + len(args)))
# Reversed so lower-priority layers are popped first.
2795 for d in reversed(self.lookuplist):
# Deprecated Python-2-style membership test; delegates to
# __contains__ after emitting a DeprecationWarning.
# NOTE(review): the warning-category/stacklevel argument line is
# elided from this dump.
2803 def has_key(self,mykey):
2804 warnings.warn("portage.config.has_key() is deprecated, "
2805 "use the in operator instead",
2807 return mykey in self
2809 def __contains__(self, mykey):
2810 """Called to implement membership test operators (in and not in)."""
# True if any layer holds the key.  NOTE(review): the loop body and
# final return are elided from this dump.
2811 for d in self.lookuplist:
# dict.setdefault() analogue over the layered lookup list.
# NOTE(review): most of this method (lines 2817-2835) is elided from
# this dump; only the header and a loop line are visible.
2816 def setdefault(self, k, x=None):
2829 for d in self.lookuplist:
# Iterate (key, value) pairs of the effective (stacked) configuration.
# NOTE(review): the generator body of iteritems (2837-2840) and the
# `def items` header preceding the list() line are elided from this
# dump; line 2841 is evidently the body of items().
2836 def iteritems(self):
2841 return list(self.iteritems())
2843 def __setitem__(self,mykey,myvalue):
2844 "set a value; will be thrown away at reset() time"
# Only plain strings are storable; reject anything else loudly.
2845 if not isinstance(myvalue, str):
2846 raise ValueError("Invalid type being used as a value: '%s': '%s'" % (str(mykey),str(myvalue)))
# Track the key so reset() can discard it; writes land in the "env"
# layer, which has top priority for lookups.
2848 self.modifiedkeys += [mykey]
2849 self.configdict["env"][mykey]=myvalue
# NOTE(review): the `def` header of this method (original line 2851,
# presumably `def environ(self):`) is elided from this dump; only the
# body is visible.  It builds the environment dict passed to spawned
# ebuild processes, applying the filter/whitelist rules below.
2852 "return our locally-maintained environment"
2854 environ_filter = self._environ_filter
# When ${T}/environment exists the ebuild env was already saved, so
# only whitelisted variables may leak in from the calling env.
2856 filter_calling_env = False
2857 temp_dir = self.get("T")
2858 if temp_dir is not None and \
2859 os.path.exists(os.path.join(temp_dir, "environment")):
2860 filter_calling_env = True
2862 environ_whitelist = self._environ_whitelist
2863 env_d = self.configdict["env.d"]
# NOTE(review): the loop header this belongs to (around 2864) is
# elided; `x`/`myvalue` are presumably the iterated key/value.
2865 if x in environ_filter:
2868 if not isinstance(myvalue, basestring):
2869 writemsg("!!! Non-string value in config: %s=%s\n" % \
2870 (x, myvalue), noiselevel=-1)
2872 if filter_calling_env and \
2873 x not in environ_whitelist and \
2874 not self._environ_whitelist_re.match(x):
2875 # Do not allow anything to leak into the ebuild
2876 # environment unless it is explicitly whitelisted.
2877 # This ensures that variables unset by the ebuild
2881 if "HOME" not in mydict and "BUILD_PREFIX" in mydict:
2882 writemsg("*** HOME not set. Setting to "+mydict["BUILD_PREFIX"]+"\n")
2883 mydict["HOME"]=mydict["BUILD_PREFIX"][:]
2885 if filter_calling_env:
2886 phase = self.get("EBUILD_PHASE")
2890 whitelist.append("RPMDIR")
2896 # Filtered by IUSE and implicit IUSE.
2897 mydict["USE"] = self.get("PORTAGE_USE", "")
2899 # sandbox's bashrc sources /etc/profile which unsets ROOTPATH,
2900 # so we have to back it up and restore it.
2901 rootpath = mydict.get("ROOTPATH")
2903 mydict["PORTAGE_ROOTPATH"] = rootpath
2907 def thirdpartymirrors(self):
# Lazily build and cache the stacked thirdpartymirrors mapping from
# PORTDIR plus every overlay; overlays are inserted in front so they
# take precedence during the incremental stack.
2908 if getattr(self, "_thirdpartymirrors", None) is None:
2909 profileroots = [os.path.join(self["PORTDIR"], "profiles")]
2910 for x in self["PORTDIR_OVERLAY"].split():
2911 profileroots.insert(0, os.path.join(x, "profiles"))
2912 thirdparty_lists = [grabdict(os.path.join(x, "thirdpartymirrors")) for x in profileroots]
2913 self._thirdpartymirrors = stack_dictlist(thirdparty_lists, incremental=True)
2914 return self._thirdpartymirrors
# NOTE(review): the `def` header of this method (original line 2916,
# presumably `def archlist(self):`) is elided from this dump.  The
# visible body returns every arch plus its "~arch" testing variant.
2917 return flatten([[myarch, "~" + myarch] \
2918 for myarch in self["PORTAGE_ARCHLIST"].split()])
# Lazily determine (and cache as 0/1) whether SELinux support is
# active: requires the "selinux" USE flag, an importable selinux
# module, and a kernel reporting SELinux enabled.  When disabled, the
# module is evicted from sys.modules so later imports re-resolve.
# NOTE(review): some guard/else lines (2927, 2929, 2931, 2934, 2936-7)
# are elided from this dump.
2920 def selinux_enabled(self):
2921 if getattr(self, "_selinux_enabled", None) is None:
2922 self._selinux_enabled = 0
2923 if "selinux" in self["USE"].split():
# The selinux module is imported elsewhere; check it actually
# made it into the global namespace before using it.
2924 if "selinux" in globals():
2925 if selinux.is_selinux_enabled() == 1:
2926 self._selinux_enabled = 1
2928 self._selinux_enabled = 0
2930 writemsg("!!! SELinux module not found. Please verify that it was installed.\n",
2932 self._selinux_enabled = 0
2933 if self._selinux_enabled == 0:
2935 del sys.modules["selinux"]
2938 return self._selinux_enabled
# Module-level helper: escape a string for safe interpolation inside a
# double-quoted shell word.
# NOTE(review): the docstring delimiters and the final return (and a
# possible containment guard) are elided from this dump.
2940 def _shell_quote(s):
2942 Quote a string in double-quotes and use backslashes to
2943 escape any backslashes, double-quotes, dollar signs, or
2944 backquotes in the string.
# Backslash-escape each shell-special character in turn.
2946 for letter in "\\\"$`":
2948 s = s.replace(letter, "\\" + letter)
2951 # In some cases, openpty can be slow when it fails. Therefore,
2952 # stop trying to use it after the first failure.
# Module-level latch flipped on the first openpty() failure.
2953 _disable_openpty = False
2955 def _create_pty_or_pipe(copy_term_size=None):
2957 Try to create a pty and if then fails then create a normal
2960 @param copy_term_size: If a tty file descriptor is given
2961 then the term size will be copied to the pty.
2962 @type copy_term_size: int
2964 @returns: A tuple of (is_pty, master_fd, slave_fd) where
2965 is_pty is True if a pty was successfully allocated, and
2966 False if a normal pipe was allocated.
# NOTE(review): assignments to `got_pty` and some try/else scaffolding
# are elided from this dump.
2971 global _disable_openpty
2972 if _disable_openpty:
# Previous failure: skip straight to a plain pipe.
2973 master_fd, slave_fd = os.pipe()
2975 from pty import openpty
2977 master_fd, slave_fd = openpty()
2979 except EnvironmentError, e:
2980 _disable_openpty = True
2981 writemsg("openpty failed: '%s'\n" % str(e),
2984 master_fd, slave_fd = os.pipe()
2987 # Disable post-processing of output since otherwise weird
2988 # things like \n -> \r\n transformations may occur.
2990 mode = termios.tcgetattr(slave_fd)
2991 mode[1] &= ~termios.OPOST
2992 termios.tcsetattr(slave_fd, termios.TCSANOW, mode)
# Mirror the caller's terminal dimensions onto the new pty so
# curses-style output wraps correctly.
2995 copy_term_size is not None and \
2996 os.isatty(copy_term_size):
2997 from portage.output import get_term_size, set_term_size
2998 rows, columns = get_term_size()
2999 set_term_size(rows, columns, slave_fd)
3001 return (got_pty, master_fd, slave_fd)
3003 # XXX This would be to replace getstatusoutput completely.
3004 # XXX Issue: cannot block execution. Deadlock condition.
# Portage-specific process spawner: picks a spawn function (bash /
# sandbox / fakeroot), optionally switches SELinux context, optionally
# tees output through a pty to a logfile, and waits for completion.
# NOTE(review): many interior lines of this large function are elided
# from this dump (else branches, try/finally scaffolding, the
# returnpid early-return path, parts of the select/read loop), so the
# comments below only annotate the visible statements.
3005 def spawn(mystring, mysettings, debug=0, free=0, droppriv=0, sesandbox=0, fakeroot=0, **keywords):
3007 Spawn a subprocess with extra portage-specific options.
3010 Sandbox: Sandbox means the spawned process will be limited in its ability t
3011 read and write files (normally this means it is restricted to ${IMAGE}/)
3012 SElinux Sandbox: Enables sandboxing on SElinux
3013 Reduced Privileges: Drops privilages such that the process runs as portage:portage
3016 Notes: os.system cannot be used because it messes with signal handling. Instead we
3017 use the portage.process spawn* family of functions.
3019 This function waits for the process to terminate.
3021 @param mystring: Command to run
3022 @type mystring: String
3023 @param mysettings: Either a Dict of Key,Value pairs or an instance of portage.config
3024 @type mysettings: Dictionary or config instance
3025 @param debug: Ignored
3026 @type debug: Boolean
3027 @param free: Enable sandboxing for this process
3029 @param droppriv: Drop to portage:portage when running this command
3030 @type droppriv: Boolean
3031 @param sesandbox: Enable SELinux Sandboxing (toggles a context switch)
3032 @type sesandbox: Boolean
3033 @param fakeroot: Run this command with faked root privileges
3034 @type fakeroot: Boolean
3035 @param keywords: Extra options encoded as a dict, to be passed to spawn
3036 @type keywords: Dictionary
3039 1. The return code of the spawned process.
# Accept either a plain dict of env vars or a config instance.
3042 if isinstance(mysettings, dict):
3044 keywords["opt_name"]="[ %s ]" % "portage"
3046 check_config_instance(mysettings)
3047 env=mysettings.environ()
3048 keywords["opt_name"]="[%s]" % mysettings["PF"]
3050 fd_pipes = keywords.get("fd_pipes")
3051 if fd_pipes is None:
# Default to inheriting the caller's stdio.
3053 0:sys.stdin.fileno(),
3054 1:sys.stdout.fileno(),
3055 2:sys.stderr.fileno(),
3057 # In some cases the above print statements don't flush stdout, so
3058 # it needs to be flushed before allowing a child process to use it
3059 # so that output always shows in the correct order.
3060 stdout_filenos = (sys.stdout.fileno(), sys.stderr.fileno())
3061 for fd in fd_pipes.itervalues():
3062 if fd in stdout_filenos:
3067 # The default policy for the sesandbox domain only allows entry (via exec)
3068 # from shells and from binaries that belong to portage (the number of entry
3069 # points is minimized). The "tee" binary is not among the allowed entry
3070 # points, so it is spawned outside of the sesandbox domain and reads from a
3071 # pseudo-terminal that connects two domains.
3072 logfile = keywords.get("logfile")
3076 fd_pipes_orig = None
3079 del keywords["logfile"]
# Logging requires stdout and stderr to be present so they can be
# redirected into the pty.
3080 if 1 not in fd_pipes or 2 not in fd_pipes:
3081 raise ValueError(fd_pipes)
3083 fd_pipes.setdefault(0, sys.stdin.fileno())
3084 fd_pipes_orig = fd_pipes.copy()
3086 got_pty, master_fd, slave_fd = \
3087 _create_pty_or_pipe(copy_term_size=fd_pipes_orig[1])
3089 # We must set non-blocking mode before we close the slave_fd
3090 # since otherwise the fcntl call can fail on FreeBSD (the child
3091 # process might have already exited and closed slave_fd so we
3092 # have to keep it open in order to avoid FreeBSD potentially
3093 # generating an EAGAIN exception).
3095 fcntl.fcntl(master_fd, fcntl.F_SETFL,
3096 fcntl.fcntl(master_fd, fcntl.F_GETFL) | os.O_NONBLOCK)
# Child writes stdout+stderr into the pty/pipe slave end.
3098 fd_pipes[0] = fd_pipes_orig[0]
3099 fd_pipes[1] = slave_fd
3100 fd_pipes[2] = slave_fd
3101 keywords["fd_pipes"] = fd_pipes
3103 features = mysettings.features
3104 # TODO: Enable fakeroot to be used together with droppriv. The
3105 # fake ownership/permissions will have to be converted to real
3106 # permissions in the merge phase.
3107 fakeroot = fakeroot and uid != 0 and portage.process.fakeroot_capable
3108 if droppriv and not uid and portage_gid and portage_uid:
3109 keywords.update({"uid":portage_uid,"gid":portage_gid,
3110 "groups":userpriv_groups,"umask":002})
# "free" (no sandbox) when userpriv already constrains the process,
# or when no sandbox feature applies at all.
3112 free=((droppriv and "usersandbox" not in features) or \
3113 (not droppriv and "sandbox" not in features and \
3114 "usersandbox" not in features and not fakeroot))
# A sandbox inside an active sandbox is redundant.
3116 if free or "SANDBOX_ACTIVE" in os.environ:
3117 keywords["opt_name"] += " bash"
3118 spawn_func = portage.process.spawn_bash
3120 keywords["opt_name"] += " fakeroot"
3121 keywords["fakeroot_state"] = os.path.join(mysettings["T"], "fakeroot.state")
3122 spawn_func = portage.process.spawn_fakeroot
3124 keywords["opt_name"] += " sandbox"
3125 spawn_func = portage.process.spawn_sandbox
# Switch the SELinux exec context for the child when requested.
3128 con = selinux.getcontext()
3129 con = con.replace(mysettings["PORTAGE_T"],
3130 mysettings["PORTAGE_SANDBOX_T"])
3131 selinux.setexec(con)
3133 returnpid = keywords.get("returnpid")
3134 keywords["returnpid"] = True
3136 mypids.extend(spawn_func(mystring, env=env, **keywords))
# Restore the default SELinux exec context.
3141 selinux.setexec(None)
# Tee loop: copy child output from the pty master to both the
# original stdout and the logfile.
3147 log_file = open(logfile, 'a')
3148 stdout_file = os.fdopen(os.dup(fd_pipes_orig[1]), 'w')
3149 master_file = os.fdopen(master_fd, 'r')
3150 iwtd = [master_file]
3153 import array, select
3157 events = select.select(iwtd, owtd, ewtd)
3159 # Use non-blocking mode to prevent read
3160 # calls from blocking indefinitely.
3161 buf = array.array('B')
3163 buf.fromfile(f, buffsize)
3169 if f is master_file:
3170 buf.tofile(stdout_file)
3172 buf.tofile(log_file)
# Reap the child(ren) and translate the wait status into the
# shell-style return-code convention.
3178 retval = os.waitpid(pid, 0)[1]
3179 portage.process.spawned_pids.remove(pid)
3180 if retval != os.EX_OK:
# Signal deaths are reported as 128+signal via this shift.
3182 return (retval & 0xff) << 8
# Keyword arguments applied when dropping privileges for fetch
# operations (consumed by _spawn_fetch via kwargs.update()).
# NOTE(review): the closing parenthesis line (3190) of this tuple is
# elided from this dump.
3186 _userpriv_spawn_kwargs = (
3187 ("uid", portage_uid),
3188 ("gid", portage_gid),
3189 ("groups", userpriv_groups),
3193 def _spawn_fetch(settings, args, **kwargs):
3195 Spawn a process with appropriate settings for fetching, including
3196 userfetch and selinux support.
# NOTE(review): docstring delimiters and several scaffolding lines
# (e.g. the try/finally around the SELinux context switch, the final
# return) are elided from this dump.
3199 global _userpriv_spawn_kwargs
3201 # Redirect all output to stdout since some fetchers like
3202 # wget pollute stderr (if portage detects a problem then it
3203 # can send it's own message to stderr).
3204 if "fd_pipes" not in kwargs:
3206 kwargs["fd_pipes"] = {
3207 0 : sys.stdin.fileno(),
3208 1 : sys.stdout.fileno(),
# Child stderr intentionally mapped onto stdout (see above).
3209 2 : sys.stdout.fileno(),
# Drop to portage:portage for the fetch when userfetch is enabled
# and we are root.
3212 if "userfetch" in settings.features and \
3213 os.getuid() == 0 and portage_gid and portage_uid:
3214 kwargs.update(_userpriv_spawn_kwargs)
3218 if settings.selinux_enabled():
3219 con = selinux.getcontext()
3220 con = con.replace(settings["PORTAGE_T"], settings["PORTAGE_FETCH_T"])
3221 selinux.setexec(con)
3222 # bash is an allowed entrypoint, while most binaries are not
3223 if args[0] != BASH_BINARY:
3224 args = [BASH_BINARY, "-c", "exec \"$@\"", args[0]] + args
3226 rval = portage.process.spawn(args,
3227 env=dict(settings.iteritems()), **kwargs)
# Restore the default SELinux exec context afterwards.
3230 if settings.selinux_enabled():
3231 selinux.setexec(None)
# Cache of write-permission probe results keyed by file path, and the
# shell one-liner used by _userpriv_test_write_file() to probe (touch
# then remove, preserving touch's exit status).
3235 _userpriv_test_write_file_cache = {}
3236 _userpriv_test_write_cmd_script = "touch %(file_path)s 2>/dev/null ; rval=$? ; " + \
3237 "rm -f %(file_path)s ; exit $rval"
3239 def _userpriv_test_write_file(settings, file_path):
3241 Drop privileges and try to open a file for writing. The file may or
3242 may not exist, and the parent directory is assumed to exist. The file
3243 is removed before returning.
3245 @param settings: A config instance which is passed to _spawn_fetch()
3246 @param file_path: A file path to open and write.
3247 @return: True if write succeeds, False otherwise.
# NOTE(review): docstring delimiters and the cached-hit return are
# elided from this dump.
3250 global _userpriv_test_write_file_cache, _userpriv_test_write_cmd_script
# Return the memoized result when this path was probed before.
3251 rval = _userpriv_test_write_file_cache.get(file_path)
3252 if rval is not None:
# Quote the path before substituting it into the shell probe.
3255 args = [BASH_BINARY, "-c", _userpriv_test_write_cmd_script % \
3256 {"file_path" : _shell_quote(file_path)}]
3258 returncode = _spawn_fetch(settings, args)
3260 rval = returncode == os.EX_OK
3261 _userpriv_test_write_file_cache[file_path] = rval
3264 def _checksum_failure_temp_file(distdir, basename):
3266 First try to find a duplicate temp file with the same checksum and return
3267 that filename if available. Otherwise, use mkstemp to create a new unique
3268 filename._checksum_failure_.$RANDOM, rename the given file, and return the
3269 new filename. In any case, filename will be renamed or removed before this
3270 function returns a temp filename.
# NOTE(review): docstring delimiters, the lazy `checksum = None`
# initialization, and some continue/cleanup lines are elided from this
# dump.
3273 filename = os.path.join(distdir, basename)
3274 size = os.stat(filename).st_size
# Look for an existing "._checksum_failure_." sibling with the same
# size and MD5 so duplicates are coalesced instead of accumulating.
3276 tempfile_re = re.compile(re.escape(basename) + r'\._checksum_failure_\..*')
3277 for temp_filename in os.listdir(distdir):
3278 if not tempfile_re.match(temp_filename):
3280 temp_filename = os.path.join(distdir, temp_filename)
# Cheap size comparison first; only hash on a size match.
3282 if size != os.stat(temp_filename).st_size:
3287 temp_checksum = portage.checksum.perform_md5(temp_filename)
3288 except portage.exception.FileNotFound:
3289 # Apparently the temp file disappeared. Let it go.
# The original file's checksum is computed lazily, once.
3291 if checksum is None:
3292 checksum = portage.checksum.perform_md5(filename)
3293 if checksum == temp_checksum:
3295 return temp_filename
# No duplicate found: move the failed file aside under a fresh
# unique "._checksum_failure_." name.
3297 from tempfile import mkstemp
3298 fd, temp_filename = mkstemp("", basename + "._checksum_failure_.", distdir)
3300 os.rename(filename, temp_filename)
3301 return temp_filename
3303 def _check_digests(filename, digests, show_errors=1):
3305 Check digests and display a message if an error occurs.
3306 @return True if all digests match, False otherwise.
# NOTE(review): docstring delimiters, the success/failure returns, and
# the show_errors guard lines are elided from this dump.
3308 verified_ok, reason = portage.checksum.verify_all(filename, digests)
# On mismatch (and with show_errors), report which digest failed and
# the got/expected values from verify_all's reason tuple.
3311 writemsg("!!! Previously fetched" + \
3312 " file: '%s'\n" % filename, noiselevel=-1)
3313 writemsg("!!! Reason: %s\n" % reason[0],
3315 writemsg(("!!! Got: %s\n" + \
3316 "!!! Expected: %s\n") % \
3317 (reason[1], reason[2]), noiselevel=-1)
3321 def _check_distfile(filename, digests, eout, show_errors=1):
3323 @return a tuple of (match, stat_obj) where match is True if filename
3324 matches all given digests (if any) and stat_obj is a stat result, or
3325 None if the file does not exist.
# NOTE(review): docstring delimiters, the try/except around os.stat,
# and several return lines are elided from this dump.
3329 size = digests.get("size")
# With only a "size" digest available, a size comparison suffices.
3330 if size is not None and len(digests) == 1:
3334 st = os.stat(filename)
3336 return (False, None)
3337 if size is not None and size != st.st_size:
3340 if size is not None:
3341 eout.ebegin("%s %s ;-)" % (os.path.basename(filename), "size"))
3343 elif st.st_size == 0:
3344 # Zero-byte distfiles are always invalid.
# Full digest verification path; errors reported per show_errors.
3347 if _check_digests(filename, digests, show_errors=show_errors):
3348 eout.ebegin("%s %s ;-)" % (os.path.basename(filename),
3349 " ".join(sorted(digests))))
# Pattern for PORTAGE_FETCH_RESUME_MIN_SIZE values: an integer with an
# optional single SI-style suffix.  _size_suffix_map maps each suffix
# to its power-of-two exponent (used as 2**exp in fetch()).
# NOTE(review): the dict entries (original lines 3358-3367) are elided
# from this dump.
3355 _fetch_resume_size_re = re.compile('(^[\d]+)([KMGTPEZY]?$)')
3357 _size_suffix_map = {
def fetch(myuris, mysettings, listonly=0, fetchonly=0, locks_in_subdir=".locks",use_locks=1, try_mirrors=1):
	"""Fetch distfiles.  Will use digest file if available.

	myuris may be a dict mapping each distfile name to its candidate
	URIs, or a flat sequence of URIs (the basename of each URI is then
	used as the distfile name).  Candidate download locations are built
	from custom local mirrors, GENTOO_MIRRORS, thirdpartymirrors and
	the upstream URIs, honoring RESTRICT=fetch/mirror/primaryuri.
	listonly only prints the URIs; fetchonly requests non-blocking
	distfile locks.  Callers (e.g. digestgen) treat the return value
	as a boolean: nonzero on success.
	"""
	features = mysettings.features
	restrict = mysettings.get("PORTAGE_RESTRICT","").split()

	# Dropping root privileges for the download itself requires
	# secpass >= 2 plus the matching FEATURES flag.
	from portage.data import secpass
	userfetch = secpass >= 2 and "userfetch" in features
	userpriv = secpass >= 2 and "userpriv" in features

	# 'nomirror' is bad/negative logic. You Restrict mirroring, not no-mirroring.
	if "mirror" in restrict or \
		"nomirror" in restrict:
		if ("mirror" in features) and ("lmirror" not in features):
			# lmirror should allow you to bypass mirror restrictions.
			# XXX: This is not a good thing, and is temporary at best.
			print ">>> \"mirror\" mode desired and \"mirror\" restriction found; skipping fetch."

	# Generally, downloading the same file repeatedly from
	# every single available mirror is a waste of bandwidth
	# and time, so there needs to be a cap.
	checksum_failure_max_tries = 5
	v = checksum_failure_max_tries
		v = int(mysettings.get("PORTAGE_FETCH_CHECKSUM_TRY_MIRRORS",
			checksum_failure_max_tries))
	except (ValueError, OverflowError):
		writemsg("!!! Variable PORTAGE_FETCH_CHECKSUM_TRY_MIRRORS" + \
			" contains non-integer value: '%s'\n" % \
			mysettings["PORTAGE_FETCH_CHECKSUM_TRY_MIRRORS"], noiselevel=-1)
		writemsg("!!! Using PORTAGE_FETCH_CHECKSUM_TRY_MIRRORS " + \
			"default value: %s\n" % checksum_failure_max_tries,
		v = checksum_failure_max_tries
		# A value below 1 would disable fetching entirely, so it is
		# rejected in favor of the default.
		writemsg("!!! Variable PORTAGE_FETCH_CHECKSUM_TRY_MIRRORS" + \
			" contains value less than 1: '%s'\n" % v, noiselevel=-1)
		writemsg("!!! Using PORTAGE_FETCH_CHECKSUM_TRY_MIRRORS " + \
			"default value: %s\n" % checksum_failure_max_tries,
		v = checksum_failure_max_tries
	checksum_failure_max_tries = v

	# Parse PORTAGE_FETCH_RESUME_MIN_SIZE ("<int>[K|M|G|...]") into a
	# byte count; bad input falls back to the 350K default.
	fetch_resume_size_default = "350K"
	fetch_resume_size = mysettings.get("PORTAGE_FETCH_RESUME_MIN_SIZE")
	if fetch_resume_size is not None:
		fetch_resume_size = "".join(fetch_resume_size.split())
		if not fetch_resume_size:
			# If it's undefined or empty, silently use the default.
			fetch_resume_size = fetch_resume_size_default
		match = _fetch_resume_size_re.match(fetch_resume_size)
		if match is None or \
			(match.group(2).upper() not in _size_suffix_map):
			writemsg("!!! Variable PORTAGE_FETCH_RESUME_MIN_SIZE" + \
				" contains an unrecognized format: '%s'\n" % \
				mysettings["PORTAGE_FETCH_RESUME_MIN_SIZE"], noiselevel=-1)
			writemsg("!!! Using PORTAGE_FETCH_RESUME_MIN_SIZE " + \
				"default value: %s\n" % fetch_resume_size_default,
			fetch_resume_size = None
	if fetch_resume_size is None:
		fetch_resume_size = fetch_resume_size_default
		match = _fetch_resume_size_re.match(fetch_resume_size)
	fetch_resume_size = int(match.group(1)) * \
		2 ** _size_suffix_map[match.group(2).upper()]

	# Behave like the package has RESTRICT="primaryuri" after a
	# couple of checksum failures, to increase the probablility
	# of success before checksum_failure_max_tries is reached.
	checksum_failure_primaryuri = 2
	thirdpartymirrors = mysettings.thirdpartymirrors()

	# In the background parallel-fetch process, it's safe to skip checksum
	# verification of pre-existing files in $DISTDIR that have the correct
	# file size. The parent process will verify their checksums prior to
	parallel_fetchonly = "PORTAGE_PARALLEL_FETCHONLY" in mysettings
	if parallel_fetchonly:

	check_config_instance(mysettings)

	# Custom mirror definitions from CUSTOM_MIRRORS_FILE under
	# PORTAGE_CONFIGROOT.
	custommirrors = grabdict(os.path.join(mysettings["PORTAGE_CONFIGROOT"],
		CUSTOM_MIRRORS_FILE.lstrip(os.path.sep)), recursive=1)

	if listonly or ("distlocks" not in features):

	if "skiprocheck" in features:

	if not os.access(mysettings["DISTDIR"],os.W_OK) and fetch_to_ro:
			writemsg(red("!!! For fetching to a read-only filesystem, " + \
				"locking should be turned off.\n"), noiselevel=-1)
			writemsg("!!! This can be done by adding -distlocks to " + \
				"FEATURES in /etc/make.conf\n", noiselevel=-1)

	# local mirrors are always added
	if "local" in custommirrors:
		mymirrors += custommirrors["local"]

	if "nomirror" in restrict or \
		"mirror" in restrict:
		# We don't add any mirrors.
			mymirrors += [x.rstrip("/") for x in mysettings["GENTOO_MIRRORS"].split() if x]

	skip_manifest = mysettings.get("EBUILD_SKIP_MANIFEST") == "1"
	pkgdir = mysettings.get("O")
	if not (pkgdir is None or skip_manifest):
		# Expected sizes/digests for each distfile come from the
		# package's Manifest.
		mydigests = Manifest(
			pkgdir, mysettings["DISTDIR"]).getTypeDigests("DIST")
		# no digests because fetch was not called for a specific package

	ro_distdirs = [x for x in \
		shlex.split(mysettings.get("PORTAGE_RO_DISTDIRS", "")) \
		if os.path.isdir(x)]

	# Absolute paths in the mirror list are local filesystem mirrors;
	# files are copied from them instead of downloaded.
	for x in range(len(mymirrors)-1,-1,-1):
		if mymirrors[x] and mymirrors[x][0]=='/':
			fsmirrors += [mymirrors[x]]

	restrict_fetch = "fetch" in restrict
	custom_local_mirrors = custommirrors.get("local", [])
		# With fetch restriction, a normal uri may only be fetched from
		# custom local mirrors (if available). A mirror:// uri may also
		# be fetched from specific mirrors (effectively overriding fetch
		# restriction, but only for specific mirrors).
		locations = custom_local_mirrors
		locations = mymirrors

	# Normalize myuris into a flat list of (distfile, uri) pairs.
	file_uri_tuples = []
	if isinstance(myuris, dict):
		for myfile, uri_set in myuris.iteritems():
			for myuri in uri_set:
				file_uri_tuples.append((myfile, myuri))
		for myuri in myuris:
			file_uri_tuples.append((os.path.basename(myuri), myuri))

	# filedict maps each distfile to its ordered candidate URI list:
	# generic mirror locations first, then per-URI additions below.
	primaryuri_indexes={}
	primaryuri_dict = {}
	thirdpartymirror_uris = {}
	for myfile, myuri in file_uri_tuples:
		if myfile not in filedict:
			for y in range(0,len(locations)):
				filedict[myfile].append(locations[y]+"/distfiles/"+myfile)
		if myuri[:9]=="mirror://":
			eidx = myuri.find("/", 9)
				mirrorname = myuri[9:eidx]
				path = myuri[eidx+1:]

				# Try user-defined mirrors first
				if mirrorname in custommirrors:
					for cmirr in custommirrors[mirrorname]:
						filedict[myfile].append(
							cmirr.rstrip("/") + "/" + path)

				# now try the official mirrors
				if mirrorname in thirdpartymirrors:
					# Randomize to spread load across mirrors.
					shuffle(thirdpartymirrors[mirrorname])

					uris = [locmirr.rstrip("/") + "/" + path \
						for locmirr in thirdpartymirrors[mirrorname]]
					filedict[myfile].extend(uris)
					thirdpartymirror_uris.setdefault(myfile, []).extend(uris)

				if not filedict[myfile]:
					writemsg("No known mirror by the name: %s\n" % (mirrorname))
				writemsg("Invalid mirror definition in SRC_URI:\n", noiselevel=-1)
				writemsg(" %s\n" % (myuri), noiselevel=-1)
				# Only fetch from specific mirrors is allowed.
			if "primaryuri" in restrict:
				# Use the source site first.
				if myfile in primaryuri_indexes:
					primaryuri_indexes[myfile] += 1
					primaryuri_indexes[myfile] = 0
				filedict[myfile].insert(primaryuri_indexes[myfile], myuri)
				filedict[myfile].append(myuri)
			primaryuris = primaryuri_dict.get(myfile)
			if primaryuris is None:
				primaryuri_dict[myfile] = primaryuris
			primaryuris.append(myuri)

	# Prefer thirdpartymirrors over normal mirrors in cases when
	# the file does not yet exist on the normal mirrors.
	for myfile, uris in thirdpartymirror_uris.iteritems():
		primaryuri_dict.setdefault(myfile, []).extend(uris)

	# An unset FETCHCOMMAND/RESUMECOMMAND disables downloading; the
	# detailed per-file error is reported further below.
	for var_name in ("FETCHCOMMAND", "RESUMECOMMAND"):
		if not mysettings.get(var_name, None):

	if can_fetch and not fetch_to_ro:
		global _userpriv_test_write_file_cache
		dir_gid = portage_gid
		if "FAKED_MODE" in mysettings:
			# When inside fakeroot, directories with portage's gid appear
			# to have root's gid. Therefore, use root's gid instead of
			# portage's gid to avoid spurrious permissions adjustments
			# when inside fakeroot.
		if "distlocks" in features:
			distdir_dirs.append(".locks")

			for x in distdir_dirs:
				mydir = os.path.join(mysettings["DISTDIR"], x)
				write_test_file = os.path.join(
					mydir, ".__portage_test_write__")

				if st is not None and stat.S_ISDIR(st.st_mode):
					if not (userfetch or userpriv):
					if _userpriv_test_write_file(mysettings, write_test_file):

				_userpriv_test_write_file_cache.pop(write_test_file, None)
				if portage.util.ensure_dirs(mydir, gid=dir_gid, mode=dirmode, mask=modemask):
						# The directory has just been created
						# and therefore it must be empty.
					writemsg("Adjusting permissions recursively: '%s'\n" % mydir,
						raise # bail out on the first error that occurs during recursion
					if not apply_recursive_permissions(mydir,
						gid=dir_gid, dirmode=dirmode, dirmask=modemask,
						filemode=filemode, filemask=modemask, onerror=onerror):
						raise portage.exception.OperationNotPermitted(
							"Failed to apply recursive permissions for the portage group.")
		except portage.exception.PortageException, e:
			if not os.path.isdir(mysettings["DISTDIR"]):
				writemsg("!!! %s\n" % str(e), noiselevel=-1)
				writemsg("!!! Directory Not Found: DISTDIR='%s'\n" % mysettings["DISTDIR"], noiselevel=-1)
				writemsg("!!! Fetching will fail!\n", noiselevel=-1)

		not fetch_to_ro and \
		not os.access(mysettings["DISTDIR"], os.W_OK):
		writemsg("!!! No write access to '%s'\n" % mysettings["DISTDIR"],

	# Sanity-check the lock directory before taking per-file locks.
	if can_fetch and use_locks and locks_in_subdir:
			distlocks_subdir = os.path.join(mysettings["DISTDIR"], locks_in_subdir)
			if not os.access(distlocks_subdir, os.W_OK):
				writemsg("!!! No write access to write to %s. Aborting.\n" % distlocks_subdir,
			del distlocks_subdir

	distdir_writable = can_fetch and not fetch_to_ro
	failed_files = set()
	restrict_fetch_msg = False

	for myfile in filedict:
		1 partially downloaded
		2 completely downloaded

		orig_digests = mydigests.get(myfile, {})
		size = orig_digests.get("size")
			# Zero-byte distfiles are always invalid, so discard their digests.
			del mydigests[myfile]
			orig_digests.clear()

		pruned_digests = orig_digests
		if parallel_fetchonly:
			if size is not None:
				pruned_digests["size"] = size

		myfile_path = os.path.join(mysettings["DISTDIR"], myfile)
			writemsg_stdout("\n", noiselevel=-1)

			# check if there is enough space in DISTDIR to completely store myfile
			# overestimate the filesize so we aren't bitten by FS overhead
			if size is not None and hasattr(os, "statvfs"):
				vfs_stat = os.statvfs(mysettings["DISTDIR"])
					mysize = os.stat(myfile_path).st_size
					if e.errno != errno.ENOENT:
				if (size - mysize + vfs_stat.f_bsize) >= \
					(vfs_stat.f_bsize * vfs_stat.f_bavail):
					writemsg("!!! Insufficient space to store %s in %s\n" % (myfile, mysettings["DISTDIR"]), noiselevel=-1)

			if distdir_writable and use_locks:
				if not parallel_fetchonly and "parallel-fetch" in features:
					waiting_msg = ("Fetching '%s' " + \
						"in the background. " + \
						"To view fetch progress, run `tail -f " + \
						"/var/log/emerge-fetch.log` in another " + \
						"terminal.") % myfile
					msg_prefix = colorize("GOOD", " * ")
					from textwrap import wrap
					waiting_msg = "\n".join(msg_prefix + line \
						for line in wrap(waiting_msg, 65))

					lock_file = os.path.join(mysettings["DISTDIR"],
						locks_in_subdir, myfile)
					lock_file = myfile_path
					# Non-blocking lock lets a parallel fetcher skip
					# files another process is already downloading.
					lock_kwargs["flags"] = os.O_NONBLOCK
					lock_kwargs["waiting_msg"] = waiting_msg

					file_lock = portage.locks.lockfile(myfile_path,
						wantnewlockfile=1, **lock_kwargs)
				except portage.exception.TryAgain:
					writemsg((">>> File '%s' is already locked by " + \
						"another fetcher. Continuing...\n") % myfile,

			eout = portage.output.EOutput()
			eout.quiet = mysettings.get("PORTAGE_QUIET") == "1"
			match, mystat = _check_distfile(
				myfile_path, pruned_digests, eout)
				if distdir_writable:
						apply_secpass_permissions(myfile_path,
							gid=portage_gid, mode=0664, mask=02,
					except portage.exception.PortageException, e:
						if not os.access(myfile_path, os.R_OK):
							writemsg("!!! Failed to adjust permissions:" + \
								" %s\n" % str(e), noiselevel=-1)

			if distdir_writable and mystat is None:
				# Remove broken symlinks if necessary.
					os.unlink(myfile_path)

			if mystat is not None:
				if mystat.st_size == 0:
					if distdir_writable:
							os.unlink(myfile_path)
				elif distdir_writable:
					if mystat.st_size < fetch_resume_size and \
						mystat.st_size < size:
						writemsg((">>> Deleting distfile with size " + \
							"%d (smaller than " "PORTAGE_FETCH_RESU" + \
							"ME_MIN_SIZE)\n") % mystat.st_size)
							os.unlink(myfile_path)
							if e.errno != errno.ENOENT:
					elif mystat.st_size >= size:
							_checksum_failure_temp_file(
							mysettings["DISTDIR"], myfile)
						writemsg_stdout("Refetching... " + \
							"File renamed to '%s'\n\n" % \
							temp_filename, noiselevel=-1)

			# Symlink to a matching copy found in a read-only distdir.
			if distdir_writable and ro_distdirs:
				readonly_file = None
				for x in ro_distdirs:
					filename = os.path.join(x, myfile)
					match, mystat = _check_distfile(
						filename, pruned_digests, eout)
						readonly_file = filename
				if readonly_file is not None:
						os.unlink(myfile_path)
						if e.errno != errno.ENOENT:
					os.symlink(readonly_file, myfile_path)

			# Copy from local filesystem mirrors when possible.
			if fsmirrors and not os.path.exists(myfile_path) and has_space:
				for mydir in fsmirrors:
					mirror_file = os.path.join(mydir, myfile)
						shutil.copyfile(mirror_file, myfile_path)
						writemsg(_("Local mirror has file:" + \
							" %(file)s\n" % {"file":myfile}))
					except (IOError, OSError), e:
						if e.errno != errno.ENOENT:

				mystat = os.stat(myfile_path)
				if e.errno != errno.ENOENT:
					apply_secpass_permissions(
						myfile_path, gid=portage_gid, mode=0664, mask=02,
				except portage.exception.PortageException, e:
					if not os.access(myfile_path, os.R_OK):
						writemsg("!!! Failed to adjust permissions:" + \
							" %s\n" % str(e), noiselevel=-1)

				# If the file is empty then it's obviously invalid. Remove
				# the empty file and try to download if possible.
				if mystat.st_size == 0:
					if distdir_writable:
							os.unlink(myfile_path)
						except EnvironmentError:
				elif myfile not in mydigests:
					# We don't have a digest, but the file exists. We must
					# assume that it is fully downloaded.
					if mystat.st_size < mydigests[myfile]["size"] and \
						fetched = 1 # Try to resume this download.
					elif parallel_fetchonly and \
						mystat.st_size == mydigests[myfile]["size"]:
						eout = portage.output.EOutput()
							mysettings.get("PORTAGE_QUIET") == "1"
							"%s size ;-)" % (myfile, ))
						verified_ok, reason = portage.checksum.verify_all(
							myfile_path, mydigests[myfile])
							writemsg("!!! Previously fetched" + \
								" file: '%s'\n" % myfile, noiselevel=-1)
							writemsg("!!! Reason: %s\n" % reason[0],
							writemsg(("!!! Got: %s\n" + \
								"!!! Expected: %s\n") % \
								(reason[1], reason[2]), noiselevel=-1)
							if reason[0] == "Insufficient data for checksum verification":
							if distdir_writable:
									_checksum_failure_temp_file(
									mysettings["DISTDIR"], myfile)
								writemsg_stdout("Refetching... " + \
									"File renamed to '%s'\n\n" % \
									temp_filename, noiselevel=-1)
							eout = portage.output.EOutput()
								mysettings.get("PORTAGE_QUIET", None) == "1"
							digests = mydigests.get(myfile)
								digests = digests.keys()
									"%s %s ;-)" % (myfile, " ".join(digests)))
							continue # fetch any remaining files

		# Create a reversed list since that is optimal for list.pop().
		uri_list = filedict[myfile][:]
		checksum_failure_count = 0
		tried_locations = set()
			loc = uri_list.pop()
			# Eliminate duplicates here in case we've switched to
			# "primaryuri" mode on the fly due to a checksum failure.
			if loc in tried_locations:
			tried_locations.add(loc)
				writemsg_stdout(loc+" ", noiselevel=-1)

			# allow different fetchcommands per protocol
			protocol = loc[0:loc.find("://")]
			if "FETCHCOMMAND_" + protocol.upper() in mysettings:
				fetchcommand=mysettings["FETCHCOMMAND_"+protocol.upper()]
				fetchcommand=mysettings["FETCHCOMMAND"]
			if "RESUMECOMMAND_" + protocol.upper() in mysettings:
				resumecommand=mysettings["RESUMECOMMAND_"+protocol.upper()]
				resumecommand=mysettings["RESUMECOMMAND"]

						writemsg("!!! File %s isn't fetched but unable to get it.\n" % myfile,
						writemsg("!!! File %s isn't fully fetched, but unable to complete it\n" % myfile,
					for var_name in ("FETCHCOMMAND", "RESUMECOMMAND"):
						if not mysettings.get(var_name, None):
							writemsg(("!!! %s is unset. It should " + \
								"have been defined in /etc/make.globals.\n") \
								% var_name, noiselevel=-1)

			if fetched != 2 and has_space:
				#we either need to resume or start the download
						mystat = os.stat(myfile_path)
						if e.errno != errno.ENOENT:
						if mystat.st_size < fetch_resume_size:
							writemsg((">>> Deleting distfile with size " + \
								"%d (smaller than " "PORTAGE_FETCH_RESU" + \
								"ME_MIN_SIZE)\n") % mystat.st_size)
								os.unlink(myfile_path)
								if e.errno != errno.ENOENT:
					writemsg(">>> Resuming download...\n")
					locfetch=resumecommand
					locfetch=fetchcommand
				# Hide any password embedded in the URI before printing.
				writemsg_stdout(">>> Downloading '%s'\n" % \
					re.sub(r'//(.+):.+@(.+)/',r'//\1:*password*@\2/', loc))
					"DISTDIR": mysettings["DISTDIR"],
				# Tokenize the user's fetch/resume command and expand
				# the DISTDIR/URI/FILE variables in each token.
				import shlex, StringIO
				lexer = shlex.shlex(StringIO.StringIO(locfetch), posix=True)
				lexer.whitespace_split = True
				myfetch = [varexpand(x, mydict=variables) for x in lexer]

					myret = _spawn_fetch(mysettings, myfetch)
						apply_secpass_permissions(myfile_path,
							gid=portage_gid, mode=0664, mask=02)
					except portage.exception.FileNotFound, e:
					except portage.exception.PortageException, e:
						if not os.access(myfile_path, os.R_OK):
							writemsg("!!! Failed to adjust permissions:" + \
								" %s\n" % str(e), noiselevel=-1)

				# If the file is empty then it's obviously invalid. Don't
				# trust the return value from the fetcher. Remove the
				# empty file and try to download again.
					if os.stat(myfile_path).st_size == 0:
						os.unlink(myfile_path)
				except EnvironmentError:

				if mydigests is not None and myfile in mydigests:
						mystat = os.stat(myfile_path)
						if e.errno != errno.ENOENT:
						# no exception? file exists. let digestcheck() report
						# an appropriately for size or checksum errors

						# If the fetcher reported success and the file is
						# too small, it's probably because the digest is
						# bad (upstream changed the distfile). In this
						# case we don't want to attempt to resume. Show a
						# digest verification failure to that the user gets
						# a clue about what just happened.
						if myret != os.EX_OK and \
							mystat.st_size < mydigests[myfile]["size"]:
							# Fetch failed... Try the next one... Kill 404 files though.
							if (mystat[stat.ST_SIZE]<100000) and (len(myfile)>4) and not ((myfile[-5:]==".html") or (myfile[-4:]==".htm")):
								html404=re.compile("<title>.*(not found|404).*</title>",re.I|re.M)
									if html404.search(open(mysettings["DISTDIR"]+"/"+myfile).read()):
											os.unlink(mysettings["DISTDIR"]+"/"+myfile)
											writemsg(">>> Deleting invalid distfile. (Improper 404 redirect from server.)\n")
									except (IOError, OSError):
							# File is the correct size--check the checksums for the fetched
							# file NOW, for those users who don't have a stable/continuous
							# net connection. This way we have a chance to try to download
							# from another mirror...
							verified_ok,reason = portage.checksum.verify_all(mysettings["DISTDIR"]+"/"+myfile, mydigests[myfile])
								writemsg("!!! Fetched file: "+str(myfile)+" VERIFY FAILED!\n",
								writemsg("!!! Reason: "+reason[0]+"\n",
								writemsg("!!! Got: %s\n!!! Expected: %s\n" % \
									(reason[1], reason[2]), noiselevel=-1)
								if reason[0] == "Insufficient data for checksum verification":
										_checksum_failure_temp_file(
										mysettings["DISTDIR"], myfile)
									writemsg_stdout("Refetching... " + \
										"File renamed to '%s'\n\n" % \
										temp_filename, noiselevel=-1)
								checksum_failure_count += 1
								if checksum_failure_count == \
									checksum_failure_primaryuri:
									# Switch to "primaryuri" mode in order
									# to increase the probablility of
										primaryuri_dict.get(myfile)
											reversed(primaryuris))
								if checksum_failure_count >= \
									checksum_failure_max_tries:
								eout = portage.output.EOutput()
								eout.quiet = mysettings.get("PORTAGE_QUIET", None) == "1"
								digests = mydigests.get(myfile)
									eout.ebegin("%s %s ;-)" % \
										(myfile, " ".join(sorted(digests))))
				elif mydigests!=None:
					writemsg("No digest file available and download failed.\n\n",
			if use_locks and file_lock:
				portage.locks.unlockfile(file_lock)

			writemsg_stdout("\n", noiselevel=-1)

			if restrict_fetch and not restrict_fetch_msg:
				restrict_fetch_msg = True
				msg = ("\n!!! %s/%s" + \
					" has fetch restriction turned on.\n" + \
					"!!! This probably means that this " + \
					"ebuild's files must be downloaded\n" + \
					"!!! manually. See the comments in" + \
					" the ebuild for more information.\n\n") % \
					(mysettings["CATEGORY"], mysettings["PF"])
				portage.util.writemsg_level(msg,
					level=logging.ERROR, noiselevel=-1)
				have_builddir = "PORTAGE_BUILDDIR" in mysettings and \
					os.path.isdir(mysettings["PORTAGE_BUILDDIR"])
				if not parallel_fetchonly and have_builddir:
					# To spawn pkg_nofetch requires PORTAGE_BUILDDIR for
					# ensuring sane $PWD (bug #239560) and storing elog
					# messages. Therefore, calling code needs to ensure that
					# PORTAGE_BUILDDIR is already clean and locked here.

					# All the pkg_nofetch goes to stderr since it's considered
					# to be an error message.
						0 : sys.stdin.fileno(),
						1 : sys.stderr.fileno(),
						2 : sys.stderr.fileno(),

					ebuild_phase = mysettings.get("EBUILD_PHASE")
						mysettings["EBUILD_PHASE"] = "nofetch"
						spawn(_shell_quote(EBUILD_SH_BINARY) + \
							" nofetch", mysettings, fd_pipes=fd_pipes)
						if ebuild_phase is None:
							mysettings.pop("EBUILD_PHASE", None)
							mysettings["EBUILD_PHASE"] = ebuild_phase
			elif restrict_fetch:
			elif not filedict[myfile]:
				writemsg("Warning: No mirrors available for file" + \
					" '%s'\n" % (myfile), noiselevel=-1)
				writemsg("!!! Couldn't download '%s'. Aborting.\n" % myfile,
			failed_files.add(myfile)
def digestgen(myarchives, mysettings, overwrite=1, manifestonly=0, myportdb=None):
	Generates a digest file if missing. Assumes all files are available.
	DEPRECATED: this now only is a compability wrapper for
	portage.manifest.Manifest()
	NOTE: manifestonly and overwrite are useless with manifest2 and
	are therefore ignored."""
	if myportdb is None:
		writemsg("Warning: myportdb not specified to digestgen\n")
	global _doebuild_manifest_exempt_depend
		# Temporarily exempt doebuild from manifest checks while the
		# Manifest is being regenerated (restored further below).
		_doebuild_manifest_exempt_depend += 1
		fetchlist_dict = FetchlistDict(mysettings["O"], mysettings, myportdb)
		# Map each distfile back to the package versions (cpv) that
		# reference it, so missing files can be (re)fetched per package.
		for cpv in fetchlist_dict:
				for myfile in fetchlist_dict[cpv]:
					distfiles_map.setdefault(myfile, []).append(cpv)
			except portage.exception.InvalidDependString, e:
				writemsg("!!! %s\n" % str(e), noiselevel=-1)
		mytree = os.path.dirname(os.path.dirname(mysettings["O"]))
		manifest1_compat = False
		mf = Manifest(mysettings["O"], mysettings["DISTDIR"],
			fetchlist_dict=fetchlist_dict, manifest1_compat=manifest1_compat)
		# Don't require all hashes since that can trigger excessive
		# fetches when sufficient digests already exist. To ease transition
		# while Manifest 1 is being removed, only require hashes that will
		# exist before and after the transition.
		required_hash_types = set()
		required_hash_types.add("size")
		required_hash_types.add(portage.const.MANIFEST2_REQUIRED_HASH)
		dist_hashes = mf.fhashdict.get("DIST", {})
		# Collect distfiles whose recorded hashes are absent,
		# incomplete, or obviously invalid (zero size).
		missing_hashes = set()
		for myfile in distfiles_map:
			myhashes = dist_hashes.get(myfile)
				missing_hashes.add(myfile)
			if required_hash_types.difference(myhashes):
				missing_hashes.add(myfile)
			if myhashes["size"] == 0:
				missing_hashes.add(myfile)
		# Of those, find the ones that are also missing (or empty) in
		# DISTDIR and must therefore be downloaded before hashing.
		for myfile in missing_hashes:
				st = os.stat(os.path.join(mysettings["DISTDIR"], myfile))
				if e.errno != errno.ENOENT:
				missing_files.append(myfile)
				# If the file is empty then it's obviously invalid.
					missing_files.append(myfile)
			mytree = os.path.realpath(os.path.dirname(
				os.path.dirname(mysettings["O"])))
			fetch_settings = config(clone=mysettings)
			debug = mysettings.get("PORTAGE_DEBUG") == "1"
			for myfile in missing_files:
				for cpv in distfiles_map[myfile]:
					myebuild = os.path.join(mysettings["O"],
						catsplit(cpv)[1] + ".ebuild")
					# for RESTRICT=fetch, mirror, etc...
					doebuild_environment(myebuild, "fetch",
						mysettings["ROOT"], fetch_settings,
				uri_map = myportdb.getFetchMap(cpv, mytree=mytree)
				myuris = {myfile:uri_map[myfile]}
				fetch_settings["A"] = myfile # for use by pkg_nofetch()
				if fetch(myuris, fetch_settings):
					writemsg(("!!! File %s doesn't exist, can't update " + \
						"Manifest\n") % myfile, noiselevel=-1)
		writemsg_stdout(">>> Creating Manifest for %s\n" % mysettings["O"])
			mf.create(requiredDistfiles=myarchives,
				assumeDistHashesSometimes=True,
				assumeDistHashesAlways=(
					"assume-digests" in mysettings.features))
		except portage.exception.FileNotFound, e:
			writemsg(("!!! File %s doesn't exist, can't update " + \
				"Manifest\n") % e, noiselevel=-1)
		except portage.exception.PortagePackageException, e:
			writemsg(("!!! %s\n") % (e,), noiselevel=-1)
			mf.write(sign=False)
		except portage.exception.PermissionDenied, e:
			writemsg("!!! Permission Denied: %s\n" % (e,), noiselevel=-1)
		if "assume-digests" not in mysettings.features:
			# Warn about distfiles whose digests had to be assumed
			# because no local copy exists in DISTDIR.
			distlist = mf.fhashdict.get("DIST", {}).keys()
			for filename in distlist:
				if not os.path.exists(
					os.path.join(mysettings["DISTDIR"], filename)):
					auto_assumed.append(filename)
				mytree = os.path.realpath(
					os.path.dirname(os.path.dirname(mysettings["O"])))
				cp = os.path.sep.join(mysettings["O"].split(os.path.sep)[-2:])
				pkgs = myportdb.cp_list(cp, mytree=mytree)
				writemsg_stdout(" digest.assumed" + portage.output.colorize("WARN",
					str(len(auto_assumed)).rjust(18)) + "\n")
				for pkg_key in pkgs:
					fetchlist = myportdb.getFetchMap(pkg_key, mytree=mytree)
					pv = pkg_key.split("/")[1]
					for filename in auto_assumed:
						if filename in fetchlist:
								" %s::%s\n" % (pv, filename))
		# Matches the += in the try block above (restore the exemption).
		_doebuild_manifest_exempt_depend -= 1
def digestParseFile(myfilename, mysettings=None):
	"""(filename) -- Parses a given file for entries matching:
	<checksumkey> <checksum_hex_string> <filename> <filesize>
	Ignores lines that don't start with a valid checksum identifier
	and returns a dict with the filenames as keys and {checksumkey:checksum}
	DEPRECATED: this function is now only a compability wrapper for
	portage.manifest.Manifest()."""
	# Derive the package directory from the digest/Manifest path so the
	# digests can be re-read through the Manifest class.
	mysplit = myfilename.split(os.sep)
	if mysplit[-2] == "files" and mysplit[-1].startswith("digest-"):
		pkgdir = os.sep + os.sep.join(mysplit[:-2]).strip(os.sep)
	elif mysplit[-1] == "Manifest":
		pkgdir = os.sep + os.sep.join(mysplit[:-1]).strip(os.sep)

	if mysettings is None:
		# Clone the global config so the caller's settings aren't mutated.
		mysettings = config(clone=settings)

	return Manifest(pkgdir, mysettings["DISTDIR"]).getDigests()
def digestcheck(myfiles, mysettings, strict=0, justmanifest=0):
	"""Verifies checksums. Assumes all files have been downloaded.
	DEPRECATED: this is now only a compability wrapper for
	portage.manifest.Manifest()."""
	if mysettings.get("EBUILD_SKIP_MANIFEST") == "1":
	pkgdir = mysettings["O"]
	manifest_path = os.path.join(pkgdir, "Manifest")
	if not os.path.exists(manifest_path):
		writemsg("!!! Manifest file not found: '%s'\n" % manifest_path,
	mf = Manifest(pkgdir, mysettings["DISTDIR"])
	# An empty Manifest (no hash entries of any type) is rejected.
	manifest_empty = True
	for d in mf.fhashdict.itervalues():
			manifest_empty = False
		writemsg("!!! Manifest is empty: '%s'\n" % manifest_path,
	eout = portage.output.EOutput()
	eout.quiet = mysettings.get("PORTAGE_QUIET", None) == "1"
		# Ebuild/aux/misc hashes are only enforced in strict mode and
		# outside of the background parallel-fetch process.
		if strict and "PORTAGE_PARALLEL_FETCHONLY" not in mysettings:
			eout.ebegin("checking ebuild checksums ;-)")
			mf.checkTypeHashes("EBUILD")
			eout.ebegin("checking auxfile checksums ;-)")
			mf.checkTypeHashes("AUX")
			eout.ebegin("checking miscfile checksums ;-)")
			mf.checkTypeHashes("MISC", ignoreMissingFiles=True)
			eout.ebegin("checking %s ;-)" % f)
			mf.checkFileHashes(mf.findFile(f), f)
		writemsg("\n!!! Missing digest for %s\n" % str(e), noiselevel=-1)
	except portage.exception.FileNotFound, e:
		writemsg("\n!!! A file listed in the Manifest could not be found: %s\n" % str(e),
	except portage.exception.DigestException, e:
		writemsg("\n!!! Digest verification failed:\n", noiselevel=-1)
		writemsg("!!! %s\n" % e.value[0], noiselevel=-1)
		writemsg("!!! Reason: %s\n" % e.value[1], noiselevel=-1)
		writemsg("!!! Got: %s\n" % e.value[2], noiselevel=-1)
		writemsg("!!! Expected: %s\n" % e.value[3], noiselevel=-1)
	# Make sure that all of the ebuilds are actually listed in the Manifest.
	for f in os.listdir(pkgdir):
		if f.endswith(".ebuild") and not mf.hasFile("EBUILD", f):
			writemsg("!!! A file is not listed in the Manifest: '%s'\n" % \
				os.path.join(pkgdir, f), noiselevel=-1)
	""" epatch will just grab all the patches out of a directory, so we have to
	make sure there aren't any foreign files that it might grab."""
	filesdir = os.path.join(pkgdir, "files")
	for parent, dirs, files in os.walk(filesdir):
			# Hidden and CVS directories are skipped entirely.
			if d.startswith(".") or d == "CVS":
			if f.startswith("."):
			f = os.path.join(parent, f)[len(filesdir) + 1:]
			file_type = mf.findFile(f)
			if file_type != "AUX" and not f.startswith("digest-"):
				writemsg("!!! A file is not listed in the Manifest: '%s'\n" % \
					os.path.join(filesdir, f), noiselevel=-1)
# parse actionmap to spawn ebuild with the appropriate args
def spawnebuild(mydo, actionmap, mysettings, debug, alwaysdep=0,
	logfile=None, fd_pipes=None, returnpid=False):
	"""Spawn the ebuild phase 'mydo' using the command template from
	actionmap, first recursing into the phase's "dep" prerequisite
	(unless returnpid is requested)."""
	if not returnpid and \
		(alwaysdep or "noauto" not in mysettings.features):
		# process dependency first
		if "dep" in actionmap[mydo]:
			retval = spawnebuild(actionmap[mydo]["dep"], actionmap,
				mysettings, debug, alwaysdep=alwaysdep, logfile=logfile,
				fd_pipes=fd_pipes, returnpid=returnpid)

	eapi = mysettings["EAPI"]

	# configure/prepare phases do not exist for older EAPIs.
	if mydo == "configure" and eapi in ("0", "1", "2_pre1"):

	if mydo == "prepare" and eapi in ("0", "1", "2_pre1", "2_pre2"):

	kwargs = actionmap[mydo]["args"]
	mysettings["EBUILD_PHASE"] = mydo
	# Clear any stale exit-status file before spawning the phase.
	_doebuild_exit_status_unlink(
		mysettings.get("EBUILD_EXIT_STATUS_FILE"))

		phase_retval = spawn(actionmap[mydo]["cmd"] % mydo,
			mysettings, debug=debug, logfile=logfile,
			fd_pipes=fd_pipes, returnpid=returnpid, **kwargs)
		mysettings["EBUILD_PHASE"] = ""

	msg = _doebuild_exit_status_check(mydo, mysettings)
		# Report an abnormal phase exit through the elog machinery.
		from textwrap import wrap
		from portage.elog.messages import eerror
		for l in wrap(msg, 72):
			eerror(l, phase=mydo, key=mysettings.mycpv)

	_post_phase_userpriv_perms(mysettings)
	if mydo == "install":
		_check_build_log(mysettings)
		if phase_retval == os.EX_OK:
			phase_retval = _post_src_install_checks(mysettings)
# Maps an ebuild phase name to the list of shell helper commands that
# are run after that phase completes (see the
# _spawn_misc_sh(mysettings, _post_phase_cmds["install"]) call in
# _post_src_install_checks).
_post_phase_cmds = {
		"install_symlink_html_docs"],
		"preinst_selinux_labels",
		"preinst_suid_scan",
		"postinst_bsdflags"]
4476 def _post_phase_userpriv_perms(mysettings):
4477 if "userpriv" in mysettings.features and secpass >= 2:
4478 """ Privileged phases may have left files that need to be made
4479 writable to a less privileged user."""
4480 apply_recursive_permissions(mysettings["T"],
4481 uid=portage_uid, gid=portage_gid, dirmode=070, dirmask=0,
4482 filemode=060, filemask=0)
def _post_src_install_checks(mysettings):
	"""Run post-src_install QA: fix file ownership via
	_post_src_install_uid_fix, then execute the "install" helper
	command list from _post_phase_cmds through _spawn_misc_sh,
	reporting a failure of install_qa_check."""
	_post_src_install_uid_fix(mysettings)
	global _post_phase_cmds
	retval = _spawn_misc_sh(mysettings, _post_phase_cmds["install"])
	if retval != os.EX_OK:
		writemsg("!!! install_qa_check failed; exiting.\n",
def _check_build_log(mysettings, out=None):
	Search the content of $PORTAGE_LOG_FILE if it exists
	and generate the following QA Notices when appropriate:

	  * Automake "maintainer mode"
	  * Unrecognized configure options
	logfile = mysettings.get("PORTAGE_LOG_FILE")
		f = open(logfile, 'rb')
	except EnvironmentError:

	am_maintainer_mode = []
	bash_command_not_found = []
	bash_command_not_found_re = re.compile(
		r'(.*): line (\d*): (.*): command not found$')
	# "command not found" lines coming from configure scripts are
	# excluded (configure probes for commands on purpose).
	command_not_found_exclude_re = re.compile(r'/configure: line ')
	helper_missing_file = []
	helper_missing_file_re = re.compile(
		r'^!!! (do|new).*: .* does not exist$')

	configure_opts_warn = []
	configure_opts_warn_re = re.compile(
		r'^configure: WARNING: Unrecognized options: .*')
	am_maintainer_mode_re = re.compile(r'.*/missing --run .*')
	am_maintainer_mode_exclude_re = \
		re.compile(r'.*/missing --run (autoheader|makeinfo)')

	make_jobserver_re = \
		re.compile(r'g?make\[\d+\]: warning: jobserver unavailable:')

			if am_maintainer_mode_re.search(line) is not None and \
				am_maintainer_mode_exclude_re.search(line) is None:
				am_maintainer_mode.append(line.rstrip("\n"))

			if bash_command_not_found_re.match(line) is not None and \
				command_not_found_exclude_re.search(line) is None:
				bash_command_not_found.append(line.rstrip("\n"))

			if helper_missing_file_re.match(line) is not None:
				helper_missing_file.append(line.rstrip("\n"))

			if configure_opts_warn_re.match(line) is not None:
				configure_opts_warn.append(line.rstrip("\n"))

			if make_jobserver_re.match(line) is not None:
				make_jobserver.append(line.rstrip("\n"))

	from portage.elog.messages import eqawarn
	# Emit each collected line as a QA warning for the current package.
	def _eqawarn(lines):
			eqawarn(line, phase="install", key=mysettings.mycpv, out=out)
	from textwrap import wrap

	if am_maintainer_mode:
		msg = ["QA Notice: Automake \"maintainer mode\" detected:"]
		msg.extend("\t" + line for line in am_maintainer_mode)
			"If you patch Makefile.am, " + \
			"configure.in, or configure.ac then you " + \
			"should use autotools.eclass and " + \
			"eautomake or eautoreconf. Exceptions " + \
			"are limited to system packages " + \
			"for which it is impossible to run " + \
			"autotools during stage building. " + \
			"See http://www.gentoo.org/p" + \
			"roj/en/qa/autofailure.xml for more information.",

	if bash_command_not_found:
		msg = ["QA Notice: command not found:"]
		msg.extend("\t" + line for line in bash_command_not_found)

	if helper_missing_file:
		msg = ["QA Notice: file does not exist:"]
		msg.extend("\t" + line[4:] for line in helper_missing_file)

	if configure_opts_warn:
		msg = ["QA Notice: Unrecognized configure options:"]
		msg.extend("\t" + line for line in configure_opts_warn)

		msg = ["QA Notice: make jobserver unavailable:"]
		msg.extend("\t" + line for line in make_jobserver)
# Walks the whole image directory ${D} and re-owns any file or directory
# whose uid/gid matches the "portage" user/group to PORTAGE_INST_UID /
# PORTAGE_INST_GID via apply_secpass_permissions (re-applying the original
# mode, since chown may clear S_ISUID/S_ISGID).
4601 def _post_src_install_uid_fix(mysettings):
4603 Files in $D with user and group bits that match the "portage"
4604 user or group are automatically mapped to PORTAGE_INST_UID and
4605 PORTAGE_INST_GID if necessary. The chown system call may clear
4606 S_ISUID and S_ISGID bits, so those bits are restored if
4609 inst_uid = int(mysettings["PORTAGE_INST_UID"])
4610 inst_gid = int(mysettings["PORTAGE_INST_GID"])
4611 for parent, dirs, files in os.walk(mysettings["D"]):
4612 for fname in chain(dirs, files):
4613 fpath = os.path.join(parent, fname)
# lstat so symlinks themselves are examined, not their targets.
4614 mystat = os.lstat(fpath)
# Fast path: neither the uid nor the gid belongs to portage.
4615 if mystat.st_uid != portage_uid and \
4616 mystat.st_gid != portage_gid:
4620 if mystat.st_uid == portage_uid:
4622 if mystat.st_gid == portage_gid:
4624 apply_secpass_permissions(fpath, uid=myuid, gid=mygid,
4625 mode=mystat.st_mode, stat_cached=mystat,
# Builds the misc-functions.sh argument list for the post-preinst commands
# (_post_phase_cmds["preinst"]); clears EBUILD_PHASE first so the helper
# script does not re-run phase-specific hooks.
4628 def _post_pkg_preinst_cmd(mysettings):
4630 Post phase logic and tasks that have been factored out of
4631 ebuild.sh. Call preinst_mask last so that INSTALL_MASK can
4632 be used to wipe out any gmon.out files created during
4633 previous functions (in case any tools were built with -pg
4637 portage_bin_path = mysettings["PORTAGE_BIN_PATH"]
# Use the basename against the configured bin path: PORTAGE_BIN_PATH may
# differ from the global constant when portage is reinstalling itself.
4638 misc_sh_binary = os.path.join(portage_bin_path,
4639 os.path.basename(MISC_SH_BINARY))
4641 mysettings["EBUILD_PHASE"] = ""
4642 global _post_phase_cmds
4643 myargs = [_shell_quote(misc_sh_binary)] + _post_phase_cmds["preinst"]
# Builds the misc-functions.sh argument list for the post-postinst commands
# (_post_phase_cmds["postinst"]); mirrors _post_pkg_preinst_cmd.
4647 def _post_pkg_postinst_cmd(mysettings):
4649 Post phase logic and tasks that have been factored out of
4653 portage_bin_path = mysettings["PORTAGE_BIN_PATH"]
4654 misc_sh_binary = os.path.join(portage_bin_path,
4655 os.path.basename(MISC_SH_BINARY))
4657 mysettings["EBUILD_PHASE"] = ""
4658 global _post_phase_cmds
4659 myargs = [_shell_quote(misc_sh_binary)] + _post_phase_cmds["postinst"]
# Spawns misc-functions.sh with the given list of command names appended
# to its command line, honoring PORTAGE_DEBUG / PORTAGE_LOG_FILE, and
# checks the EBUILD_EXIT_STATUS_FILE afterwards, reporting any abnormal
# shell exit through eerror.
4663 def _spawn_misc_sh(mysettings, commands, **kwargs):
4665 @param mysettings: the ebuild config
4666 @type mysettings: config
4667 @param commands: a list of function names to call in misc-functions.sh
4668 @type commands: list
4670 @returns: the return value from the spawn() call
4673 # Note: PORTAGE_BIN_PATH may differ from the global
4674 # constant when portage is reinstalling itself.
4675 portage_bin_path = mysettings["PORTAGE_BIN_PATH"]
4676 misc_sh_binary = os.path.join(portage_bin_path,
4677 os.path.basename(MISC_SH_BINARY))
4678 mycommand = " ".join([_shell_quote(misc_sh_binary)] + commands)
# Remove any stale exit-status marker before spawning so the check below
# reflects this invocation only.
4679 _doebuild_exit_status_unlink(
4680 mysettings.get("EBUILD_EXIT_STATUS_FILE"))
4681 debug = mysettings.get("PORTAGE_DEBUG") == "1"
4682 logfile = mysettings.get("PORTAGE_LOG_FILE")
4683 mydo = mysettings["EBUILD_PHASE"]
4685 rval = spawn(mycommand, mysettings, debug=debug,
4686 logfile=logfile, **kwargs)
4689 msg = _doebuild_exit_status_check(mydo, mysettings)
4692 from textwrap import wrap
4693 from portage.elog.messages import eerror
4694 for l in wrap(msg, 72):
4695 eerror(l, phase=mydo, key=mysettings.mycpv)
4698 _deprecated_eapis = frozenset(["2_pre3", "2_pre2", "2_pre1"])
4700 def _eapi_is_deprecated(eapi):
4701 return eapi in _deprecated_eapis
# Returns whether the given EAPI value can be handled by this portage.
# The value is normalized with str().strip() before any comparison.
# NOTE(review): the final comparison "eapi <= portage.const.EAPI" appears
# to be a string comparison, which only orders correctly for
# single-character EAPI identifiers — confirm against portage.const.EAPI.
4703 def eapi_is_supported(eapi):
4704 eapi = str(eapi).strip()
4706 if _eapi_is_deprecated(eapi):
4715 return eapi <= portage.const.EAPI
# Populates *mysettings* with the full ebuild-phase environment for
# *myebuild*/*mydo*: package identity (CATEGORY, P, PN, PV, PR, PVR, PF),
# tree paths (EBUILD, O, FILESDIR, PORTDIR, DISTDIR, RPMDIR, ECLASSDIR),
# build locations (PORTAGE_BUILDDIR, WORKDIR, D, T, HOME,
# EBUILD_EXIT_STATUS_FILE), PATH, PORTAGE_RESTRICT, KV and
# PORTAGE_COLORMAP. Raises IncorrectParameter for an invalid ebuild path
# and UnsupportedAPIException for an unsupported EAPI (non-"depend" phases).
4717 def doebuild_environment(myebuild, mydo, myroot, mysettings, debug, use_cache, mydbapi):
4719 ebuild_path = os.path.abspath(myebuild)
4720 pkg_dir = os.path.dirname(ebuild_path)
4722 if "CATEGORY" in mysettings.configdict["pkg"]:
4723 cat = mysettings.configdict["pkg"]["CATEGORY"]
# Fall back to deriving the category from the ebuild's grandparent dir.
4725 cat = os.path.basename(normalize_path(os.path.join(pkg_dir, "..")))
# Strip the ".ebuild" suffix (7 characters) to get ${PF}.
4726 mypv = os.path.basename(ebuild_path)[:-7]
4727 mycpv = cat+"/"+mypv
4728 mysplit=pkgsplit(mypv,silent=0)
4730 raise portage.exception.IncorrectParameter(
4731 "Invalid ebuild path: '%s'" % myebuild)
4733 # Make a backup of PORTAGE_TMPDIR prior to calling config.reset()
4734 # so that the caller can override it.
4735 tmpdir = mysettings["PORTAGE_TMPDIR"]
4737 if mydo != "depend" and mycpv != mysettings.mycpv:
4738 """For performance reasons, setcpv only triggers reset when it
4739 detects a package-specific change in config. For the ebuild
4740 environment, a reset call is forced in order to ensure that the
4741 latest env.d variables are used."""
4743 mysettings.reset(use_cache=use_cache)
4744 mysettings.setcpv(mycpv, use_cache=use_cache, mydb=mydbapi)
4746 # config.reset() might have reverted a change made by the caller,
4747 # so restore it to its original value.
4748 mysettings["PORTAGE_TMPDIR"] = tmpdir
4750 mysettings.pop("EBUILD_PHASE", None) # remove from backupenv
4751 mysettings["EBUILD_PHASE"] = mydo
4753 mysettings["PORTAGE_MASTER_PID"] = str(os.getpid())
4755 # We are disabling user-specific bashrc files.
4756 mysettings["BASH_ENV"] = INVALID_ENV_FILE
4758 if debug: # Otherwise it overrides emerge's settings.
4759 # We have no other way to set debug... debug can't be passed in
4760 # due to how it's coded... Don't overwrite this so we can use it.
4761 mysettings["PORTAGE_DEBUG"] = "1"
4763 mysettings["ROOT"] = myroot
4764 mysettings["STARTDIR"] = getcwd()
4766 mysettings["PORTAGE_REPO_NAME"] = ""
4767 # bindbapi has no getRepositories() method
4768 if mydbapi and hasattr(mydbapi, "getRepositories"):
4769 # do we have an origin repository name for the current package
4770 repopath = os.sep.join(pkg_dir.split(os.path.sep)[:-2])
4771 for reponame in mydbapi.getRepositories():
4772 if mydbapi.getRepositoryPath(reponame) == repopath:
4773 mysettings["PORTAGE_REPO_NAME"] = reponame
4776 mysettings["EBUILD"] = ebuild_path
4777 mysettings["O"] = pkg_dir
4778 mysettings.configdict["pkg"]["CATEGORY"] = cat
4779 mysettings["FILESDIR"] = pkg_dir+"/files"
4780 mysettings["PF"] = mypv
4782 mysettings["PORTDIR"] = os.path.realpath(mysettings["PORTDIR"])
4783 mysettings["DISTDIR"] = os.path.realpath(mysettings["DISTDIR"])
4784 mysettings["RPMDIR"] = os.path.realpath(mysettings["RPMDIR"])
4786 mysettings["ECLASSDIR"] = mysettings["PORTDIR"]+"/eclass"
4787 mysettings["SANDBOX_LOG"] = mycpv.replace("/", "_-_")
4789 mysettings["PROFILE_PATHS"] = "\n".join(mysettings.profiles)
4790 mysettings["P"] = mysplit[0]+"-"+mysplit[1]
4791 mysettings["PN"] = mysplit[0]
4792 mysettings["PV"] = mysplit[1]
4793 mysettings["PR"] = mysplit[2]
4795 if portage.util.noiselimit < 0:
4796 mysettings["PORTAGE_QUIET"] = "1"
4798 if mydo != "depend":
4799 # Metadata vars such as EAPI and RESTRICT are
4800 # set by the above config.setcpv() call.
4801 eapi = mysettings["EAPI"]
4802 if not eapi_is_supported(eapi):
4803 # can't do anything with this.
4804 raise portage.exception.UnsupportedAPIException(mycpv, eapi)
4806 mysettings["PORTAGE_RESTRICT"] = " ".join(flatten(
4807 portage.dep.use_reduce(portage.dep.paren_reduce(
4808 mysettings["RESTRICT"]),
4809 uselist=mysettings["PORTAGE_USE"].split())))
4810 except portage.exception.InvalidDependString:
4811 # RESTRICT is validated again inside doebuild, so let this go
4812 mysettings["PORTAGE_RESTRICT"] = ""
# PVR omits the revision suffix for -r0.
4814 if mysplit[2] == "r0":
4815 mysettings["PVR"]=mysplit[1]
4817 mysettings["PVR"]=mysplit[1]+"-"+mysplit[2]
4819 if "PATH" in mysettings:
4820 mysplit=mysettings["PATH"].split(":")
4823 # Note: PORTAGE_BIN_PATH may differ from the global constant
4824 # when portage is reinstalling itself.
4825 portage_bin_path = mysettings["PORTAGE_BIN_PATH"]
4826 if portage_bin_path not in mysplit:
4827 mysettings["PATH"] = portage_bin_path + ":" + mysettings["PATH"]
4829 # Sandbox needs canonical paths.
4830 mysettings["PORTAGE_TMPDIR"] = os.path.realpath(
4831 mysettings["PORTAGE_TMPDIR"])
4832 mysettings["BUILD_PREFIX"] = mysettings["PORTAGE_TMPDIR"]+"/portage"
4833 mysettings["PKG_TMPDIR"] = mysettings["PORTAGE_TMPDIR"]+"/binpkgs"
4835 # Package {pre,post}inst and {pre,post}rm may overlap, so they must have separate
4836 # locations in order to prevent interference.
4837 if mydo in ("unmerge", "prerm", "postrm", "cleanrm"):
4838 mysettings["PORTAGE_BUILDDIR"] = os.path.join(
4839 mysettings["PKG_TMPDIR"],
4840 mysettings["CATEGORY"], mysettings["PF"])
4842 mysettings["PORTAGE_BUILDDIR"] = os.path.join(
4843 mysettings["BUILD_PREFIX"],
4844 mysettings["CATEGORY"], mysettings["PF"])
4846 mysettings["HOME"] = os.path.join(mysettings["PORTAGE_BUILDDIR"], "homedir")
4847 mysettings["WORKDIR"] = os.path.join(mysettings["PORTAGE_BUILDDIR"], "work")
4848 mysettings["D"] = os.path.join(mysettings["PORTAGE_BUILDDIR"], "image") + os.sep
4849 mysettings["T"] = os.path.join(mysettings["PORTAGE_BUILDDIR"], "temp")
4851 mysettings["PORTAGE_BASHRC"] = os.path.join(
4852 mysettings["PORTAGE_CONFIGROOT"], EBUILD_SH_ENV_FILE.lstrip(os.path.sep))
4853 mysettings["EBUILD_EXIT_STATUS_FILE"] = os.path.join(
4854 mysettings["PORTAGE_BUILDDIR"], ".exit_status")
4856 #set up KV variable -- DEP SPEEDUP :: Don't waste time. Keep var persistent.
4857 if mydo != "depend" and "KV" not in mysettings:
4858 mykv,err1=ExtractKernelVersion(os.path.join(myroot, "usr/src/linux"))
4860 # Regular source tree
4861 mysettings["KV"]=mykv
4864 mysettings.backup_changes("KV")
4866 # Allow color.map to control colors associated with einfo, ewarn, etc...
4868 for c in ("GOOD", "WARN", "BAD", "HILITE", "BRACKET"):
4869 mycolors.append("%s=$'%s'" % (c, portage.output.codes[c]))
4870 mysettings["PORTAGE_COLORMAP"] = "\n".join(mycolors)
# Prepares the build directory tree for an ebuild phase: optionally wipes
# stale dirs (HOME, and T when cleanup is requested), then creates the
# PORTAGE_BUILDDIR hierarchy plus HOME/PKG_LOGDIR/T with portage-group
# ownership via ensure_dirs/apply_secpass_permissions, logging permission
# problems; finishes by delegating to _prepare_workdir and
# _prepare_features_dirs.
4872 def prepare_build_dirs(myroot, mysettings, cleanup):
4874 clean_dirs = [mysettings["HOME"]]
4876 # We enable cleanup when we want to make sure old cruft (such as the old
4877 # environment) doesn't interfere with the current phase.
4879 clean_dirs.append(mysettings["T"])
4881 for clean_dir in clean_dirs:
4883 shutil.rmtree(clean_dir)
# Already gone is fine; EPERM gets an explanatory message.
4885 if errno.ENOENT == oe.errno:
4887 elif errno.EPERM == oe.errno:
4888 writemsg("%s\n" % oe, noiselevel=-1)
4889 writemsg("Operation Not Permitted: rmtree('%s')\n" % \
4890 clean_dir, noiselevel=-1)
# Local helper: mkdir -p that reports EPERM instead of raising.
4895 def makedirs(dir_path):
4897 os.makedirs(dir_path)
4899 if errno.EEXIST == oe.errno:
4901 elif errno.EPERM == oe.errno:
4902 writemsg("%s\n" % oe, noiselevel=-1)
4903 writemsg("Operation Not Permitted: makedirs('%s')\n" % \
4904 dir_path, noiselevel=-1)
4910 mysettings["PKG_LOGDIR"] = os.path.join(mysettings["T"], "logging")
# The two ancestors of PORTAGE_BUILDDIR (category dir and its parent).
4912 mydirs = [os.path.dirname(mysettings["PORTAGE_BUILDDIR"])]
4913 mydirs.append(os.path.dirname(mydirs[-1]))
4916 for mydir in mydirs:
4917 portage.util.ensure_dirs(mydir)
4918 portage.util.apply_secpass_permissions(mydir,
4919 gid=portage_gid, uid=portage_uid, mode=070, mask=0)
4920 for dir_key in ("PORTAGE_BUILDDIR", "HOME", "PKG_LOGDIR", "T"):
4921 """These directories don't necessarily need to be group writable.
4922 However, the setup phase is commonly run as a privileged user prior
4923 to the other phases being run by an unprivileged user. Currently,
4924 we use the portage group to ensure that the unprivileged user still
4925 has write access to these directories in any case."""
4926 portage.util.ensure_dirs(mysettings[dir_key], mode=0775)
4927 portage.util.apply_secpass_permissions(mysettings[dir_key],
4928 uid=portage_uid, gid=portage_gid)
4929 except portage.exception.PermissionDenied, e:
4930 writemsg("Permission Denied: %s\n" % str(e), noiselevel=-1)
4932 except portage.exception.OperationNotPermitted, e:
4933 writemsg("Operation Not Permitted: %s\n" % str(e), noiselevel=-1)
4935 except portage.exception.FileNotFound, e:
4936 writemsg("File Not Found: '%s'\n" % str(e), noiselevel=-1)
4939 _prepare_workdir(mysettings)
4940 _prepare_features_dirs(mysettings)
# Emits a permissions-adjustment message via writemsg; when running in
# the background (PORTAGE_BACKGROUND == "1") with a PORTAGE_LOG_FILE set,
# the message is also appended to that log file.
4942 def _adjust_perms_msg(settings, msg):
4945 writemsg(msg, noiselevel=-1)
4947 background = settings.get("PORTAGE_BACKGROUND") == "1"
4948 log_path = settings.get("PORTAGE_LOG_FILE")
4951 if background and log_path is not None:
4953 log_file = open(log_path, 'a')
4965 if log_file is not None:
# Creates and fixes permissions on the per-feature work directories
# (ccache -> CCACHE_DIR, distcc -> DISTCC_DIR with its lock/state
# subdirs). Permissions are normally only applied on initial creation;
# with FEATURES=userpriv active an additional ownership check of the
# directory contents may force a recursive fix. On any failure the
# feature is removed from FEATURES and a warning is logged.
4968 def _prepare_features_dirs(mysettings):
4972 "basedir_var":"CCACHE_DIR",
4973 "default_dir":os.path.join(mysettings["PORTAGE_TMPDIR"], "ccache"),
4974 "always_recurse":False},
4976 "basedir_var":"DISTCC_DIR",
4977 "default_dir":os.path.join(mysettings["BUILD_PREFIX"], ".distcc"),
4978 "subdirs":("lock", "state"),
4979 "always_recurse":True}
4984 restrict = mysettings.get("PORTAGE_RESTRICT","").split()
4985 from portage.data import secpass
# Dropping privileges requires full secpass, the userpriv feature, and
# that the package does not RESTRICT userpriv.
4986 droppriv = secpass >= 2 and \
4987 "userpriv" in mysettings.features and \
4988 "userpriv" not in restrict
4989 for myfeature, kwargs in features_dirs.iteritems():
4990 if myfeature in mysettings.features:
4991 basedir = mysettings[kwargs["basedir_var"]]
4993 basedir = kwargs["default_dir"]
4994 mysettings[kwargs["basedir_var"]] = basedir
4996 mydirs = [mysettings[kwargs["basedir_var"]]]
4997 if "subdirs" in kwargs:
4998 for subdir in kwargs["subdirs"]:
4999 mydirs.append(os.path.join(basedir, subdir))
5000 for mydir in mydirs:
5001 modified = portage.util.ensure_dirs(mydir)
5002 # Generally, we only want to apply permissions for
5003 # initial creation. Otherwise, we don't know exactly what
5004 # permissions the user wants, so should leave them as-is.
5005 droppriv_fix = False
5008 if st.st_gid != portage_gid or \
5009 not dirmode == (stat.S_IMODE(st.st_mode) & dirmode):
5011 if not droppriv_fix:
5012 # Check permissions of files in the directory.
5013 for filename in os.listdir(mydir):
5015 subdir_st = os.lstat(
5016 os.path.join(mydir, filename))
5019 if subdir_st.st_gid != portage_gid or \
5020 ((stat.S_ISDIR(subdir_st.st_mode) and \
5021 not dirmode == (stat.S_IMODE(subdir_st.st_mode) & dirmode))):
5026 _adjust_perms_msg(mysettings,
5027 colorize("WARN", " * ") + \
5028 "Adjusting permissions " + \
5029 "for FEATURES=userpriv: '%s'\n" % mydir)
5031 _adjust_perms_msg(mysettings,
5032 colorize("WARN", " * ") + \
5033 "Adjusting permissions " + \
5034 "for FEATURES=%s: '%s'\n" % (myfeature, mydir))
5036 if modified or kwargs["always_recurse"] or droppriv_fix:
5038 raise # The feature is disabled if a single error
5039 # occurs during permissions adjustment.
5040 if not apply_recursive_permissions(mydir,
5041 gid=portage_gid, dirmode=dirmode, dirmask=modemask,
5042 filemode=filemode, filemask=modemask, onerror=onerror):
5043 raise portage.exception.OperationNotPermitted(
5044 "Failed to apply recursive permissions for the portage group.")
# Any PortageException disables the feature for the rest of this run.
5045 except portage.exception.PortageException, e:
5046 mysettings.features.remove(myfeature)
5047 mysettings["FEATURES"] = " ".join(mysettings.features)
5048 writemsg("!!! %s\n" % str(e), noiselevel=-1)
5049 writemsg("!!! Failed resetting perms on %s='%s'\n" % \
5050 (kwargs["basedir_var"], basedir), noiselevel=-1)
5051 writemsg("!!! Disabled FEATURES='%s'\n" % myfeature,
# Applies PORTAGE_WORKDIR_MODE to WORKDIR (falling back to a default and
# warning when the setting is missing or unparsable), then configures
# build logging: validates/creates PORT_LOGDIR, disabling logging on
# permission problems, and computes PORTAGE_LOG_FILE either under
# PORT_LOGDIR (timestamped from the .logid marker) or as
# ${T}/build.log when no usable PORT_LOGDIR exists.
5055 def _prepare_workdir(mysettings):
5058 mode = mysettings["PORTAGE_WORKDIR_MODE"]
5060 parsed_mode = int(mode, 8)
# Reject anything outside the permission-bit range.
5065 if parsed_mode & 07777 != parsed_mode:
5066 raise ValueError("Invalid file mode: %s" % mode)
5068 workdir_mode = parsed_mode
5070 writemsg("!!! PORTAGE_WORKDIR_MODE is unset, using %s.\n" % oct(workdir_mode))
5071 except ValueError, e:
5073 writemsg("%s\n" % e)
5074 writemsg("!!! Unable to parse PORTAGE_WORKDIR_MODE='%s', using %s.\n" % \
5075 (mysettings["PORTAGE_WORKDIR_MODE"], oct(workdir_mode)))
# Normalize the setting to the octal form actually used.
5076 mysettings["PORTAGE_WORKDIR_MODE"] = oct(workdir_mode)
5078 apply_secpass_permissions(mysettings["WORKDIR"],
5079 uid=portage_uid, gid=portage_gid, mode=workdir_mode)
5080 except portage.exception.FileNotFound:
5081 pass # ebuild.sh will create it
5083 if mysettings.get("PORT_LOGDIR", "") == "":
5084 while "PORT_LOGDIR" in mysettings:
5085 del mysettings["PORT_LOGDIR"]
5086 if "PORT_LOGDIR" in mysettings:
5088 modified = portage.util.ensure_dirs(mysettings["PORT_LOGDIR"])
5090 apply_secpass_permissions(mysettings["PORT_LOGDIR"],
5091 uid=portage_uid, gid=portage_gid, mode=02770)
5092 except portage.exception.PortageException, e:
5093 writemsg("!!! %s\n" % str(e), noiselevel=-1)
5094 writemsg("!!! Permission issues with PORT_LOGDIR='%s'\n" % \
5095 mysettings["PORT_LOGDIR"], noiselevel=-1)
5096 writemsg("!!! Disabling logging.\n", noiselevel=-1)
5097 while "PORT_LOGDIR" in mysettings:
5098 del mysettings["PORT_LOGDIR"]
5099 if "PORT_LOGDIR" in mysettings and \
5100 os.access(mysettings["PORT_LOGDIR"], os.W_OK):
# The .logid marker's mtime provides a stable timestamp for the log name.
5101 logid_path = os.path.join(mysettings["PORTAGE_BUILDDIR"], ".logid")
5102 if not os.path.exists(logid_path):
5103 f = open(logid_path, "w")
5106 logid_time = time.strftime("%Y%m%d-%H%M%S",
5107 time.gmtime(os.stat(logid_path).st_mtime))
5108 mysettings["PORTAGE_LOG_FILE"] = os.path.join(
5109 mysettings["PORT_LOGDIR"], "%s:%s:%s.log" % \
5110 (mysettings["CATEGORY"], mysettings["PF"], logid_time))
5111 del logid_path, logid_time
5113 # When sesandbox is enabled, only log if PORT_LOGDIR is explicitly
5114 # enabled since it is possible that local SELinux security policies
5115 # do not allow output to be piped out of the sesandbox domain.
5116 if not (mysettings.selinux_enabled() and \
5117 "sesandbox" in mysettings.features):
5118 mysettings["PORTAGE_LOG_FILE"] = os.path.join(
5119 mysettings["T"], "build.log")
# Checks whether the ebuild shell for phase *mydo* exited abnormally:
# ebuild.sh is expected to create EBUILD_EXIT_STATUS_FILE on a normal
# exit (see _doebuild_exit_status_unlink, which removes it beforehand).
# Presumably returns None when the file exists or is unconfigured, and
# the error message string otherwise — intermediate return statements
# are not visible here; confirm against the callers' usage.
5121 def _doebuild_exit_status_check(mydo, settings):
5123 Returns an error string if the shell appeared
5124 to exit unsuccessfully, None otherwise.
5126 exit_status_file = settings.get("EBUILD_EXIT_STATUS_FILE")
5127 if not exit_status_file or \
5128 os.path.exists(exit_status_file):
5130 msg = ("The ebuild phase '%s' has exited " % mydo) + \
5131 "unexpectedly. This type of behavior " + \
5132 "is known to be triggered " + \
5133 "by things such as failed variable " + \
5134 "assignments (bug #190128) or bad substitution " + \
5135 "errors (bug #200313). This behavior may also be " + \
5136 "triggered by a corrupt bash binary or a hardware " + \
5137 "problem such as memory or cpu malfunction."
# On a failing phase return value, runs _doebuild_exit_status_check and
# reports any abnormal-exit message through eerror, wrapped to 72 columns.
5140 def _doebuild_exit_status_check_and_log(settings, mydo, retval):
5141 if retval != os.EX_OK:
5143 msg = _doebuild_exit_status_check(mydo, settings)
5146 from textwrap import wrap
5147 from portage.elog.messages import eerror
5148 for l in wrap(msg, 72):
5149 eerror(l, phase=mydo, key=settings.mycpv)
# Removes a stale EBUILD_EXIT_STATUS_FILE (no-op when unset), then
# double-checks with os.path.exists and unlinks again so that a leftover
# file raises an OSError rather than being silently kept.
5152 def _doebuild_exit_status_unlink(exit_status_file):
5154 Double check to make sure it really doesn't exist
5155 and raise an OSError if it still does (it shouldn't).
5156 OSError if necessary.
5158 if not exit_status_file:
5161 os.unlink(exit_status_file)
5164 if os.path.exists(exit_status_file):
5165 os.unlink(exit_status_file)
# Module-level state for doebuild()'s Manifest verification:
# counter > 0 temporarily exempts depend-phase calls from manifest checks
# (incremented around digest/manifest/help phases in doebuild).
5167 _doebuild_manifest_exempt_depend = 0
# Cache of the most recently verified Manifest object, keyed by comparing
# getFullname() against the current package's Manifest path.
5168 _doebuild_manifest_cache = None
# Ebuild paths that failed checksum/digest verification this run.
5169 _doebuild_broken_ebuilds = set()
# Manifest paths found to be inconsistent (e.g. unlisted ebuild files).
5170 _doebuild_broken_manifests = set()
5172 def doebuild(myebuild, mydo, myroot, mysettings, debug=0, listonly=0,
5173 fetchonly=0, cleanup=0, dbkey=None, use_cache=1, fetchall=0, tree=None,
5174 mydbapi=None, vartree=None, prev_mtimes=None,
5175 fd_pipes=None, returnpid=False):
5178 Wrapper function that invokes specific ebuild phases through the spawning
5181 @param myebuild: name of the ebuild to invoke the phase on (CPV)
5182 @type myebuild: String
5183 @param mydo: Phase to run
5185 @param myroot: $ROOT (usually '/', see man make.conf)
5186 @type myroot: String
5187 @param mysettings: Portage Configuration
5188 @type mysettings: instance of portage.config
5189 @param debug: Turns on various debug information (eg, debug for spawn)
5190 @type debug: Boolean
5191 @param listonly: Used to wrap fetch(); passed such that fetch only lists files required.
5192 @type listonly: Boolean
5193 @param fetchonly: Used to wrap fetch(); passed such that files are only fetched (no other actions)
5194 @type fetchonly: Boolean
5195 @param cleanup: Passed to prepare_build_dirs (TODO: what does it do?)
5196 @type cleanup: Boolean
5197 @param dbkey: A dict (usually keys and values from the depend phase, such as KEYWORDS, USE, etc..)
5198 @type dbkey: Dict or String
5199 @param use_cache: Enables the cache
5200 @type use_cache: Boolean
5201 @param fetchall: Used to wrap fetch(), fetches all URI's (even ones invalid due to USE conditionals)
5202 @type fetchall: Boolean
5203 @param tree: Which tree to use ('vartree','porttree','bintree', etc..), defaults to 'porttree'
5205 @param mydbapi: a dbapi instance to pass to various functions; this should be a portdbapi instance.
5206 @type mydbapi: portdbapi instance
5207 @param vartree: A instance of vartree; used for aux_get calls, defaults to db[myroot]['vartree']
5208 @type vartree: vartree instance
5209 @param prev_mtimes: A dict of { filename:mtime } keys used by merge() to do config_protection
5210 @type prev_mtimes: dictionary
5216 Most errors have an accompanying error message.
5218 listonly and fetchonly are only really necessary for operations involving 'fetch'
5219 prev_mtimes are only necessary for merge operations.
5220 Other variables may not be strictly required, many have defaults that are set inside of doebuild.
5225 writemsg("Warning: tree not specified to doebuild\n")
5229 # chunked out deps for each phase, so that ebuild binary can use it
5230 # to collapse targets down.
5233 "unpack": ["setup"],
5234 "prepare": ["unpack"],
5235 "configure": ["prepare"],
5236 "compile":["configure"],
5237 "test": ["compile"],
5240 "package":["install"],
5244 mydbapi = db[myroot][tree].dbapi
5246 if vartree is None and mydo in ("merge", "qmerge", "unmerge"):
5247 vartree = db[myroot]["vartree"]
5249 features = mysettings.features
5250 noauto = "noauto" in features
5251 from portage.data import secpass
5253 clean_phases = ("clean", "cleanrm")
5254 validcommands = ["help","clean","prerm","postrm","cleanrm","preinst","postinst",
5255 "config", "info", "setup", "depend",
5256 "fetch", "fetchall", "digest",
5257 "unpack", "prepare", "configure", "compile", "test",
5258 "install", "rpm", "qmerge", "merge",
5259 "package","unmerge", "manifest"]
5261 if mydo not in validcommands:
5262 validcommands.sort()
5263 writemsg("!!! doebuild: '%s' is not one of the following valid commands:" % mydo,
5265 for vcount in range(len(validcommands)):
5267 writemsg("\n!!! ", noiselevel=-1)
5268 writemsg(validcommands[vcount].ljust(11), noiselevel=-1)
5269 writemsg("\n", noiselevel=-1)
5272 if mydo == "fetchall":
5276 if mydo not in clean_phases and not os.path.exists(myebuild):
5277 writemsg("!!! doebuild: %s not found for %s\n" % (myebuild, mydo),
5281 global _doebuild_manifest_exempt_depend
5283 if "strict" in features and \
5284 "digest" not in features and \
5285 tree == "porttree" and \
5286 mydo not in ("digest", "manifest", "help") and \
5287 not _doebuild_manifest_exempt_depend:
5288 # Always verify the ebuild checksums before executing it.
5289 global _doebuild_manifest_cache, _doebuild_broken_ebuilds, \
5290 _doebuild_broken_ebuilds
5292 if myebuild in _doebuild_broken_ebuilds:
5295 pkgdir = os.path.dirname(myebuild)
5296 manifest_path = os.path.join(pkgdir, "Manifest")
5298 # Avoid checking the same Manifest several times in a row during a
5299 # regen with an empty cache.
5300 if _doebuild_manifest_cache is None or \
5301 _doebuild_manifest_cache.getFullname() != manifest_path:
5302 _doebuild_manifest_cache = None
5303 if not os.path.exists(manifest_path):
5304 out = portage.output.EOutput()
5305 out.eerror("Manifest not found for '%s'" % (myebuild,))
5306 _doebuild_broken_ebuilds.add(myebuild)
5308 mf = Manifest(pkgdir, mysettings["DISTDIR"])
5311 mf = _doebuild_manifest_cache
5314 mf.checkFileHashes("EBUILD", os.path.basename(myebuild))
5316 out = portage.output.EOutput()
5317 out.eerror("Missing digest for '%s'" % (myebuild,))
5318 _doebuild_broken_ebuilds.add(myebuild)
5320 except portage.exception.FileNotFound:
5321 out = portage.output.EOutput()
5322 out.eerror("A file listed in the Manifest " + \
5323 "could not be found: '%s'" % (myebuild,))
5324 _doebuild_broken_ebuilds.add(myebuild)
5326 except portage.exception.DigestException, e:
5327 out = portage.output.EOutput()
5328 out.eerror("Digest verification failed:")
5329 out.eerror("%s" % e.value[0])
5330 out.eerror("Reason: %s" % e.value[1])
5331 out.eerror("Got: %s" % e.value[2])
5332 out.eerror("Expected: %s" % e.value[3])
5333 _doebuild_broken_ebuilds.add(myebuild)
5336 if mf.getFullname() in _doebuild_broken_manifests:
5339 if mf is not _doebuild_manifest_cache:
5341 # Make sure that all of the ebuilds are
5342 # actually listed in the Manifest.
5343 for f in os.listdir(pkgdir):
5344 if f.endswith(".ebuild") and not mf.hasFile("EBUILD", f):
5345 f = os.path.join(pkgdir, f)
5346 if f not in _doebuild_broken_ebuilds:
5347 out = portage.output.EOutput()
5348 out.eerror("A file is not listed in the " + \
5349 "Manifest: '%s'" % (f,))
5350 _doebuild_broken_manifests.add(manifest_path)
5353 # Only cache it if the above stray files test succeeds.
5354 _doebuild_manifest_cache = mf
5356 def exit_status_check(retval):
5357 if retval != os.EX_OK:
5359 msg = _doebuild_exit_status_check(mydo, mysettings)
5362 from textwrap import wrap
5363 from portage.elog.messages import eerror
5364 for l in wrap(msg, 72):
5365 eerror(l, phase=mydo, key=mysettings.mycpv)
5368 # Note: PORTAGE_BIN_PATH may differ from the global
5369 # constant when portage is reinstalling itself.
5370 portage_bin_path = mysettings["PORTAGE_BIN_PATH"]
5371 ebuild_sh_binary = os.path.join(portage_bin_path,
5372 os.path.basename(EBUILD_SH_BINARY))
5373 misc_sh_binary = os.path.join(portage_bin_path,
5374 os.path.basename(MISC_SH_BINARY))
5377 builddir_lock = None
5382 if mydo in ("digest", "manifest", "help"):
5383 # Temporarily exempt the depend phase from manifest checks, in case
5384 # aux_get calls trigger cache generation.
5385 _doebuild_manifest_exempt_depend += 1
5387 # If we don't need much space and we don't need a constant location,
5388 # we can temporarily override PORTAGE_TMPDIR with a random temp dir
5389 # so that there's no need for locking and it can be used even if the
5390 # user isn't in the portage group.
5391 if mydo in ("info",):
5392 from tempfile import mkdtemp
5394 tmpdir_orig = mysettings["PORTAGE_TMPDIR"]
5395 mysettings["PORTAGE_TMPDIR"] = tmpdir
5397 doebuild_environment(myebuild, mydo, myroot, mysettings, debug,
5400 if mydo in clean_phases:
5401 retval = spawn(_shell_quote(ebuild_sh_binary) + " clean",
5402 mysettings, debug=debug, fd_pipes=fd_pipes, free=1,
5403 logfile=None, returnpid=returnpid)
5406 # get possible slot information from the deps file
5407 if mydo == "depend":
5408 writemsg("!!! DEBUG: dbkey: %s\n" % str(dbkey), 2)
5409 droppriv = "userpriv" in mysettings.features
5411 mypids = spawn(_shell_quote(ebuild_sh_binary) + " depend",
5412 mysettings, fd_pipes=fd_pipes, returnpid=True,
5415 elif isinstance(dbkey, dict):
5416 mysettings["dbkey"] = ""
5419 0:sys.stdin.fileno(),
5420 1:sys.stdout.fileno(),
5421 2:sys.stderr.fileno(),
5423 mypids = spawn(_shell_quote(ebuild_sh_binary) + " depend",
5425 fd_pipes=fd_pipes, returnpid=True, droppriv=droppriv)
5426 os.close(pw) # belongs exclusively to the child process now
5430 mybytes.append(os.read(pr, maxbytes))
5434 mybytes = "".join(mybytes)
5436 for k, v in izip(auxdbkeys, mybytes.splitlines()):
5438 retval = os.waitpid(mypids[0], 0)[1]
5439 portage.process.spawned_pids.remove(mypids[0])
5440 # If it got a signal, return the signal that was sent, but
5441 # shift in order to distinguish it from a return value. (just
5442 # like portage.process.spawn() would do).
5444 return (retval & 0xff) << 8
5445 # Otherwise, return its exit code.
5448 mysettings["dbkey"] = dbkey
5450 mysettings["dbkey"] = \
5451 os.path.join(mysettings.depcachedir, "aux_db_key_temp")
5453 return spawn(_shell_quote(ebuild_sh_binary) + " depend",
5457 # Validate dependency metadata here to ensure that ebuilds with invalid
5458 # data are never installed via the ebuild command. Don't bother when
5459 # returnpid == True since there's no need to do this every time emerge
5462 rval = _validate_deps(mysettings, myroot, mydo, mydbapi)
5463 if rval != os.EX_OK:
5466 if "PORTAGE_TMPDIR" not in mysettings or \
5467 not os.path.isdir(mysettings["PORTAGE_TMPDIR"]):
5468 writemsg("The directory specified in your " + \
5469 "PORTAGE_TMPDIR variable, '%s',\n" % \
5470 mysettings.get("PORTAGE_TMPDIR", ""), noiselevel=-1)
5471 writemsg("does not exist. Please create this directory or " + \
5472 "correct your PORTAGE_TMPDIR setting.\n", noiselevel=-1)
5475 # as some people use a separate PORTAGE_TMPDIR mount
5476 # we prefer that as the checks below would otherwise be pointless
5478 if os.path.exists(os.path.join(mysettings["PORTAGE_TMPDIR"], "portage")):
5479 checkdir = os.path.join(mysettings["PORTAGE_TMPDIR"], "portage")
5481 checkdir = mysettings["PORTAGE_TMPDIR"]
5483 if not os.access(checkdir, os.W_OK):
5484 writemsg("%s is not writable.\n" % checkdir + \
5485 "Likely cause is that you've mounted it as readonly.\n" \
5489 from tempfile import NamedTemporaryFile
5490 fd = NamedTemporaryFile(prefix="exectest-", dir=checkdir)
5491 os.chmod(fd.name, 0755)
5492 if not os.access(fd.name, os.X_OK):
5493 writemsg("Can not execute files in %s\n" % checkdir + \
5494 "Likely cause is that you've mounted it with one of the\n" + \
5495 "following mount options: 'noexec', 'user', 'users'\n\n" + \
5496 "Please make sure that portage can execute files in this directory.\n" \
5503 if mydo == "unmerge":
5504 return unmerge(mysettings["CATEGORY"],
5505 mysettings["PF"], myroot, mysettings, vartree=vartree)
5507 # Build directory creation isn't required for any of these.
5508 have_build_dirs = False
5509 if not mydo in ("digest", "help", "manifest"):
5510 mystatus = prepare_build_dirs(myroot, mysettings, cleanup)
5513 have_build_dirs = True
5515 # emerge handles logging externally
5517 # PORTAGE_LOG_FILE is set by the
5518 # above prepare_build_dirs() call.
5519 logfile = mysettings.get("PORTAGE_LOG_FILE")
5522 env_file = os.path.join(mysettings["T"], "environment")
5526 env_stat = os.stat(env_file)
5528 if e.errno != errno.ENOENT:
5532 saved_env = os.path.join(
5533 os.path.dirname(myebuild), "environment.bz2")
5534 if not os.path.isfile(saved_env):
5538 "bzip2 -dc %s > %s" % \
5539 (_shell_quote(saved_env),
5540 _shell_quote(env_file)))
5542 env_stat = os.stat(env_file)
5544 if e.errno != errno.ENOENT:
5547 if os.WIFEXITED(retval) and \
5548 os.WEXITSTATUS(retval) == os.EX_OK and \
5549 env_stat and env_stat.st_size > 0:
5550 # This is a signal to ebuild.sh, so that it knows to filter
5551 # out things like SANDBOX_{DENY,PREDICT,READ,WRITE} that
5552 # would be preserved between normal phases.
5553 open(env_file + ".raw", "w")
5555 writemsg(("!!! Error extracting saved " + \
5556 "environment: '%s'\n") % \
5557 saved_env, noiselevel=-1)
5561 if e.errno != errno.ENOENT:
5568 for var in ("ARCH", ):
5569 value = mysettings.get(var)
5570 if value and value.strip():
5572 msg = ("%s is not set... " % var) + \
5573 ("Are you missing the '%setc/make.profile' symlink? " % \
5574 mysettings["PORTAGE_CONFIGROOT"]) + \
5575 "Is the symlink correct? " + \
5576 "Is your portage tree complete?"
5577 from portage.elog.messages import eerror
5578 from textwrap import wrap
5579 for line in wrap(msg, 70):
5580 eerror(line, phase="setup", key=mysettings.mycpv)
5581 from portage.elog import elog_process
5582 elog_process(mysettings.mycpv, mysettings)
5584 del env_file, env_stat, saved_env
5585 _doebuild_exit_status_unlink(
5586 mysettings.get("EBUILD_EXIT_STATUS_FILE"))
5588 mysettings.pop("EBUILD_EXIT_STATUS_FILE", None)
5590 # if any of these are being called, handle them -- running them out of
5591 # the sandbox -- and stop now.
5593 return spawn(_shell_quote(ebuild_sh_binary) + " " + mydo,
5594 mysettings, debug=debug, free=1, logfile=logfile)
5595 elif mydo == "setup":
5597 _shell_quote(ebuild_sh_binary) + " " + mydo, mysettings,
5598 debug=debug, free=1, logfile=logfile, fd_pipes=fd_pipes,
5599 returnpid=returnpid)
5602 retval = exit_status_check(retval)
5604 """ Privileged phases may have left files that need to be made
5605 writable to a less privileged user."""
5606 apply_recursive_permissions(mysettings["T"],
5607 uid=portage_uid, gid=portage_gid, dirmode=070, dirmask=0,
5608 filemode=060, filemask=0)
5610 elif mydo == "preinst":
5611 phase_retval = spawn(
5612 _shell_quote(ebuild_sh_binary) + " " + mydo,
5613 mysettings, debug=debug, free=1, logfile=logfile,
5614 fd_pipes=fd_pipes, returnpid=returnpid)
5619 phase_retval = exit_status_check(phase_retval)
5620 if phase_retval == os.EX_OK:
5621 _doebuild_exit_status_unlink(
5622 mysettings.get("EBUILD_EXIT_STATUS_FILE"))
5623 mysettings.pop("EBUILD_PHASE", None)
5624 phase_retval = spawn(
5625 " ".join(_post_pkg_preinst_cmd(mysettings)),
5626 mysettings, debug=debug, free=1, logfile=logfile)
5627 phase_retval = exit_status_check(phase_retval)
5628 if phase_retval != os.EX_OK:
5629 writemsg("!!! post preinst failed; exiting.\n",
5632 elif mydo == "postinst":
5633 phase_retval = spawn(
5634 _shell_quote(ebuild_sh_binary) + " " + mydo,
5635 mysettings, debug=debug, free=1, logfile=logfile,
5636 fd_pipes=fd_pipes, returnpid=returnpid)
5641 phase_retval = exit_status_check(phase_retval)
5642 if phase_retval == os.EX_OK:
5643 _doebuild_exit_status_unlink(
5644 mysettings.get("EBUILD_EXIT_STATUS_FILE"))
5645 mysettings.pop("EBUILD_PHASE", None)
5646 phase_retval = spawn(" ".join(_post_pkg_postinst_cmd(mysettings)),
5647 mysettings, debug=debug, free=1, logfile=logfile)
5648 phase_retval = exit_status_check(phase_retval)
5649 if phase_retval != os.EX_OK:
5650 writemsg("!!! post postinst failed; exiting.\n",
5653 elif mydo in ("prerm", "postrm", "config", "info"):
5655 _shell_quote(ebuild_sh_binary) + " " + mydo,
5656 mysettings, debug=debug, free=1, logfile=logfile,
5657 fd_pipes=fd_pipes, returnpid=returnpid)
5662 retval = exit_status_check(retval)
5665 mycpv = "/".join((mysettings["CATEGORY"], mysettings["PF"]))
5667 emerge_skip_distfiles = returnpid
5668 # Only try and fetch the files if we are going to need them ...
5669 # otherwise, if user has FEATURES=noauto and they run `ebuild clean
5670 # unpack compile install`, we will try and fetch 4 times :/
5671 need_distfiles = not emerge_skip_distfiles and \
5672 (mydo in ("fetch", "unpack") or \
5673 mydo not in ("digest", "manifest") and "noauto" not in features)
5674 alist = mysettings.configdict["pkg"].get("A")
5675 aalist = mysettings.configdict["pkg"].get("AA")
5676 if need_distfiles or alist is None or aalist is None:
5677 # Make sure we get the correct tree in case there are overlays.
5678 mytree = os.path.realpath(
5679 os.path.dirname(os.path.dirname(mysettings["O"])))
5680 useflags = mysettings["PORTAGE_USE"].split()
5682 alist = mydbapi.getFetchMap(mycpv, useflags=useflags,
5684 aalist = mydbapi.getFetchMap(mycpv, mytree=mytree)
5685 except portage.exception.InvalidDependString, e:
5686 writemsg("!!! %s\n" % str(e), noiselevel=-1)
5687 writemsg("!!! Invalid SRC_URI for '%s'.\n" % mycpv,
5691 mysettings.configdict["pkg"]["A"] = " ".join(alist)
5692 mysettings.configdict["pkg"]["AA"] = " ".join(aalist)
5694 alist = set(alist.split())
5695 aalist = set(aalist.split())
5696 if ("mirror" in features) or fetchall:
5704 # Files are already checked inside fetch(),
5705 # so do not check them again.
5709 if not emerge_skip_distfiles and \
5710 need_distfiles and not fetch(
5711 fetchme, mysettings, listonly=listonly, fetchonly=fetchonly):
5714 if mydo == "fetch" and listonly:
5718 if mydo == "manifest":
5719 return not digestgen(aalist, mysettings, overwrite=1,
5720 manifestonly=1, myportdb=mydbapi)
5721 elif mydo == "digest":
5722 return not digestgen(aalist, mysettings, overwrite=1,
5724 elif "digest" in mysettings.features:
5725 digestgen(aalist, mysettings, overwrite=0, myportdb=mydbapi)
5726 except portage.exception.PermissionDenied, e:
5727 writemsg("!!! Permission Denied: %s\n" % (e,), noiselevel=-1)
5728 if mydo in ("digest", "manifest"):
5731 # See above comment about fetching only when needed
5732 if not emerge_skip_distfiles and \
5733 not digestcheck(checkme, mysettings, "strict" in features):
5739 # remove PORTAGE_ACTUAL_DISTDIR once cvs/svn is supported via SRC_URI
5740 if (mydo != "setup" and "noauto" not in features) or mydo == "unpack":
5741 orig_distdir = mysettings["DISTDIR"]
5742 mysettings["PORTAGE_ACTUAL_DISTDIR"] = orig_distdir
5743 edpath = mysettings["DISTDIR"] = \
5744 os.path.join(mysettings["PORTAGE_BUILDDIR"], "distdir")
5745 portage.util.ensure_dirs(edpath, gid=portage_gid, mode=0755)
5747 # Remove any unexpected files or directories.
5748 for x in os.listdir(edpath):
5749 symlink_path = os.path.join(edpath, x)
5750 st = os.lstat(symlink_path)
5751 if x in alist and stat.S_ISLNK(st.st_mode):
5753 if stat.S_ISDIR(st.st_mode):
5754 shutil.rmtree(symlink_path)
5756 os.unlink(symlink_path)
5758 # Check for existing symlinks and recreate if necessary.
5760 symlink_path = os.path.join(edpath, x)
5761 target = os.path.join(orig_distdir, x)
5763 link_target = os.readlink(symlink_path)
5765 os.symlink(target, symlink_path)
5767 if link_target != target:
5768 os.unlink(symlink_path)
5769 os.symlink(target, symlink_path)
5771 #initial dep checks complete; time to process main commands
5773 restrict = mysettings["PORTAGE_RESTRICT"].split()
5774 nosandbox = (("userpriv" in features) and \
5775 ("usersandbox" not in features) and \
5776 "userpriv" not in restrict and \
5777 "nouserpriv" not in restrict)
5778 if nosandbox and ("userpriv" not in features or \
5779 "userpriv" in restrict or \
5780 "nouserpriv" in restrict):
5781 nosandbox = ("sandbox" not in features and \
5782 "usersandbox" not in features)
5784 sesandbox = mysettings.selinux_enabled() and \
5785 "sesandbox" in mysettings.features
5787 droppriv = "userpriv" in mysettings.features and \
5788 "userpriv" not in restrict and \
5791 fakeroot = "fakeroot" in mysettings.features
5793 ebuild_sh = _shell_quote(ebuild_sh_binary) + " %s"
5794 misc_sh = _shell_quote(misc_sh_binary) + " dyn_%s"
5796 # args are for the to spawn function
5798 "setup": {"cmd":ebuild_sh, "args":{"droppriv":0, "free":1, "sesandbox":0, "fakeroot":0}},
5799 "unpack": {"cmd":ebuild_sh, "args":{"droppriv":droppriv, "free":0, "sesandbox":sesandbox, "fakeroot":0}},
5800 "prepare": {"cmd":ebuild_sh, "args":{"droppriv":droppriv, "free":0, "sesandbox":sesandbox, "fakeroot":0}},
5801 "configure":{"cmd":ebuild_sh, "args":{"droppriv":droppriv, "free":nosandbox, "sesandbox":sesandbox, "fakeroot":0}},
5802 "compile": {"cmd":ebuild_sh, "args":{"droppriv":droppriv, "free":nosandbox, "sesandbox":sesandbox, "fakeroot":0}},
5803 "test": {"cmd":ebuild_sh, "args":{"droppriv":droppriv, "free":nosandbox, "sesandbox":sesandbox, "fakeroot":0}},
5804 "install": {"cmd":ebuild_sh, "args":{"droppriv":0, "free":0, "sesandbox":sesandbox, "fakeroot":fakeroot}},
5805 "rpm": {"cmd":misc_sh, "args":{"droppriv":0, "free":0, "sesandbox":0, "fakeroot":fakeroot}},
5806 "package": {"cmd":misc_sh, "args":{"droppriv":0, "free":0, "sesandbox":0, "fakeroot":fakeroot}},
5809 # merge the deps in so we have again a 'full' actionmap
5810 # be glad when this can die.
5812 if len(actionmap_deps.get(x, [])):
5813 actionmap[x]["dep"] = ' '.join(actionmap_deps[x])
5815 if mydo in actionmap:
5816 if mydo == "package":
5817 # Make sure the package directory exists before executing
5818 # this phase. This can raise PermissionDenied if
5819 # the current user doesn't have write access to $PKGDIR.
5820 parent_dir = os.path.join(mysettings["PKGDIR"],
5821 mysettings["CATEGORY"])
5822 portage.util.ensure_dirs(parent_dir)
5823 if not os.access(parent_dir, os.W_OK):
5824 raise portage.exception.PermissionDenied(
5825 "access('%s', os.W_OK)" % parent_dir)
5826 retval = spawnebuild(mydo,
5827 actionmap, mysettings, debug, logfile=logfile,
5828 fd_pipes=fd_pipes, returnpid=returnpid)
5829 elif mydo=="qmerge":
5830 # check to ensure install was run. this *only* pops up when users
5831 # forget it and are using ebuild
5832 if not os.path.exists(
5833 os.path.join(mysettings["PORTAGE_BUILDDIR"], ".installed")):
5834 writemsg("!!! mydo=qmerge, but the install phase has not been run\n",
5837 # qmerge is a special phase that implies noclean.
5838 if "noclean" not in mysettings.features:
5839 mysettings.features.append("noclean")
5840 #qmerge is specifically not supposed to do a runtime dep check
5842 mysettings["CATEGORY"], mysettings["PF"], mysettings["D"],
5843 os.path.join(mysettings["PORTAGE_BUILDDIR"], "build-info"),
5844 myroot, mysettings, myebuild=mysettings["EBUILD"], mytree=tree,
5845 mydbapi=mydbapi, vartree=vartree, prev_mtimes=prev_mtimes)
5847 retval = spawnebuild("install", actionmap, mysettings, debug,
5848 alwaysdep=1, logfile=logfile, fd_pipes=fd_pipes,
5849 returnpid=returnpid)
5850 retval = exit_status_check(retval)
5851 if retval != os.EX_OK:
5852 # The merge phase handles this already. Callers don't know how
5853 # far this function got, so we have to call elog_process() here
5854 # so that it's only called once.
5855 from portage.elog import elog_process
5856 elog_process(mysettings.mycpv, mysettings)
5857 if retval == os.EX_OK:
5858 retval = merge(mysettings["CATEGORY"], mysettings["PF"],
5859 mysettings["D"], os.path.join(mysettings["PORTAGE_BUILDDIR"],
5860 "build-info"), myroot, mysettings,
5861 myebuild=mysettings["EBUILD"], mytree=tree, mydbapi=mydbapi,
5862 vartree=vartree, prev_mtimes=prev_mtimes)
5864 print "!!! Unknown mydo:",mydo
5872 mysettings["PORTAGE_TMPDIR"] = tmpdir_orig
5873 shutil.rmtree(tmpdir)
5875 portage.locks.unlockdir(builddir_lock)
5877 # Make sure that DISTDIR is restored to it's normal value before we return!
5878 if "PORTAGE_ACTUAL_DISTDIR" in mysettings:
5879 mysettings["DISTDIR"] = mysettings["PORTAGE_ACTUAL_DISTDIR"]
5880 del mysettings["PORTAGE_ACTUAL_DISTDIR"]
5884 if os.stat(logfile).st_size == 0:
5889 if mydo in ("digest", "manifest", "help"):
5890 # If necessary, depend phase has been triggered by aux_get calls
5891 # and the exemption is no longer needed.
5892 _doebuild_manifest_exempt_depend -= 1
def _validate_deps(mysettings, myroot, mydo, mydbapi):
	"""Validate the metadata (dependency strings, LICENSE-like fields and
	SLOT) of the package identified by mysettings.mycpv, reporting problems
	through portage's logging machinery.

	@param mysettings: config instance whose mycpv names the package
	@param myroot: ROOT the dependency check is performed against
	@param mydo: the ebuild phase being run; phases listed in
		invalid_dep_exempt_phases tolerate broken metadata
	@param mydbapi: dbapi used to fetch the package's metadata
	"""
	# Phases that must keep working even when metadata is broken (e.g. so a
	# damaged package can still be cleaned or unmerged).
	invalid_dep_exempt_phases = \
		set(["clean", "cleanrm", "help", "prerm", "postrm"])
	dep_keys = ["DEPEND", "RDEPEND", "PDEPEND"]
	misc_keys = ["LICENSE", "PROPERTIES", "PROVIDE", "RESTRICT", "SRC_URI"]
	other_keys = ["SLOT"]
	all_keys = dep_keys + misc_keys + other_keys
	# One aux_get round trip fetches every key; izip pairs them back into
	# a key -> value mapping.
	metadata = dict(izip(all_keys,
		mydbapi.aux_get(mysettings.mycpv, all_keys)))

	class FakeTree(object):
		# Minimal tree stand-in: dep_check() only needs a dbapi attribute.
		def __init__(self, mydb):

	dep_check_trees = {myroot:{}}
	dep_check_trees[myroot]["porttree"] = \
		FakeTree(fakedbapi(settings=mysettings))

	# Parse each *DEPEND string against an empty fakedbapi so that only
	# syntax errors (not unsatisfied atoms) are collected in msgs.
	for dep_type in dep_keys:
		mycheck = dep_check(metadata[dep_type], None, mysettings,
			myuse="all", myroot=myroot, trees=dep_check_trees)
		msgs.append(" %s: %s\n %s\n" % (
			dep_type, metadata[dep_type], mycheck[1]))

	# The misc keys only need use-conditional/paren syntax validation.
			portage.dep.use_reduce(
				portage.dep.paren_reduce(metadata[k]), matchall=True)
		except portage.exception.InvalidDependString, e:
			msgs.append(" %s: %s\n %s\n" % (
				k, metadata[k], str(e)))

	# SLOT is mandatory for every ebuild.
	if not metadata["SLOT"]:
		msgs.append(" SLOT is undefined\n")

	# Report any accumulated problems at ERROR level; only exempt phases
	# may proceed with invalid metadata.
		portage.util.writemsg_level("Error(s) in metadata for '%s':\n" % \
			(mysettings.mycpv,), level=logging.ERROR, noiselevel=-1)
			portage.util.writemsg_level(x,
				level=logging.ERROR, noiselevel=-1)
		if mydo not in invalid_dep_exempt_phases:
def _movefile(src, dest, **kwargs):
	"""Wrapper around movefile() that converts a failed move (signalled by
	a None return value) into a raised PortageException."""
	result = movefile(src, dest, **kwargs)
	if result is None:
		raise portage.exception.PortageException(
			"mv '%s' '%s'" % (src, dest))
def movefile(src,dest,newmtime=None,sstat=None,mysettings=None):
	"""moves a file from src to dest, preserving all permissions and attributes; mtime will
	be preserved even when moving across filesystems. Returns true on success and false on
	failure. Move is atomic."""
	#print "movefile("+str(src)+","+str(dest)+","+str(newmtime)+","+str(sstat)+")"

	# NOTE(review): on success the function actually returns the resulting
	# mtime (an integer) and None on failure -- see the return paths below.
	if mysettings is None:
		# Fall back to the module-global settings instance.
		mysettings = settings
	selinux_enabled = mysettings.selinux_enabled()
	# Stat the source unless the caller already supplied sstat.
	except SystemExit, e:
	except Exception, e:
		print "!!! Stating source file failed... movefile()"

	# Determine whether dest exists; fall back to statting its parent so
	# dstat always refers to something on the destination filesystem
	# (needed for the cross-device check further down).
		dstat=os.lstat(dest)
	except (OSError, IOError):
		dstat=os.lstat(os.path.dirname(dest))

	# FreeBSD only: clear file flags that could block the move, and save
	# the parent directory's flags so they can be restored at the end.
		if destexists and dstat.st_flags != 0:
			bsd_chflags.lchflags(dest, 0)
		# Use normal stat/chflags for the parent since we want to
		# follow any symlinks to the real parent directory.
		pflags = os.stat(os.path.dirname(dest)).st_flags
			bsd_chflags.chflags(os.path.dirname(dest), 0)

	# If dest is currently a symlink, try to unlink it first.
		if stat.S_ISLNK(dstat[stat.ST_MODE]):
			except SystemExit, e:
			except Exception, e:

	# Source is a symlink: recreate it at dest rather than copying data.
	if stat.S_ISLNK(sstat[stat.ST_MODE]):
			target=os.readlink(src)
			# Strip the image directory prefix (${D}) so links installed
			# from the build image point into the live filesystem.
			if mysettings and mysettings["D"]:
				if target.find(mysettings["D"])==0:
					target=target[len(mysettings["D"]):]
			if destexists and not stat.S_ISDIR(dstat[stat.ST_MODE]):
				sid = selinux.get_lsid(src)
				selinux.secure_symlink(target,dest,sid)
				os.symlink(target,dest)
			lchown(dest,sstat[stat.ST_UID],sstat[stat.ST_GID])
			# utime() only works on the target of a symlink, so it's not
			# possible to preserve mtime on symlinks.
			return os.lstat(dest)[stat.ST_MTIME]
		except SystemExit, e:
		except Exception, e:
			print "!!! failed to properly create symlink:"
			print "!!!",dest,"->",target

	# Same-device (or SELinux) case: a plain rename is atomic and cheap.
	if sstat[stat.ST_DEV]==dstat[stat.ST_DEV] or selinux_enabled:
				ret=selinux.secure_rename(src,dest)
				ret=os.rename(src,dest)
		except SystemExit, e:
		except Exception, e:
			if e[0]!=errno.EXDEV:
				# Some random error.
				print "!!! Failed to move",src,"to",dest
			# Invalid cross-device-link 'bind' mounted or actually Cross-Device

	# Cross-device fallback: copy to a temporary "#new" file and rename it
	# into place, so the visible replacement of dest stays atomic.
		if stat.S_ISREG(sstat[stat.ST_MODE]):
			try: # For safety copy then move it over.
					selinux.secure_copy(src,dest+"#new")
					selinux.secure_rename(dest+"#new",dest)
					shutil.copyfile(src,dest+"#new")
					os.rename(dest+"#new",dest)
			except SystemExit, e:
			except Exception, e:
				print '!!! copy',src,'->',dest,'failed.'
			#we don't yet handle special, so we need to fall back to /bin/mv
			a=commands.getstatusoutput(MOVE_BINARY+" -c -f "+"'"+src+"' '"+dest+"'")
				a=commands.getstatusoutput(MOVE_BINARY+" -f "+"'"+src+"' '"+dest+"'")
				print "!!! Failed to move special file:"
				print "!!! '"+src+"' to '"+dest+"'"
				return None # failure
		# Re-apply ownership and permissions after a manual copy.
			if stat.S_ISLNK(sstat[stat.ST_MODE]):
				lchown(dest,sstat[stat.ST_UID],sstat[stat.ST_GID])
				os.chown(dest,sstat[stat.ST_UID],sstat[stat.ST_GID])
			os.chmod(dest, stat.S_IMODE(sstat[stat.ST_MODE])) # Sticky is reset on chown
		except SystemExit, e:
		except Exception, e:
			print "!!! Failed to chown/chmod/unlink in movefile()"

	# Preserve (or explicitly set) the mtime on the moved file.
		if newmtime is not None:
			os.utime(dest, (newmtime, newmtime))
			os.utime(dest, (sstat.st_atime, sstat.st_mtime))
			newmtime = long(sstat.st_mtime)
		# The utime can fail here with EPERM even though the move succeeded.
		# Instead of failing, use stat to return the mtime if possible.
			newmtime = long(os.stat(dest).st_mtime)
			writemsg("!!! Failed to stat in movefile()\n", noiselevel=-1)
			writemsg("!!! %s\n" % dest, noiselevel=-1)
			writemsg("!!! %s\n" % str(e), noiselevel=-1)

		# Restore the flags we saved before moving
			bsd_chflags.chflags(os.path.dirname(dest), pflags)
def merge(mycat, mypkg, pkgloc, infloc, myroot, mysettings, myebuild=None,
	mytree=None, mydbapi=None, vartree=None, prev_mtimes=None, blockers=None,
	# Merge a built package into the filesystem under myroot: refuse early
	# when myroot is not writable, then delegate to dblink.merge() with the
	# image directory (pkgloc) and the build-info directory (infloc).
	if not os.access(myroot, os.W_OK):
		writemsg("Permission denied: access('%s', W_OK)\n" % myroot,
	mylink = dblink(mycat, mypkg, myroot, mysettings, treetype=mytree,
		vartree=vartree, blockers=blockers, scheduler=scheduler)
	return mylink.merge(pkgloc, infloc, myroot, myebuild,
		mydbapi=mydbapi, prev_mtimes=prev_mtimes)
def unmerge(cat, pkg, myroot, mysettings, mytrimworld=1, vartree=None,
	ldpath_mtimes=None, scheduler=None):
	# Unmerge an installed package by delegating to dblink.unmerge().
	mylink = dblink(cat, pkg, myroot, mysettings, treetype="vartree",
		vartree=vartree, scheduler=scheduler)
	vartree = mylink.vartree
		# cleanup=1 removes leftover build files; ldpath_mtimes lets the
		# caller skip redundant ldconfig/env updates afterwards.
		retval = mylink.unmerge(trimworld=mytrimworld, cleanup=1,
			ldpath_mtimes=ldpath_mtimes)
		if retval == os.EX_OK:
def getCPFromCPV(mycpv):
	"""Calls pkgsplit on a cpv and returns only the cp."""
	parts = pkgsplit(mycpv)
	return parts[0]
def dep_virtual(mysplit, mysettings):
	"Does virtual dependency conversion"
	# Walk the parsed dependency structure and replace each old-style
	# virtual key with the provider(s) configured in the virtuals mapping.
	myvirtuals = mysettings.getvirtuals()
		# Recurse into nested dependency groups.
		if isinstance(x, list):
			newsplit.append(dep_virtual(x, mysettings))
			mychoices = myvirtuals.get(mykey, None)
				# A single provider can be substituted in place.
				if len(mychoices) == 1:
					a = x.replace(mykey, mychoices[0])
						# blocker needs "and" not "or(||)".
					# One substituted atom per configured provider.
					a.append(x.replace(mykey, y))
def _expand_new_virtuals(mysplit, edebug, mydbapi, mysettings, myroot="/",
	trees=None, use_mask=None, use_force=None, **kwargs):
	"""Recursively expand new-style virtuals so as to collapse one or more
	levels of indirection. In dep_zapdeps, new-style virtuals will be assigned
	zero cost regardless of whether or not they are currently installed. Virtual
	blockers are supported but only when the virtual expands to a single
	atom because it wouldn't necessarily make sense to block all the components
	of a compound virtual. When more than one new-style virtual is matched,
	the matches are sorted from highest to lowest versions and the atom is
	expanded to || ( highest match ... lowest match )."""

	# According to GLEP 37, RDEPEND is the only dependency type that is valid
	# for new-style virtuals. Repoman should enforce this.
	dep_keys = ["RDEPEND", "DEPEND", "PDEPEND"]
	portdb = trees[myroot]["porttree"].dbapi
	# When mydbapi is a portdbapi we are being run by repoman, which changes
	# how USE conditionals are evaluated below.
	repoman = isinstance(mydbapi, portdbapi)
	if kwargs["use_binaries"]:
		portdb = trees[myroot]["bintree"].dbapi
	myvirtuals = mysettings.getvirtuals()
	myuse = kwargs["myuse"]
		# Recurse into nested dependency groups.
		elif isinstance(x, list):
			newsplit.append(_expand_new_virtuals(x, edebug, mydbapi,
				mysettings, myroot=myroot, trees=trees, use_mask=use_mask,
				use_force=use_force, **kwargs))
		# Promote plain strings to Atom instances when possible.
		if not isinstance(x, portage.dep.Atom):
				x = portage.dep.Atom(x)
			except portage.exception.InvalidAtom:
				if portage.dep._dep_check_strict:
					raise portage.exception.ParseError(
						"invalid atom: '%s'" % x)
		# Repoman: evaluate USE conditionals against mask/force sets.
		if repoman and x.use and x.use.conditional:
			evaluated_atom = portage.dep.remove_slot(x)
				evaluated_atom += ":%s" % x.slot
			evaluated_atom += str(x.use._eval_qa_conditionals(
				use_mask, use_force))
			x = portage.dep.Atom(evaluated_atom)
		# Normal operation: evaluate USE conditionals against myuse.
		if not repoman and \
			myuse is not None and isinstance(x, portage.dep.Atom) and x.use:
			if x.use.conditional:
				evaluated_atom = portage.dep.remove_slot(x)
					evaluated_atom += ":%s" % x.slot
				evaluated_atom += str(x.use.evaluate_conditionals(myuse))
				x = portage.dep.Atom(evaluated_atom)
		mykey = dep_getkey(x)
		# Non-virtual atoms pass through untouched.
		if not mykey.startswith("virtual/"):
		mychoices = myvirtuals.get(mykey, [])
		isblocker = x.startswith("!")
			# Virtual blockers are no longer expanded here since
			# the un-expanded virtual atom is more useful for
			# maintaining a cache of blocker atoms.
		matches = portdb.match(match_atom)
		# Use descending order to prefer higher versions.
			# only use new-style matches
			if cpv.startswith("virtual/"):
				pkgs.append((cpv, catpkgsplit(cpv)[1:], portdb))
		if not (pkgs or mychoices):
			# This one couldn't be expanded as a new-style virtual. Old-style
			# virtuals have already been expanded by dep_virtual, so this one
			# is unavailable and dep_zapdeps will identify it as such. The
			# atom is not eliminated here since it may still represent a
			# dependency that needs to be satisfied.
		# Single old-style provider and no new-style matches: substitute.
		if not pkgs and len(mychoices) == 1:
			newsplit.append(x.replace(mykey, mychoices[0]))
			cpv, pv_split, db = y
			# Pull the virtual's own RDEPEND/DEPEND/PDEPEND so the
			# components it depends on are inlined into the result.
			depstring = " ".join(db.aux_get(cpv, dep_keys))
			pkg_kwargs = kwargs.copy()
			if isinstance(db, portdbapi):
				# Installed/binary package: use its recorded USE flags.
				use_split = db.aux_get(cpv, ["USE"])[0].split()
				pkg_kwargs["myuse"] = use_split
				print "Virtual Parent: ", y[0]
				print "Virtual Depstring:", depstring
			mycheck = dep_check(depstring, mydbapi, mysettings, myroot=myroot,
				trees=trees, **pkg_kwargs)
				raise portage.exception.ParseError(
					"%s: %s '%s'" % (y[0], mycheck[1], depstring))
				virtual_atoms = [atom for atom in mycheck[1] \
					if not atom.startswith("!")]
				if len(virtual_atoms) == 1:
					# It wouldn't make sense to block all the components of a
					# compound virtual, so only a single atom block is allowed.
					a.append("!" + virtual_atoms[0])
				mycheck[1].append("="+y[0]) # pull in the new-style virtual
				a.append(mycheck[1])
		# Plain old-style virtuals. New-style virtuals are preferred.
			a.append(x.replace(mykey, y))
		if isblocker and not a:
			# Probably a compound virtual. Pass the atom through unprocessed.
def dep_eval(deplist):
	"""Evaluate an already word-reduced dependency list (booleans and
	nested lists) to 1 (satisfied) or 0 (unsatisfied)."""
	if deplist[0]=="||":
		#or list; we just need one "1"
		for x in deplist[1:]:
			if isinstance(x, list):
		#XXX: unless there's no available atoms in the list
		#in which case we need to assume that everything is
		#okay as some ebuilds are relying on an old bug.
		if len(deplist) == 1:
		# AND semantics: recurse into sublists; any failure fails the group.
		if isinstance(x, list):
def dep_zapdeps(unreduced, reduced, myroot, use_binaries=0, trees=None):
	"""Takes an unreduced and reduced deplist and removes satisfied dependencies.
	Returned deplist contains steps that must be taken to satisfy dependencies."""
	writemsg("ZapDeps -- %s\n" % (use_binaries), 2)
	# Nothing to do when the reduced list is already satisfied (or empty).
	if not reduced or unreduced == ["||"] or dep_eval(reduced):

	# AND level: recurse into sublists and keep the unsatisfied atoms.
	if unreduced[0] != "||":
		for dep, satisfied in izip(unreduced, reduced):
			if isinstance(dep, list):
				unresolved += dep_zapdeps(dep, satisfied, myroot,
					use_binaries=use_binaries, trees=trees)
				unresolved.append(dep)

	# We're at a ( || atom ... ) type level and need to make a choice
	deps = unreduced[1:]
	satisfieds = reduced[1:]

	# Our preference order is for the first item that:
	# a) contains all unmasked packages with the same key as installed packages
	# b) contains all unmasked packages
	# c) contains masked installed packages
	# d) is the first item
	preferred_not_installed = []
	preferred_any_slot = []
	possible_upgrades = []

	# Alias the trees we'll be checking availability against
	parent = trees[myroot].get("parent")
	graph_db = trees[myroot].get("graph_db")
	if "vartree" in trees[myroot]:
		vardb = trees[myroot]["vartree"].dbapi
		mydbapi = trees[myroot]["bintree"].dbapi
		mydbapi = trees[myroot]["porttree"].dbapi

	# Sort the deps into preferred (installed) and other
	# with values of [[required_atom], availability]
	for dep, satisfied in izip(deps, satisfieds):
		if isinstance(dep, list):
			atoms = dep_zapdeps(dep, satisfied, myroot,
				use_binaries=use_binaries, trees=trees)
			# No vardb (repoman): no preference data is available.
			other.append((atoms, None, False))
		all_available = True
			avail_pkg = mydbapi.match(atom)
				avail_pkg = avail_pkg[-1] # highest (ascending order)
				avail_slot = "%s:%s" % (dep_getkey(atom),
					mydbapi.aux_get(avail_pkg, ["SLOT"])[0])
				all_available = False
			versions[avail_slot] = avail_pkg

		# (atoms, slot->best-version map, all atoms have a visible match)
		this_choice = (atoms, versions, all_available)
			# The "all installed" criterion is not version or slot specific.
			# If any version of a package is installed then we assume that it
			# is preferred over other possible packages choices.
			all_installed = True
			for atom in set([dep_getkey(atom) for atom in atoms \
				if atom[:1] != "!"]):
				# New-style virtuals have zero cost to install.
				if not vardb.match(atom) and not atom.startswith("virtual/"):
					all_installed = False
			all_installed_slots = False
				all_installed_slots = True
				for slot_atom in versions:
					# New-style virtuals have zero cost to install.
					if not vardb.match(slot_atom) and \
						not slot_atom.startswith("virtual/"):
						all_installed_slots = False
				if all_installed_slots:
					preferred.append(this_choice)
					preferred_any_slot.append(this_choice)
			elif graph_db is None:
				possible_upgrades.append(this_choice)
				for slot_atom in versions:
					# New-style virtuals have zero cost to install.
					if not graph_db.match(slot_atom) and \
						not slot_atom.startswith("virtual/"):
						all_in_graph = False
						preferred_not_installed.append(this_choice)
						# Check if the atom would result in a direct circular
						# dependency and try to avoid that if it seems likely
						# to be unresolvable.
						cpv_slot_list = [parent]
						circular_atom = None
							if vardb.match(atom):
								# If the atom is satisfied by an installed
								# version then it's not a circular dep.
							if dep_getkey(atom) != parent.cp:
							if match_from_list(atom, cpv_slot_list):
								circular_atom = atom
						if circular_atom is None:
							preferred_not_installed.append(this_choice)
							other.append(this_choice)
					possible_upgrades.append(this_choice)
			other.append(this_choice)

	# Compare the "all_installed" choices against the "all_available" choices
	# for possible missed upgrades. The main purpose of this code is to find
	# upgrades of new-style virtuals since _expand_new_virtuals() expands them
	# into || ( highest version ... lowest version ). We want to prefer the
	# highest all_available version of the new-style virtual when there is a
	# lower all_installed version.
	preferred.extend(preferred_not_installed)
	preferred.extend(preferred_any_slot)
	preferred.extend(possible_upgrades)
	possible_upgrades = preferred[1:]
	for possible_upgrade in possible_upgrades:
		atoms, versions, all_available = possible_upgrade
		myslots = set(versions)
		for other_choice in preferred:
			if possible_upgrade is other_choice:
				# possible_upgrade will not be promoted, so move on
			o_atoms, o_versions, o_all_available = other_choice
			intersecting_slots = myslots.intersection(o_versions)
			if not intersecting_slots:
			has_downgrade = False
			for myslot in intersecting_slots:
				myversion = versions[myslot]
				o_version = o_versions[myslot]
				# pkgcmp: >0 means myversion is newer than o_version.
				difference = pkgcmp(catpkgsplit(myversion)[1:],
					catpkgsplit(o_version)[1:])
						has_downgrade = True
			# Promote this choice ahead of the one it upgrades.
			if has_upgrade and not has_downgrade:
				preferred.remove(possible_upgrade)
				o_index = preferred.index(other_choice)
				preferred.insert(o_index, possible_upgrade)

	# preferred now contains a) and c) from the order above with
	# the masked flag differentiating the two. other contains b)
	# and d) so adding other to preferred will give us a suitable
	# list to iterate over.
	preferred.extend(other)

	# First pass takes only fully available choices; the second pass
	# accepts masked ones as a last resort.
	for allow_masked in (False, True):
		for atoms, versions, all_available in preferred:
			if all_available or allow_masked:

	assert(False) # This point should not be reachable
def dep_expand(mydep, mydb=None, use_cache=1, settings=None):
	"""Expand a possibly category-less dep atom into a fully qualified
	portage.dep.Atom, using cpv_expand() to resolve the category."""
	# Isolate the cpv part so the operator prefix and any slot/use postfix
	# can be re-attached around the expanded cpv.
	mydep = dep_getcpv(orig_dep)
	myindex = orig_dep.index(mydep)
	prefix = orig_dep[:myindex]
	postfix = orig_dep[myindex+len(mydep):]
	expanded = cpv_expand(mydep, mydb=mydb,
		use_cache=use_cache, settings=settings)
		return portage.dep.Atom(prefix + expanded + postfix)
	except portage.exception.InvalidAtom:
		# Missing '=' prefix is allowed for backward compatibility.
		if not isvalidatom("=" + prefix + expanded + postfix):
		return portage.dep.Atom("=" + prefix + expanded + postfix)
def dep_check(depstring, mydbapi, mysettings, use="yes", mode=None, myuse=None,
	use_cache=1, use_binaries=0, myroot="/", trees=None):
	"""Takes a depend string and parses the condition."""
	# Returns a 2-element list: [1, atoms] on success or [0, error-string]
	# on a parse/validation failure.
	edebug = mysettings.get("PORTAGE_DEBUG", None) == "1"
	#check_config_instance(mysettings)
		# Default to the module-global tree mapping.
		trees = globals()["db"]
			# Default behavior: evaluate against the configured USE.
			myusesplit = mysettings["PORTAGE_USE"].split()
			# We've been given useflags to use.
			#print "USE FLAGS PASSED IN."
			#if "bindist" in myusesplit:
			#	print "BINDIST is set!"
			#	print "BINDIST NOT set."
		#we are being run by autouse(), don't consult USE vars yet.
		# WE ALSO CANNOT USE SETTINGS

	#convert parenthesis to sublists
		mysplit = portage.dep.paren_reduce(depstring)
	except portage.exception.InvalidDependString, e:

	# ARCH is always treated as forced.
	useforce.add(mysettings["ARCH"])
		# This masking/forcing is only for repoman. In other cases, relevant
		# masking/forcing should have already been applied via
		# config.regenerate(). Also, binary or installed packages may have
		# been built with flags that are now masked, and it would be
		# inconsistent to mask them now. Additionally, myuse may consist of
		# flags from a parent package that is being merged to a $ROOT that is
		# different from the one that mysettings represents.
		mymasks.update(mysettings.usemask)
		mymasks.update(mysettings.archlist())
		mymasks.discard(mysettings["ARCH"])
		useforce.update(mysettings.useforce)
		useforce.difference_update(mymasks)

	# Evaluate USE conditionals against the computed flag sets.
		mysplit = portage.dep.use_reduce(mysplit, uselist=myusesplit,
			masklist=mymasks, matchall=(use=="all"), excludeall=useforce)
	except portage.exception.InvalidDependString, e:

	# Do the || conversions
	mysplit=portage.dep.dep_opconvert(mysplit)

		#dependencies were reduced to nothing

	# Recursively expand new-style virtuals so as to
	# collapse one or more levels of indirection.
		mysplit = _expand_new_virtuals(mysplit, edebug, mydbapi, mysettings,
			use=use, mode=mode, myuse=myuse,
			use_force=useforce, use_mask=mymasks, use_cache=use_cache,
			use_binaries=use_binaries, myroot=myroot, trees=trees)
	except portage.exception.ParseError, e:

	# Reduce atoms to booleans before choosing among || alternatives.
	mysplit2=dep_wordreduce(mysplit2,mysettings,mydbapi,mode,use_cache=use_cache)
	if mysplit2 is None:
		return [0,"Invalid token"]

	writemsg("\n\n\n", 1)
	writemsg("mysplit: %s\n" % (mysplit), 1)
	writemsg("mysplit2: %s\n" % (mysplit2), 1)

		myzaps = dep_zapdeps(mysplit, mysplit2, myroot,
			use_binaries=use_binaries, trees=trees)
	except portage.exception.InvalidAtom, e:
		if portage.dep._dep_check_strict:
			raise # This shouldn't happen.
		# dbapi.match() failed due to an invalid atom in
		# the dependencies of an installed package.
		return [0, "Invalid atom: '%s'" % (e,)]

	# Flatten the nested choice structure and de-duplicate via dict keys.
	mylist = flatten(myzaps)
	writemsg("myzaps: %s\n" % (myzaps), 1)
	writemsg("mylist: %s\n" % (mylist), 1)
	writemsg("mydict: %s\n" % (mydict), 1)
	return [1,mydict.keys()]
# Map each atom in a (possibly nested) dependency list to a truth value.
# NOTE(review): gaps in the embedded line numbering show that several lines
# of this function are elided from this view; comments below describe only
# what is visibly present.
6640 def dep_wordreduce(mydeplist,mysettings,mydbapi,mode,use_cache=1):
6641 "Reduces the deplist to ones and zeros"
# Work on a shallow copy so the caller's list structure is not mutated.
6642 deplist=mydeplist[:]
6643 for mypos, token in enumerate(deplist):
6644 if isinstance(deplist[mypos], list):
# Sublists are reduced recursively with the same settings/dbapi/mode.
6646 deplist[mypos]=dep_wordreduce(deplist[mypos],mysettings,mydbapi,mode,use_cache=use_cache)
6647 elif deplist[mypos]=="||":
# Blocker atoms ("!...") are marked unsatisfied here.
6649 elif token[:1] == "!":
6650 deplist[mypos] = False
6652 mykey = dep_getkey(deplist[mypos])
# Atoms satisfied by package.provided need no dbapi query.
6653 if mysettings and mykey in mysettings.pprovideddict and \
6654 match_from_list(deplist[mypos], mysettings.pprovideddict[mykey]):
6656 elif mydbapi is None:
6657 # Assume nothing is satisfied. This forces dep_zapdeps to
6658 # return all of the deps that have been selected
6659 # (excluding those satisfied by package.provided).
6660 deplist[mypos] = False
# Otherwise consult the dbapi; xmatch/match results decide satisfaction.
6663 x = mydbapi.xmatch(mode, deplist[mypos])
6664 if mode.startswith("minimum-"):
6671 mydep=mydbapi.match(deplist[mypos],use_cache=use_cache)
6674 if deplist[mypos][0]=="!":
6678 #encountered invalid string
# Return the "category/package" key of a cpv, e.g.
# "sys-apps/portage-2.1" -> "sys-apps/portage".
# NOTE(review): the branch handling a name that pkgsplit() cannot split
# (lines 6685-6686) is elided from this view.
6682 def cpv_getkey(mycpv):
6683 myslash=mycpv.split("/")
6684 mysplit=pkgsplit(myslash[-1])
6687 return myslash[0]+"/"+mysplit[0]
# Expand a possibly category-less package key into a "cat/pkg" string,
# consulting the db's category list and the virtuals mappings for "/".
# Some lines are elided from this view (gaps in embedded numbering).
6693 def key_expand(mykey, mydb=None, use_cache=1, settings=None):
6694 mysplit=mykey.split("/")
# Fall back to the legacy module-level settings object when none given.
6695 if settings is None:
6696 settings = globals()["settings"]
6697 virts = settings.getvirtuals("/")
6698 virts_p = settings.get_virts_p("/")
# No category supplied: probe every known category for a match, then
# the package-name virtuals map; "null/" marks an unresolvable key.
6700 if hasattr(mydb, "cp_list"):
6701 for x in mydb.categories:
6702 if mydb.cp_list(x+"/"+mykey,use_cache=use_cache):
6704 if mykey in virts_p:
6705 return(virts_p[mykey][0])
6706 return "null/"+mykey
# Category present: if nothing provides the key, map it through virts.
6708 if hasattr(mydb, "cp_list"):
6709 if not mydb.cp_list(mykey, use_cache=use_cache) and \
6710 virts and mykey in virts:
6711 return virts[mykey][0]
# NOTE(review): many lines of this function are elided from this view
# (gaps in the embedded numbering); inline comments describe only the
# visible logic.
6714 def cpv_expand(mycpv, mydb=None, use_cache=1, settings=None):
6715 """Given a string (packagename or virtual) expand it into a valid
6716 cat/package string. Virtuals use the mydb to determine which provided
6717 virtual is a valid choice and defaults to the first element when there
6718 are no installed/available candidates."""
6719 myslash=mycpv.split("/")
6720 mysplit=pkgsplit(myslash[-1])
6721 if settings is None:
6722 settings = globals()["settings"]
6723 virts = settings.getvirtuals("/")
6724 virts_p = settings.get_virts_p("/")
6726 # this is illegal case.
# Input already has a category ("cat/pkg..."): expand virtuals if the
# db has no entry for the literal key.
6729 elif len(myslash)==2:
6731 mykey=myslash[0]+"/"+mysplit[0]
6734 if mydb and virts and mykey in virts:
6735 writemsg("mydb.__class__: %s\n" % (mydb.__class__), 1)
6736 if hasattr(mydb, "cp_list"):
6737 if not mydb.cp_list(mykey, use_cache=use_cache):
6738 writemsg("virts[%s]: %s\n" % (str(mykey),virts[mykey]), 1)
6739 mykey_orig = mykey[:]
6740 for vkey in virts[mykey]:
6741 # The virtuals file can contain a versioned atom, so
6742 # it may be necessary to remove the operator and
6743 # version from the atom before it is passed into
6745 if mydb.cp_list(dep_getkey(vkey), use_cache=use_cache):
6747 writemsg("virts chosen: %s\n" % (mykey), 1)
# No provider installed/available: default to the first virtual entry.
6749 if mykey == mykey_orig:
6750 mykey=virts[mykey][0]
6751 writemsg("virts defaulted: %s\n" % (mykey), 1)
6752 #we only perform virtual expansion if we are passed a dbapi
6754 #specific cpv, no category, ie. "foo-1.0"
# Category-less input: search every category for a package of that name.
6762 if mydb and hasattr(mydb, "categories"):
6763 for x in mydb.categories:
6764 if mydb.cp_list(x+"/"+myp,use_cache=use_cache):
6765 matches.append(x+"/"+myp)
6766 if len(matches) > 1:
6767 virtual_name_collision = False
6768 if len(matches) == 2:
6770 if not x.startswith("virtual/"):
6771 # Assume that the non-virtual is desired. This helps
6772 # avoid the ValueError for invalid deps that come from
6773 # installed packages (during reverse blocker detection,
6777 virtual_name_collision = True
6778 if not virtual_name_collision:
6779 # AmbiguousPackageName inherits from ValueError,
6780 # for backward compatibility with calling code
6781 # that already handles ValueError.
6782 raise portage.exception.AmbiguousPackageName(matches)
6786 if not mykey and not isinstance(mydb, list):
6788 mykey=virts_p[myp][0]
6789 #again, we only perform virtual expansion if we have a dbapi (not a list)
# Re-attach the version; the "-r0" revision suffix is suppressed.
6793 if mysplit[2]=="r0":
6794 return mykey+"-"+mysplit[1]
6796 return mykey+"-"+mysplit[1]+"-"+mysplit[2]
# Return the comment block from package.mask that explains why mycpv is
# masked (optionally with the file's location when return_location=True).
# NOTE(review): several lines are elided from this view (gaps in the
# embedded numbering), including some branch bodies and return paths.
6800 def getmaskingreason(mycpv, metadata=None, settings=None, portdb=None, return_location=False):
6801 from portage.util import grablines
# Fall back to the legacy module-level globals when not supplied.
6802 if settings is None:
6803 settings = globals()["settings"]
6805 portdb = globals()["portdb"]
6806 mysplit = catpkgsplit(mycpv)
6808 raise ValueError("invalid CPV: %s" % mycpv)
6809 if metadata is None:
6810 db_keys = list(portdb._aux_cache_keys)
6812 metadata = dict(izip(db_keys, portdb.aux_get(mycpv, db_keys)))
6814 if not portdb.cpv_exists(mycpv):
6816 if metadata is None:
6817 # Can't access SLOT due to corruption.
6818 cpv_slot_list = [mycpv]
6820 cpv_slot_list = ["%s:%s" % (mycpv, metadata["SLOT"])]
6821 mycp=mysplit[0]+"/"+mysplit[1]
6823 # XXX- This is a temporary duplicate of code from the config constructor.
# Collect every location that can hold a package.mask file: the main
# tree's profiles dir, the active profile stack, overlay profiles, and
# the user's config root.
6824 locations = [os.path.join(settings["PORTDIR"], "profiles")]
6825 locations.extend(settings.profiles)
6826 for ov in settings["PORTDIR_OVERLAY"].split():
6827 profdir = os.path.join(normalize_path(ov), "profiles")
6828 if os.path.isdir(profdir):
6829 locations.append(profdir)
6830 locations.append(os.path.join(settings["PORTAGE_CONFIGROOT"],
6831 USER_CONFIG_PATH.lstrip(os.path.sep)))
6833 pmasklists = [(x, grablines(os.path.join(x, "package.mask"), recursive=1)) for x in locations]
6835 if mycp in settings.pmaskdict:
6836 for x in settings.pmaskdict[mycp]:
6837 if match_from_list(x, cpv_slot_list):
# Scan each package.mask line-by-line, tracking the comment block that
# immediately precedes the atom which matched.
6841 for pmask in pmasklists:
6842 pmask_filename = os.path.join(pmask[0], "package.mask")
6843 for i in xrange(len(pmask[1])):
6844 l = pmask[1][i].strip()
6850 comment_valid = i + 1
6852 if comment_valid != i:
6855 return (comment, pmask_filename)
6858 elif comment_valid != -1:
6859 # Apparently this comment applies to multiple masks, so
6860 # it remains valid until a blank line is encountered.
# Return a list of strings naming every reason mycpv is masked (profile,
# package.mask, EAPI, keyword, license); an empty list means unmasked.
# NOTE(review): many lines are elided from this view (gaps in embedded
# numbering), including the initialization of rValue/kmask/matches.
6867 def getmaskingstatus(mycpv, settings=None, portdb=None):
6868 if settings is None:
# Clone the global config so setcpv() below cannot pollute shared state.
6869 settings = config(clone=globals()["settings"])
6871 portdb = globals()["portdb"]
6875 if not isinstance(mycpv, basestring):
6876 # emerge passed in a Package instance
6879 metadata = pkg.metadata
6880 installed = pkg.installed
6882 mysplit = catpkgsplit(mycpv)
6884 raise ValueError("invalid CPV: %s" % mycpv)
6885 if metadata is None:
6886 db_keys = list(portdb._aux_cache_keys)
6888 metadata = dict(izip(db_keys, portdb.aux_get(mycpv, db_keys)))
6890 if not portdb.cpv_exists(mycpv):
6892 return ["corruption"]
# USE-conditional licenses require USE to be computed for this cpv.
6893 if "?" in metadata["LICENSE"]:
6894 settings.setcpv(mycpv, mydb=metadata)
6895 metadata["USE"] = settings["PORTAGE_USE"]
6897 metadata["USE"] = ""
6898 mycp=mysplit[0]+"/"+mysplit[1]
# profile-level mask checking
6903 if settings._getProfileMaskAtom(mycpv, metadata):
6904 rValue.append("profile")
6906 # package.mask checking
6907 if settings._getMaskAtom(mycpv, metadata):
6908 rValue.append("package.mask")
6911 eapi = metadata["EAPI"]
6912 mygroups = metadata["KEYWORDS"]
6913 licenses = metadata["LICENSE"]
6914 slot = metadata["SLOT"]
6915 if eapi.startswith("-"):
# Unsupported or deprecated EAPIs mask the package outright.
6917 if not eapi_is_supported(eapi):
6918 return ["EAPI %s" % eapi]
6919 elif _eapi_is_deprecated(eapi) and not installed:
6920 return ["EAPI %s" % eapi]
# Accepted keywords come from the environment backup, the config, and
# any per-package keyword entries that match this cpv:slot.
6921 egroups = settings.configdict["backupenv"].get(
6922 "ACCEPT_KEYWORDS", "").split()
6923 mygroups = mygroups.split()
6924 pgroups = settings["ACCEPT_KEYWORDS"].split()
6925 myarch = settings["ARCH"]
6926 if pgroups and myarch not in pgroups:
6927 """For operating systems other than Linux, ARCH is not necessarily a
6929 myarch = pgroups[0].lstrip("~")
6931 cp = dep_getkey(mycpv)
6932 pkgdict = settings.pkeywordsdict.get(cp)
6935 cpv_slot_list = ["%s:%s" % (mycpv, metadata["SLOT"])]
6936 for atom, pkgkeywords in pkgdict.iteritems():
6937 if match_from_list(atom, cpv_slot_list):
6939 pgroups.extend(pkgkeywords)
6940 if matches or egroups:
6941 pgroups.extend(egroups)
# "-keyword" entries remove a previously accepted keyword.
6944 if x.startswith("-"):
6948 inc_pgroups.discard(x[1:])
6951 pgroups = inc_pgroups
# Compare the package's KEYWORDS against the accepted groups to build
# the keyword-mask label (kmask).
6956 for keyword in pgroups:
6957 if keyword in mygroups:
6966 elif gp=="-"+myarch and myarch in pgroups:
6969 elif gp=="~"+myarch and myarch in pgroups:
# License masking: report only the missing license tokens.
6974 missing_licenses = settings._getMissingLicenses(mycpv, metadata)
6975 if missing_licenses:
6976 allowed_tokens = set(["||", "(", ")"])
6977 allowed_tokens.update(missing_licenses)
6978 license_split = licenses.split()
6979 license_split = [x for x in license_split \
6980 if x in allowed_tokens]
6981 msg = license_split[:]
6982 msg.append("license(s)")
6983 rValue.append(" ".join(msg))
6984 except portage.exception.InvalidDependString, e:
6985 rValue.append("LICENSE: "+str(e))
6987 # Only show KEYWORDS masks for installed packages
6988 # if they're not masked for any other reason.
6989 if kmask and (not installed or not rValue):
6990 rValue.append(kmask+" keyword")
6996 'DEPEND', 'RDEPEND', 'SLOT', 'SRC_URI',
6997 'RESTRICT', 'HOMEPAGE', 'LICENSE', 'DESCRIPTION',
6998 'KEYWORDS', 'INHERITED', 'IUSE', 'CDEPEND',
6999 'PDEPEND', 'PROVIDE', 'EAPI',
7000 'PROPERTIES', 'UNUSED_06', 'UNUSED_05', 'UNUSED_04',
7001 'UNUSED_03', 'UNUSED_02', 'UNUSED_01',
7003 auxdbkeylen=len(auxdbkeys)
7005 from portage.dbapi import dbapi
7006 from portage.dbapi.virtual import fakedbapi
7007 from portage.dbapi.bintree import bindbapi, binarytree
7008 from portage.dbapi.vartree import vardbapi, vartree, dblink
7009 from portage.dbapi.porttree import close_portdbapi_caches, portdbapi, portagetree
7011 class FetchlistDict(UserDict.DictMixin):
7012 """This provides a mapping interface to retrieve fetch lists. It's used
7013 to allow portage.manifest.Manifest to access fetch lists via a standard
7014 mapping interface rather than use the dbapi directly."""
7015 def __init__(self, pkgdir, settings, mydbapi):
7016 """pkgdir is a directory containing ebuilds and settings is passed into
7017 portdbapi.getfetchlist for __getitem__ calls."""
7018 self.pkgdir = pkgdir
# "cat/pkg" derived from the last two path components of pkgdir.
7019 self.cp = os.sep.join(pkgdir.split(os.sep)[-2:])
7020 self.settings = settings
# Repository root: two directory levels above pkgdir.
7021 self.mytree = os.path.realpath(os.path.dirname(os.path.dirname(pkgdir)))
7022 self.portdb = mydbapi
7023 def __getitem__(self, pkg_key):
7024 """Returns the complete fetch list for a given package."""
7025 return self.portdb.getFetchMap(pkg_key, mytree=self.mytree).keys()
7026 def __contains__(self, cpv):
7027 return cpv in self.keys()
7028 def has_key(self, pkg_key):
7029 """Returns true if the given package exists within pkgdir."""
7030 return pkg_key in self
# NOTE(review): the `def keys(self):` header for the body below (line
# 7031) is elided from this view.
7032 """Returns keys for all packages within pkgdir"""
7033 return self.portdb.cp_list(self.cp, mytree=self.mytree)
# NOTE(review): many lines of this function are elided from this view
# (gaps in the embedded numbering), including the try/finally structure
# that the cleanup code at the bottom belongs to.
7035 def pkgmerge(mytbz2, myroot, mysettings, mydbapi=None,
7036 vartree=None, prev_mtimes=None, blockers=None):
7037 """will merge a .tbz2 file, returning a list of runtime dependencies
7038 that must be satisfied, or None if there was a merge error. This
7039 code assumes the package exists."""
# Default to the legacy global trees for this root.
7042 mydbapi = db[myroot]["bintree"].dbapi
7044 vartree = db[myroot]["vartree"]
7045 if mytbz2[-5:]!=".tbz2":
7046 print "!!! Not a .tbz2 file"
7052 did_merge_phase = False
7055 """ Don't lock the tbz2 file because the filesytem could be readonly or
7056 shared by a cluster."""
7057 #tbz2_lock = portage.locks.lockfile(mytbz2, wantnewlockfile=1)
7059 mypkg = os.path.basename(mytbz2)[:-5]
7060 xptbz2 = portage.xpak.tbz2(mytbz2)
# CATEGORY is read from the xpak metadata embedded in the binary package.
7061 mycat = xptbz2.getfile("CATEGORY")
7063 writemsg("!!! CATEGORY info missing from info chunk, aborting...\n",
7066 mycat = mycat.strip()
7068 # These are the same directories that would be used at build time.
7069 builddir = os.path.join(
7070 mysettings["PORTAGE_TMPDIR"], "portage", mycat, mypkg)
7071 catdir = os.path.dirname(builddir)
7072 pkgloc = os.path.join(builddir, "image")
7073 infloc = os.path.join(builddir, "build-info")
7074 myebuild = os.path.join(
7075 infloc, os.path.basename(mytbz2)[:-4] + "ebuild")
# mode=070 is octal (group rwx only) -- presumably intentional for the
# portage-group-owned temp dirs; confirm before changing.
7076 portage.util.ensure_dirs(os.path.dirname(catdir),
7077 uid=portage_uid, gid=portage_gid, mode=070, mask=0)
7078 catdir_lock = portage.locks.lockdir(catdir)
7079 portage.util.ensure_dirs(catdir,
7080 uid=portage_uid, gid=portage_gid, mode=070, mask=0)
# Remove any stale build dir from a previous run; ENOENT is fine.
7082 shutil.rmtree(builddir)
7083 except (IOError, OSError), e:
7084 if e.errno != errno.ENOENT:
7087 for mydir in (builddir, pkgloc, infloc):
7088 portage.util.ensure_dirs(mydir, uid=portage_uid,
7089 gid=portage_gid, mode=0755)
7090 writemsg_stdout(">>> Extracting info\n")
7091 xptbz2.unpackinfo(infloc)
7092 mysettings.setcpv(mycat + "/" + mypkg, mydb=mydbapi)
7093 # Store the md5sum in the vdb.
7094 fp = open(os.path.join(infloc, "BINPKGMD5"), "w")
7095 fp.write(str(portage.checksum.perform_md5(mytbz2))+"\n")
7098 # This gives bashrc users an opportunity to do various things
7099 # such as remove binary packages after they're installed.
7100 mysettings["PORTAGE_BINPKG_FILE"] = mytbz2
7101 mysettings.backup_changes("PORTAGE_BINPKG_FILE")
7102 debug = mysettings.get("PORTAGE_DEBUG", "") == "1"
7104 # Eventually we'd like to pass in the saved ebuild env here.
# Run the ebuild "setup" phase before unpacking the image.
7105 retval = doebuild(myebuild, "setup", myroot, mysettings, debug=debug,
7106 tree="bintree", mydbapi=mydbapi, vartree=vartree)
7107 if retval != os.EX_OK:
7108 writemsg("!!! Setup failed: %s\n" % retval, noiselevel=-1)
7111 writemsg_stdout(">>> Extracting %s\n" % mypkg)
# Unpack the package image via a bzip2|tar pipeline into pkgloc.
7112 retval = portage.process.spawn_bash(
7113 "bzip2 -dqc -- '%s' | tar -xp -C '%s' -f -" % (mytbz2, pkgloc),
7114 env=mysettings.environ())
7115 if retval != os.EX_OK:
7116 writemsg("!!! Error Extracting '%s'\n" % mytbz2, noiselevel=-1)
7118 #portage.locks.unlockfile(tbz2_lock)
# Merge the unpacked image into the live filesystem via dblink.
7121 mylink = dblink(mycat, mypkg, myroot, mysettings, vartree=vartree,
7122 treetype="bintree", blockers=blockers)
7123 retval = mylink.merge(pkgloc, infloc, myroot, myebuild, cleanup=0,
7124 mydbapi=mydbapi, prev_mtimes=prev_mtimes)
7125 did_merge_phase = True
7126 success = retval == os.EX_OK
7129 mysettings.pop("PORTAGE_BINPKG_FILE", None)
7131 portage.locks.unlockfile(tbz2_lock)
7133 if not did_merge_phase:
7134 # The merge phase handles this already. Callers don't know how
7135 # far this function got, so we have to call elog_process() here
7136 # so that it's only called once.
7137 from portage.elog import elog_process
7138 elog_process(mycat + "/" + mypkg, mysettings)
# Best-effort cleanup of the build dir; ENOENT is ignored.
7141 shutil.rmtree(builddir)
7142 except (IOError, OSError), e:
7143 if e.errno != errno.ENOENT:
# Warn the user when the active profile is listed as deprecated: the
# "deprecated" file's first line names the replacement profile and any
# remaining lines describe the upgrade steps.  (Some lines, including the
# final return, are elided from this view.)
7147 def deprecated_profile_check():
# Nothing to do unless the profile's "deprecated" marker is readable.
7148 if not os.access(DEPRECATED_PROFILE_FILE, os.R_OK):
7150 deprecatedfile = open(DEPRECATED_PROFILE_FILE, "r")
7151 dcontent = deprecatedfile.readlines()
7152 deprecatedfile.close()
7153 writemsg(red("\n!!! Your current profile is deprecated and not supported anymore.\n"),
7156 writemsg(red("!!! Please refer to the Gentoo Upgrading Guide.\n"),
7159 newprofile = dcontent[0]
7160 writemsg(red("!!! Please upgrade to the following profile if possible:\n"),
7162 writemsg(8*" "+green(newprofile)+"\n", noiselevel=-1)
7163 if len(dcontent) > 1:
7164 writemsg("To upgrade do the following steps:\n", noiselevel=-1)
7165 for myline in dcontent[1:]:
7166 writemsg(myline, noiselevel=-1)
7167 writemsg("\n\n", noiselevel=-1)
7170 # gets virtual package settings
# Deprecated wrapper kept for API compatibility; delegates to the global
# settings object.  (One line, presumably `global settings`, is elided.)
7171 def getvirtuals(myroot):
7173 writemsg("--- DEPRECATED call to getvirtual\n")
7174 return settings.getvirtuals(myroot)
# Persist the in-memory mtimedb to disk as a pickled plain dict.
# NOTE(review): several lines are elided from this view, including the
# population of `d` from `mydict` and part of the error handling.
7176 def commit_mtimedb(mydict=None, filename=None):
# Without an explicit dict, fall back to the module-level mtimedb.
7179 if "mtimedb" not in globals() or mtimedb is None:
7183 if filename is None:
7185 filename = mtimedbfile
7186 mydict["version"] = VERSION
7187 d = {} # for full backward compat, pickle it as a plain dict object.
# atomic_ofstream writes to a temp file and renames on close, so a
# partial write never clobbers the existing mtimedb.
7190 f = atomic_ofstream(filename)
7191 pickle.dump(d, f, -1)
7193 portage.util.apply_secpass_permissions(filename, uid=uid, gid=portage_gid, mode=0664)
7194 except (IOError, OSError), e:
7198 global uid,portage_gid,portdb,db
7199 if secpass and os.environ.get("SANDBOX_ON") != "1":
7200 close_portdbapi_caches()
7203 atexit_register(portageexit)
# NOTE(review): many lines of this function are elided from this view
# (gaps in the embedded numbering); comments describe the visible logic.
7205 def _global_updates(trees, prev_mtimes):
7207 Perform new global updates if they exist in $PORTDIR/profiles/updates/.
7209 @param trees: A dictionary containing portage trees.
7211 @param prev_mtimes: A dictionary containing mtimes of files located in
7212 $PORTDIR/profiles/updates/.
7213 @type prev_mtimes: dict
7214 @rtype: None or List
7215 @return: None if there were no updates, otherwise a list of update commands
7216 that have been performed.
7218 # only do this if we're root and not running repoman/ebuild digest
7220 if secpass < 2 or "SANDBOX_ACTIVE" in os.environ:
7223 mysettings = trees["/"]["vartree"].settings
7224 updpath = os.path.join(mysettings["PORTDIR"], "profiles", "updates")
# fixpackages ignores prev_mtimes so that every update file is re-read.
7227 if mysettings["PORTAGE_CALLER"] == "fixpackages":
7228 update_data = grab_updates(updpath)
7230 update_data = grab_updates(updpath, prev_mtimes)
7231 except portage.exception.DirectoryNotFound:
7232 writemsg("--- 'profiles/updates' is empty or " + \
7233 "not available. Empty portage tree?\n", noiselevel=1)
7236 if len(update_data) > 0:
7237 do_upgrade_packagesmessage = 0
# Parse each update file and collect its valid move/slotmove commands.
7240 for mykey, mystat, mycontent in update_data:
7241 writemsg_stdout("\n\n")
7242 writemsg_stdout(green("Performing Global Updates: ")+bold(mykey)+"\n")
7243 writemsg_stdout("(Could take a couple of minutes if you have a lot of binary packages.)\n")
7244 writemsg_stdout(" "+bold(".")+"='update pass' "+bold("*")+"='binary update' "+bold("@")+"='/var/db move'\n"+" "+bold("s")+"='/var/db SLOT move' "+bold("%")+"='binary move' "+bold("S")+"='binary SLOT move'\n"+bold("p")+"='update /etc/portage/package.*'\n")
7245 valid_updates, errors = parse_updates(mycontent)
7246 myupd.extend(valid_updates)
7247 writemsg_stdout(len(valid_updates) * "." + "\n")
7248 if len(errors) == 0:
7249 # Update our internal mtime since we
7250 # processed all of our directives.
7251 timestamps[mykey] = long(mystat.st_mtime)
7254 writemsg("%s\n" % msg, noiselevel=-1)
# Apply the update commands to the world file.
7256 world_file = os.path.join(root, WORLD_FILE)
7257 world_list = grabfile(world_file)
7258 world_modified = False
7259 for update_cmd in myupd:
7260 for pos, atom in enumerate(world_list):
7261 new_atom = update_dbentry(update_cmd, atom)
7262 if atom != new_atom:
7263 world_list[pos] = new_atom
7264 world_modified = True
7267 write_atomic(world_file,
7268 "".join("%s\n" % (x,) for x in world_list))
# Also update /etc/portage/package.* files (respecting CONFIG_PROTECT).
7270 update_config_files("/",
7271 mysettings.get("CONFIG_PROTECT","").split(),
7272 mysettings.get("CONFIG_PROTECT_MASK","").split(),
7275 trees["/"]["bintree"] = binarytree("/", mysettings["PKGDIR"],
7276 settings=mysettings)
7277 vardb = trees["/"]["vartree"].dbapi
7278 bindb = trees["/"]["bintree"].dbapi
7279 if not os.access(bindb.bintree.pkgdir, os.W_OK):
# Apply package moves and slot moves to the vdb and the binary tree,
# echoing one progress character per entry moved.
7281 for update_cmd in myupd:
7282 if update_cmd[0] == "move":
7283 moves = vardb.move_ent(update_cmd)
7285 writemsg_stdout(moves * "@")
7287 moves = bindb.move_ent(update_cmd)
7289 writemsg_stdout(moves * "%")
7290 elif update_cmd[0] == "slotmove":
7291 moves = vardb.move_slot_ent(update_cmd)
7293 writemsg_stdout(moves * "s")
7295 moves = bindb.move_slot_ent(update_cmd)
7297 writemsg_stdout(moves * "S")
7299 # The above global updates proceed quickly, so they
7300 # are considered a single mtimedb transaction.
7301 if len(timestamps) > 0:
7302 # We do not update the mtime in the mtimedb
7303 # until after _all_ of the above updates have
7304 # been processed because the mtimedb will
7305 # automatically commit when killed by ctrl C.
7306 for mykey, mtime in timestamps.iteritems():
7307 prev_mtimes[mykey] = mtime
7309 # We gotta do the brute force updates for these now.
7310 if mysettings["PORTAGE_CALLER"] == "fixpackages" or \
7311 "fixpackages" in mysettings.features:
7312 def onProgress(maxval, curval):
7313 writemsg_stdout("*")
7314 vardb.update_ents(myupd, onProgress=onProgress)
7316 bindb.update_ents(myupd, onProgress=onProgress)
7318 do_upgrade_packagesmessage = 1
7320 # Update progress above is indicated by characters written to stdout so
7321 # we print a couple new lines here to separate the progress output from
7326 if do_upgrade_packagesmessage and bindb and \
7328 writemsg_stdout(" ** Skipping packages. Run 'fixpackages' or set it in FEATURES to fix the")
7329 writemsg_stdout("\n tbz2's in the packages directory. "+bold("Note: This can take a very long time."))
7330 writemsg_stdout("\n")
7334 #continue setting up other trees
7334 #continue setting up other trees
# Dict subclass backing the persistent mtime database; loads itself from
# a pickle file and commits back only when its contents have changed.
# NOTE(review): several lines are elided from this view, including the
# file open/close around the Unpickler and the commit() method header.
7336 class MtimeDB(dict):
7337 def __init__(self, filename):
7339 self.filename = filename
7340 self._load(filename)
7342 def _load(self, filename):
7345 mypickle = pickle.Unpickler(f)
# Disable class lookups during unpickling so a tampered mtimedb file
# cannot instantiate arbitrary objects.
7346 mypickle.find_global = None
7350 except (IOError, OSError, EOFError, pickle.UnpicklingError), e:
7351 if isinstance(e, pickle.UnpicklingError):
7352 writemsg("!!! Error loading '%s': %s\n" % \
7353 (filename, str(e)), noiselevel=-1)
# Migrate the legacy "old" key to "updates" and ensure defaults exist.
7358 d["updates"] = d["old"]
7363 d.setdefault("starttime", 0)
7364 d.setdefault("version", "")
7365 for k in ("info", "ldpath", "updates"):
# Drop any keys that are not part of the known mtimedb schema.
7368 mtimedbkeys = set(("info", "ldpath", "resume", "resume_backup",
7369 "starttime", "updates", "version"))
7372 if k not in mtimedbkeys:
7373 writemsg("Deleting invalid mtimedb key: %s\n" % str(k))
# Snapshot the loaded state so commit() can detect modifications.
7376 self._clean_data = copy.deepcopy(d)
7379 if not self.filename:
7383 # Only commit if the internal state has changed.
7384 if d != self._clean_data:
7385 commit_mtimedb(mydict=d, filename=self.filename)
7386 self._clean_data = copy.deepcopy(d)
# Build (or rebuild) the per-root trees dict with lazily-instantiated
# vartree/porttree/bintree/virtuals entries.  When ROOT != "/", a second
# config for "/" is also set up.  (Some lines are elided from this view.)
7388 def create_trees(config_root=None, target_root=None, trees=None):
7392 # clean up any existing portdbapi instances
7393 for myroot in trees:
7394 portdb = trees[myroot]["porttree"].dbapi
7395 portdb.close_caches()
7396 portdbapi.portdbapi_instances.remove(portdb)
7397 del trees[myroot]["porttree"], myroot, portdb
7399 settings = config(config_root=config_root, target_root=target_root,
7400 config_incrementals=portage.const.INCREMENTALS)
7403 myroots = [(settings["ROOT"], settings)]
7404 if settings["ROOT"] != "/":
7405 settings = config(config_root=None, target_root="/",
7406 config_incrementals=portage.const.INCREMENTALS)
7407 # When ROOT != "/" we only want overrides from the calling
7408 # environment to apply to the config that's associated
7409 # with ROOT != "/", so we wipe out the "backupenv" for the
7410 # config that is associated with ROOT == "/" and regenerate
7411 # its incrementals.
7412 # Preserve backupenv values that are initialized in the config
7413 # constructor. Also, preserve XARGS since it is set by the
7414 # portage.data module.
7416 backupenv_whitelist = settings._environ_whitelist
7417 backupenv = settings.configdict["backupenv"]
7418 env_d = settings.configdict["env.d"]
7419 for k, v in os.environ.iteritems():
7420 if k in backupenv_whitelist:
7423 v == backupenv.get(k):
7424 backupenv.pop(k, None)
7425 settings.regenerate()
7427 myroots.append((settings["ROOT"], settings))
# Register lazy singletons so each tree is only constructed on first use.
7429 for myroot, mysettings in myroots:
7430 trees[myroot] = portage.util.LazyItemsDict(trees.get(myroot, None))
7431 trees[myroot].addLazySingleton("virtuals", mysettings.getvirtuals, myroot)
7432 trees[myroot].addLazySingleton(
7433 "vartree", vartree, myroot, categories=mysettings.categories,
7434 settings=mysettings)
7435 trees[myroot].addLazySingleton("porttree",
7436 portagetree, myroot, settings=mysettings)
7437 trees[myroot].addLazySingleton("bintree",
7438 binarytree, myroot, mysettings["PKGDIR"], settings=mysettings)
# Proxy that defers resolution of a legacy module-level global until
# first use; _get_target triggers full legacy-global initialization.
# (The class docstring delimiters are elided from this view.)
7441 class _LegacyGlobalProxy(portage.util.ObjectProxy):
7443 Instances of these serve as proxies to global variables
7444 that are initialized on demand.
7446 def __init__(self, name):
7447 portage.util.ObjectProxy.__init__(self)
# Bypass the proxy's own __setattr__ to store the target's name.
7448 object.__setattr__(self, '_name', name)
7450 def _get_target(self):
7451 init_legacy_globals()
7452 name = object.__getattribute__(self, '_name')
7453 return globals()[name]
# Lazy proxy for the global portdb; instantiates it from db[root] on
# first access.  NOTE(review): the final return statement of
# _get_target (lines 7468-7469) is elided from this view.
7455 class _PortdbProxy(portage.util.ObjectProxy):
7457 The portdb is initialized separately from the rest
7458 of the variables, since sometimes the other variables
7459 are needed while the portdb is not.
7462 def _get_target(self):
7463 init_legacy_globals()
7464 global db, portdb, root, _portdb_initialized
7465 if not _portdb_initialized:
7466 portdb = db[root]["porttree"].dbapi
7467 _portdb_initialized = True
# Lazy proxy for the global mtimedb/mtimedbfile pair; builds the MtimeDB
# from the on-disk cache path on first access.
7470 class _MtimedbProxy(portage.util.ObjectProxy):
7472 The mtimedb is independent from the portdb and other globals.
7475 def __init__(self, name):
7476 portage.util.ObjectProxy.__init__(self)
# Bypass the proxy's own __setattr__ to store the target's name.
7477 object.__setattr__(self, '_name', name)
7479 def _get_target(self):
7480 global mtimedb, mtimedbfile, _mtimedb_initialized
7481 if not _mtimedb_initialized:
7482 mtimedbfile = os.path.join("/",
7483 CACHE_PATH.lstrip(os.path.sep), "mtimedb")
7484 mtimedb = MtimeDB(mtimedbfile)
7485 _mtimedb_initialized = True
7486 name = object.__getattribute__(self, '_name')
7487 return globals()[name]
7489 _legacy_global_var_names = ("archlist", "db", "features",
7490 "groups", "mtimedb", "mtimedbfile", "pkglines",
7491 "portdb", "profiledir", "root", "selinux_enabled",
7492 "settings", "thirdpartymirrors", "usedefaults")
# Remove the lazy legacy-global proxies from the module namespace so new
# code cannot reference the deprecated variables.  (The docstring
# delimiters are elided from this view.)
7494 def _disable_legacy_globals():
7496 This deletes the ObjectProxy instances that are used
7497 for lazy initialization of legacy global variables.
7498 The purpose of deleting them is to prevent new code
7499 from referencing these deprecated variables.
7501 global _legacy_global_var_names
7502 for k in _legacy_global_var_names:
# pop() with a default ignores names that were already removed.
7503 globals().pop(k, None)
7505 # Initialization of legacy globals. No functions/classes below this point
7506 # please! When the above functions and classes become independent of the
7507 # below global variables, it will be possible to make the below code
7508 # conditional on a backward compatibility flag (backward compatibility could
7509 # be disabled via an environment variable, for example). This will enable new
7510 # code that is aware of this flag to import portage without the unnecessary
7511 # overhead (and other issues!) of initializing the legacy globals.
# One-shot initializer for all deprecated module-level globals (db,
# settings, root, archlist, ...).  Guarded so repeated calls are no-ops.
# NOTE(review): several lines are elided from this view (gaps in the
# embedded numbering), e.g. the return after the guard and the umask code.
7513 def init_legacy_globals():
7514 global _globals_initialized
7515 if _globals_initialized:
7517 _globals_initialized = True
7519 global db, settings, root, portdb, selinux_enabled, mtimedbfile, mtimedb, \
7520 archlist, features, groups, pkglines, thirdpartymirrors, usedefaults, \
7521 profiledir, flushmtimedb
7523 # Portage needs to ensure a sane umask for the files it creates.
# Roots come from the environment, defaulting to "/".
7527 for k, envvar in (("config_root", "PORTAGE_CONFIGROOT"), ("target_root", "ROOT")):
7528 kwargs[k] = os.environ.get(envvar, "/")
# _initializing_globals flags re-entrant calls made during create_trees.
7530 global _initializing_globals
7531 _initializing_globals = True
7532 db = create_trees(**kwargs)
7533 del _initializing_globals
7535 settings = db["/"]["vartree"].settings
7539 settings = db[myroot]["vartree"].settings
7542 root = settings["ROOT"]
7545 # ========================================================================
7547 # These attributes should not be used
7548 # within Portage under any circumstances.
7549 # ========================================================================
# Deprecated convenience globals derived from the settings object.
7550 archlist = settings.archlist()
7551 features = settings.features
7552 groups = settings["ACCEPT_KEYWORDS"].split()
7553 pkglines = settings.packages
7554 selinux_enabled = settings.selinux_enabled()
7555 thirdpartymirrors = settings.thirdpartymirrors()
7556 usedefaults = settings.use_defs
7558 if os.path.isdir(PROFILE_PATH):
7559 profiledir = PROFILE_PATH
# Deprecated stub kept so legacy callers of flushmtimedb() don't break.
7560 def flushmtimedb(record):
7561 writemsg("portage.flushmtimedb() is DEPRECATED\n")
7562 # ========================================================================
7564 # These attributes should not be used
7565 # within Portage under any circumstances.
7566 # ========================================================================
# Module-level bootstrap: install lazy proxies for every legacy global so
# importing portage stays cheap until one of them is actually touched.
7570 _mtimedb_initialized = False
7571 mtimedb = _MtimedbProxy("mtimedb")
7572 mtimedbfile = _MtimedbProxy("mtimedbfile")
7574 _portdb_initialized = False
7575 portdb = _PortdbProxy()
7577 _globals_initialized = False
# The remaining legacy names share the generic on-demand proxy.
7579 for k in ("db", "settings", "root", "selinux_enabled",
7580 "archlist", "features", "groups",
7581 "pkglines", "thirdpartymirrors", "usedefaults", "profiledir",
7583 globals()[k] = _LegacyGlobalProxy(k)
7588 # ============================================================================
7589 # ============================================================================