1 # portage.py -- core Portage functionality
2 # Copyright 1998-2004 Gentoo Foundation
3 # Distributed under the terms of the GNU General Public License v2
7 VERSION="$Rev$"[6:-2] + "-svn"
9 # ===========================================================================
10 # START OF IMPORTS -- START OF IMPORTS -- START OF IMPORTS -- START OF IMPORT
11 # ===========================================================================
23 import cPickle as pickle
29 from time import sleep
30 from random import shuffle
32 from itertools import chain, izip
35 except ImportError, e:
36 sys.stderr.write("\n\n")
37 sys.stderr.write("!!! Failed to complete python imports. These are internal modules for\n")
38 sys.stderr.write("!!! python and failure here indicates that you have a problem with python\n")
39 sys.stderr.write("!!! itself and thus portage is not able to continue processing.\n\n")
41 sys.stderr.write("!!! You might consider starting python with verbose flags to see what has\n")
42 sys.stderr.write("!!! gone wrong. Here is the information we got for this exception:\n")
43 sys.stderr.write(" "+str(e)+"\n\n");
47 if platform.system() in ["FreeBSD"]:
50 def _chflags(path, flags, opts=""):
51 cmd = "chflags %s %o '%s'" % (opts, flags, path)
52 status, output = commands.getstatusoutput(cmd)
53 if os.WIFEXITED(status) and os.WEXITSTATUS(status) == os.EX_OK:
55 # Try to generate an ENOENT error if appropriate.
60 # Make sure the binary exists.
61 if not portage.process.find_binary("chflags"):
62 raise portage.exception.CommandNotFound("chflags")
63 # Now we're not sure exactly why it failed or what
64 # the real errno was, so just report EPERM.
65 e = OSError(errno.EPERM, output)
def _lchflags(path, flags):
	"""Set file flags on a symlink itself rather than its target.

	Thin wrapper around _chflags() that adds the -h option, which tells
	chflags(1) to operate on the link and not follow it.
	"""
	lchflags_opts = "-h"
	return _chflags(path, flags, opts=lchflags_opts)
72 bsd_chflags.chflags = _chflags
73 bsd_chflags.lchflags = _lchflags
76 from portage.cache.cache_errors import CacheError
77 import portage.cvstree
79 import portage.getbinpkg
81 from portage.dep import dep_getcpv, dep_getkey, get_operator, \
82 isjustname, isspecific, isvalidatom, \
83 match_from_list, match_to_list, best_match_to_list
85 # XXX: This needs to get cleaned up.
87 from portage.output import bold, colorize, green, red, yellow
90 from portage.const import VDB_PATH, PRIVATE_PATH, CACHE_PATH, DEPCACHE_PATH, \
91 USER_CONFIG_PATH, MODULES_FILE_PATH, CUSTOM_PROFILE_PATH, PORTAGE_BASE_PATH, \
92 PORTAGE_BIN_PATH, PORTAGE_PYM_PATH, PROFILE_PATH, LOCALE_DATA_PATH, \
93 EBUILD_SH_BINARY, SANDBOX_BINARY, BASH_BINARY, \
94 MOVE_BINARY, PRELINK_BINARY, WORLD_FILE, MAKE_CONF_FILE, MAKE_DEFAULTS_FILE, \
95 DEPRECATED_PROFILE_FILE, USER_VIRTUALS_FILE, EBUILD_SH_ENV_FILE, \
96 INVALID_ENV_FILE, CUSTOM_MIRRORS_FILE, CONFIG_MEMORY_FILE,\
97 INCREMENTALS, EAPI, MISC_SH_BINARY, REPO_NAME_LOC, REPO_NAME_FILE
99 from portage.data import ostype, lchown, userland, secpass, uid, wheelgid, \
100 portage_uid, portage_gid, userpriv_groups
101 from portage.manifest import Manifest
104 from portage.util import atomic_ofstream, apply_secpass_permissions, apply_recursive_permissions, \
105 dump_traceback, getconfig, grabdict, grabdict_package, grabfile, grabfile_package, \
106 map_dictlist_vals, new_protect_filename, normalize_path, \
107 pickle_read, pickle_write, stack_dictlist, stack_dicts, stack_lists, \
108 unique_array, varexpand, writedict, writemsg, writemsg_stdout, write_atomic
109 import portage.exception
111 import portage.process
112 from portage.process import atexit_register, run_exitfuncs
113 from portage.locks import unlockfile,unlockdir,lockfile,lockdir
114 import portage.checksum
115 from portage.checksum import perform_md5,perform_checksum,prelink_capable
116 import portage.eclass_cache
117 from portage.localization import _
118 from portage.update import dep_transform, fixdbentries, grab_updates, \
119 parse_updates, update_config_files, update_dbentries, update_dbentry
121 # Need these functions directly in portage namespace to not break every external tool in existence
122 from portage.versions import best, catpkgsplit, catsplit, pkgcmp, \
123 pkgsplit, vercmp, ververify
125 # endversion and endversion_keys are for backward compatibility only.
126 from portage.versions import endversion_keys
127 from portage.versions import suffix_value as endversion
129 except ImportError, e:
130 sys.stderr.write("\n\n")
131 sys.stderr.write("!!! Failed to complete portage imports. There are internal modules for\n")
132 sys.stderr.write("!!! portage and failure here indicates that you have a problem with your\n")
133 sys.stderr.write("!!! installation of portage. Please try a rescue portage located in the\n")
134 sys.stderr.write("!!! portage tree under '/usr/portage/sys-apps/portage/files/' (default).\n")
135 sys.stderr.write("!!! There is a README.RESCUE file that details the steps required to perform\n")
136 sys.stderr.write("!!! a recovery of portage.\n")
137 sys.stderr.write(" "+str(e)+"\n\n")
142 import portage._selinux as selinux
144 writemsg("!!! SELinux not loaded: %s\n" % str(e), noiselevel=-1)
149 # ===========================================================================
150 # END OF IMPORTS -- END OF IMPORTS -- END OF IMPORTS -- END OF IMPORTS -- END
151 # ===========================================================================
155 modname = ".".join(name.split(".")[:-1])
156 mod = __import__(modname)
157 components = name.split('.')
158 for comp in components[1:]:
159 mod = getattr(mod, comp)
162 def best_from_dict(key, top_dict, key_order, EmptyOnError=1, FullCopy=1, AllowEmpty=1):
164 if x in top_dict and key in top_dict[x]:
166 return copy.deepcopy(top_dict[x][key])
168 return top_dict[x][key]
172 raise KeyError("Key not found in list; '%s'" % key)
175 "this fixes situations where the current directory doesn't exist"
178 except OSError: #dir doesn't exist
183 def abssymlink(symlink):
184 "This reads symlinks, resolving the relative symlinks, and returning the absolute."
185 mylink=os.readlink(symlink)
187 mydir=os.path.dirname(symlink)
188 mylink=mydir+"/"+mylink
189 return os.path.normpath(mylink)
195 def cacheddir(my_original_path, ignorecvs, ignorelist, EmptyOnError, followSymlinks=True):
196 global cacheHit,cacheMiss,cacheStale
197 mypath = normalize_path(my_original_path)
198 if mypath in dircache:
200 cached_mtime, list, ftype = dircache[mypath]
203 cached_mtime, list, ftype = -1, [], []
205 pathstat = os.stat(mypath)
206 if stat.S_ISDIR(pathstat[stat.ST_MODE]):
207 mtime = pathstat.st_mtime
209 raise portage.exception.DirectoryNotFound(mypath)
210 except EnvironmentError, e:
211 if e.errno == portage.exception.PermissionDenied.errno:
212 raise portage.exception.PermissionDenied(mypath)
217 except portage.exception.PortageException:
221 # Python returns mtime in seconds, so if it was changed in the last few seconds, it could be invalid
222 if mtime != cached_mtime or time.time() - mtime < 4:
223 if mypath in dircache:
226 list = os.listdir(mypath)
227 except EnvironmentError, e:
228 if e.errno != errno.EACCES:
231 raise portage.exception.PermissionDenied(mypath)
236 pathstat = os.stat(mypath+"/"+x)
238 pathstat = os.lstat(mypath+"/"+x)
240 if stat.S_ISREG(pathstat[stat.ST_MODE]):
242 elif stat.S_ISDIR(pathstat[stat.ST_MODE]):
244 elif stat.S_ISLNK(pathstat[stat.ST_MODE]):
248 except (IOError, OSError):
250 dircache[mypath] = mtime, list, ftype
254 for x in range(0, len(list)):
255 if(ignorecvs and (len(list[x]) > 2) and (list[x][:2]!=".#")):
256 ret_list.append(list[x])
257 ret_ftype.append(ftype[x])
258 elif (list[x] not in ignorelist):
259 ret_list.append(list[x])
260 ret_ftype.append(ftype[x])
262 writemsg("cacheddirStats: H:%d/M:%d/S:%d\n" % (cacheHit, cacheMiss, cacheStale),10)
263 return ret_list, ret_ftype
265 def listdir(mypath, recursive=False, filesonly=False, ignorecvs=False, ignorelist=[], followSymlinks=True,
266 EmptyOnError=False, dirsonly=False):
268 Portage-specific implementation of os.listdir
270 @param mypath: Path whose contents you wish to list
272 @param recursive: Recursively scan directories contained within mypath
273 @type recursive: Boolean
274 @param filesonly: Only return files, not more directories
275 @type filesonly: Boolean
276 @param ignorecvs: Ignore CVS directories ('CVS','.svn','SCCS')
277 @type ignorecvs: Boolean
278 @param ignorelist: List of filenames/directories to exclude
279 @type ignorelist: List
280 @param followSymlinks: Follow Symlink'd files and directories
281 @type followSymlinks: Boolean
282 @param EmptyOnError: Return [] if an error occurs.
283 @type EmptyOnError: Boolean
284 @param dirsonly: Only return directories.
285 @type dirsonly: Boolean
287 @returns: A list of files and directories (or just files or just directories) or an empty list.
290 list, ftype = cacheddir(mypath, ignorecvs, ignorelist, EmptyOnError, followSymlinks)
297 if not (filesonly or dirsonly or recursive):
303 if ftype[x]==1 and not (ignorecvs and os.path.basename(list[x]) in ('CVS','.svn','SCCS')):
304 l,f = cacheddir(mypath+"/"+list[x], ignorecvs, ignorelist, EmptyOnError,
308 for y in range(0,len(l)):
309 l[y]=list[x]+"/"+l[y]
315 for x in range(0,len(ftype)):
317 rlist=rlist+[list[x]]
320 for x in range(0, len(ftype)):
322 rlist = rlist + [list[x]]
328 def flatten(mytokens):
329 """this function now turns a [1,[2,3]] list into
330 a [1,2,3] list and returns it."""
333 if isinstance(x, list):
334 newlist.extend(flatten(x))
339 #beautiful directed graph object
341 class digraph(object):
343 """Create an empty digraph"""
345 # { node : ( { child : priority } , { parent : priority } ) }
349 def add(self, node, parent, priority=0):
350 """Adds the specified node with the specified parent.
352 If the dep is a soft-dep and the node already has a hard
353 relationship to the parent, the relationship is left as hard."""
355 if node not in self.nodes:
356 self.nodes[node] = ({}, {}, node)
357 self.order.append(node)
362 if parent not in self.nodes:
363 self.nodes[parent] = ({}, {}, parent)
364 self.order.append(parent)
366 if parent in self.nodes[node][1]:
367 if priority > self.nodes[node][1][parent]:
368 self.nodes[node][1][parent] = priority
370 self.nodes[node][1][parent] = priority
372 if node in self.nodes[parent][0]:
373 if priority > self.nodes[parent][0][node]:
374 self.nodes[parent][0][node] = priority
376 self.nodes[parent][0][node] = priority
378 def remove(self, node):
379 """Removes the specified node from the digraph, also removing
380 any ties to other nodes in the digraph. Raises KeyError if the
381 node doesn't exist."""
383 if node not in self.nodes:
386 for parent in self.nodes[node][1]:
387 del self.nodes[parent][0][node]
388 for child in self.nodes[node][0]:
389 del self.nodes[child][1][node]
392 self.order.remove(node)
394 def difference_update(self, t):
396 Remove all given nodes from node_set. This is more efficient
397 than multiple calls to the remove() method.
399 if isinstance(t, (list, tuple)) or \
400 not hasattr(t, "__contains__"):
403 for node in self.order:
407 for parent in self.nodes[node][1]:
408 del self.nodes[parent][0][node]
409 for child in self.nodes[node][0]:
410 del self.nodes[child][1][node]
414 def remove_edge(self, child, parent):
416 Remove edge in the direction from child to parent. Note that it is
417 possible for a remaining edge to exist in the opposite direction.
418 Any endpoint vertices that become isolated will remain in the graph.
421 # Nothing should be modified when a KeyError is raised.
422 for k in parent, child:
423 if k not in self.nodes:
426 # Make sure the edge exists.
427 if child not in self.nodes[parent][0]:
428 raise KeyError(child)
429 if parent not in self.nodes[child][1]:
430 raise KeyError(parent)
433 del self.nodes[child][1][parent]
434 del self.nodes[parent][0][child]
437 return iter(self.order)
439 def contains(self, node):
440 """Checks if the digraph contains mynode"""
441 return node in self.nodes
443 def get(self, key, default=None):
444 node_data = self.nodes.get(key, self)
445 if node_data is self:
450 """Return a list of all nodes in the graph"""
453 def child_nodes(self, node, ignore_priority=None):
454 """Return all children of the specified node"""
455 if ignore_priority is None:
456 return self.nodes[node][0].keys()
458 for child, priority in self.nodes[node][0].iteritems():
459 if priority > ignore_priority:
460 children.append(child)
463 def parent_nodes(self, node):
464 """Return all parents of the specified node"""
465 return self.nodes[node][1].keys()
467 def leaf_nodes(self, ignore_priority=None):
468 """Return all nodes that have no children
470 If ignore_soft_deps is True, soft deps are not counted as
471 children in calculations."""
474 for node in self.order:
476 for child in self.nodes[node][0]:
477 if self.nodes[node][0][child] > ignore_priority:
481 leaf_nodes.append(node)
484 def root_nodes(self, ignore_priority=None):
485 """Return all nodes that have no parents.
487 If ignore_soft_deps is True, soft deps are not counted as
488 parents in calculations."""
491 for node in self.order:
493 for parent in self.nodes[node][1]:
494 if self.nodes[node][1][parent] > ignore_priority:
498 root_nodes.append(node)
502 """Checks if the digraph is empty"""
503 return len(self.nodes) == 0
508 for k, v in self.nodes.iteritems():
509 clone.nodes[k] = (v[0].copy(), v[1].copy(), v[2])
510 clone.order = self.order[:]
513 # Backward compatibility
516 allzeros = leaf_nodes
518 __contains__ = contains
522 def delnode(self, node):
529 leaf_nodes = self.leaf_nodes()
534 def hasallzeros(self, ignore_priority=None):
535 return len(self.leaf_nodes(ignore_priority=ignore_priority)) == \
538 def debug_print(self):
540 writemsg(s, noiselevel=-1)
541 for node in self.nodes:
542 output("%s " % (node,))
543 if self.nodes[node][0]:
544 output("depends on\n")
546 output("(no children)\n")
547 for child in self.nodes[node][0]:
548 output(" %s (%s)\n" % \
549 (child, self.nodes[node][0][child],))
551 #parse /etc/env.d and generate /etc/profile.env
553 def env_update(makelinks=1, target_root=None, prev_mtimes=None, contents=None,
554 env=None, writemsg_level=portage.util.writemsg_level):
555 if target_root is None:
557 target_root = settings["ROOT"]
558 if prev_mtimes is None:
560 prev_mtimes = mtimedb["ldpath"]
563 envd_dir = os.path.join(target_root, "etc", "env.d")
564 portage.util.ensure_dirs(envd_dir, mode=0755)
565 fns = listdir(envd_dir, EmptyOnError=1)
571 if not x[0].isdigit() or not x[1].isdigit():
573 if x.startswith(".") or x.endswith("~") or x.endswith(".bak"):
579 space_separated = set(["CONFIG_PROTECT", "CONFIG_PROTECT_MASK"])
580 colon_separated = set(["ADA_INCLUDE_PATH", "ADA_OBJECTS_PATH",
581 "CLASSPATH", "INFODIR", "INFOPATH", "KDEDIRS", "LDPATH", "MANPATH",
582 "PATH", "PKG_CONFIG_PATH", "PRELINK_PATH", "PRELINK_PATH_MASK",
583 "PYTHONPATH", "ROOTPATH"])
588 file_path = os.path.join(envd_dir, x)
590 myconfig = getconfig(file_path, expand=False)
591 except portage.exception.ParseError, e:
592 writemsg("!!! '%s'\n" % str(e), noiselevel=-1)
596 # broken symlink or file removed by a concurrent process
597 writemsg("!!! File Not Found: '%s'\n" % file_path, noiselevel=-1)
599 config_list.append(myconfig)
600 if "SPACE_SEPARATED" in myconfig:
601 space_separated.update(myconfig["SPACE_SEPARATED"].split())
602 del myconfig["SPACE_SEPARATED"]
603 if "COLON_SEPARATED" in myconfig:
604 colon_separated.update(myconfig["COLON_SEPARATED"].split())
605 del myconfig["COLON_SEPARATED"]
609 for var in space_separated:
611 for myconfig in config_list:
613 for item in myconfig[var].split():
614 if item and not item in mylist:
616 del myconfig[var] # prepare for env.update(myconfig)
618 env[var] = " ".join(mylist)
619 specials[var] = mylist
621 for var in colon_separated:
623 for myconfig in config_list:
625 for item in myconfig[var].split(":"):
626 if item and not item in mylist:
628 del myconfig[var] # prepare for env.update(myconfig)
630 env[var] = ":".join(mylist)
631 specials[var] = mylist
633 for myconfig in config_list:
634 """Cumulative variables have already been deleted from myconfig so that
635 they won't be overwritten by this dict.update call."""
638 ldsoconf_path = os.path.join(target_root, "etc", "ld.so.conf")
640 myld = open(ldsoconf_path)
641 myldlines=myld.readlines()
645 #each line has at least one char (a newline)
649 except (IOError, OSError), e:
650 if e.errno != errno.ENOENT:
654 ld_cache_update=False
656 newld = specials["LDPATH"]
658 #ld.so.conf needs updating and ldconfig needs to be run
659 myfd = atomic_ofstream(ldsoconf_path)
660 myfd.write("# ld.so.conf autogenerated by env-update; make all changes to\n")
661 myfd.write("# contents of /etc/env.d directory\n")
662 for x in specials["LDPATH"]:
667 # Update prelink.conf if we are prelink-enabled
669 newprelink = atomic_ofstream(
670 os.path.join(target_root, "etc", "prelink.conf"))
671 newprelink.write("# prelink.conf autogenerated by env-update; make all changes to\n")
672 newprelink.write("# contents of /etc/env.d directory\n")
674 for x in ["/bin","/sbin","/usr/bin","/usr/sbin","/lib","/usr/lib"]:
675 newprelink.write("-l "+x+"\n");
676 for x in specials["LDPATH"]+specials["PATH"]+specials["PRELINK_PATH"]:
682 for y in specials["PRELINK_PATH_MASK"]:
691 newprelink.write("-h "+x+"\n")
692 for x in specials["PRELINK_PATH_MASK"]:
693 newprelink.write("-b "+x+"\n")
696 # Portage stores mtimes with 1 second granularity but in >=python-2.5 finer
697 # granularity is possible. In order to avoid the potential ambiguity of
698 # mtimes that differ by less than 1 second, sleep here if any of the
699 # directories have been modified during the current second.
700 sleep_for_mtime_granularity = False
701 current_time = long(time.time())
702 mtime_changed = False
704 for lib_dir in portage.util.unique_array(specials["LDPATH"]+['usr/lib','usr/lib64','usr/lib32','lib','lib64','lib32']):
705 x = os.path.join(target_root, lib_dir.lstrip(os.sep))
707 newldpathtime = long(os.stat(x).st_mtime)
708 lib_dirs.add(normalize_path(x))
710 if oe.errno == errno.ENOENT:
715 # ignore this path because it doesn't exist
718 if newldpathtime == current_time:
719 sleep_for_mtime_granularity = True
721 if prev_mtimes[x] == newldpathtime:
724 prev_mtimes[x] = newldpathtime
727 prev_mtimes[x] = newldpathtime
731 ld_cache_update = True
734 not ld_cache_update and \
735 contents is not None:
736 libdir_contents_changed = False
737 for mypath, mydata in contents.iteritems():
738 if mydata[0] not in ("obj","sym"):
740 head, tail = os.path.split(mypath)
742 libdir_contents_changed = True
744 if not libdir_contents_changed:
747 ldconfig = "/sbin/ldconfig"
748 if "CHOST" in env and "CBUILD" in env and \
749 env["CHOST"] != env["CBUILD"]:
750 from portage.process import find_binary
751 ldconfig = find_binary("%s-ldconfig" % env["CHOST"])
753 # Only run ldconfig as needed
754 if (ld_cache_update or makelinks) and ldconfig:
755 # ldconfig has very different behaviour between FreeBSD and Linux
756 if ostype=="Linux" or ostype.lower().endswith("gnu"):
757 # We can't update links if we haven't cleaned other versions first, as
758 # an older package installed ON TOP of a newer version will cause ldconfig
759 # to overwrite the symlinks we just made. -X means no links. After 'clean'
760 # we can safely create links.
761 writemsg_level(">>> Regenerating %setc/ld.so.cache...\n" % \
764 os.system("cd / ; %s -r '%s'" % (ldconfig, target_root))
766 os.system("cd / ; %s -X -r '%s'" % (ldconfig, target_root))
767 elif ostype in ("FreeBSD","DragonFly"):
768 writemsg_level(">>> Regenerating %svar/run/ld-elf.so.hints...\n" % \
770 os.system(("cd / ; %s -elf -i " + \
771 "-f '%svar/run/ld-elf.so.hints' '%setc/ld.so.conf'") % \
772 (ldconfig, target_root, target_root))
774 del specials["LDPATH"]
776 penvnotice = "# THIS FILE IS AUTOMATICALLY GENERATED BY env-update.\n"
777 penvnotice += "# DO NOT EDIT THIS FILE. CHANGES TO STARTUP PROFILES\n"
778 cenvnotice = penvnotice[:]
779 penvnotice += "# GO INTO /etc/profile NOT /etc/profile.env\n\n"
780 cenvnotice += "# GO INTO /etc/csh.cshrc NOT /etc/csh.env\n\n"
782 #create /etc/profile.env for bash support
783 outfile = atomic_ofstream(os.path.join(target_root, "etc", "profile.env"))
784 outfile.write(penvnotice)
786 env_keys = [ x for x in env if x != "LDPATH" ]
790 if v.startswith('$') and not v.startswith('${'):
791 outfile.write("export %s=$'%s'\n" % (k, v[1:]))
793 outfile.write("export %s='%s'\n" % (k, v))
796 #create /etc/csh.env for (t)csh support
797 outfile = atomic_ofstream(os.path.join(target_root, "etc", "csh.env"))
798 outfile.write(cenvnotice)
800 outfile.write("setenv %s '%s'\n" % (x, env[x]))
803 if sleep_for_mtime_granularity:
804 while current_time == long(time.time()):
807 def ExtractKernelVersion(base_dir):
809 Try to figure out what kernel version we are running
810 @param base_dir: Path to sources (usually /usr/src/linux)
811 @type base_dir: string
812 @rtype: tuple( version[string], error[string])
814 1. tuple( version[string], error[string])
815 Either version or error is populated (but never both)
819 pathname = os.path.join(base_dir, 'Makefile')
821 f = open(pathname, 'r')
822 except OSError, details:
823 return (None, str(details))
824 except IOError, details:
825 return (None, str(details))
829 lines.append(f.readline())
830 except OSError, details:
831 return (None, str(details))
832 except IOError, details:
833 return (None, str(details))
835 lines = [l.strip() for l in lines]
839 #XXX: The following code relies on the ordering of vars within the Makefile
841 # split on the '=' then remove annoying whitespace
842 items = line.split("=")
843 items = [i.strip() for i in items]
844 if items[0] == 'VERSION' or \
845 items[0] == 'PATCHLEVEL':
848 elif items[0] == 'SUBLEVEL':
850 elif items[0] == 'EXTRAVERSION' and \
851 items[-1] != items[0]:
854 # Grab a list of files named localversion* and sort them
855 localversions = os.listdir(base_dir)
856 for x in range(len(localversions)-1,-1,-1):
857 if localversions[x][:12] != "localversion":
861 # Append the contents of each to the version string, stripping ALL whitespace
862 for lv in localversions:
863 version += "".join( " ".join( grabfile( base_dir+ "/" + lv ) ).split() )
865 # Check the .config for a CONFIG_LOCALVERSION and append that too, also stripping whitespace
866 kernelconfig = getconfig(base_dir+"/.config")
867 if kernelconfig and "CONFIG_LOCALVERSION" in kernelconfig:
868 version += "".join(kernelconfig["CONFIG_LOCALVERSION"].split())
870 return (version,None)
872 def autouse(myvartree, use_cache=1, mysettings=None):
874 autouse returns a list of USE variables auto-enabled to packages being installed
876 @param myvartree: Instance of the vartree class (from /var/db/pkg...)
877 @type myvartree: vartree
878 @param use_cache: read values from cache
879 @type use_cache: Boolean
880 @param mysettings: Instance of config
881 @type mysettings: config
883 @returns: A string containing a list of USE variables that are enabled via use.defaults
885 if mysettings is None:
887 mysettings = settings
888 if mysettings.profile_path is None:
891 usedefaults = mysettings.use_defs
892 for myuse in usedefaults:
894 for mydep in usedefaults[myuse]:
895 if not myvartree.dep_match(mydep,use_cache=True):
899 myusevars += " "+myuse
def check_config_instance(test):
	"""Verify that *test* is a config instance; raise TypeError otherwise."""
	if isinstance(test, config):
		return
	raise TypeError("Invalid type for config object: %s (should be %s)" % (test.__class__, config))
906 class config(object):
908 This class encompasses the main portage configuration. Data is pulled from
909 ROOT/PORTDIR/profiles/, from ROOT/etc/make.profile incrementally through all
910 parent profiles as well as from ROOT/PORTAGE_CONFIGROOT/* for user specified
913 Generally if you need data like USE flags, FEATURES, environment variables,
914 virtuals ...etc you look in here.
918 "A", "AA", "CATEGORY", "EBUILD_PHASE", "EMERGE_FROM",
919 "PF", "PKGUSE", "PORTAGE_CONFIGROOT", "PORTAGE_IUSE",
920 "PORTAGE_REPO_NAME", "PORTAGE_USE", "ROOT"
923 _environ_whitelist = []
925 # Whitelisted variables are always allowed to enter the ebuild
926 # environment. Generally, this only includes special portage
927 # variables. Ebuilds can unset variables that are not whitelisted
928 # and rely on them remaining unset for future phases, without them
929 # leaking back in from various locations (bug #189417). It's very
930 # important to set our special BASH_ENV variable in the ebuild
931 # environment in order to prevent sandbox from sourcing /etc/profile
932 # in its bashrc (causing major leakage).
933 _environ_whitelist += [
934 "BASH_ENV", "BUILD_PREFIX", "D",
935 "DISTDIR", "DOC_SYMLINKS_DIR", "EBUILD",
936 "EBUILD_EXIT_STATUS_FILE", "EBUILD_FORCE_TEST",
937 "EBUILD_PHASE", "ECLASSDIR", "ECLASS_DEPTH", "EMERGE_FROM",
938 "FEATURES", "FILESDIR", "HOME", "PATH",
940 "PKGUSE", "PKG_LOGDIR", "PKG_TMPDIR",
941 "PORTAGE_ACTUAL_DISTDIR", "PORTAGE_ARCHLIST",
943 "PORTAGE_BINPKG_FILE", "PORTAGE_BINPKG_TAR_OPTS",
944 "PORTAGE_BINPKG_TMPFILE",
946 "PORTAGE_BUILDDIR", "PORTAGE_COLORMAP",
947 "PORTAGE_CONFIGROOT", "PORTAGE_DEBUG", "PORTAGE_DEPCACHEDIR",
948 "PORTAGE_GID", "PORTAGE_INST_GID", "PORTAGE_INST_UID",
950 "PORTAGE_LOG_FILE", "PORTAGE_MASTER_PID",
951 "PORTAGE_PYM_PATH", "PORTAGE_QUIET",
952 "PORTAGE_REPO_NAME", "PORTAGE_RESTRICT",
953 "PORTAGE_TMPDIR", "PORTAGE_UPDATE_ENV",
954 "PORTAGE_VERBOSE", "PORTAGE_WORKDIR_MODE",
955 "PORTDIR", "PORTDIR_OVERLAY", "PREROOTPATH", "PROFILE_PATHS",
956 "ROOT", "ROOTPATH", "STARTDIR", "T", "TMP", "TMPDIR",
957 "USE_EXPAND", "USE_ORDER", "WORKDIR",
961 # user config variables
962 _environ_whitelist += [
963 "DOC_SYMLINKS_DIR", "INSTALL_MASK", "PKG_INSTALL_MASK"
966 _environ_whitelist += [
967 "A", "AA", "CATEGORY", "P", "PF", "PN", "PR", "PV", "PVR"
970 # misc variables inherited from the calling environment
971 _environ_whitelist += [
972 "COLORTERM", "DISPLAY", "EDITOR", "LESS",
973 "LESSOPEN", "LOGNAME", "LS_COLORS", "PAGER",
974 "TERM", "TERMCAP", "USER",
977 # other variables inherited from the calling environment
978 _environ_whitelist += [
979 "CVS_RSH", "ECHANGELOG_USER",
981 "SSH_AGENT_PID", "SSH_AUTH_SOCK",
982 "STY", "WINDOW", "XAUTHORITY",
985 _environ_whitelist = frozenset(_environ_whitelist)
987 _environ_whitelist_re = re.compile(r'^(CCACHE_|DISTCC_).*')
989 # Filter selected variables in the config.environ() method so that
990 # they don't needlessly propagate down into the ebuild environment.
993 # misc variables inherited from the calling environment
995 "INFOPATH", "MANPATH",
998 # variables that break bash
1003 # portage config variables and variables set directly by portage
1004 _environ_filter += [
1005 "ACCEPT_KEYWORDS", "AUTOCLEAN",
1006 "CLEAN_DELAY", "COLLISION_IGNORE", "CONFIG_PROTECT",
1007 "CONFIG_PROTECT_MASK", "EMERGE_DEFAULT_OPTS",
1008 "EMERGE_WARNING_DELAY", "FETCHCOMMAND", "FETCHCOMMAND_FTP",
1009 "FETCHCOMMAND_HTTP", "FETCHCOMMAND_SFTP",
1010 "GENTOO_MIRRORS", "NOCONFMEM", "O",
1011 "PORTAGE_BACKGROUND",
1012 "PORTAGE_BINHOST_CHUNKSIZE", "PORTAGE_CALLER",
1013 "PORTAGE_COUNTER_HASH",
1014 "PORTAGE_ECLASS_WARNING_ENABLE", "PORTAGE_ELOG_CLASSES",
1015 "PORTAGE_ELOG_MAILFROM", "PORTAGE_ELOG_MAILSUBJECT",
1016 "PORTAGE_ELOG_MAILURI", "PORTAGE_ELOG_SYSTEM",
1017 "PORTAGE_FETCH_CHECKSUM_TRY_MIRRORS", "PORTAGE_FETCH_RESUME_MIN_SIZE",
1019 "PORTAGE_GPG_KEY", "PORTAGE_IONICE_COMMAND",
1020 "PORTAGE_PACKAGE_EMPTY_ABORT",
1021 "PORTAGE_RO_DISTDIRS",
1022 "PORTAGE_RSYNC_EXTRA_OPTS", "PORTAGE_RSYNC_OPTS",
1023 "PORTAGE_RSYNC_RETRIES", "PORTAGE_USE", "PORT_LOGDIR",
1024 "QUICKPKG_DEFAULT_OPTS",
1025 "RESUMECOMMAND", "RESUMECOMMAND_HTTP", "RESUMECOMMAND_HTTP",
1026 "RESUMECOMMAND_SFTP", "SYNC", "USE_EXPAND_HIDDEN", "USE_ORDER",
1029 _environ_filter = frozenset(_environ_filter)
1031 def __init__(self, clone=None, mycpv=None, config_profile_path=None,
1032 config_incrementals=None, config_root=None, target_root=None,
1035 @param clone: If provided, init will use deepcopy to copy by value the instance.
1036 @type clone: Instance of config class.
1037 @param mycpv: CPV to load up (see setcpv), this is the same as calling init with mycpv=None
1038 and then calling instance.setcpv(mycpv).
1040 @param config_profile_path: Configurable path to the profile (usually PROFILE_PATH from portage.const)
1041 @type config_profile_path: String
1042 @param config_incrementals: List of incremental variables (usually portage.const.INCREMENTALS)
1043 @type config_incrementals: List
1044 @param config_root: path to read local config from (defaults to "/", see PORTAGE_CONFIGROOT)
1045 @type config_root: String
1046 @param target_root: __init__ override of $ROOT env variable.
1047 @type target_root: String
1048 @param local_config: Enables loading of local config (/etc/portage); used most by repoman to
1049 ignore local config (keywording and unmasking)
1050 @type local_config: Boolean
1053 # When initializing the global portage.settings instance, avoid
1054 # raising exceptions whenever possible since exceptions thrown
1055 # from 'import portage' or 'import portage.exceptions' statements
1056 # can practically render the api unusable for api consumers.
1057 tolerant = "_initializing_globals" in globals()
1059 self.already_in_regenerate = 0
1064 self.modifiedkeys = []
1066 self._accept_chost_re = None
1070 self.dirVirtuals = None
1073 # Virtuals obtained from the vartree
1074 self.treeVirtuals = {}
1075 # Virtuals by user specification. Includes negatives.
1076 self.userVirtuals = {}
1077 # Virtual negatives from user specifications.
1078 self.negVirtuals = {}
1079 # Virtuals added by the depgraph via self.setinst().
1080 self._depgraphVirtuals = {}
1082 self.user_profile_dir = None
1083 self.local_config = local_config
1086 self.incrementals = copy.deepcopy(clone.incrementals)
1087 self.profile_path = copy.deepcopy(clone.profile_path)
1088 self.user_profile_dir = copy.deepcopy(clone.user_profile_dir)
1089 self.local_config = copy.deepcopy(clone.local_config)
1091 self.module_priority = copy.deepcopy(clone.module_priority)
1092 self.modules = copy.deepcopy(clone.modules)
1094 self.depcachedir = copy.deepcopy(clone.depcachedir)
1096 self.packages = copy.deepcopy(clone.packages)
1097 self.virtuals = copy.deepcopy(clone.virtuals)
1099 self.dirVirtuals = copy.deepcopy(clone.dirVirtuals)
1100 self.treeVirtuals = copy.deepcopy(clone.treeVirtuals)
1101 self.userVirtuals = copy.deepcopy(clone.userVirtuals)
1102 self.negVirtuals = copy.deepcopy(clone.negVirtuals)
1103 self._depgraphVirtuals = copy.deepcopy(clone._depgraphVirtuals)
1105 self.use_defs = copy.deepcopy(clone.use_defs)
1106 self.usemask = copy.deepcopy(clone.usemask)
1107 self.usemask_list = copy.deepcopy(clone.usemask_list)
1108 self.pusemask_list = copy.deepcopy(clone.pusemask_list)
1109 self.useforce = copy.deepcopy(clone.useforce)
1110 self.useforce_list = copy.deepcopy(clone.useforce_list)
1111 self.puseforce_list = copy.deepcopy(clone.puseforce_list)
1112 self.puse = copy.deepcopy(clone.puse)
1113 self.make_defaults_use = copy.deepcopy(clone.make_defaults_use)
1114 self.pkgprofileuse = copy.deepcopy(clone.pkgprofileuse)
1115 self.mycpv = copy.deepcopy(clone.mycpv)
1117 self.configlist = copy.deepcopy(clone.configlist)
1118 self.lookuplist = self.configlist[:]
1119 self.lookuplist.reverse()
1121 "env.d": self.configlist[0],
1122 "pkginternal": self.configlist[1],
1123 "globals": self.configlist[2],
1124 "defaults": self.configlist[3],
1125 "conf": self.configlist[4],
1126 "pkg": self.configlist[5],
1127 "auto": self.configlist[6],
1128 "backupenv": self.configlist[7],
1129 "env": self.configlist[8] }
1130 self.profiles = copy.deepcopy(clone.profiles)
1131 self.backupenv = self.configdict["backupenv"]
1132 self.pusedict = copy.deepcopy(clone.pusedict)
1133 self.categories = copy.deepcopy(clone.categories)
1134 self.pkeywordsdict = copy.deepcopy(clone.pkeywordsdict)
1135 self._pkeywords_list = copy.deepcopy(clone._pkeywords_list)
1136 self.pmaskdict = copy.deepcopy(clone.pmaskdict)
1137 self.punmaskdict = copy.deepcopy(clone.punmaskdict)
1138 self.prevmaskdict = copy.deepcopy(clone.prevmaskdict)
1139 self.pprovideddict = copy.deepcopy(clone.pprovideddict)
1140 self.features = copy.deepcopy(clone.features)
1142 self._accept_license = copy.deepcopy(clone._accept_license)
1143 self._plicensedict = copy.deepcopy(clone._plicensedict)
def check_var_directory(varname, var):
	"""Ensure that a directory-valued configuration variable actually
	points at an existing directory.

	@param varname: name of the variable (used only in the error message)
	@param var: filesystem path that must be an existing directory
	@raise portage.exception.DirectoryNotFound: if var is not a directory
	"""
	if not os.path.isdir(var):
		writemsg(("!!! Error: %s='%s' is not a directory. " + \
			"Please correct this.\n") % (varname, var),
			noiselevel=-1)
		raise portage.exception.DirectoryNotFound(var)
1153 if config_root is None:
1156 config_root = normalize_path(os.path.abspath(
1157 config_root)).rstrip(os.path.sep) + os.path.sep
1159 check_var_directory("PORTAGE_CONFIGROOT", config_root)
1161 self.depcachedir = DEPCACHE_PATH
1163 if not config_profile_path:
1164 config_profile_path = \
1165 os.path.join(config_root, PROFILE_PATH.lstrip(os.path.sep))
1166 if os.path.isdir(config_profile_path):
1167 self.profile_path = config_profile_path
1169 self.profile_path = None
1171 self.profile_path = config_profile_path[:]
1173 if not config_incrementals:
1174 writemsg("incrementals not specified to class config\n")
1175 self.incrementals = copy.deepcopy(portage.const.INCREMENTALS)
1177 self.incrementals = copy.deepcopy(config_incrementals)
1179 self.module_priority = ["user","default"]
1181 self.modules["user"] = getconfig(
1182 os.path.join(config_root, MODULES_FILE_PATH.lstrip(os.path.sep)))
1183 if self.modules["user"] is None:
1184 self.modules["user"] = {}
1185 self.modules["default"] = {
1186 "portdbapi.metadbmodule": "portage.cache.metadata.database",
1187 "portdbapi.auxdbmodule": "portage.cache.flat_hash.database",
1193 # back up our incremental variables:
1195 # configlist will contain: [ env.d, globals, defaults, conf, pkg, auto, backupenv, env ]
1196 self.configlist.append({})
1197 self.configdict["env.d"] = self.configlist[-1]
1199 self.configlist.append({})
1200 self.configdict["pkginternal"] = self.configlist[-1]
1202 # The symlink might not exist or might not be a symlink.
1203 if self.profile_path is None:
def addProfile(currentPath):
	"""Recursively register a profile directory and its parents
	(depth-first, so parents come before children in self.profiles).

	@param currentPath: absolute path of the profile directory
	@raise portage.exception.ParseError: on unsupported EAPI, an empty
		parent file, or a parent path that does not exist
	"""
	parentsFile = os.path.join(currentPath, "parent")
	eapi_file = os.path.join(currentPath, "eapi")
	if os.path.exists(eapi_file):
		# Read the profile's declared EAPI; close the handle explicitly
		# instead of leaking it via open(...).readline().
		f = open(eapi_file)
		try:
			eapi = f.readline().strip()
		finally:
			f.close()
		if not eapi_is_supported(eapi):
			raise portage.exception.ParseError(
				"Profile contains unsupported " + \
				"EAPI '%s': '%s'" % \
				(eapi, os.path.realpath(eapi_file),))
	if os.path.exists(parentsFile):
		parents = grabfile(parentsFile)
		if not parents:
			raise portage.exception.ParseError(
				"Empty parent file: '%s'" % parentsFile)
		for parentPath in parents:
			# Parent entries are relative to the current profile dir.
			parentPath = normalize_path(os.path.join(
				currentPath, parentPath))
			if os.path.exists(parentPath):
				addProfile(parentPath)
			else:
				raise portage.exception.ParseError(
					"Parent '%s' not found: '%s'" % \
					(parentPath, parentsFile))
	self.profiles.append(currentPath)
1236 addProfile(os.path.realpath(self.profile_path))
1237 except portage.exception.ParseError, e:
1238 writemsg("!!! Unable to parse profile: '%s'\n" % \
1239 self.profile_path, noiselevel=-1)
1240 writemsg("!!! ParseError: %s\n" % str(e), noiselevel=-1)
1243 if local_config and self.profiles:
1244 custom_prof = os.path.join(
1245 config_root, CUSTOM_PROFILE_PATH.lstrip(os.path.sep))
1246 if os.path.exists(custom_prof):
1247 self.user_profile_dir = custom_prof
1248 self.profiles.append(custom_prof)
1251 self.packages_list = [grabfile_package(os.path.join(x, "packages")) for x in self.profiles]
1252 self.packages = stack_lists(self.packages_list, incremental=1)
1253 del self.packages_list
1254 #self.packages = grab_stacked("packages", self.profiles, grabfile, incremental_lines=1)
1257 self.prevmaskdict={}
1258 for x in self.packages:
1259 mycatpkg=dep_getkey(x)
1260 if mycatpkg not in self.prevmaskdict:
1261 self.prevmaskdict[mycatpkg]=[x]
1263 self.prevmaskdict[mycatpkg].append(x)
1265 self._pkeywords_list = []
1266 rawpkeywords = [grabdict_package(
1267 os.path.join(x, "package.keywords"), recursive=1) \
1268 for x in self.profiles]
1269 for i in xrange(len(self.profiles)):
1271 for k, v in rawpkeywords[i].iteritems():
1272 cpdict.setdefault(dep_getkey(k), {})[k] = v
1273 self._pkeywords_list.append(cpdict)
1275 # get profile-masked use flags -- INCREMENTAL Child over parent
1276 self.usemask_list = [grabfile(os.path.join(x, "use.mask"),
1277 recursive=1) for x in self.profiles]
1278 self.usemask = set(stack_lists(
1279 self.usemask_list, incremental=True))
1280 use_defs_lists = [grabdict(os.path.join(x, "use.defaults")) for x in self.profiles]
1281 self.use_defs = stack_dictlist(use_defs_lists, incremental=True)
1284 self.pusemask_list = []
1285 rawpusemask = [grabdict_package(os.path.join(x, "package.use.mask"),
1286 recursive=1) for x in self.profiles]
1287 for i in xrange(len(self.profiles)):
1289 for k, v in rawpusemask[i].iteritems():
1290 cpdict.setdefault(dep_getkey(k), {})[k] = v
1291 self.pusemask_list.append(cpdict)
1294 self.pkgprofileuse = []
1295 rawprofileuse = [grabdict_package(os.path.join(x, "package.use"),
1296 juststrings=True, recursive=1) for x in self.profiles]
1297 for i in xrange(len(self.profiles)):
1299 for k, v in rawprofileuse[i].iteritems():
1300 cpdict.setdefault(dep_getkey(k), {})[k] = v
1301 self.pkgprofileuse.append(cpdict)
1304 self.useforce_list = [grabfile(os.path.join(x, "use.force"),
1305 recursive=1) for x in self.profiles]
1306 self.useforce = set(stack_lists(
1307 self.useforce_list, incremental=True))
1309 self.puseforce_list = []
1310 rawpuseforce = [grabdict_package(
1311 os.path.join(x, "package.use.force"), recursive=1) \
1312 for x in self.profiles]
1313 for i in xrange(len(self.profiles)):
1315 for k, v in rawpuseforce[i].iteritems():
1316 cpdict.setdefault(dep_getkey(k), {})[k] = v
1317 self.puseforce_list.append(cpdict)
1320 make_conf = getconfig(
1321 os.path.join(config_root, MAKE_CONF_FILE.lstrip(os.path.sep)),
1322 tolerant=tolerant, allow_sourcing=True)
1323 if make_conf is None:
1326 # Allow ROOT setting to come from make.conf if it's not overridden
1327 # by the constructor argument (from the calling environment).
1328 if target_root is None and "ROOT" in make_conf:
1329 target_root = make_conf["ROOT"]
1330 if not target_root.strip():
1332 if target_root is None:
1335 target_root = normalize_path(os.path.abspath(
1336 target_root)).rstrip(os.path.sep) + os.path.sep
1338 portage.util.ensure_dirs(target_root)
1339 check_var_directory("ROOT", target_root)
1341 # The expand_map is used for variable substitution
1342 # in getconfig() calls, and the getconfig() calls
1343 # update expand_map with the value of each variable
1344 # assignment that occurs. Variable substitution occurs
1345 # in the following order, which corresponds to the
1346 # order of appearance in self.lookuplist:
1353 # Notably absent is "env", since we want to avoid any
1354 # interaction with the calling environment that might
1355 # lead to unexpected results.
1358 env_d = getconfig(os.path.join(target_root, "etc", "profile.env"),
1360 # env_d will be None if profile.env doesn't exist.
1362 self.configdict["env.d"].update(env_d)
1363 expand_map.update(env_d)
1365 # backupenv is used for calculating incremental variables.
1366 self.backupenv = os.environ.copy()
1369 # Remove duplicate values so they don't override updated
1370 # profile.env values later (profile.env is reloaded in each
1371 # call to self.regenerate).
1372 for k, v in env_d.iteritems():
1374 if self.backupenv[k] == v:
1375 del self.backupenv[k]
1380 self.configdict["env"] = self.backupenv.copy()
1382 # make.globals should not be relative to config_root
1383 # because it only contains constants.
1384 for x in (portage.const.GLOBAL_CONFIG_PATH, "/etc"):
1385 self.mygcfg = getconfig(os.path.join(x, "make.globals"),
1390 if self.mygcfg is None:
1393 self.configlist.append(self.mygcfg)
1394 self.configdict["globals"]=self.configlist[-1]
1396 self.make_defaults_use = []
1399 mygcfg_dlists = [getconfig(os.path.join(x, "make.defaults"),
1400 expand=expand_map) for x in self.profiles]
1402 for cfg in mygcfg_dlists:
1404 self.make_defaults_use.append(cfg.get("USE", ""))
1406 self.make_defaults_use.append("")
1407 self.mygcfg = stack_dicts(mygcfg_dlists,
1408 incrementals=portage.const.INCREMENTALS, ignore_none=1)
1409 if self.mygcfg is None:
1411 self.configlist.append(self.mygcfg)
1412 self.configdict["defaults"]=self.configlist[-1]
1414 self.mygcfg = getconfig(
1415 os.path.join(config_root, MAKE_CONF_FILE.lstrip(os.path.sep)),
1416 tolerant=tolerant, allow_sourcing=True, expand=expand_map)
1417 if self.mygcfg is None:
1420 # Don't allow the user to override certain variables in make.conf
1421 profile_only_variables = self.configdict["defaults"].get(
1422 "PROFILE_ONLY_VARIABLES", "").split()
1423 for k in profile_only_variables:
1424 self.mygcfg.pop(k, None)
1426 self.configlist.append(self.mygcfg)
1427 self.configdict["conf"]=self.configlist[-1]
1429 self.configlist.append({})
1430 self.configdict["pkg"]=self.configlist[-1]
1433 self.configlist.append({})
1434 self.configdict["auto"]=self.configlist[-1]
1436 self.configlist.append(self.backupenv) # XXX Why though?
1437 self.configdict["backupenv"]=self.configlist[-1]
1439 # Don't allow the user to override certain variables in the env
1440 for k in profile_only_variables:
1441 self.backupenv.pop(k, None)
1443 self.configlist.append(self.configdict["env"])
1445 # make lookuplist for loading package.*
1446 self.lookuplist=self.configlist[:]
1447 self.lookuplist.reverse()
1449 # Blacklist vars that could interfere with portage internals.
1450 for blacklisted in self._env_blacklist:
1451 for cfg in self.lookuplist:
1452 cfg.pop(blacklisted, None)
1453 del blacklisted, cfg
1455 self["PORTAGE_CONFIGROOT"] = config_root
1456 self.backup_changes("PORTAGE_CONFIGROOT")
1457 self["ROOT"] = target_root
1458 self.backup_changes("ROOT")
1461 self.pkeywordsdict = {}
1462 self._plicensedict = {}
1463 self.punmaskdict = {}
1464 abs_user_config = os.path.join(config_root,
1465 USER_CONFIG_PATH.lstrip(os.path.sep))
1467 # locations for "categories" and "arch.list" files
1468 locations = [os.path.join(self["PORTDIR"], "profiles")]
1469 pmask_locations = [os.path.join(self["PORTDIR"], "profiles")]
1470 pmask_locations.extend(self.profiles)
1472 """ repoman controls PORTDIR_OVERLAY via the environment, so no
1473 special cases are needed here."""
1474 overlay_profiles = []
1475 for ov in self["PORTDIR_OVERLAY"].split():
1476 ov = normalize_path(ov)
1477 profiles_dir = os.path.join(ov, "profiles")
1478 if os.path.isdir(profiles_dir):
1479 overlay_profiles.append(profiles_dir)
1480 locations += overlay_profiles
1482 pmask_locations.extend(overlay_profiles)
1485 locations.append(abs_user_config)
1486 pmask_locations.append(abs_user_config)
1487 pusedict = grabdict_package(
1488 os.path.join(abs_user_config, "package.use"), recursive=1)
1489 for key in pusedict.keys():
1490 cp = dep_getkey(key)
1491 if cp not in self.pusedict:
1492 self.pusedict[cp] = {}
1493 self.pusedict[cp][key] = pusedict[key]
1496 pkgdict = grabdict_package(
1497 os.path.join(abs_user_config, "package.keywords"),
1499 for key in pkgdict.keys():
1500 # default to ~arch if no specific keyword is given
1501 if not pkgdict[key]:
1503 if self.configdict["defaults"] and \
1504 "ACCEPT_KEYWORDS" in self.configdict["defaults"]:
1505 groups = self.configdict["defaults"]["ACCEPT_KEYWORDS"].split()
1508 for keyword in groups:
1509 if not keyword[0] in "~-":
1510 mykeywordlist.append("~"+keyword)
1511 pkgdict[key] = mykeywordlist
1512 cp = dep_getkey(key)
1513 if cp not in self.pkeywordsdict:
1514 self.pkeywordsdict[cp] = {}
1515 self.pkeywordsdict[cp][key] = pkgdict[key]
1518 licdict = grabdict_package(os.path.join(
1519 abs_user_config, "package.license"), recursive=1)
1520 for k, v in licdict.iteritems():
1522 cp_dict = self._plicensedict.get(cp)
1525 self._plicensedict[cp] = cp_dict
1526 cp_dict[k] = self.expandLicenseTokens(v)
1528 #getting categories from an external file now
1529 categories = [grabfile(os.path.join(x, "categories")) for x in locations]
1530 self.categories = stack_lists(categories, incremental=1)
1533 archlist = [grabfile(os.path.join(x, "arch.list")) for x in locations]
1534 archlist = stack_lists(archlist, incremental=1)
1535 self.configdict["conf"]["PORTAGE_ARCHLIST"] = " ".join(archlist)
1537 # package.mask and package.unmask
1540 for x in pmask_locations:
1541 pkgmasklines.append(grabfile_package(
1542 os.path.join(x, "package.mask"), recursive=1))
1543 pkgunmasklines.append(grabfile_package(
1544 os.path.join(x, "package.unmask"), recursive=1))
1545 pkgmasklines = stack_lists(pkgmasklines, incremental=1)
1546 pkgunmasklines = stack_lists(pkgunmasklines, incremental=1)
1549 for x in pkgmasklines:
1550 mycatpkg=dep_getkey(x)
1551 if mycatpkg in self.pmaskdict:
1552 self.pmaskdict[mycatpkg].append(x)
1554 self.pmaskdict[mycatpkg]=[x]
1556 for x in pkgunmasklines:
1557 mycatpkg=dep_getkey(x)
1558 if mycatpkg in self.punmaskdict:
1559 self.punmaskdict[mycatpkg].append(x)
1561 self.punmaskdict[mycatpkg]=[x]
1563 pkgprovidedlines = [grabfile(os.path.join(x, "package.provided")) for x in self.profiles]
1564 pkgprovidedlines = stack_lists(pkgprovidedlines, incremental=1)
1565 has_invalid_data = False
1566 for x in range(len(pkgprovidedlines)-1, -1, -1):
1567 myline = pkgprovidedlines[x]
1568 if not isvalidatom("=" + myline):
1569 writemsg("Invalid package name in package.provided:" + \
1570 " %s\n" % myline, noiselevel=-1)
1571 has_invalid_data = True
1572 del pkgprovidedlines[x]
1574 cpvr = catpkgsplit(pkgprovidedlines[x])
1575 if not cpvr or cpvr[0] == "null":
1576 writemsg("Invalid package name in package.provided: "+pkgprovidedlines[x]+"\n",
1578 has_invalid_data = True
1579 del pkgprovidedlines[x]
1581 if cpvr[0] == "virtual":
1582 writemsg("Virtual package in package.provided: %s\n" % \
1583 myline, noiselevel=-1)
1584 has_invalid_data = True
1585 del pkgprovidedlines[x]
1587 if has_invalid_data:
1588 writemsg("See portage(5) for correct package.provided usage.\n",
1590 self.pprovideddict = {}
1591 for x in pkgprovidedlines:
1595 mycatpkg=dep_getkey(x)
1596 if mycatpkg in self.pprovideddict:
1597 self.pprovideddict[mycatpkg].append(x)
1599 self.pprovideddict[mycatpkg]=[x]
1601 # parse licensegroups
1602 self._license_groups = {}
1604 self._license_groups.update(
1605 grabdict(os.path.join(x, "license_groups")))
1607 # reasonable defaults; this is important as without USE_ORDER,
1608 # USE will always be "" (nothing set)!
1609 if "USE_ORDER" not in self:
1610 self.backupenv["USE_ORDER"] = "env:pkg:conf:defaults:pkginternal:env.d"
1612 self["PORTAGE_GID"] = str(portage_gid)
1613 self.backup_changes("PORTAGE_GID")
1615 if self.get("PORTAGE_DEPCACHEDIR", None):
1616 self.depcachedir = self["PORTAGE_DEPCACHEDIR"]
1617 self["PORTAGE_DEPCACHEDIR"] = self.depcachedir
1618 self.backup_changes("PORTAGE_DEPCACHEDIR")
1620 overlays = self.get("PORTDIR_OVERLAY","").split()
1624 ov = normalize_path(ov)
1625 if os.path.isdir(ov):
1628 writemsg("!!! Invalid PORTDIR_OVERLAY" + \
1629 " (not a dir): '%s'\n" % ov, noiselevel=-1)
1630 self["PORTDIR_OVERLAY"] = " ".join(new_ov)
1631 self.backup_changes("PORTDIR_OVERLAY")
1633 if "CBUILD" not in self and "CHOST" in self:
1634 self["CBUILD"] = self["CHOST"]
1635 self.backup_changes("CBUILD")
1637 self["PORTAGE_BIN_PATH"] = PORTAGE_BIN_PATH
1638 self.backup_changes("PORTAGE_BIN_PATH")
1639 self["PORTAGE_PYM_PATH"] = PORTAGE_PYM_PATH
1640 self.backup_changes("PORTAGE_PYM_PATH")
1642 # Expand license groups
1643 # This has to do be done for each config layer before regenerate()
1644 # in order for incremental negation to work properly.
1646 for c in self.configdict.itervalues():
1647 v = c.get("ACCEPT_LICENSE")
1650 v = " ".join(self.expandLicenseTokens(v.split()))
1651 c["ACCEPT_LICENSE"] = v
1654 for var in ("PORTAGE_INST_UID", "PORTAGE_INST_GID"):
1656 self[var] = str(int(self.get(var, "0")))
1658 writemsg(("!!! %s='%s' is not a valid integer. " + \
1659 "Falling back to '0'.\n") % (var, self[var]),
1662 self.backup_changes(var)
1664 # initialize self.features
1668 self._accept_license = \
1669 set(self.get("ACCEPT_LICENSE", "").split())
1670 # In order to enforce explicit acceptance for restrictive
1671 # licenses that require it, "*" will not be allowed in the
1672 # user config. Don't enforce this until license groups are
1673 # fully implemented in the tree.
1674 #self._accept_license.discard("*")
1675 if not self._accept_license:
1676 self._accept_license = set(["*"])
1678 # repoman will accept any license
1679 self._accept_license = set(["*"])
1681 if not portage.process.sandbox_capable and \
1682 ("sandbox" in self.features or "usersandbox" in self.features):
1683 if self.profile_path is not None and \
1684 os.path.realpath(self.profile_path) == \
1685 os.path.realpath(PROFILE_PATH):
1686 """ Don't show this warning when running repoman and the
1687 sandbox feature came from a profile that doesn't belong to
1689 writemsg(colorize("BAD", "!!! Problem with sandbox" + \
1690 " binary. Disabling...\n\n"), noiselevel=-1)
1691 if "sandbox" in self.features:
1692 self.features.remove("sandbox")
1693 if "usersandbox" in self.features:
1694 self.features.remove("usersandbox")
1696 self.features.sort()
1697 self["FEATURES"] = " ".join(self.features)
1698 self.backup_changes("FEATURES")
1705 def _init_dirs(self):
1707 Create a few directories that are critical to portage operation
1709 if not os.access(self["ROOT"], os.W_OK):
1712 # gid, mode, mask, preserve_perms
1714 "tmp" : ( -1, 01777, 0, True),
1715 "var/tmp" : ( -1, 01777, 0, True),
1716 PRIVATE_PATH : ( portage_gid, 02750, 02, False),
1717 CACHE_PATH.lstrip(os.path.sep) : (portage_gid, 0755, 02, False)
1720 for mypath, (gid, mode, modemask, preserve_perms) \
1721 in dir_mode_map.iteritems():
1722 mydir = os.path.join(self["ROOT"], mypath)
1723 if preserve_perms and os.path.isdir(mydir):
1724 # Only adjust permissions on some directories if
1725 # they don't exist yet. This gives freedom to the
1726 # user to adjust permissions to suit their taste.
1729 portage.util.ensure_dirs(mydir, gid=gid, mode=mode, mask=modemask)
1730 except portage.exception.PortageException, e:
1731 writemsg("!!! Directory initialization failed: '%s'\n" % mydir,
1733 writemsg("!!! %s\n" % str(e),
def expandLicenseTokens(self, tokens):
	""" Take a token from ACCEPT_LICENSE or package.license and expand it
	if it's a group token (indicated by @) or just return it if it's not a
	group. If a group is negated then negate all group elements.

	@param tokens: iterable of license tokens (plain, "@group", or negated)
	@return: flat list of expanded tokens, in input order
	"""
	expanded_tokens = []
	for x in tokens:
		expanded_tokens.extend(self._expandLicenseToken(x, None))
	return expanded_tokens
1745 def _expandLicenseToken(self, token, traversed_groups):
1748 if token.startswith("-"):
1750 license_name = token[1:]
1752 license_name = token
1753 if not license_name.startswith("@"):
1754 rValue.append(token)
1756 group_name = license_name[1:]
1757 if not traversed_groups:
1758 traversed_groups = set()
1759 license_group = self._license_groups.get(group_name)
1760 if group_name in traversed_groups:
1761 writemsg(("Circular license group reference" + \
1762 " detected in '%s'\n") % group_name, noiselevel=-1)
1763 rValue.append("@"+group_name)
1765 traversed_groups.add(group_name)
1766 for l in license_group:
1767 if l.startswith("-"):
1768 writemsg(("Skipping invalid element %s" + \
1769 " in license group '%s'\n") % (l, group_name),
1772 rValue.extend(self._expandLicenseToken(l, traversed_groups))
1774 writemsg("Undefined license group '%s'\n" % group_name,
1776 rValue.append("@"+group_name)
1778 rValue = ["-" + token for token in rValue]
1782 """Validate miscellaneous settings and display warnings if necessary.
1783 (This code was previously in the global scope of portage.py)"""
1785 groups = self["ACCEPT_KEYWORDS"].split()
1786 archlist = self.archlist()
1788 writemsg("--- 'profiles/arch.list' is empty or " + \
1789 "not available. Empty portage tree?\n", noiselevel=1)
1791 for group in groups:
1792 if group not in archlist and \
1793 not (group.startswith("-") and group[1:] in archlist) and \
1794 group not in ("*", "~*", "**"):
1795 writemsg("!!! INVALID ACCEPT_KEYWORDS: %s\n" % str(group),
1798 abs_profile_path = os.path.join(self["PORTAGE_CONFIGROOT"],
1799 PROFILE_PATH.lstrip(os.path.sep))
1800 if not self.profile_path or (not os.path.islink(abs_profile_path) and \
1801 not os.path.exists(os.path.join(abs_profile_path, "parent")) and \
1802 os.path.exists(os.path.join(self["PORTDIR"], "profiles"))):
1803 writemsg("\a\n\n!!! %s is not a symlink and will probably prevent most merges.\n" % abs_profile_path,
1805 writemsg("!!! It should point into a profile within %s/profiles/\n" % self["PORTDIR"])
1806 writemsg("!!! (You can safely ignore this message when syncing. It's harmless.)\n\n\n")
1808 abs_user_virtuals = os.path.join(self["PORTAGE_CONFIGROOT"],
1809 USER_VIRTUALS_FILE.lstrip(os.path.sep))
1810 if os.path.exists(abs_user_virtuals):
1811 writemsg("\n!!! /etc/portage/virtuals is deprecated in favor of\n")
1812 writemsg("!!! /etc/portage/profile/virtuals. Please move it to\n")
1813 writemsg("!!! this new location.\n\n")
1815 if "fakeroot" in self.features and \
1816 not portage.process.fakeroot_capable:
1817 writemsg("!!! FEATURES=fakeroot is enabled, but the " + \
1818 "fakeroot binary is not installed.\n", noiselevel=-1)
def loadVirtuals(self,root):
	"""Not currently used by portage."""
	# Deprecated entry point kept for API compatibility; warn and
	# delegate to getvirtuals().
	writemsg("DEPRECATED: portage.config.loadVirtuals\n")
	self.getvirtuals(root)
def load_best_module(self,property_string):
	"""Look up the best module name for property_string in self.modules
	(honoring self.module_priority) and import it.

	Legacy configurations may refer to cache modules without the
	"portage." package prefix; retry with the prefix on ImportError.

	@param property_string: key such as "portdbapi.auxdbmodule"
	@return: the imported module object
	"""
	best_mod = best_from_dict(property_string,self.modules,self.module_priority)
	mod = None
	try:
		mod = load_mod(best_mod)
	except ImportError:
		if best_mod.startswith("cache."):
			# Translate old-style "cache.*" names and retry.
			best_mod = "portage." + best_mod
			mod = load_mod(best_mod)
		else:
			raise
	return mod
def modifying(self):
	"""Guard helper: raise if this config has been locked against
	modification (mutating methods call this first)."""
	if self.locked:
		raise Exception("Configuration is locked.")
def backup_changes(self,key=None):
	"""Copy the current value of key from the "env" config layer into
	self.backupenv so the change survives reset().

	@param key: environment variable name to back up
	@raise KeyError: if key is None or not present in the env layer
	"""
	self.modifying()
	if key and key in self.configdict["env"]:
		self.backupenv[key] = copy.deepcopy(self.configdict["env"][key])
	else:
		raise KeyError("No such key defined in environment: %s" % key)
def reset(self,keeping_pkg=0,use_cache=1):
	"""
	Restore environment from self.backupenv, call self.regenerate()
	@param keeping_pkg: Should we keep the set_cpv() data or delete it.
	@type keeping_pkg: Boolean
	@param use_cache: Should self.regenerate use the cache or not
	@type use_cache: Boolean
	"""
	self.modifying()
	self.configdict["env"].clear()
	self.configdict["env"].update(self.backupenv)

	self.modifiedkeys = []
	if not keeping_pkg:
		# Drop all per-package state loaded by setcpv().
		self.mycpv = None
		self.puse = ""
		self.configdict["pkg"].clear()
		self.configdict["pkginternal"].clear()
		self.configdict["defaults"]["USE"] = \
			" ".join(self.make_defaults_use)
		self.usemask = set(stack_lists(
			self.usemask_list, incremental=True))
		self.useforce = set(stack_lists(
			self.useforce_list, incremental=True))
	self.regenerate(use_cache=use_cache)
def load_infodir(self,infodir):
	"""Deprecated no-op retained for API compatibility; emits a
	DeprecationWarning and returns 1 (the historical success value)."""
	warnings.warn("portage.config.load_infodir() is deprecated",
		DeprecationWarning, stacklevel=2)
	return 1
def setcpv(self, mycpv, use_cache=1, mydb=None):
	"""
	Load a particular CPV into the config, this lets us see the
	Default USE flags for a particular ebuild as well as the USE
	flags from package.use.

	@param mycpv: A cpv to load
	@type mycpv: string
	@param use_cache: Enables caching
	@type use_cache: Boolean
	@param mydb: a dbapi instance that supports aux_get with the IUSE key.
	@type mydb: dbapi or derivative.
	"""

	self.modifying()

	pkg = None
	if not isinstance(mycpv, basestring):
		# A Package instance was passed; unwrap its cpv and metadata.
		pkg = mycpv
		mycpv = pkg.cpv
		mydb = pkg.metadata

	if self.mycpv == mycpv:
		return
	has_changed = False
	self.mycpv = mycpv
	cat, pf = catsplit(mycpv)
	cp = dep_getkey(mycpv)
	cpv_slot = self.mycpv
	pkginternaluse = ""
	iuse = ""
	env_configdict = self.configdict["env"]
	pkg_configdict = self.configdict["pkg"]
	previous_iuse = pkg_configdict.get("IUSE")
	pkg_configdict["CATEGORY"] = cat
	pkg_configdict["PF"] = pf
	if mydb:
		if not hasattr(mydb, "aux_get"):
			# mydb is a plain metadata dict.
			pkg_configdict.update(mydb)
		else:
			aux_keys = [k for k in auxdbkeys \
				if not k.startswith("UNUSED_")]
			aux_keys.append("repository")
			for k, v in izip(aux_keys, mydb.aux_get(self.mycpv, aux_keys)):
				pkg_configdict[k] = v
			repository = pkg_configdict.pop("repository", None)
			if repository is not None:
				pkg_configdict["PORTAGE_REPO_NAME"] = repository
		# Don't let stale env values shadow package metadata (USE is
		# handled by the incremental stacking logic instead).
		for k in pkg_configdict:
			if k != "USE":
				env_configdict.pop(k, None)
		slot = pkg_configdict["SLOT"]
		iuse = pkg_configdict["IUSE"]
		if pkg is None:
			cpv_slot = "%s:%s" % (self.mycpv, slot)
		else:
			cpv_slot = pkg
		# IUSE defaults: "+flag" enables, "-flag" disables by default.
		pkginternaluse = []
		for x in iuse.split():
			if x.startswith("+"):
				pkginternaluse.append(x[1:])
			elif x.startswith("-"):
				pkginternaluse.append(x)
		pkginternaluse = " ".join(pkginternaluse)
	if pkginternaluse != self.configdict["pkginternal"].get("USE", ""):
		self.configdict["pkginternal"]["USE"] = pkginternaluse
		has_changed = True

	# Stack per-profile package.use entries on top of make.defaults USE.
	defaults = []
	pos = 0
	for i in xrange(len(self.profiles)):
		cpdict = self.pkgprofileuse[i].get(cp, None)
		if cpdict:
			keys = cpdict.keys()
			while keys:
				bestmatch = best_match_to_list(cpv_slot, keys)
				if bestmatch:
					keys.remove(bestmatch)
					defaults.insert(pos, cpdict[bestmatch])
				else:
					break
			del keys
		if self.make_defaults_use[i]:
			defaults.insert(pos, self.make_defaults_use[i])
		pos = len(defaults)
	defaults = " ".join(defaults)
	if defaults != self.configdict["defaults"].get("USE",""):
		self.configdict["defaults"]["USE"] = defaults
		has_changed = True

	useforce = self._getUseForce(cpv_slot)
	if useforce != self.useforce:
		self.useforce = useforce
		has_changed = True

	usemask = self._getUseMask(cpv_slot)
	if usemask != self.usemask:
		self.usemask = usemask
		has_changed = True

	oldpuse = self.puse
	self.puse = ""
	cpdict = self.pusedict.get(cp)
	if cpdict:
		# Stack all matching package.use atoms, best match first.
		keys = cpdict.keys()
		while keys:
			self.pusekey = best_match_to_list(cpv_slot, keys)
			if self.pusekey:
				keys.remove(self.pusekey)
				self.puse = (" ".join(cpdict[self.pusekey])) + " " + self.puse
			else:
				break
		del keys
	if oldpuse != self.puse:
		has_changed = True
	self.configdict["pkg"]["PKGUSE"] = self.puse[:] # For saving to PUSE file
	self.configdict["pkg"]["USE"] = self.puse[:] # this gets appended to USE

	if has_changed:
		self.reset(keeping_pkg=1,use_cache=use_cache)

	# If reset() has not been called, it's safe to return
	# early if IUSE has not changed.
	if not has_changed and previous_iuse == iuse:
		return

	# Filter out USE flags that aren't part of IUSE. This has to
	# be done for every setcpv() call since practically every
	# package has different IUSE.
	use = set(self["USE"].split())
	iuse_implicit = self._get_implicit_iuse()
	iuse_implicit.update(x.lstrip("+-") for x in iuse.split())

	# Escape anything except ".*" which is supposed
	# to pass through from _get_implicit_iuse()
	regex = sorted(re.escape(x) for x in iuse_implicit)
	regex = "^(%s)$" % "|".join(regex)
	regex = regex.replace("\\.\\*", ".*")
	self.configdict["pkg"]["PORTAGE_IUSE"] = regex

	ebuild_force_test = self.get("EBUILD_FORCE_TEST") == "1"
	if ebuild_force_test and \
		not hasattr(self, "_ebuild_force_test_msg_shown"):
		self._ebuild_force_test_msg_shown = True
		writemsg("Forcing test.\n", noiselevel=-1)
	if "test" in self.features and "test" in iuse_implicit:
		if "test" in self.usemask and not ebuild_force_test:
			# "test" is in IUSE and USE=test is masked, so execution
			# of src_test() probably is not reliable. Therefore,
			# temporarily disable FEATURES=test just for this package.
			self["FEATURES"] = " ".join(x for x in self.features \
				if x != "test")
			use.discard("test")
		else:
			use.add("test")
			if ebuild_force_test:
				self.usemask.discard("test")

	# Use the calculated USE flags to regenerate the USE_EXPAND flags so
	# that they are consistent. For optimal performance, use slice
	# comparison instead of startswith().
	use_expand = self.get("USE_EXPAND", "").split()
	for var in use_expand:
		prefix = var.lower() + "_"
		prefix_len = len(prefix)
		expand_flags = set([ x[prefix_len:] for x in use \
			if x[:prefix_len] == prefix ])
		var_split = self.get(var, "").split()
		# Preserve the order of var_split because it can matter for things
		# like LINGUAS.
		var_split = [ x for x in var_split if x in expand_flags ]
		var_split.extend(expand_flags.difference(var_split))
		has_wildcard = "*" in var_split
		if has_wildcard:
			var_split = [ x for x in var_split if x != "*" ]
		has_iuse = set()
		for x in iuse_implicit:
			if x[:prefix_len] == prefix:
				has_iuse.add(x[prefix_len:])
		if has_wildcard:
			# * means to enable everything in IUSE that's not masked
			if has_iuse:
				for x in iuse_implicit:
					if x[:prefix_len] == prefix and x not in self.usemask:
						suffix = x[prefix_len:]
						var_split.append(suffix)
						use.add(x)
			else:
				# If there is a wildcard and no matching flags in IUSE then
				# LINGUAS should be unset so that all .mo files are
				# installed.
				var_split = []
		# Make the flags unique and filter them according to IUSE.
		# Also, continue to preserve order for things like LINGUAS
		# and filter any duplicates that variable may contain.
		filtered_var_split = []
		remaining = has_iuse.intersection(var_split)
		for x in var_split:
			if x in remaining:
				remaining.remove(x)
				filtered_var_split.append(x)
		var_split = filtered_var_split

		if var_split:
			self[var] = " ".join(var_split)
		else:
			# Don't export empty USE_EXPAND vars unless the user config
			# exports them as empty. This is required for vars such as
			# LINGUAS, where unset and empty have different meanings.
			if has_wildcard:
				# ebuild.sh will see this and unset the variable so
				# that things like LINGUAS work properly
				self[var] = "*"
			else:
				if has_iuse:
					self[var] = ""
				else:
					# It's not in IUSE, so just allow the variable content
					# to pass through if it is defined somewhere. This
					# allows packages that support LINGUAS but don't
					# declare it in IUSE to use the variable outside of the
					# USE_EXPAND context.
					pass

	# Filtered for the ebuild environment. Store this in a separate
	# attribute since we still want to be able to see global USE
	# settings for things like emerge --info.
	self.configdict["pkg"]["PORTAGE_USE"] = " ".join(sorted(
		x for x in use if \
		x in iuse_implicit))
2121 def _get_implicit_iuse(self):
2123 Some flags are considered to
2124 be implicit members of IUSE:
2125 * Flags derived from ARCH
2126 * Flags derived from USE_EXPAND_HIDDEN variables
2127 * Masked flags, such as those from {,package}use.mask
2128 * Forced flags, such as those from {,package}use.force
2129 * build and bootstrap flags used by bootstrap.sh
2131 iuse_implicit = set()
2132 # Flags derived from ARCH.
2133 arch = self.configdict["defaults"].get("ARCH")
2135 iuse_implicit.add(arch)
2136 iuse_implicit.update(self.get("PORTAGE_ARCHLIST", "").split())
2138 # Flags derived from USE_EXPAND_HIDDEN variables
2139 # such as ELIBC, KERNEL, and USERLAND.
2140 use_expand_hidden = self.get("USE_EXPAND_HIDDEN", "").split()
2141 for x in use_expand_hidden:
2142 iuse_implicit.add(x.lower() + "_.*")
2144 # Flags that have been masked or forced.
2145 iuse_implicit.update(self.usemask)
2146 iuse_implicit.update(self.useforce)
2148 # build and bootstrap flags used by bootstrap.sh
2149 iuse_implicit.add("build")
2150 iuse_implicit.add("bootstrap")
2151 return iuse_implicit
def _getUseMask(self, pkg):
	"""Compute the effective use.mask set for pkg by stacking the
	profile use.mask lists with any matching package.use.mask atoms
	(child profiles override parents incrementally).

	@param pkg: a cpv:slot string or Package-like object with a cp attr
	@return: set of masked USE flags
	"""
	cp = getattr(pkg, "cp", None)
	if cp is None:
		cp = dep_getkey(pkg)
	usemask = []
	pos = 0
	for i in xrange(len(self.profiles)):
		cpdict = self.pusemask_list[i].get(cp, None)
		if cpdict:
			# Insert all matching atoms, best match first.
			keys = cpdict.keys()
			while keys:
				best_match = best_match_to_list(pkg, keys)
				if best_match:
					keys.remove(best_match)
					usemask.insert(pos, cpdict[best_match])
				else:
					break
			del keys
		if self.usemask_list[i]:
			usemask.insert(pos, self.usemask_list[i])
		pos = len(usemask)
	return set(stack_lists(usemask, incremental=True))
def _getUseForce(self, pkg):
	"""Compute the effective use.force set for pkg by stacking the
	profile use.force lists with any matching package.use.force atoms
	(mirror image of _getUseMask).

	@param pkg: a cpv:slot string or Package-like object with a cp attr
	@return: set of forced USE flags
	"""
	cp = getattr(pkg, "cp", None)
	if cp is None:
		cp = dep_getkey(pkg)
	useforce = []
	pos = 0
	for i in xrange(len(self.profiles)):
		cpdict = self.puseforce_list[i].get(cp, None)
		if cpdict:
			# Insert all matching atoms, best match first.
			keys = cpdict.keys()
			while keys:
				best_match = best_match_to_list(pkg, keys)
				if best_match:
					keys.remove(best_match)
					useforce.insert(pos, cpdict[best_match])
				else:
					break
			del keys
		if self.useforce_list[i]:
			useforce.insert(pos, self.useforce_list[i])
		pos = len(useforce)
	return set(stack_lists(useforce, incremental=True))
def _getMaskAtom(self, cpv, metadata):
	"""
	Take a package and return a matching package.mask atom, or None if no
	such atom exists or it has been cancelled by package.unmask. PROVIDE
	is not checked, so atoms will not be found for old-style virtuals.

	@param cpv: The package name
	@type cpv: String
	@param metadata: A dictionary of raw package metadata
	@type metadata: dict
	@rtype: String
	@return: An matching atom string or None if one is not found.
	"""
	cp = cpv_getkey(cpv)
	mask_atoms = self.pmaskdict.get(cp)
	if mask_atoms:
		pkg_list = ["%s:%s" % (cpv, metadata["SLOT"])]
		unmask_atoms = self.punmaskdict.get(cp)
		for x in mask_atoms:
			if not match_from_list(x, pkg_list):
				continue
			if unmask_atoms:
				# Any matching unmask atom cancels the mask.
				for y in unmask_atoms:
					if match_from_list(y, pkg_list):
						return None
			return x
	return None
# Matches cpv (as "cpv:SLOT") against the profile's package mask atoms
# (prevmaskdict); a leading "*" prefix on an atom is stripped before matching.
# NOTE(review): docstring delimiters and the return lines are elided in this
# dump; code kept byte-identical.
2228 def _getProfileMaskAtom(self, cpv, metadata):
2230 Take a package and return a matching profile atom, or None if no
2231 such atom exists. Note that a profile atom may or may not have a "*"
2232 prefix. PROVIDE is not checked, so atoms will not be found for
2235 @param cpv: The package name
2237 @param metadata: A dictionary of raw package metadata
2238 @type metadata: dict
2240 @return: An matching profile atom string or None if one is not found.
2243 cp = cpv_getkey(cpv)
2244 profile_atoms = self.prevmaskdict.get(cp)
2246 pkg_list = ["%s:%s" % (cpv, metadata["SLOT"])]
2247 for x in profile_atoms:
2248 if match_from_list(x.lstrip("*"), pkg_list):
# Stack the ebuild's KEYWORDS (with "-*" dropped from the base list) with
# per-profile package.keywords entries for its cp, selected via
# best_match_to_list against "cpv:SLOT".
# NOTE(review): loop guards and the keys/pos bookkeeping lines are elided in
# this dump; code kept byte-identical.
2253 def _getKeywords(self, cpv, metadata):
2254 cp = dep_getkey(cpv)
2255 pkg = "%s:%s" % (cpv, metadata["SLOT"])
2256 keywords = [[x for x in metadata["KEYWORDS"].split() if x != "-*"]]
2258 for i in xrange(len(self.profiles)):
2259 cpdict = self._pkeywords_list[i].get(cp, None)
2263 best_match = best_match_to_list(pkg, keys)
2265 keys.remove(best_match)
2266 keywords.insert(pos, cpdict[best_match])
2270 return stack_lists(keywords, incremental=True)
2272 def _getMissingKeywords(self, cpv, metadata):
2274 Take a package and return a list of any KEYWORDS that the user may
2275 may need to accept for the given package. If the KEYWORDS are empty
2276 and the the ** keyword has not been accepted, the returned list will
2277 contain ** alone (in order to distiguish from the case of "none
2280 @param cpv: The package name (for package.keywords support)
2282 @param metadata: A dictionary of raw package metadata
2283 @type metadata: dict
2285 @return: A list of KEYWORDS that have not been accepted.
2288 # Hack: Need to check the env directly here as otherwise stacking
2289 # doesn't work properly as negative values are lost in the config
2290 # object (bug #139600)
2291 egroups = self.configdict["backupenv"].get(
2292 "ACCEPT_KEYWORDS", "").split()
2293 mygroups = self._getKeywords(cpv, metadata)
2294 # Repoman may modify this attribute as necessary.
2295 pgroups = self["ACCEPT_KEYWORDS"].split()
2297 cp = dep_getkey(cpv)
2298 pkgdict = self.pkeywordsdict.get(cp)
2301 cpv_slot_list = ["%s:%s" % (cpv, metadata["SLOT"])]
2302 for atom, pkgkeywords in pkgdict.iteritems():
2303 if match_from_list(atom, cpv_slot_list):
2305 pgroups.extend(pkgkeywords)
2306 if matches or egroups:
2307 pgroups.extend(egroups)
2310 if x.startswith("-"):
2314 inc_pgroups.discard(x[1:])
2317 pgroups = inc_pgroups
2322 if gp == "*" or (gp == "-*" and len(mygroups) == 1):
2323 writemsg(("--- WARNING: Package '%s' uses" + \
2324 " '%s' keyword.\n") % (cpv, gp), noiselevel=-1)
2331 elif gp.startswith("~"):
2333 elif not gp.startswith("-"):
2336 ((hastesting and "~*" in pgroups) or \
2337 (hasstable and "*" in pgroups) or "**" in pgroups):
2343 # If KEYWORDS is empty then we still have to return something
2344 # in order to distiguish from the case of "none missing".
2345 mygroups.append("**")
# Builds the set of acceptable licenses (global _accept_license plus
# per-package package.license entries matched against "cpv:SLOT"), reduces
# the LICENSE string with the package's USE flags, and delegates to
# _getMaskedLicenses for the unaccepted remainder.
# NOTE(review): docstring delimiters, the "*" early return and several
# guards are elided in this dump; code kept byte-identical.
2349 def _getMissingLicenses(self, cpv, metadata):
2351 Take a LICENSE string and return a list any licenses that the user may
2352 may need to accept for the given package. The returned list will not
2353 contain any licenses that have already been accepted. This method
2354 can throw an InvalidDependString exception.
2356 @param cpv: The package name (for package.license support)
2358 @param metadata: A dictionary of raw package metadata
2359 @type metadata: dict
2361 @return: A list of licenses that have not been accepted.
2363 if "*" in self._accept_license:
2365 acceptable_licenses = self._accept_license
2366 cpdict = self._plicensedict.get(dep_getkey(cpv), None)
2368 acceptable_licenses = self._accept_license.copy()
2369 cpv_slot = "%s:%s" % (cpv, metadata["SLOT"])
2370 for atom in match_to_list(cpv_slot, cpdict.keys()):
2371 acceptable_licenses.update(cpdict[atom])
2373 license_str = metadata["LICENSE"]
2374 if "?" in license_str:
2375 use = metadata["USE"].split()
2379 license_struct = portage.dep.use_reduce(
2380 portage.dep.paren_reduce(license_str), uselist=use)
2381 license_struct = portage.dep.dep_opconvert(license_struct)
2382 return self._getMaskedLicenses(license_struct, acceptable_licenses)
# Recursively walk a dep-opconverted LICENSE structure. For a "||" group:
# if any alternative is fully acceptable, nothing is missing; otherwise all
# masked alternatives are reported (comment at 2400-2401). For a plain
# group: every unacceptable element is reported.
# NOTE(review): the early returns and append lines are elided in this dump;
# code kept byte-identical.
2384 def _getMaskedLicenses(self, license_struct, acceptable_licenses):
2385 if not license_struct:
2387 if license_struct[0] == "||":
2389 for element in license_struct[1:]:
2390 if isinstance(element, list):
2392 ret.append(self._getMaskedLicenses(
2393 element, acceptable_licenses))
2397 if element in acceptable_licenses:
2400 # Return all masked licenses, since we don't know which combination
2401 # (if any) the user will decide to unmask.
2405 for element in license_struct:
2406 if isinstance(element, list):
2408 ret.extend(self._getMaskedLicenses(element,
2409 acceptable_licenses))
2411 if element not in acceptable_licenses:
# Lazily compiles an anchored regex from ACCEPT_CHOSTS (falling back to
# CHOST, then to match-anything ".*" when neither is set) and matches it
# against the package's CHOST metadata. On an invalid pattern, warns and
# compiles "^$" so nothing matches.
# NOTE(review): the try/except re.error lines around both re.compile calls
# are elided in this dump; code kept byte-identical.
2415 def _accept_chost(self, pkg):
2417 @return True if pkg CHOST is accepted, False otherwise.
2419 if self._accept_chost_re is None:
2420 accept_chost = self.get("ACCEPT_CHOSTS", "").split()
2421 if not accept_chost:
2422 chost = self.get("CHOST")
2424 accept_chost.append(chost)
2425 if not accept_chost:
2426 self._accept_chost_re = re.compile(".*")
2427 elif len(accept_chost) == 1:
2429 self._accept_chost_re = re.compile(r'^%s$' % accept_chost[0])
2431 writemsg("!!! Invalid ACCEPT_CHOSTS value: '%s': %s\n" % \
2432 (accept_chost[0], e), noiselevel=-1)
2433 self._accept_chost_re = re.compile("^$")
2436 self._accept_chost_re = re.compile(
2437 r'^(%s)$' % "|".join(accept_chost))
2439 writemsg("!!! Invalid ACCEPT_CHOSTS value: '%s': %s\n" % \
2440 (" ".join(accept_chost), e), noiselevel=-1)
2441 self._accept_chost_re = re.compile("^$")
2443 return self._accept_chost_re.match(
2444 pkg.metadata.get("CHOST", "")) is not None
# Registers the old-style virtuals PROVIDEd by mycpv: resolves PROVIDE via
# mydbapi (dict-like or aux_get), reduces it against the package's USE, and
# records mycpv's cp under each provided virtual in _depgraphVirtuals,
# then recompiles self.virtuals.
# NOTE(review): several lines are elided in this dump (early return after
# the empty-virtuals check, the loop header over virts, list creation for
# new providers); code kept byte-identical.
2446 def setinst(self,mycpv,mydbapi):
2447 """This updates the preferences for old-style virtuals,
2448 affecting the behavior of dep_expand() and dep_check()
2449 calls. It can change dbapi.match() behavior since that
2450 calls dep_expand(). However, dbapi instances have
2451 internal match caches that are not invalidated when
2452 preferences are updated here. This can potentially
2453 lead to some inconsistency (relevant to bug #1343)."""
2455 if len(self.virtuals) == 0:
2457 # Grab the virtuals this package provides and add them into the tree virtuals.
2458 if not hasattr(mydbapi, "aux_get"):
2459 provides = mydbapi["PROVIDE"]
2461 provides = mydbapi.aux_get(mycpv, ["PROVIDE"])[0]
2464 if isinstance(mydbapi, portdbapi):
2465 self.setcpv(mycpv, mydb=mydbapi)
2466 myuse = self["PORTAGE_USE"]
2467 elif not hasattr(mydbapi, "aux_get"):
2468 myuse = mydbapi["USE"]
2470 myuse = mydbapi.aux_get(mycpv, ["USE"])[0]
2471 virts = flatten(portage.dep.use_reduce(portage.dep.paren_reduce(provides), uselist=myuse.split()))
2474 cp = dep_getkey(mycpv)
2476 virt = dep_getkey(virt)
2477 providers = self.virtuals.get(virt)
2478 if providers and cp in providers:
2480 providers = self._depgraphVirtuals.get(virt)
2481 if providers is None:
2483 self._depgraphVirtuals[virt] = providers
2484 if cp not in providers:
2485 providers.append(cp)
2489 self.virtuals = self.__getvirtuals_compile()
# NOTE(review): the enclosing `def reload(self):` line (original 2491) is
# elided in this dump. Body re-reads ${ROOT}/etc/profile.env into the
# "env.d" configdict after clearing it.
2492 """Reload things like /etc/profile.env that can change during runtime."""
2493 env_d_filename = os.path.join(self["ROOT"], "etc", "profile.env")
2494 self.configdict["env.d"].clear()
2495 env_d = getconfig(env_d_filename, expand=False)
2497 # env_d will be None if profile.env doesn't exist.
2498 self.configdict["env.d"].update(env_d)
# Recomputes all incremental variables (USE last, since it depends on
# USE_EXPAND), handles "-flag"/"-*" subtraction, expands USE_EXPAND
# variables into prefixed flags (var_lower_flag), applies useforce/usemask,
# and stores the final sorted USE in the last configlist entry.
# NOTE(review): this dump elides many original lines (else branches,
# myflags initialization, the '+'-prefix guards' surrounding conditionals,
# recursion-guard returns); code kept byte-identical — too order-sensitive
# to rewrite safely from this view.
2500 def regenerate(self,useonly=0,use_cache=1):
2503 This involves regenerating valid USE flags, re-expanding USE_EXPAND flags
2504 re-stacking USE flags (-flag and -*), as well as any other INCREMENTAL
2505 variables. This also updates the env.d configdict; useful in case an ebuild
2506 changes the environment.
2508 If FEATURES has already stacked, it is not stacked twice.
2510 @param useonly: Only regenerate USE flags (not any other incrementals)
2511 @type useonly: Boolean
2512 @param use_cache: Enable Caching (only for autouse)
2513 @type use_cache: Boolean
2518 if self.already_in_regenerate:
2519 # XXX: THIS REALLY NEEDS TO GET FIXED. autouse() loops.
2520 writemsg("!!! Looping in regenerate.\n",1)
2523 self.already_in_regenerate = 1
2526 myincrementals=["USE"]
2528 myincrementals = self.incrementals
2529 myincrementals = set(myincrementals)
2530 # If self.features exists, it has already been stacked and may have
2531 # been mutated, so don't stack it again or else any mutations will be
2533 if "FEATURES" in myincrementals and hasattr(self, "features"):
2534 myincrementals.remove("FEATURES")
2536 if "USE" in myincrementals:
2537 # Process USE last because it depends on USE_EXPAND which is also
2539 myincrementals.remove("USE")
2541 for mykey in myincrementals:
2543 mydbs=self.configlist[:-1]
2547 if mykey not in curdb:
2549 #variables are already expanded
2550 mysplit = curdb[mykey].split()
2554 # "-*" is a special "minus" var that means "unset all settings".
2555 # so USE="-* gnome" will have *just* gnome enabled.
2560 # Not legal. People assume too much. Complain.
2561 writemsg(colorize("BAD",
2562 "USE flags should not start with a '+': %s" % x) \
2563 + "\n", noiselevel=-1)
2569 if (x[1:] in myflags):
2571 del myflags[myflags.index(x[1:])]
2574 # We got here, so add it now.
2575 if x not in myflags:
2579 #store setting in last element of configlist, the original environment:
2580 if myflags or mykey in self:
2581 self.configlist[-1][mykey] = " ".join(myflags)
2584 # Do the USE calculation last because it depends on USE_EXPAND.
2585 if "auto" in self["USE_ORDER"].split(":"):
2586 self.configdict["auto"]["USE"] = autouse(
2587 vartree(root=self["ROOT"], categories=self.categories,
2589 use_cache=use_cache, mysettings=self)
2591 self.configdict["auto"]["USE"] = ""
2593 use_expand = self.get("USE_EXPAND", "").split()
2596 for x in self["USE_ORDER"].split(":"):
2597 if x in self.configdict:
2598 self.uvlist.append(self.configdict[x])
2599 self.uvlist.reverse()
2601 # For optimal performance, use slice
2602 # comparison instead of startswith().
2604 for curdb in self.uvlist:
2605 cur_use_expand = [x for x in use_expand if x in curdb]
2606 mysplit = curdb.get("USE", "").split()
2607 if not mysplit and not cur_use_expand:
2615 writemsg(colorize("BAD", "USE flags should not start " + \
2616 "with a '+': %s\n" % x), noiselevel=-1)
2622 myflags.discard(x[1:])
2627 for var in cur_use_expand:
2628 var_lower = var.lower()
2629 is_not_incremental = var not in myincrementals
2630 if is_not_incremental:
2631 prefix = var_lower + "_"
2632 prefix_len = len(prefix)
2633 for x in list(myflags):
2634 if x[:prefix_len] == prefix:
2636 for x in curdb[var].split():
2638 if is_not_incremental:
2639 writemsg(colorize("BAD", "Invalid '+' " + \
2640 "operator in non-incremental variable " + \
2641 "'%s': '%s'\n" % (var, x)), noiselevel=-1)
2644 writemsg(colorize("BAD", "Invalid '+' " + \
2645 "operator in incremental variable " + \
2646 "'%s': '%s'\n" % (var, x)), noiselevel=-1)
2649 if is_not_incremental:
2650 writemsg(colorize("BAD", "Invalid '-' " + \
2651 "operator in non-incremental variable " + \
2652 "'%s': '%s'\n" % (var, x)), noiselevel=-1)
2654 myflags.discard(var_lower + "_" + x[1:])
2656 myflags.add(var_lower + "_" + x)
2658 if not hasattr(self, "features"):
2659 self.features = sorted(set(
2660 self.configlist[-1].get("FEATURES","").split()))
2661 self["FEATURES"] = " ".join(self.features)
2663 myflags.update(self.useforce)
2664 arch = self.configdict["defaults"].get("ARCH")
2668 myflags.difference_update(self.usemask)
2669 self.configlist[-1]["USE"]= " ".join(sorted(myflags))
2671 self.already_in_regenerate = 0
# Builds (presumably cached — the cache-check and return lines are elided
# in this dump) a mapping from the package-name half of each virtual key
# ("virtual/foo" -> "foo") to its provider list.
2673 def get_virts_p(self, myroot=None):
2676 virts = self.getvirtuals()
2679 vkeysplit = x.split("/")
2680 if vkeysplit[1] not in self.virts_p:
2681 self.virts_p[vkeysplit[1]] = virts[x]
# Loads and caches virtuals: grabs each profile's "virtuals" file, drops
# entries with invalid atoms (warning for each), stacks them incrementally
# into dirVirtuals (preference reversed so it decreases left-to-right),
# merges installed-tree provides (unless repoman/local_config disables it),
# and compiles the final mapping via __getvirtuals_compile().
# NOTE(review): the cached-return guard, list initializations and the inner
# value-validation loop header are elided in this dump; code kept
# byte-identical.
2684 def getvirtuals(self, myroot=None):
2685 """myroot is now ignored because, due to caching, it has always been
2686 broken for all but the first call."""
2687 myroot = self["ROOT"]
2689 return self.virtuals
2692 for x in self.profiles:
2693 virtuals_file = os.path.join(x, "virtuals")
2694 virtuals_dict = grabdict(virtuals_file)
2695 for k in virtuals_dict.keys():
2696 if not isvalidatom(k) or dep_getkey(k) != k:
2697 writemsg("--- Invalid virtuals atom in %s: %s\n" % \
2698 (virtuals_file, k), noiselevel=-1)
2699 del virtuals_dict[k]
2701 myvalues = virtuals_dict[k]
2704 if x.startswith("-"):
2705 # allow incrementals
2707 if not isvalidatom(myatom):
2708 writemsg("--- Invalid atom in %s: %s\n" % \
2709 (virtuals_file, x), noiselevel=-1)
2712 del virtuals_dict[k]
2714 virtuals_list.append(virtuals_dict)
2716 self.dirVirtuals = stack_dictlist(virtuals_list, incremental=True)
2719 for virt in self.dirVirtuals:
2720 # Preference for virtuals decreases from left to right.
2721 self.dirVirtuals[virt].reverse()
2723 # Repoman does not use user or tree virtuals.
2724 if self.local_config and not self.treeVirtuals:
2725 temp_vartree = vartree(myroot, None,
2726 categories=self.categories, settings=self)
2727 # Reduce the provides into a list by CP.
2728 self.treeVirtuals = map_dictlist_vals(getCPFromCPV,temp_vartree.get_all_provides())
2730 self.virtuals = self.__getvirtuals_compile()
2731 return self.virtuals
# Combines virtuals sources by preference: providers that are both
# installed and in the profile (ptVirtuals) come first, then treeVirtuals,
# dirVirtuals, and _depgraphVirtuals, stacked into one dict of lists.
# NOTE(review): the ptVirtuals initialization and return statement are
# elided in this dump; code kept byte-identical.
2733 def __getvirtuals_compile(self):
2734 """Stack installed and profile virtuals. Preference for virtuals
2735 decreases from left to right.
2736 Order of preference:
2737 1. installed and in profile
2742 # Virtuals by profile+tree preferences.
2745 for virt, installed_list in self.treeVirtuals.iteritems():
2746 profile_list = self.dirVirtuals.get(virt, None)
2747 if not profile_list:
2749 for cp in installed_list:
2750 if cp in profile_list:
2751 ptVirtuals.setdefault(virt, [])
2752 ptVirtuals[virt].append(cp)
2754 virtuals = stack_dictlist([ptVirtuals, self.treeVirtuals,
2755 self.dirVirtuals, self._depgraphVirtuals])
# Iterates every dict in lookuplist; presumably removes mykey from each —
# the deletion statement itself is elided in this dump. TODO confirm.
2758 def __delitem__(self,mykey):
2760 for x in self.lookuplist:
# First-match lookup across lookuplist; falls through to "" instead of
# raising KeyError (deliberate backward compatibility, per the comment).
# The hit-and-return lines inside the loop are elided in this dump.
2765 def __getitem__(self,mykey):
2766 for d in self.lookuplist:
2769 return '' # for backward compat, don't raise KeyError
# dict.get-style lookup across lookuplist with default x; the hit-return
# and final `return x` lines are elided in this dump.
2771 def get(self, k, x=None):
2772 for d in self.lookuplist:
# dict.pop-style removal; the arity check surrounding the TypeError and the
# per-dict pop logic are elided in this dump. Iterates lookuplist in
# reverse so later (higher-priority) dicts are popped first — TODO confirm
# the intent against the full source.
2777 def pop(self, key, *args):
2780 "pop expected at most 2 arguments, got " + \
2781 repr(1 + len(args)))
2783 for d in reversed(self.lookuplist):
# Deprecated shim: delegates to __contains__ after warning. The
# DeprecationWarning argument line of warnings.warn is elided in this dump.
2791 def has_key(self,mykey):
2792 warnings.warn("portage.config.has_key() is deprecated, "
2793 "use the in operator instead",
2795 return mykey in self
# Membership test across lookuplist; the per-dict check and the returns
# are elided in this dump.
2797 def __contains__(self, mykey):
2798 """Called to implement membership test operators (in and not in)."""
2799 for d in self.lookuplist:
# dict.setdefault-style accessor; the entire body between the def line and
# the lookup loop is elided in this dump.
2804 def setdefault(self, k, x=None):
2817 for d in self.lookuplist:
# NOTE(review): line 2829 is the body of a separate items() method whose
# `def items(self):` line (original 2828) is elided in this dump, as is
# the iteritems() body itself.
2824 def iteritems(self):
2829 return list(self.iteritems())
# Writes go to the "env" configdict only, and are recorded in modifiedkeys;
# per the docstring they do not survive reset(). Rejects non-str values.
2831 def __setitem__(self,mykey,myvalue):
2832 "set a value; will be thrown away at reset() time"
2833 if not isinstance(myvalue, str):
2834 raise ValueError("Invalid type being used as a value: '%s': '%s'" % (str(mykey),str(myvalue)))
2836 self.modifiedkeys += [mykey]
2837 self.configdict["env"][mykey]=myvalue
# NOTE(review): the `def environ(self):` line (original ~2839) is elided in
# this dump, along with the mydict-building loop header, several guards,
# and the final return. Builds the ebuild environment dict: drops keys in
# _environ_filter, warns on non-string values, and once ${T}/environment
# exists restricts non-whitelisted keys so unset-by-ebuild variables don't
# leak back in; patches HOME, USE (from PORTAGE_USE) and backs up ROOTPATH
# against sandbox's /etc/profile sourcing.
2840 "return our locally-maintained environment"
2842 environ_filter = self._environ_filter
2844 filter_calling_env = False
2845 temp_dir = self.get("T")
2846 if temp_dir is not None and \
2847 os.path.exists(os.path.join(temp_dir, "environment")):
2848 filter_calling_env = True
2850 environ_whitelist = self._environ_whitelist
2851 env_d = self.configdict["env.d"]
2853 if x in environ_filter:
2856 if not isinstance(myvalue, basestring):
2857 writemsg("!!! Non-string value in config: %s=%s\n" % \
2858 (x, myvalue), noiselevel=-1)
2860 if filter_calling_env and \
2861 x not in environ_whitelist and \
2862 not self._environ_whitelist_re.match(x):
2863 # Do not allow anything to leak into the ebuild
2864 # environment unless it is explicitly whitelisted.
2865 # This ensures that variables unset by the ebuild
2869 if "HOME" not in mydict and "BUILD_PREFIX" in mydict:
2870 writemsg("*** HOME not set. Setting to "+mydict["BUILD_PREFIX"]+"\n")
2871 mydict["HOME"]=mydict["BUILD_PREFIX"][:]
2873 if filter_calling_env:
2874 phase = self.get("EBUILD_PHASE")
2878 whitelist.append("RPMDIR")
2884 # Filtered by IUSE and implicit IUSE.
2885 mydict["USE"] = self.get("PORTAGE_USE", "")
2887 # sandbox's bashrc sources /etc/profile which unsets ROOTPATH,
2888 # so we have to back it up and restore it.
2889 rootpath = mydict.get("ROOTPATH")
2891 mydict["PORTAGE_ROOTPATH"] = rootpath
def thirdpartymirrors(self):
	"""Return the stacked thirdpartymirrors mapping (mirror name ->
	list of mirror URLs), built lazily on first call and cached in
	self._thirdpartymirrors thereafter.

	Overlay profiles take precedence: overlays listed later in
	PORTDIR_OVERLAY are consulted before earlier ones, and all of
	them before $PORTDIR/profiles.
	"""
	if getattr(self, "_thirdpartymirrors", None) is None:
		# Highest-priority roots first: overlays in reverse listing
		# order, then the main tree's profiles directory.
		profile_dirs = [os.path.join(overlay, "profiles")
			for overlay in reversed(self["PORTDIR_OVERLAY"].split())]
		profile_dirs.append(os.path.join(self["PORTDIR"], "profiles"))
		mirror_dicts = [grabdict(os.path.join(profile_dir, "thirdpartymirrors"))
			for profile_dir in profile_dirs]
		self._thirdpartymirrors = stack_dictlist(
			mirror_dicts, incremental=True)
	return self._thirdpartymirrors
# NOTE(review): the enclosing def line (presumably `def archlist(self):`,
# original 2904) is elided in this dump. Returns each PORTAGE_ARCHLIST
# entry plus its "~"-prefixed (testing) form, flattened into one list.
2905 return flatten([[myarch, "~" + myarch] \
2906 for myarch in self["PORTAGE_ARCHLIST"].split()])
# Lazily probes whether SELinux support is active: requires the "selinux"
# USE flag, an importable selinux module, and is_selinux_enabled() == 1.
# Caches the 0/1 result; unloads the selinux module when disabled.
# NOTE(review): else/except lines and the surrounding guards are elided in
# this dump; code kept byte-identical.
2908 def selinux_enabled(self):
2909 if getattr(self, "_selinux_enabled", None) is None:
2910 self._selinux_enabled = 0
2911 if "selinux" in self["USE"].split():
2912 if "selinux" in globals():
2913 if selinux.is_selinux_enabled() == 1:
2914 self._selinux_enabled = 1
2916 self._selinux_enabled = 0
2918 writemsg("!!! SELinux module not found. Please verify that it was installed.\n",
2920 self._selinux_enabled = 0
2921 if self._selinux_enabled == 0:
2923 del sys.modules["selinux"]
2926 return self._selinux_enabled
2928 def _shell_quote(s):
2930 Quote a string in double-quotes and use backslashes to
2931 escape any backslashes, double-quotes, dollar signs, or
2932 backquotes in the string.
2934 for letter in "\\\"$`":
2936 s = s.replace(letter, "\\" + letter)
2939 # In some cases, openpty can be slow when it fails. Therefore,
2940 # stop trying to use it after the first failure.
# Module-level latch; read and set by _create_pty_or_pipe() below (see its
# `global _disable_openpty`). Once True, only os.pipe() is used.
2941 _disable_openpty = False
# Allocates a pty via pty.openpty(), falling back to os.pipe() on failure
# (and latching _disable_openpty so later calls skip the attempt). On a
# pty: disables OPOST post-processing to keep output byte-exact, and copies
# the terminal size from copy_term_size when that fd is a tty.
# NOTE(review): this dump elides the try/else structure around openpty, the
# got_pty assignments, and the termios import/guard lines; code kept
# byte-identical.
2943 def _create_pty_or_pipe(copy_term_size=None):
2945 Try to create a pty and if then fails then create a normal
2948 @param copy_term_size: If a tty file descriptor is given
2949 then the term size will be copied to the pty.
2950 @type copy_term_size: int
2952 @returns: A tuple of (is_pty, master_fd, slave_fd) where
2953 is_pty is True if a pty was successfully allocated, and
2954 False if a normal pipe was allocated.
2959 global _disable_openpty
2960 if _disable_openpty:
2961 master_fd, slave_fd = os.pipe()
2963 from pty import openpty
2965 master_fd, slave_fd = openpty()
2967 except EnvironmentError, e:
2968 _disable_openpty = True
2969 writemsg("openpty failed: '%s'\n" % str(e),
2972 master_fd, slave_fd = os.pipe()
2975 # Disable post-processing of output since otherwise weird
2976 # things like \n -> \r\n transformations may occur.
2978 mode = termios.tcgetattr(slave_fd)
2979 mode[1] &= ~termios.OPOST
2980 termios.tcsetattr(slave_fd, termios.TCSANOW, mode)
2983 copy_term_size is not None and \
2984 os.isatty(copy_term_size):
2985 from portage.output import get_term_size, set_term_size
2986 rows, columns = get_term_size()
2987 set_term_size(rows, columns, slave_fd)
2989 return (got_pty, master_fd, slave_fd)
2991 # XXX This would be to replace getstatusoutput completely.
2992 # XXX Issue: cannot block execution. Deadlock condition.
# Portage's central process spawner: chooses among spawn_bash,
# spawn_fakeroot and spawn_sandbox, optionally drops privileges to
# portage:portage, switches SELinux exec context for sesandbox, and when a
# logfile is requested routes child output through a pty/pipe that is teed
# to both the original stdout and the log file, then waits for the child.
# NOTE(review): this dump elides a large number of original lines (fd_pipes
# dict literal braces, try/finally structure around the spawn and the
# select loop, EOF/EAGAIN handling, the returnpid early-return, and the
# final os.EX_OK return); the control flow below is NOT complete — code
# kept byte-identical, too order- and cleanup-sensitive to reconstruct
# safely from this view.
2993 def spawn(mystring, mysettings, debug=0, free=0, droppriv=0, sesandbox=0, fakeroot=0, **keywords):
2995 Spawn a subprocess with extra portage-specific options.
2998 Sandbox: Sandbox means the spawned process will be limited in its ability t
2999 read and write files (normally this means it is restricted to ${IMAGE}/)
3000 SElinux Sandbox: Enables sandboxing on SElinux
3001 Reduced Privileges: Drops privilages such that the process runs as portage:portage
3004 Notes: os.system cannot be used because it messes with signal handling. Instead we
3005 use the portage.process spawn* family of functions.
3007 This function waits for the process to terminate.
3009 @param mystring: Command to run
3010 @type mystring: String
3011 @param mysettings: Either a Dict of Key,Value pairs or an instance of portage.config
3012 @type mysettings: Dictionary or config instance
3013 @param debug: Ignored
3014 @type debug: Boolean
3015 @param free: Enable sandboxing for this process
3017 @param droppriv: Drop to portage:portage when running this command
3018 @type droppriv: Boolean
3019 @param sesandbox: Enable SELinux Sandboxing (toggles a context switch)
3020 @type sesandbox: Boolean
3021 @param fakeroot: Run this command with faked root privileges
3022 @type fakeroot: Boolean
3023 @param keywords: Extra options encoded as a dict, to be passed to spawn
3024 @type keywords: Dictionary
3027 1. The return code of the spawned process.
3030 if isinstance(mysettings, dict):
3032 keywords["opt_name"]="[ %s ]" % "portage"
3034 check_config_instance(mysettings)
3035 env=mysettings.environ()
3036 if mysettings.mycpv is not None:
3037 keywords["opt_name"] = "[%s]" % mysettings.mycpv
3039 fd_pipes = keywords.get("fd_pipes")
3040 if fd_pipes is None:
3042 0:sys.stdin.fileno(),
3043 1:sys.stdout.fileno(),
3044 2:sys.stderr.fileno(),
3046 # In some cases the above print statements don't flush stdout, so
3047 # it needs to be flushed before allowing a child process to use it
3048 # so that output always shows in the correct order.
3049 stdout_filenos = (sys.stdout.fileno(), sys.stderr.fileno())
3050 for fd in fd_pipes.itervalues():
3051 if fd in stdout_filenos:
3056 # The default policy for the sesandbox domain only allows entry (via exec)
3057 # from shells and from binaries that belong to portage (the number of entry
3058 # points is minimized). The "tee" binary is not among the allowed entry
3059 # points, so it is spawned outside of the sesandbox domain and reads from a
3060 # pseudo-terminal that connects two domains.
3061 logfile = keywords.get("logfile")
3065 fd_pipes_orig = None
3068 del keywords["logfile"]
3069 if 1 not in fd_pipes or 2 not in fd_pipes:
3070 raise ValueError(fd_pipes)
3072 fd_pipes.setdefault(0, sys.stdin.fileno())
3073 fd_pipes_orig = fd_pipes.copy()
3075 got_pty, master_fd, slave_fd = \
3076 _create_pty_or_pipe(copy_term_size=fd_pipes_orig[1])
3078 # We must set non-blocking mode before we close the slave_fd
3079 # since otherwise the fcntl call can fail on FreeBSD (the child
3080 # process might have already exited and closed slave_fd so we
3081 # have to keep it open in order to avoid FreeBSD potentially
3082 # generating an EAGAIN exception).
3084 fcntl.fcntl(master_fd, fcntl.F_SETFL,
3085 fcntl.fcntl(master_fd, fcntl.F_GETFL) | os.O_NONBLOCK)
3087 fd_pipes[0] = fd_pipes_orig[0]
3088 fd_pipes[1] = slave_fd
3089 fd_pipes[2] = slave_fd
3090 keywords["fd_pipes"] = fd_pipes
3092 features = mysettings.features
3093 # TODO: Enable fakeroot to be used together with droppriv. The
3094 # fake ownership/permissions will have to be converted to real
3095 # permissions in the merge phase.
3096 fakeroot = fakeroot and uid != 0 and portage.process.fakeroot_capable
3097 if droppriv and not uid and portage_gid and portage_uid:
3098 keywords.update({"uid":portage_uid,"gid":portage_gid,
3099 "groups":userpriv_groups,"umask":002})
3101 free=((droppriv and "usersandbox" not in features) or \
3102 (not droppriv and "sandbox" not in features and \
3103 "usersandbox" not in features and not fakeroot))
3105 if free or "SANDBOX_ACTIVE" in os.environ:
3106 keywords["opt_name"] += " bash"
3107 spawn_func = portage.process.spawn_bash
3109 keywords["opt_name"] += " fakeroot"
3110 keywords["fakeroot_state"] = os.path.join(mysettings["T"], "fakeroot.state")
3111 spawn_func = portage.process.spawn_fakeroot
3113 keywords["opt_name"] += " sandbox"
3114 spawn_func = portage.process.spawn_sandbox
3117 con = selinux.getcontext()
3118 con = con.replace(mysettings["PORTAGE_T"],
3119 mysettings["PORTAGE_SANDBOX_T"])
3120 selinux.setexec(con)
3122 returnpid = keywords.get("returnpid")
3123 keywords["returnpid"] = True
3125 mypids.extend(spawn_func(mystring, env=env, **keywords))
3130 selinux.setexec(None)
3136 log_file = open(logfile, 'a')
3137 stdout_file = os.fdopen(os.dup(fd_pipes_orig[1]), 'w')
3138 master_file = os.fdopen(master_fd, 'r')
3139 iwtd = [master_file]
3142 import array, select
3146 events = select.select(iwtd, owtd, ewtd)
3148 # Use non-blocking mode to prevent read
3149 # calls from blocking indefinitely.
3150 buf = array.array('B')
3152 buf.fromfile(f, buffsize)
3158 if f is master_file:
3159 buf.tofile(stdout_file)
3161 buf.tofile(log_file)
3167 retval = os.waitpid(pid, 0)[1]
3168 portage.process.spawned_pids.remove(pid)
3169 if retval != os.EX_OK:
3171 return (retval & 0xff) << 8
# Keyword overrides applied by _spawn_fetch() when dropping privileges for
# userfetch (see kwargs.update(_userpriv_spawn_kwargs) below).
# NOTE(review): the closing lines of this tuple literal (including a
# probable umask entry) are elided in this dump.
3175 _userpriv_spawn_kwargs = (
3176 ("uid", portage_uid),
3177 ("gid", portage_gid),
3178 ("groups", userpriv_groups),
# Wrapper around portage.process.spawn for fetch commands: merges stderr
# into stdout by default, applies userfetch privilege-dropping when running
# as root with the feature enabled, and brackets the spawn with an SELinux
# exec-context switch to PORTAGE_FETCH_T (re-exec'ing through bash since
# most binaries are not allowed entrypoints for that domain).
# NOTE(review): docstring delimiters, the try/finally around the spawn and
# the final return are elided in this dump; code kept byte-identical.
3182 def _spawn_fetch(settings, args, **kwargs):
3184 Spawn a process with appropriate settings for fetching, including
3185 userfetch and selinux support.
3188 global _userpriv_spawn_kwargs
3190 # Redirect all output to stdout since some fetchers like
3191 # wget pollute stderr (if portage detects a problem then it
3192 # can send it's own message to stderr).
3193 if "fd_pipes" not in kwargs:
3195 kwargs["fd_pipes"] = {
3196 0 : sys.stdin.fileno(),
3197 1 : sys.stdout.fileno(),
3198 2 : sys.stdout.fileno(),
3201 if "userfetch" in settings.features and \
3202 os.getuid() == 0 and portage_gid and portage_uid:
3203 kwargs.update(_userpriv_spawn_kwargs)
3207 if settings.selinux_enabled():
3208 con = selinux.getcontext()
3209 con = con.replace(settings["PORTAGE_T"], settings["PORTAGE_FETCH_T"])
3210 selinux.setexec(con)
3211 # bash is an allowed entrypoint, while most binaries are not
3212 if args[0] != BASH_BINARY:
3213 args = [BASH_BINARY, "-c", "exec \"$@\"", args[0]] + args
3215 rval = portage.process.spawn(args,
3216 env=dict(settings.iteritems()), **kwargs)
3219 if settings.selinux_enabled():
3220 selinux.setexec(None)
# Per-path memoization for _userpriv_test_write_file(), and the shell
# snippet it runs: touch the file, remember touch's exit status, remove
# the file, and exit with touch's status.
3224 _userpriv_test_write_file_cache = {}
3225 _userpriv_test_write_cmd_script = "touch %(file_path)s 2>/dev/null ; rval=$? ; " + \
3226 "rm -f %(file_path)s ; exit $rval"
# Runs the touch-and-remove script above through _spawn_fetch (which drops
# privileges when userfetch applies) and caches the boolean result per
# file path.
# NOTE(review): docstring delimiters and the cached-value return line are
# elided in this dump; code kept byte-identical.
3228 def _userpriv_test_write_file(settings, file_path):
3230 Drop privileges and try to open a file for writing. The file may or
3231 may not exist, and the parent directory is assumed to exist. The file
3232 is removed before returning.
3234 @param settings: A config instance which is passed to _spawn_fetch()
3235 @param file_path: A file path to open and write.
3236 @return: True if write succeeds, False otherwise.
3239 global _userpriv_test_write_file_cache, _userpriv_test_write_cmd_script
3240 rval = _userpriv_test_write_file_cache.get(file_path)
3241 if rval is not None:
3244 args = [BASH_BINARY, "-c", _userpriv_test_write_cmd_script % \
3245 {"file_path" : _shell_quote(file_path)}]
3247 returncode = _spawn_fetch(settings, args)
3249 rval = returncode == os.EX_OK
3250 _userpriv_test_write_file_cache[file_path] = rval
# After a checksum failure, moves the bad distfile out of the way: reuses
# an existing "._checksum_failure_." temp file whose size and md5 match
# (removing the duplicate), otherwise renames the file onto a fresh
# mkstemp-created name in the same directory.
# NOTE(review): docstring delimiters, the checksum initialization, the
# duplicate-removal os.unlink and several guards are elided in this dump;
# code kept byte-identical.
3253 def _checksum_failure_temp_file(distdir, basename):
3255 First try to find a duplicate temp file with the same checksum and return
3256 that filename if available. Otherwise, use mkstemp to create a new unique
3257 filename._checksum_failure_.$RANDOM, rename the given file, and return the
3258 new filename. In any case, filename will be renamed or removed before this
3259 function returns a temp filename.
3262 filename = os.path.join(distdir, basename)
3263 size = os.stat(filename).st_size
3265 tempfile_re = re.compile(re.escape(basename) + r'\._checksum_failure_\..*')
3266 for temp_filename in os.listdir(distdir):
3267 if not tempfile_re.match(temp_filename):
3269 temp_filename = os.path.join(distdir, temp_filename)
3271 if size != os.stat(temp_filename).st_size:
3276 temp_checksum = portage.checksum.perform_md5(temp_filename)
3277 except portage.exception.FileNotFound:
3278 # Apparently the temp file disappeared. Let it go.
3280 if checksum is None:
3281 checksum = portage.checksum.perform_md5(filename)
3282 if checksum == temp_checksum:
3284 return temp_filename
3286 from tempfile import mkstemp
3287 fd, temp_filename = mkstemp("", basename + "._checksum_failure_.", distdir)
3289 os.rename(filename, temp_filename)
3290 return temp_filename
# Verifies `filename` against `digests` via portage.checksum.verify_all and
# reports the failing digest name, got and expected values when
# show_errors is enabled.
# NOTE(review): the success/failure return lines and the show_errors guard
# are elided in this dump; code kept byte-identical.
3292 def _check_digests(filename, digests, show_errors=1):
3294 Check digests and display a message if an error occurs.
3295 @return True if all digests match, False otherwise.
3297 verified_ok, reason = portage.checksum.verify_all(filename, digests)
3300 writemsg("!!! Previously fetched" + \
3301 " file: '%s'\n" % filename, noiselevel=-1)
3302 writemsg("!!! Reason: %s\n" % reason[0],
3304 writemsg(("!!! Got: %s\n" + \
3305 "!!! Expected: %s\n") % \
3306 (reason[1], reason[2]), noiselevel=-1)
# Stat-and-verify helper for a distfile: missing file -> (False, None);
# size-only digests are satisfied by a matching st_size; zero-byte files
# are always invalid; otherwise full digest verification (with an eout
# progress message on success).
# NOTE(review): the OSError handling around os.stat, the tuple returns and
# several branch lines are elided in this dump; code kept byte-identical.
3310 def _check_distfile(filename, digests, eout, show_errors=1):
3312 @return a tuple of (match, stat_obj) where match is True if filename
3313 matches all given digests (if any) and stat_obj is a stat result, or
3314 None if the file does not exist.
3318 size = digests.get("size")
3319 if size is not None and len(digests) == 1:
3323 st = os.stat(filename)
3325 return (False, None)
3326 if size is not None and size != st.st_size:
3329 if size is not None:
3330 eout.ebegin("%s %s ;-)" % (os.path.basename(filename), "size"))
3332 elif st.st_size == 0:
3333 # Zero-byte distfiles are always invalid.
3336 if _check_digests(filename, digests, show_errors=show_errors):
3337 eout.ebegin("%s %s ;-)" % (os.path.basename(filename),
3338 " ".join(sorted(digests))))
# Parses PORTAGE_FETCH_RESUME_MIN_SIZE values like "350K": group 1 is the
# integer count, group 2 an optional binary-size suffix looked up in
# _size_suffix_map (see the fetch() resume-size handling below).
3344 _fetch_resume_size_re = re.compile('(^[\d]+)([KMGTPEZY]?$)')
# NOTE(review): the body of this dict (suffix -> power-of-1024 exponent,
# per its use as `2 ** _size_suffix_map[...]` below) is elided in this dump.
3346 _size_suffix_map = {
# fetch(): download distfiles into $DISTDIR, trying local/custom/official
# mirrors and per-protocol FETCHCOMMAND/RESUMECOMMAND, verifying against the
# package Manifest digests when available.
# NOTE(review): the leading integers on each line are listing line numbers;
# gaps in them mark lines elided from this excerpt (several try:/else:/return
# statements are missing), so this fragment is not runnable as-is and the
# comments below hedge where control flow is incomplete.
3358 def fetch(myuris, mysettings, listonly=0, fetchonly=0, locks_in_subdir=".locks",use_locks=1, try_mirrors=1):
3359 "fetch files. Will use digest file if available."
3364 features = mysettings.features
3365 restrict = mysettings.get("PORTAGE_RESTRICT","").split()
3367 from portage.data import secpass
3368 userfetch = secpass >= 2 and "userfetch" in features
3369 userpriv = secpass >= 2 and "userpriv" in features
# RESTRICT=mirror/nomirror handling: when FEATURES=mirror is active (and not
# overridden by lmirror) a mirror-restricted package is skipped entirely.
3371 # 'nomirror' is bad/negative logic. You Restrict mirroring, not no-mirroring.
3372 if "mirror" in restrict or \
3373 "nomirror" in restrict:
3374 if ("mirror" in features) and ("lmirror" not in features):
3375 # lmirror should allow you to bypass mirror restrictions.
3376 # XXX: This is not a good thing, and is temporary at best.
3377 print ">>> \"mirror\" mode desired and \"mirror\" restriction found; skipping fetch."
# Cap on how many checksum failures are tolerated before giving up on a file;
# user-overridable via PORTAGE_FETCH_CHECKSUM_TRY_MIRRORS (must be an int >= 1,
# otherwise the default of 5 is used with a warning).
3380 # Generally, downloading the same file repeatedly from
3381 # every single available mirror is a waste of bandwidth
3382 # and time, so there needs to be a cap.
3383 checksum_failure_max_tries = 5
3384 v = checksum_failure_max_tries
3386 v = int(mysettings.get("PORTAGE_FETCH_CHECKSUM_TRY_MIRRORS",
3387 checksum_failure_max_tries))
3388 except (ValueError, OverflowError):
3389 writemsg("!!! Variable PORTAGE_FETCH_CHECKSUM_TRY_MIRRORS" + \
3390 " contains non-integer value: '%s'\n" % \
3391 mysettings["PORTAGE_FETCH_CHECKSUM_TRY_MIRRORS"], noiselevel=-1)
3392 writemsg("!!! Using PORTAGE_FETCH_CHECKSUM_TRY_MIRRORS " + \
3393 "default value: %s\n" % checksum_failure_max_tries,
3395 v = checksum_failure_max_tries
3397 writemsg("!!! Variable PORTAGE_FETCH_CHECKSUM_TRY_MIRRORS" + \
3398 " contains value less than 1: '%s'\n" % v, noiselevel=-1)
3399 writemsg("!!! Using PORTAGE_FETCH_CHECKSUM_TRY_MIRRORS " + \
3400 "default value: %s\n" % checksum_failure_max_tries,
3402 v = checksum_failure_max_tries
3403 checksum_failure_max_tries = v
# Parse PORTAGE_FETCH_RESUME_MIN_SIZE ("<int><suffix>", e.g. "350K") into a
# byte count; partial files smaller than this are deleted instead of resumed.
# Unrecognized formats fall back to the 350K default with a warning.
3406 fetch_resume_size_default = "350K"
3407 fetch_resume_size = mysettings.get("PORTAGE_FETCH_RESUME_MIN_SIZE")
3408 if fetch_resume_size is not None:
3409 fetch_resume_size = "".join(fetch_resume_size.split())
3410 if not fetch_resume_size:
3411 # If it's undefined or empty, silently use the default.
3412 fetch_resume_size = fetch_resume_size_default
3413 match = _fetch_resume_size_re.match(fetch_resume_size)
3414 if match is None or \
3415 (match.group(2).upper() not in _size_suffix_map):
3416 writemsg("!!! Variable PORTAGE_FETCH_RESUME_MIN_SIZE" + \
3417 " contains an unrecognized format: '%s'\n" % \
3418 mysettings["PORTAGE_FETCH_RESUME_MIN_SIZE"], noiselevel=-1)
3419 writemsg("!!! Using PORTAGE_FETCH_RESUME_MIN_SIZE " + \
3420 "default value: %s\n" % fetch_resume_size_default,
3422 fetch_resume_size = None
3423 if fetch_resume_size is None:
3424 fetch_resume_size = fetch_resume_size_default
3425 match = _fetch_resume_size_re.match(fetch_resume_size)
3426 fetch_resume_size = int(match.group(1)) * \
3427 2 ** _size_suffix_map[match.group(2).upper()]
3429 # Behave like the package has RESTRICT="primaryuri" after a
3430 # couple of checksum failures, to increase the probablility
3431 # of success before checksum_failure_max_tries is reached.
3432 checksum_failure_primaryuri = 2
3433 thirdpartymirrors = mysettings.thirdpartymirrors()
3435 # In the background parallel-fetch process, it's safe to skip checksum
3436 # verification of pre-existing files in $DISTDIR that have the correct
3437 # file size. The parent process will verify their checksums prior to
3440 parallel_fetchonly = "PORTAGE_PARALLEL_FETCHONLY" in mysettings
3441 if parallel_fetchonly:
3444 check_config_instance(mysettings)
# Mirror list assembly: user-defined mirrors from CUSTOM_MIRRORS_FILE,
# plus GENTOO_MIRRORS unless mirroring is restricted for this package.
3446 custommirrors = grabdict(os.path.join(mysettings["PORTAGE_CONFIGROOT"],
3447 CUSTOM_MIRRORS_FILE.lstrip(os.path.sep)), recursive=1)
3451 if listonly or ("distlocks" not in features):
3455 if "skiprocheck" in features:
3458 if not os.access(mysettings["DISTDIR"],os.W_OK) and fetch_to_ro:
3460 writemsg(colorize("BAD",
3461 "!!! For fetching to a read-only filesystem, " + \
3462 "locking should be turned off.\n"), noiselevel=-1)
3463 writemsg("!!! This can be done by adding -distlocks to " + \
3464 "FEATURES in /etc/make.conf\n", noiselevel=-1)
3467 # local mirrors are always added
3468 if "local" in custommirrors:
3469 mymirrors += custommirrors["local"]
3471 if "nomirror" in restrict or \
3472 "mirror" in restrict:
3473 # We don't add any mirrors.
3477 mymirrors += [x.rstrip("/") for x in mysettings["GENTOO_MIRRORS"].split() if x]
# Load the expected DIST digests from the package Manifest, unless fetch()
# was called without a package context ("O" unset) or manifests are skipped.
3479 skip_manifest = mysettings.get("EBUILD_SKIP_MANIFEST") == "1"
3480 pkgdir = mysettings.get("O")
3481 if not (pkgdir is None or skip_manifest):
3482 mydigests = Manifest(
3483 pkgdir, mysettings["DISTDIR"]).getTypeDigests("DIST")
3485 # no digests because fetch was not called for a specific package
3489 ro_distdirs = [x for x in \
3490 shlex.split(mysettings.get("PORTAGE_RO_DISTDIRS", "")) \
3491 if os.path.isdir(x)]
# Mirrors given as absolute paths are local filesystem mirrors (fsmirrors).
3494 for x in range(len(mymirrors)-1,-1,-1):
3495 if mymirrors[x] and mymirrors[x][0]=='/':
3496 fsmirrors += [mymirrors[x]]
3499 restrict_fetch = "fetch" in restrict
3500 custom_local_mirrors = custommirrors.get("local", [])
3502 # With fetch restriction, a normal uri may only be fetched from
3503 # custom local mirrors (if available). A mirror:// uri may also
3504 # be fetched from specific mirrors (effectively overriding fetch
3505 # restriction, but only for specific mirrors).
3506 locations = custom_local_mirrors
3508 locations = mymirrors
# Normalize myuris (either {file: uri_set} dict or a flat uri list) into
# (filename, uri) pairs; a flat list derives the filename from the uri.
3510 file_uri_tuples = []
3511 if isinstance(myuris, dict):
3512 for myfile, uri_set in myuris.iteritems():
3513 for myuri in uri_set:
3514 file_uri_tuples.append((myfile, myuri))
3516 for myuri in myuris:
3517 file_uri_tuples.append((os.path.basename(myuri), myuri))
# Build filedict: per-file ordered candidate URI list.  Mirror locations go
# first; mirror:// uris expand to custom then official third-party mirrors
# (shuffled for load distribution); RESTRICT=primaryuri puts upstream first.
3520 primaryuri_indexes={}
3521 primaryuri_dict = {}
3522 thirdpartymirror_uris = {}
3523 for myfile, myuri in file_uri_tuples:
3524 if myfile not in filedict:
3526 for y in range(0,len(locations)):
3527 filedict[myfile].append(locations[y]+"/distfiles/"+myfile)
3528 if myuri[:9]=="mirror://":
3529 eidx = myuri.find("/", 9)
3531 mirrorname = myuri[9:eidx]
3532 path = myuri[eidx+1:]
3534 # Try user-defined mirrors first
3535 if mirrorname in custommirrors:
3536 for cmirr in custommirrors[mirrorname]:
3537 filedict[myfile].append(
3538 cmirr.rstrip("/") + "/" + path)
3540 # now try the official mirrors
3541 if mirrorname in thirdpartymirrors:
3542 shuffle(thirdpartymirrors[mirrorname])
3544 uris = [locmirr.rstrip("/") + "/" + path \
3545 for locmirr in thirdpartymirrors[mirrorname]]
3546 filedict[myfile].extend(uris)
3547 thirdpartymirror_uris.setdefault(myfile, []).extend(uris)
3549 if not filedict[myfile]:
3550 writemsg("No known mirror by the name: %s\n" % (mirrorname))
3552 writemsg("Invalid mirror definition in SRC_URI:\n", noiselevel=-1)
3553 writemsg(" %s\n" % (myuri), noiselevel=-1)
3556 # Only fetch from specific mirrors is allowed.
3558 if "primaryuri" in restrict:
3559 # Use the source site first.
3560 if myfile in primaryuri_indexes:
3561 primaryuri_indexes[myfile] += 1
3563 primaryuri_indexes[myfile] = 0
3564 filedict[myfile].insert(primaryuri_indexes[myfile], myuri)
3566 filedict[myfile].append(myuri)
3567 primaryuris = primaryuri_dict.get(myfile)
3568 if primaryuris is None:
3570 primaryuri_dict[myfile] = primaryuris
3571 primaryuris.append(myuri)
3573 # Prefer thirdpartymirrors over normal mirrors in cases when
3574 # the file does not yet exist on the normal mirrors.
3575 for myfile, uris in thirdpartymirror_uris.iteritems():
3576 primaryuri_dict.setdefault(myfile, []).extend(uris)
3583 for var_name in ("FETCHCOMMAND", "RESUMECOMMAND"):
3584 if not mysettings.get(var_name, None):
# DISTDIR preparation: ensure $DISTDIR (and .locks when FEATURES=distlocks)
# exists with portage-group permissions, fixing them recursively if needed.
3587 if can_fetch and not fetch_to_ro:
3588 global _userpriv_test_write_file_cache
3592 dir_gid = portage_gid
3593 if "FAKED_MODE" in mysettings:
3594 # When inside fakeroot, directories with portage's gid appear
3595 # to have root's gid. Therefore, use root's gid instead of
3596 # portage's gid to avoid spurrious permissions adjustments
3597 # when inside fakeroot.
3600 if "distlocks" in features:
3601 distdir_dirs.append(".locks")
3604 for x in distdir_dirs:
3605 mydir = os.path.join(mysettings["DISTDIR"], x)
3606 write_test_file = os.path.join(
3607 mydir, ".__portage_test_write__")
3614 if st is not None and stat.S_ISDIR(st.st_mode):
3615 if not (userfetch or userpriv):
3617 if _userpriv_test_write_file(mysettings, write_test_file):
3620 _userpriv_test_write_file_cache.pop(write_test_file, None)
3621 if portage.util.ensure_dirs(mydir, gid=dir_gid, mode=dirmode, mask=modemask):
3623 # The directory has just been created
3624 # and therefore it must be empty.
3626 writemsg("Adjusting permissions recursively: '%s'\n" % mydir,
3629 raise # bail out on the first error that occurs during recursion
3630 if not apply_recursive_permissions(mydir,
3631 gid=dir_gid, dirmode=dirmode, dirmask=modemask,
3632 filemode=filemode, filemask=modemask, onerror=onerror):
3633 raise portage.exception.OperationNotPermitted(
3634 "Failed to apply recursive permissions for the portage group.")
3635 except portage.exception.PortageException, e:
3636 if not os.path.isdir(mysettings["DISTDIR"]):
3637 writemsg("!!! %s\n" % str(e), noiselevel=-1)
3638 writemsg("!!! Directory Not Found: DISTDIR='%s'\n" % mysettings["DISTDIR"], noiselevel=-1)
3639 writemsg("!!! Fetching will fail!\n", noiselevel=-1)
3642 not fetch_to_ro and \
3643 not os.access(mysettings["DISTDIR"], os.W_OK):
3644 writemsg("!!! No write access to '%s'\n" % mysettings["DISTDIR"],
3648 if can_fetch and use_locks and locks_in_subdir:
3649 distlocks_subdir = os.path.join(mysettings["DISTDIR"], locks_in_subdir)
3650 if not os.access(distlocks_subdir, os.W_OK):
3651 writemsg("!!! No write access to write to %s. Aborting.\n" % distlocks_subdir,
3654 del distlocks_subdir
3656 distdir_writable = can_fetch and not fetch_to_ro
3657 failed_files = set()
3658 restrict_fetch_msg = False
# Main per-file loop: validate/resume any pre-existing file, then walk the
# candidate URI list until the file verifies or attempts are exhausted.
# (The elided "fetched" state: 0 not fetched, 1 partial, 2 complete.)
3660 for myfile in filedict:
3664 1 partially downloaded
3665 2 completely downloaded
3669 orig_digests = mydigests.get(myfile, {})
3670 size = orig_digests.get("size")
3672 # Zero-byte distfiles are always invalid, so discard their digests.
3673 del mydigests[myfile]
3674 orig_digests.clear()
3676 pruned_digests = orig_digests
3677 if parallel_fetchonly:
3679 if size is not None:
3680 pruned_digests["size"] = size
3682 myfile_path = os.path.join(mysettings["DISTDIR"], myfile)
3686 writemsg_stdout("\n", noiselevel=-1)
3688 # check if there is enough space in DISTDIR to completely store myfile
3689 # overestimate the filesize so we aren't bitten by FS overhead
3690 if size is not None and hasattr(os, "statvfs"):
3691 vfs_stat = os.statvfs(mysettings["DISTDIR"])
3693 mysize = os.stat(myfile_path).st_size
3695 if e.errno != errno.ENOENT:
3699 if (size - mysize + vfs_stat.f_bsize) >= \
3700 (vfs_stat.f_bsize * vfs_stat.f_bavail):
3701 writemsg("!!! Insufficient space to store %s in %s\n" % (myfile, mysettings["DISTDIR"]), noiselevel=-1)
# Acquire a per-distfile lock (non-blocking in parallel-fetch mode, with a
# user-facing waiting message otherwise); skip the file if already locked.
3704 if distdir_writable and use_locks:
3706 if not parallel_fetchonly and "parallel-fetch" in features:
3707 waiting_msg = ("Fetching '%s' " + \
3708 "in the background. " + \
3709 "To view fetch progress, run `tail -f " + \
3710 "/var/log/emerge-fetch.log` in another " + \
3711 "terminal.") % myfile
3712 msg_prefix = colorize("GOOD", " * ")
3713 from textwrap import wrap
3714 waiting_msg = "\n".join(msg_prefix + line \
3715 for line in wrap(waiting_msg, 65))
3718 lock_file = os.path.join(mysettings["DISTDIR"],
3719 locks_in_subdir, myfile)
3721 lock_file = myfile_path
3725 lock_kwargs["flags"] = os.O_NONBLOCK
3727 lock_kwargs["waiting_msg"] = waiting_msg
3730 file_lock = portage.locks.lockfile(myfile_path,
3731 wantnewlockfile=1, **lock_kwargs)
3732 except portage.exception.TryAgain:
3733 writemsg((">>> File '%s' is already locked by " + \
3734 "another fetcher. Continuing...\n") % myfile,
# Pre-existing file handling: verify size/digests, delete zero-byte or
# too-small partials, rename mismatched files aside and refetch.
3740 eout = portage.output.EOutput()
3741 eout.quiet = mysettings.get("PORTAGE_QUIET") == "1"
3742 match, mystat = _check_distfile(
3743 myfile_path, pruned_digests, eout)
3745 if distdir_writable:
3747 apply_secpass_permissions(myfile_path,
3748 gid=portage_gid, mode=0664, mask=02,
3750 except portage.exception.PortageException, e:
3751 if not os.access(myfile_path, os.R_OK):
3752 writemsg("!!! Failed to adjust permissions:" + \
3753 " %s\n" % str(e), noiselevel=-1)
3757 if distdir_writable and mystat is None:
3758 # Remove broken symlinks if necessary.
3760 os.unlink(myfile_path)
3764 if mystat is not None:
3765 if mystat.st_size == 0:
3766 if distdir_writable:
3768 os.unlink(myfile_path)
3771 elif distdir_writable:
3772 if mystat.st_size < fetch_resume_size and \
3773 mystat.st_size < size:
3774 writemsg((">>> Deleting distfile with size " + \
3775 "%d (smaller than " "PORTAGE_FETCH_RESU" + \
3776 "ME_MIN_SIZE)\n") % mystat.st_size)
3778 os.unlink(myfile_path)
3780 if e.errno != errno.ENOENT:
3783 elif mystat.st_size >= size:
3785 _checksum_failure_temp_file(
3786 mysettings["DISTDIR"], myfile)
3787 writemsg_stdout("Refetching... " + \
3788 "File renamed to '%s'\n\n" % \
3789 temp_filename, noiselevel=-1)
# Try satisfying the file from read-only distdirs (symlink) or filesystem
# mirrors (copy) before downloading from the network.
3791 if distdir_writable and ro_distdirs:
3792 readonly_file = None
3793 for x in ro_distdirs:
3794 filename = os.path.join(x, myfile)
3795 match, mystat = _check_distfile(
3796 filename, pruned_digests, eout)
3798 readonly_file = filename
3800 if readonly_file is not None:
3802 os.unlink(myfile_path)
3804 if e.errno != errno.ENOENT:
3807 os.symlink(readonly_file, myfile_path)
3810 if fsmirrors and not os.path.exists(myfile_path) and has_space:
3811 for mydir in fsmirrors:
3812 mirror_file = os.path.join(mydir, myfile)
3814 shutil.copyfile(mirror_file, myfile_path)
3815 writemsg(_("Local mirror has file:" + \
3816 " %(file)s\n" % {"file":myfile}))
3818 except (IOError, OSError), e:
3819 if e.errno != errno.ENOENT:
3824 mystat = os.stat(myfile_path)
3826 if e.errno != errno.ENOENT:
3831 apply_secpass_permissions(
3832 myfile_path, gid=portage_gid, mode=0664, mask=02,
3834 except portage.exception.PortageException, e:
3835 if not os.access(myfile_path, os.R_OK):
3836 writemsg("!!! Failed to adjust permissions:" + \
3837 " %s\n" % str(e), noiselevel=-1)
3839 # If the file is empty then it's obviously invalid. Remove
3840 # the empty file and try to download if possible.
3841 if mystat.st_size == 0:
3842 if distdir_writable:
3844 os.unlink(myfile_path)
3845 except EnvironmentError:
3847 elif myfile not in mydigests:
3848 # We don't have a digest, but the file exists. We must
3849 # assume that it is fully downloaded.
3852 if mystat.st_size < mydigests[myfile]["size"] and \
3854 fetched = 1 # Try to resume this download.
3855 elif parallel_fetchonly and \
3856 mystat.st_size == mydigests[myfile]["size"]:
3857 eout = portage.output.EOutput()
3859 mysettings.get("PORTAGE_QUIET") == "1"
3861 "%s size ;-)" % (myfile, ))
3865 verified_ok, reason = portage.checksum.verify_all(
3866 myfile_path, mydigests[myfile])
3868 writemsg("!!! Previously fetched" + \
3869 " file: '%s'\n" % myfile, noiselevel=-1)
3870 writemsg("!!! Reason: %s\n" % reason[0],
3872 writemsg(("!!! Got: %s\n" + \
3873 "!!! Expected: %s\n") % \
3874 (reason[1], reason[2]), noiselevel=-1)
3875 if reason[0] == "Insufficient data for checksum verification":
3877 if distdir_writable:
3879 _checksum_failure_temp_file(
3880 mysettings["DISTDIR"], myfile)
3881 writemsg_stdout("Refetching... " + \
3882 "File renamed to '%s'\n\n" % \
3883 temp_filename, noiselevel=-1)
3885 eout = portage.output.EOutput()
3887 mysettings.get("PORTAGE_QUIET", None) == "1"
3888 digests = mydigests.get(myfile)
3890 digests = digests.keys()
3893 "%s %s ;-)" % (myfile, " ".join(digests)))
3895 continue # fetch any remaining files
# Download loop over candidate URIs (popped from the end of the list).
3897 # Create a reversed list since that is optimal for list.pop().
3898 uri_list = filedict[myfile][:]
3900 checksum_failure_count = 0
3901 tried_locations = set()
3903 loc = uri_list.pop()
3904 # Eliminate duplicates here in case we've switched to
3905 # "primaryuri" mode on the fly due to a checksum failure.
3906 if loc in tried_locations:
3908 tried_locations.add(loc)
3910 writemsg_stdout(loc+" ", noiselevel=-1)
3912 # allow different fetchcommands per protocol
3913 protocol = loc[0:loc.find("://")]
3914 if "FETCHCOMMAND_" + protocol.upper() in mysettings:
3915 fetchcommand=mysettings["FETCHCOMMAND_"+protocol.upper()]
3917 fetchcommand=mysettings["FETCHCOMMAND"]
3918 if "RESUMECOMMAND_" + protocol.upper() in mysettings:
3919 resumecommand=mysettings["RESUMECOMMAND_"+protocol.upper()]
3921 resumecommand=mysettings["RESUMECOMMAND"]
3926 mysize = os.stat(myfile_path).st_size
3928 if e.errno != errno.ENOENT:
3934 writemsg("!!! File %s isn't fetched but unable to get it.\n" % myfile,
3936 elif size is None or size > mysize:
3937 writemsg("!!! File %s isn't fully fetched, but unable to complete it\n" % myfile,
3940 writemsg(("!!! File %s is incorrect size, " + \
3941 "but unable to retry.\n") % myfile, noiselevel=-1)
3942 for var_name in ("FETCHCOMMAND", "RESUMECOMMAND"):
3943 if not mysettings.get(var_name, None):
3944 writemsg(("!!! %s is unset. It should " + \
3945 "have been defined in /etc/make.globals.\n") \
3946 % var_name, noiselevel=-1)
# Spawn the actual fetcher: choose resume vs. fresh download, expand the
# FETCHCOMMAND/RESUMECOMMAND template via shlex + varexpand, run it, and
# sanity-check the result (empty files, 404 HTML pages, digests).
3951 if fetched != 2 and has_space:
3952 #we either need to resume or start the download
3955 mystat = os.stat(myfile_path)
3957 if e.errno != errno.ENOENT:
3962 if mystat.st_size < fetch_resume_size:
3963 writemsg((">>> Deleting distfile with size " + \
3964 "%d (smaller than " "PORTAGE_FETCH_RESU" + \
3965 "ME_MIN_SIZE)\n") % mystat.st_size)
3967 os.unlink(myfile_path)
3969 if e.errno != errno.ENOENT:
3975 writemsg(">>> Resuming download...\n")
3976 locfetch=resumecommand
3979 locfetch=fetchcommand
3980 writemsg_stdout(">>> Downloading '%s'\n" % \
3981 re.sub(r'//(.+):.+@(.+)/',r'//\1:*password*@\2/', loc))
3983 "DISTDIR": mysettings["DISTDIR"],
3987 import shlex, StringIO
3988 lexer = shlex.shlex(StringIO.StringIO(locfetch), posix=True)
3989 lexer.whitespace_split = True
3990 myfetch = [varexpand(x, mydict=variables) for x in lexer]
3994 myret = _spawn_fetch(mysettings, myfetch)
3998 apply_secpass_permissions(myfile_path,
3999 gid=portage_gid, mode=0664, mask=02)
4000 except portage.exception.FileNotFound, e:
4002 except portage.exception.PortageException, e:
4003 if not os.access(myfile_path, os.R_OK):
4004 writemsg("!!! Failed to adjust permissions:" + \
4005 " %s\n" % str(e), noiselevel=-1)
4007 # If the file is empty then it's obviously invalid. Don't
4008 # trust the return value from the fetcher. Remove the
4009 # empty file and try to download again.
4011 if os.stat(myfile_path).st_size == 0:
4012 os.unlink(myfile_path)
4015 except EnvironmentError:
4018 if mydigests is not None and myfile in mydigests:
4020 mystat = os.stat(myfile_path)
4022 if e.errno != errno.ENOENT:
4027 # no exception? file exists. let digestcheck() report
4028 # an appropriately for size or checksum errors
4030 # If the fetcher reported success and the file is
4031 # too small, it's probably because the digest is
4032 # bad (upstream changed the distfile). In this
4033 # case we don't want to attempt to resume. Show a
4034 # digest verification failure to that the user gets
4035 # a clue about what just happened.
4036 if myret != os.EX_OK and \
4037 mystat.st_size < mydigests[myfile]["size"]:
4038 # Fetch failed... Try the next one... Kill 404 files though.
4039 if (mystat[stat.ST_SIZE]<100000) and (len(myfile)>4) and not ((myfile[-5:]==".html") or (myfile[-4:]==".htm")):
4040 html404=re.compile("<title>.*(not found|404).*</title>",re.I|re.M)
4041 if html404.search(open(mysettings["DISTDIR"]+"/"+myfile).read()):
4043 os.unlink(mysettings["DISTDIR"]+"/"+myfile)
4044 writemsg(">>> Deleting invalid distfile. (Improper 404 redirect from server.)\n")
4047 except (IOError, OSError):
4052 # File is the correct size--check the checksums for the fetched
4053 # file NOW, for those users who don't have a stable/continuous
4054 # net connection. This way we have a chance to try to download
4055 # from another mirror...
4056 verified_ok,reason = portage.checksum.verify_all(mysettings["DISTDIR"]+"/"+myfile, mydigests[myfile])
4059 writemsg("!!! Fetched file: "+str(myfile)+" VERIFY FAILED!\n",
4061 writemsg("!!! Reason: "+reason[0]+"\n",
4063 writemsg("!!! Got: %s\n!!! Expected: %s\n" % \
4064 (reason[1], reason[2]), noiselevel=-1)
4065 if reason[0] == "Insufficient data for checksum verification":
4068 _checksum_failure_temp_file(
4069 mysettings["DISTDIR"], myfile)
4070 writemsg_stdout("Refetching... " + \
4071 "File renamed to '%s'\n\n" % \
4072 temp_filename, noiselevel=-1)
4074 checksum_failure_count += 1
4075 if checksum_failure_count == \
4076 checksum_failure_primaryuri:
4077 # Switch to "primaryuri" mode in order
4078 # to increase the probablility of
4081 primaryuri_dict.get(myfile)
4084 reversed(primaryuris))
4085 if checksum_failure_count >= \
4086 checksum_failure_max_tries:
4089 eout = portage.output.EOutput()
4090 eout.quiet = mysettings.get("PORTAGE_QUIET", None) == "1"
4091 digests = mydigests.get(myfile)
4093 eout.ebegin("%s %s ;-)" % \
4094 (myfile, " ".join(sorted(digests))))
4102 elif mydigests!=None:
4103 writemsg("No digest file available and download failed.\n\n",
4106 if use_locks and file_lock:
4107 portage.locks.unlockfile(file_lock)
4110 writemsg_stdout("\n", noiselevel=-1)
# Failure reporting: for fetch-restricted packages, print the restriction
# notice once and run pkg_nofetch (creating a temporary PORTAGE_BUILDDIR if
# digestgen() called us without one).
4112 if restrict_fetch and not restrict_fetch_msg:
4113 restrict_fetch_msg = True
4114 msg = ("\n!!! %s/%s" + \
4115 " has fetch restriction turned on.\n" + \
4116 "!!! This probably means that this " + \
4117 "ebuild's files must be downloaded\n" + \
4118 "!!! manually. See the comments in" + \
4119 " the ebuild for more information.\n\n") % \
4120 (mysettings["CATEGORY"], mysettings["PF"])
4121 portage.util.writemsg_level(msg,
4122 level=logging.ERROR, noiselevel=-1)
4123 have_builddir = "PORTAGE_BUILDDIR" in mysettings and \
4124 os.path.isdir(mysettings["PORTAGE_BUILDDIR"])
4126 global_tmpdir = mysettings["PORTAGE_TMPDIR"]
4127 private_tmpdir = None
4128 if not parallel_fetchonly and not have_builddir:
4129 # When called by digestgen(), it's normal that
4130 # PORTAGE_BUILDDIR doesn't exist. It's helpful
4131 # to show the pkg_nofetch output though, so go
4132 # ahead and create a temporary PORTAGE_BUILDDIR.
4133 # Use a temporary config instance to avoid altering
4134 # the state of the one that's been passed in.
4135 mysettings = config(clone=mysettings)
4136 from tempfile import mkdtemp
4138 private_tmpdir = mkdtemp("", "._portage_fetch_.",
4141 if e.errno != portage.exception.PermissionDenied.errno:
4143 raise portage.exception.PermissionDenied(global_tmpdir)
4144 mysettings["PORTAGE_TMPDIR"] = private_tmpdir
4145 mysettings.backup_changes("PORTAGE_TMPDIR")
4146 debug = mysettings.get("PORTAGE_DEBUG") == "1"
4147 portage.doebuild_environment(mysettings["EBUILD"], "fetch",
4148 mysettings["ROOT"], mysettings, debug, 1, None)
4149 prepare_build_dirs(mysettings["ROOT"], mysettings, 0)
4150 have_builddir = True
4152 if not parallel_fetchonly and have_builddir:
4153 # To spawn pkg_nofetch requires PORTAGE_BUILDDIR for
4154 # ensuring sane $PWD (bug #239560) and storing elog
4155 # messages. Therefore, calling code needs to ensure that
4156 # PORTAGE_BUILDDIR is already clean and locked here.
4158 # All the pkg_nofetch goes to stderr since it's considered
4159 # to be an error message.
4161 0 : sys.stdin.fileno(),
4162 1 : sys.stderr.fileno(),
4163 2 : sys.stderr.fileno(),
4166 ebuild_phase = mysettings.get("EBUILD_PHASE")
4168 mysettings["EBUILD_PHASE"] = "nofetch"
4169 spawn(_shell_quote(EBUILD_SH_BINARY) + \
4170 " nofetch", mysettings, fd_pipes=fd_pipes)
4172 if ebuild_phase is None:
4173 mysettings.pop("EBUILD_PHASE", None)
4175 mysettings["EBUILD_PHASE"] = ebuild_phase
4176 if private_tmpdir is not None:
4177 shutil.rmtree(private_tmpdir)
4179 elif restrict_fetch:
4183 elif not filedict[myfile]:
4184 writemsg("Warning: No mirrors available for file" + \
4185 " '%s'\n" % (myfile), noiselevel=-1)
4187 writemsg("!!! Couldn't download '%s'. Aborting.\n" % myfile,
4193 failed_files.add(myfile)
# digestgen(): (re)create the package Manifest, fetching any distfiles whose
# digests are missing or whose on-disk size disagrees with the Manifest.
# NOTE(review): elided listing -- gaps in the leading line numbers mark
# missing try:/else:/return lines, so comments are hedged accordingly.
4200 def digestgen(myarchives, mysettings, overwrite=1, manifestonly=0, myportdb=None):
4202 Generates a digest file if missing. Assumes all files are available.
4203 DEPRECATED: this now only is a compability wrapper for
4204 portage.manifest.Manifest()
4205 NOTE: manifestonly and overwrite are useless with manifest2 and
4206 are therefore ignored."""
4207 if myportdb is None:
4208 writemsg("Warning: myportdb not specified to digestgen\n")
# Temporarily exempt doebuild() from manifest checks while we regenerate it
# (balanced by the decrement at the end, presumably in a finally block).
4211 global _doebuild_manifest_exempt_depend
4213 _doebuild_manifest_exempt_depend += 1
# Map each distfile to the list of package versions (cpv) that use it.
4215 fetchlist_dict = FetchlistDict(mysettings["O"], mysettings, myportdb)
4216 for cpv in fetchlist_dict:
4218 for myfile in fetchlist_dict[cpv]:
4219 distfiles_map.setdefault(myfile, []).append(cpv)
4220 except portage.exception.InvalidDependString, e:
4221 writemsg("!!! %s\n" % str(e), noiselevel=-1)
4224 mytree = os.path.dirname(os.path.dirname(mysettings["O"]))
4225 manifest1_compat = False
4226 mf = Manifest(mysettings["O"], mysettings["DISTDIR"],
4227 fetchlist_dict=fetchlist_dict, manifest1_compat=manifest1_compat)
4228 # Don't require all hashes since that can trigger excessive
4229 # fetches when sufficient digests already exist. To ease transition
4230 # while Manifest 1 is being removed, only require hashes that will
4231 # exist before and after the transition.
4232 required_hash_types = set()
4233 required_hash_types.add("size")
4234 required_hash_types.add(portage.const.MANIFEST2_REQUIRED_HASH)
4235 dist_hashes = mf.fhashdict.get("DIST", {})
4237 # To avoid accidental regeneration of digests with the incorrect
4238 # files (such as partially downloaded files), trigger the fetch
4239 # code if the file exists and it's size doesn't match the current
4240 # manifest entry. If there really is a legitimate reason for the
4241 # digest to change, `ebuild --force digest` can be used to avoid
4242 # triggering this code (or else the old digests can be manually
4243 # removed from the Manifest).
# Decide which distfiles must be (re)fetched: missing file, zero size,
# missing required hash types, or size mismatch vs. the Manifest entry.
4245 for myfile in distfiles_map:
4246 myhashes = dist_hashes.get(myfile)
4249 st = os.stat(os.path.join(mysettings["DISTDIR"], myfile))
4252 if st is None or st.st_size == 0:
4253 missing_files.append(myfile)
4255 size = myhashes.get("size")
4258 st = os.stat(os.path.join(mysettings["DISTDIR"], myfile))
4260 if e.errno != errno.ENOENT:
4264 missing_files.append(myfile)
4266 if required_hash_types.difference(myhashes):
4267 missing_files.append(myfile)
4270 if st.st_size == 0 or size is not None and size != st.st_size:
4271 missing_files.append(myfile)
# Fetch each missing distfile, gathering the union of SRC_URI entries from
# every cpv that references it; a fetch failure aborts digest generation.
4275 mytree = os.path.realpath(os.path.dirname(
4276 os.path.dirname(mysettings["O"])))
4277 fetch_settings = config(clone=mysettings)
4278 debug = mysettings.get("PORTAGE_DEBUG") == "1"
4279 for myfile in missing_files:
4281 for cpv in distfiles_map[myfile]:
4282 myebuild = os.path.join(mysettings["O"],
4283 catsplit(cpv)[1] + ".ebuild")
4284 # for RESTRICT=fetch, mirror, etc...
4285 doebuild_environment(myebuild, "fetch",
4286 mysettings["ROOT"], fetch_settings,
4288 uris.update(myportdb.getFetchMap(
4289 cpv, mytree=mytree)[myfile])
4291 fetch_settings["A"] = myfile # for use by pkg_nofetch()
4294 st = os.stat(os.path.join(
4295 mysettings["DISTDIR"],myfile))
4299 if not fetch({myfile : uris}, fetch_settings):
4300 writemsg(("!!! Fetch failed for %s, can't update " + \
4301 "Manifest\n") % myfile, noiselevel=-1)
4302 if myfile in dist_hashes and \
4303 st is not None and st.st_size > 0:
4304 # stat result is obtained before calling fetch(),
4305 # since fetch may rename the existing file if the
4306 # digest does not match.
4307 writemsg("!!! If you would like to " + \
4308 "forcefully replace the existing " + \
4309 "Manifest entry\n!!! for %s, use the " % \
4310 myfile + "following command:\n" + \
4311 "!!! " + colorize("INFORM",
4312 "ebuild --force %s manifest" % \
4313 os.path.basename(myebuild)) + "\n",
# Write the new Manifest; FEATURES=assume-digests relaxes the requirement
# that every distfile be present locally.
4316 writemsg_stdout(">>> Creating Manifest for %s\n" % mysettings["O"])
4318 mf.create(requiredDistfiles=myarchives,
4319 assumeDistHashesSometimes=True,
4320 assumeDistHashesAlways=(
4321 "assume-digests" in mysettings.features))
4322 except portage.exception.FileNotFound, e:
4323 writemsg(("!!! File %s doesn't exist, can't update " + \
4324 "Manifest\n") % e, noiselevel=-1)
4326 except portage.exception.PortagePackageException, e:
4327 writemsg(("!!! %s\n") % (e,), noiselevel=-1)
4330 mf.write(sign=False)
4331 except portage.exception.PermissionDenied, e:
4332 writemsg("!!! Permission Denied: %s\n" % (e,), noiselevel=-1)
# Report digests that were assumed (file absent locally) per package version.
4334 if "assume-digests" not in mysettings.features:
4335 distlist = mf.fhashdict.get("DIST", {}).keys()
4338 for filename in distlist:
4339 if not os.path.exists(
4340 os.path.join(mysettings["DISTDIR"], filename)):
4341 auto_assumed.append(filename)
4343 mytree = os.path.realpath(
4344 os.path.dirname(os.path.dirname(mysettings["O"])))
4345 cp = os.path.sep.join(mysettings["O"].split(os.path.sep)[-2:])
4346 pkgs = myportdb.cp_list(cp, mytree=mytree)
4348 writemsg_stdout(" digest.assumed" + portage.output.colorize("WARN",
4349 str(len(auto_assumed)).rjust(18)) + "\n")
4350 for pkg_key in pkgs:
4351 fetchlist = myportdb.getFetchMap(pkg_key, mytree=mytree)
4352 pv = pkg_key.split("/")[1]
4353 for filename in auto_assumed:
4354 if filename in fetchlist:
4356 " %s::%s\n" % (pv, filename))
# Balances the increment above (exemption scope ends here).
4359 _doebuild_manifest_exempt_depend -= 1
# digestParseFile(): legacy digest-file parser, now delegating to Manifest.
# It derives the package directory from the path of the digest/Manifest file
# and returns Manifest's digest dict for it.
# NOTE(review): elided listing -- the branch for paths matching neither
# pattern is missing from this view.
4361 def digestParseFile(myfilename, mysettings=None):
4362 """(filename) -- Parses a given file for entries matching:
4363 <checksumkey> <checksum_hex_string> <filename> <filesize>
4364 Ignores lines that don't start with a valid checksum identifier
4365 and returns a dict with the filenames as keys and {checksumkey:checksum}
4367 DEPRECATED: this function is now only a compability wrapper for
4368 portage.manifest.Manifest()."""
4370 mysplit = myfilename.split(os.sep)
# "<pkgdir>/files/digest-*" -> pkgdir is two levels up;
# "<pkgdir>/Manifest"       -> pkgdir is one level up.
4371 if mysplit[-2] == "files" and mysplit[-1].startswith("digest-"):
4372 pkgdir = os.sep + os.sep.join(mysplit[:-2]).strip(os.sep)
4373 elif mysplit[-1] == "Manifest":
4374 pkgdir = os.sep + os.sep.join(mysplit[:-1]).strip(os.sep)
# Clone the global settings when the caller did not supply any.
4376 if mysettings is None:
4378 mysettings = config(clone=settings)
4380 return Manifest(pkgdir, mysettings["DISTDIR"]).getDigests()
# digestcheck(): verify Manifest-listed checksums for a package directory
# (ebuilds, aux files, misc files, and the requested distfiles), and warn
# about files present on disk but absent from the Manifest.
# NOTE(review): elided listing -- gaps in the leading numbers mark missing
# try:/return lines, so the exact success/failure return paths are not
# visible here.
4382 def digestcheck(myfiles, mysettings, strict=0, justmanifest=0):
4383 """Verifies checksums. Assumes all files have been downloaded.
4384 DEPRECATED: this is now only a compability wrapper for
4385 portage.manifest.Manifest()."""
# EBUILD_SKIP_MANIFEST=1 bypasses all verification.
4386 if mysettings.get("EBUILD_SKIP_MANIFEST") == "1":
4388 pkgdir = mysettings["O"]
4389 manifest_path = os.path.join(pkgdir, "Manifest")
4390 if not os.path.exists(manifest_path):
4391 writemsg("!!! Manifest file not found: '%s'\n" % manifest_path,
4397 mf = Manifest(pkgdir, mysettings["DISTDIR"])
# An entirely empty Manifest is treated as an error, not a pass.
4398 manifest_empty = True
4399 for d in mf.fhashdict.itervalues():
4401 manifest_empty = False
4404 writemsg("!!! Manifest is empty: '%s'\n" % manifest_path,
4410 eout = portage.output.EOutput()
4411 eout.quiet = mysettings.get("PORTAGE_QUIET", None) == "1"
# Full type-hash checks only in strict mode and outside the background
# parallel-fetch process (which defers verification to the parent).
4413 if strict and "PORTAGE_PARALLEL_FETCHONLY" not in mysettings:
4414 eout.ebegin("checking ebuild checksums ;-)")
4415 mf.checkTypeHashes("EBUILD")
4417 eout.ebegin("checking auxfile checksums ;-)")
4418 mf.checkTypeHashes("AUX")
4420 eout.ebegin("checking miscfile checksums ;-)")
4421 mf.checkTypeHashes("MISC", ignoreMissingFiles=True)
4424 eout.ebegin("checking %s ;-)" % f)
4425 mf.checkFileHashes(mf.findFile(f), f)
4429 writemsg("\n!!! Missing digest for %s\n" % str(e), noiselevel=-1)
4431 except portage.exception.FileNotFound, e:
4433 writemsg("\n!!! A file listed in the Manifest could not be found: %s\n" % str(e),
4436 except portage.exception.DigestException, e:
4438 writemsg("\n!!! Digest verification failed:\n", noiselevel=-1)
4439 writemsg("!!! %s\n" % e.value[0], noiselevel=-1)
4440 writemsg("!!! Reason: %s\n" % e.value[1], noiselevel=-1)
4441 writemsg("!!! Got: %s\n" % e.value[2], noiselevel=-1)
4442 writemsg("!!! Expected: %s\n" % e.value[3], noiselevel=-1)
4444 # Make sure that all of the ebuilds are actually listed in the Manifest.
4445 for f in os.listdir(pkgdir):
4446 if f.endswith(".ebuild") and not mf.hasFile("EBUILD", f):
4447 writemsg("!!! A file is not listed in the Manifest: '%s'\n" % \
4448 os.path.join(pkgdir, f), noiselevel=-1)
4451 """ epatch will just grab all the patches out of a directory, so we have to
4452 make sure there aren't any foreign files that it might grab."""
4453 filesdir = os.path.join(pkgdir, "files")
# Walk files/ and flag anything that is neither an AUX entry nor a legacy
# digest-* file; hidden entries and CVS directories are skipped.
4454 for parent, dirs, files in os.walk(filesdir):
4456 if d.startswith(".") or d == "CVS":
4459 if f.startswith("."):
4461 f = os.path.join(parent, f)[len(filesdir) + 1:]
4462 file_type = mf.findFile(f)
4463 if file_type != "AUX" and not f.startswith("digest-"):
4464 writemsg("!!! A file is not listed in the Manifest: '%s'\n" % \
4465 os.path.join(filesdir, f), noiselevel=-1)
4470 # parse actionmap to spawn ebuild with the appropriate args
# spawnebuild(): run one ebuild phase (and, recursively, its "dep"
# prerequisite phase) via the command template in actionmap, then apply
# post-phase fixups.  Returns the phase's exit status (or, when
# returnpid is true, presumably the spawned pid list -- the return lines
# are elided from this view).
# NOTE(review): elided listing -- gaps in the leading numbers mark missing
# return/blank lines.
4471 def spawnebuild(mydo, actionmap, mysettings, debug, alwaysdep=0,
4472 logfile=None, fd_pipes=None, returnpid=False):
4473 if not returnpid and \
4474 (alwaysdep or "noauto" not in mysettings.features):
4475 # process dependency first
4476 if "dep" in actionmap[mydo]:
4477 retval = spawnebuild(actionmap[mydo]["dep"], actionmap,
4478 mysettings, debug, alwaysdep=alwaysdep, logfile=logfile,
4479 fd_pipes=fd_pipes, returnpid=returnpid)
# EAPI gating: "configure" exists only from EAPI 2, "prepare" only from
# its final EAPI 2 form -- earlier EAPIs skip these phases.
4483 eapi = mysettings["EAPI"]
4485 if mydo == "configure" and eapi in ("0", "1", "2_pre1"):
4488 if mydo == "prepare" and eapi in ("0", "1", "2_pre1", "2_pre2"):
4491 kwargs = actionmap[mydo]["args"]
4492 mysettings["EBUILD_PHASE"] = mydo
# Clear any stale exit-status marker file before spawning the phase.
4493 _doebuild_exit_status_unlink(
4494 mysettings.get("EBUILD_EXIT_STATUS_FILE"))
4497 phase_retval = spawn(actionmap[mydo]["cmd"] % mydo,
4498 mysettings, debug=debug, logfile=logfile,
4499 fd_pipes=fd_pipes, returnpid=returnpid, **kwargs)
4501 mysettings["EBUILD_PHASE"] = ""
# Surface any exit-status-file inconsistency through elog as errors.
4505 msg = _doebuild_exit_status_check(mydo, mysettings)
4508 from textwrap import wrap
4509 from portage.elog.messages import eerror
4510 for l in wrap(msg, 72):
4511 eerror(l, phase=mydo, key=mysettings.mycpv)
4513 _post_phase_userpriv_perms(mysettings)
4514 if mydo == "install":
4515 _check_build_log(mysettings)
4516 if phase_retval == os.EX_OK:
4517 phase_retval = _post_src_install_checks(mysettings)
# FEATURES=test-fail-continue turns a failed test phase into a success.
4519 if mydo == "test" and phase_retval != os.EX_OK and \
4520 "test-fail-continue" in mysettings.features:
4521 phase_retval = os.EX_OK
# Table of misc-functions.sh helper function names run after certain
# phases; consumed via _spawn_misc_sh().  Keys used elsewhere in this
# file: "install", "preinst", "postinst".  (Some entries of the dict
# are not visible in this view.)
_post_phase_cmds = {
	"install_symlink_html_docs"],
	"preinst_selinux_labels",
	"preinst_suid_scan",
	"postinst_bsdflags"]
4542 def _post_phase_userpriv_perms(mysettings):
4543 if "userpriv" in mysettings.features and secpass >= 2:
4544 """ Privileged phases may have left files that need to be made
4545 writable to a less privileged user."""
4546 apply_recursive_permissions(mysettings["T"],
4547 uid=portage_uid, gid=portage_gid, dirmode=070, dirmask=0,
4548 filemode=060, filemask=0)
def _post_src_install_checks(mysettings):
	"""Fix up ownership in $D, then run the post-install QA helpers
	(install_qa_check etc.) from misc-functions.sh."""
	# Map portage uid/gid ownership of installed files first.
	_post_src_install_uid_fix(mysettings)
	global _post_phase_cmds
	retval = _spawn_misc_sh(mysettings, _post_phase_cmds["install"])
	if retval != os.EX_OK:
		writemsg("!!! install_qa_check failed; exiting.\n",
def _check_build_log(mysettings, out=None):
	"""
	Search the content of $PORTAGE_LOG_FILE if it exists
	and generate the following QA Notices when appropriate:

	* Automake "maintainer mode"
	* Unrecognized configure options
	"""
	logfile = mysettings.get("PORTAGE_LOG_FILE")
	f = open(logfile, 'rb')
	except EnvironmentError:
	# Accumulators for each category of suspicious log line.
	am_maintainer_mode = []
	bash_command_not_found = []
	bash_command_not_found_re = re.compile(
		r'(.*): line (\d*): (.*): command not found$')
	# "command not found" emitted by configure scripts is expected noise.
	command_not_found_exclude_re = re.compile(r'/configure: line ')
	helper_missing_file = []
	helper_missing_file_re = re.compile(
		r'^!!! (do|new).*: .* does not exist$')
	configure_opts_warn = []
	configure_opts_warn_re = re.compile(
		r'^configure: WARNING: [Uu]nrecognized options: ')
	am_maintainer_mode_re = re.compile(r'/missing --run ')
	# autoheader/makeinfo invocations do not count as maintainer mode.
	am_maintainer_mode_exclude_re = \
		re.compile(r'/missing --run (autoheader|makeinfo)')
	make_jobserver_re = \
		re.compile(r'g?make\[\d+\]: warning: jobserver unavailable:')
	# Classify each log line into the matching bucket (loop header
	# over the log file is not visible in this view).
	if am_maintainer_mode_re.search(line) is not None and \
		am_maintainer_mode_exclude_re.search(line) is None:
		am_maintainer_mode.append(line.rstrip("\n"))
	if bash_command_not_found_re.match(line) is not None and \
		command_not_found_exclude_re.search(line) is None:
		bash_command_not_found.append(line.rstrip("\n"))
	if helper_missing_file_re.match(line) is not None:
		helper_missing_file.append(line.rstrip("\n"))
	if configure_opts_warn_re.match(line) is not None:
		configure_opts_warn.append(line.rstrip("\n"))
	if make_jobserver_re.match(line) is not None:
		make_jobserver.append(line.rstrip("\n"))
	from portage.elog.messages import eqawarn
	def _eqawarn(lines):
		# Emit each line as a QA warning attributed to the install phase.
		eqawarn(line, phase="install", key=mysettings.mycpv, out=out)
	from textwrap import wrap
	if am_maintainer_mode:
		msg = ["QA Notice: Automake \"maintainer mode\" detected:"]
		msg.extend("\t" + line for line in am_maintainer_mode)
		# Explanatory text appended to the notice (msg.append call
		# not visible in this view).
		"If you patch Makefile.am, " + \
		"configure.in, or configure.ac then you " + \
		"should use autotools.eclass and " + \
		"eautomake or eautoreconf. Exceptions " + \
		"are limited to system packages " + \
		"for which it is impossible to run " + \
		"autotools during stage building. " + \
		"See http://www.gentoo.org/p" + \
		"roj/en/qa/autofailure.xml for more information.",
	if bash_command_not_found:
		msg = ["QA Notice: command not found:"]
		msg.extend("\t" + line for line in bash_command_not_found)
	if helper_missing_file:
		msg = ["QA Notice: file does not exist:"]
		# Strip the leading "!!! " prefix from each helper error line.
		msg.extend("\t" + line[4:] for line in helper_missing_file)
	if configure_opts_warn:
		msg = ["QA Notice: Unrecognized configure options:"]
		msg.extend("\t" + line for line in configure_opts_warn)
	msg = ["QA Notice: make jobserver unavailable:"]
	msg.extend("\t" + line for line in make_jobserver)
def _post_src_install_uid_fix(mysettings):
	"""
	Files in $D with user and group bits that match the "portage"
	user or group are automatically mapped to PORTAGE_INST_UID and
	PORTAGE_INST_GID if necessary. The chown system call may clear
	S_ISUID and S_ISGID bits, so those bits are restored if
	necessary.
	"""
	inst_uid = int(mysettings["PORTAGE_INST_UID"])
	inst_gid = int(mysettings["PORTAGE_INST_GID"])
	# Walk every directory and file installed into $D.
	for parent, dirs, files in os.walk(mysettings["D"]):
		for fname in chain(dirs, files):
			fpath = os.path.join(parent, fname)
			# lstat so symlinks are examined, not their targets.
			mystat = os.lstat(fpath)
			# Neither uid nor gid is portage's: nothing to remap
			# (branch body and myuid/mygid assignments not visible here).
			if mystat.st_uid != portage_uid and \
				mystat.st_gid != portage_gid:
			if mystat.st_uid == portage_uid:
			if mystat.st_gid == portage_gid:
			# Re-apply the original mode so chown cannot silently drop
			# the setuid/setgid bits (see docstring).
			apply_secpass_permissions(fpath, uid=myuid, gid=mygid,
				mode=mystat.st_mode, stat_cached=mystat,
def _post_pkg_preinst_cmd(mysettings):
	"""
	Post phase logic and tasks that have been factored out of
	ebuild.sh. Call preinst_mask last so that INSTALL_MASK can
	be used to wipe out any gmon.out files created during
	previous functions (in case any tools were built with -pg).
	"""
	portage_bin_path = mysettings["PORTAGE_BIN_PATH"]
	misc_sh_binary = os.path.join(portage_bin_path,
		os.path.basename(MISC_SH_BINARY))
	mysettings["EBUILD_PHASE"] = ""
	global _post_phase_cmds
	# Build the misc-functions.sh invocation for the preinst helpers.
	myargs = [_shell_quote(misc_sh_binary)] + _post_phase_cmds["preinst"]
def _post_pkg_postinst_cmd(mysettings):
	"""
	Post phase logic and tasks that have been factored out of
	ebuild.sh.
	"""
	portage_bin_path = mysettings["PORTAGE_BIN_PATH"]
	misc_sh_binary = os.path.join(portage_bin_path,
		os.path.basename(MISC_SH_BINARY))
	mysettings["EBUILD_PHASE"] = ""
	global _post_phase_cmds
	# Build the misc-functions.sh invocation for the postinst helpers.
	myargs = [_shell_quote(misc_sh_binary)] + _post_phase_cmds["postinst"]
def _spawn_misc_sh(mysettings, commands, **kwargs):
	"""
	Spawn misc-functions.sh with the given helper function names.

	@param mysettings: the ebuild config
	@type mysettings: config
	@param commands: a list of function names to call in misc-functions.sh
	@type commands: list
	@returns: the return value from the spawn() call
	"""
	# Note: PORTAGE_BIN_PATH may differ from the global
	# constant when portage is reinstalling itself.
	portage_bin_path = mysettings["PORTAGE_BIN_PATH"]
	misc_sh_binary = os.path.join(portage_bin_path,
		os.path.basename(MISC_SH_BINARY))
	# A single shell invocation runs all requested helpers in order.
	mycommand = " ".join([_shell_quote(misc_sh_binary)] + commands)
	# Clear any stale exit-status marker before spawning.
	_doebuild_exit_status_unlink(
		mysettings.get("EBUILD_EXIT_STATUS_FILE"))
	debug = mysettings.get("PORTAGE_DEBUG") == "1"
	logfile = mysettings.get("PORTAGE_LOG_FILE")
	mydo = mysettings["EBUILD_PHASE"]
	rval = spawn(mycommand, mysettings, debug=debug,
		logfile=logfile, **kwargs)
	# Report an abnormal shell exit via elog, wrapped to 72 columns.
	msg = _doebuild_exit_status_check(mydo, mysettings)
	from textwrap import wrap
	from portage.elog.messages import eerror
	for l in wrap(msg, 72):
		eerror(l, phase=mydo, key=mysettings.mycpv)
4764 _deprecated_eapis = frozenset(["2_pre3", "2_pre2", "2_pre1"])
4766 def _eapi_is_deprecated(eapi):
4767 return eapi in _deprecated_eapis
def eapi_is_supported(eapi):
	"""Return whether this version of portage can handle the given EAPI
	(deprecated EAPIs included)."""
	eapi = str(eapi).strip()
	# Deprecated EAPI handling (branch body not visible in this view).
	if _eapi_is_deprecated(eapi):
	# NOTE(review): this compares string-wise against portage.const.EAPI
	# unless a numeric conversion happens in elided lines — confirm.
	return eapi <= portage.const.EAPI
def doebuild_environment(myebuild, mydo, myroot, mysettings, debug, use_cache, mydbapi):
	"""Populate ``mysettings`` with all per-package variables (P, PN,
	PV, PR, PF, FILESDIR, WORKDIR, D, T, ...) needed to run ebuild
	phase ``mydo`` for ``myebuild``."""
	ebuild_path = os.path.abspath(myebuild)
	pkg_dir = os.path.dirname(ebuild_path)
	# Prefer the CATEGORY already recorded in the pkg config layer.
	if "CATEGORY" in mysettings.configdict["pkg"]:
		cat = mysettings.configdict["pkg"]["CATEGORY"]
	# Fallback: category is the ebuild's grandparent directory name.
	cat = os.path.basename(normalize_path(os.path.join(pkg_dir, "..")))
	mypv = os.path.basename(ebuild_path)[:-7]  # strip ".ebuild"
	mycpv = cat+"/"+mypv
	mysplit=pkgsplit(mypv,silent=0)
	# Raised when the filename is not a parseable ${PN}-${PV} (guard
	# condition not visible in this view).
	raise portage.exception.IncorrectParameter(
		"Invalid ebuild path: '%s'" % myebuild)

	# Make a backup of PORTAGE_TMPDIR prior to calling config.reset()
	# so that the caller can override it.
	tmpdir = mysettings["PORTAGE_TMPDIR"]

	if mydo != "depend" and mycpv != mysettings.mycpv:
		"""For performance reasons, setcpv only triggers reset when it
		detects a package-specific change in config. For the ebuild
		environment, a reset call is forced in order to ensure that the
		latest env.d variables are used."""
		mysettings.reset(use_cache=use_cache)
		mysettings.setcpv(mycpv, use_cache=use_cache, mydb=mydbapi)

	# config.reset() might have reverted a change made by the caller,
	# so restore it to it's original value.
	mysettings["PORTAGE_TMPDIR"] = tmpdir

	mysettings.pop("EBUILD_PHASE", None) # remove from backupenv
	mysettings["EBUILD_PHASE"] = mydo

	mysettings["PORTAGE_MASTER_PID"] = str(os.getpid())

	# We are disabling user-specific bashrc files.
	mysettings["BASH_ENV"] = INVALID_ENV_FILE

	if debug: # Otherwise it overrides emerge's settings.
		# We have no other way to set debug... debug can't be passed in
		# due to how it's coded... Don't overwrite this so we can use it.
		mysettings["PORTAGE_DEBUG"] = "1"

	mysettings["ROOT"] = myroot
	mysettings["STARTDIR"] = getcwd()
	mysettings["EBUILD"] = ebuild_path
	mysettings["O"] = pkg_dir
	mysettings.configdict["pkg"]["CATEGORY"] = cat
	mysettings["FILESDIR"] = pkg_dir+"/files"
	mysettings["PF"] = mypv

	# Canonicalize tree locations (sandbox needs real paths).
	mysettings["PORTDIR"] = os.path.realpath(mysettings["PORTDIR"])
	mysettings["DISTDIR"] = os.path.realpath(mysettings["DISTDIR"])
	mysettings["RPMDIR"] = os.path.realpath(mysettings["RPMDIR"])

	mysettings["ECLASSDIR"] = mysettings["PORTDIR"]+"/eclass"
	mysettings["SANDBOX_LOG"] = mycpv.replace("/", "_-_")

	mysettings["PROFILE_PATHS"] = "\n".join(mysettings.profiles)
	# Name components from pkgsplit: (PN, PV, PR).
	mysettings["P"] = mysplit[0]+"-"+mysplit[1]
	mysettings["PN"] = mysplit[0]
	mysettings["PV"] = mysplit[1]
	mysettings["PR"] = mysplit[2]

	if portage.util.noiselimit < 0:
		mysettings["PORTAGE_QUIET"] = "1"

	if mydo != "depend":
		# Metadata vars such as EAPI and RESTRICT are
		# set by the above config.setcpv() call.
		eapi = mysettings["EAPI"]
		if not eapi_is_supported(eapi):
			# can't do anything with this.
			raise portage.exception.UnsupportedAPIException(mycpv, eapi)
		# Expand USE-conditionals in RESTRICT against this package's USE.
		mysettings["PORTAGE_RESTRICT"] = " ".join(flatten(
			portage.dep.use_reduce(portage.dep.paren_reduce(
			mysettings["RESTRICT"]),
			uselist=mysettings["PORTAGE_USE"].split())))
		except portage.exception.InvalidDependString:
			# RESTRICT is validated again inside doebuild, so let this go
			mysettings["PORTAGE_RESTRICT"] = ""

	# PVR omits the revision suffix for -r0 ebuilds.
	if mysplit[2] == "r0":
		mysettings["PVR"]=mysplit[1]
	mysettings["PVR"]=mysplit[1]+"-"+mysplit[2]

	if "PATH" in mysettings:
		mysplit=mysettings["PATH"].split(":")
	# Note: PORTAGE_BIN_PATH may differ from the global constant
	# when portage is reinstalling itself.
	portage_bin_path = mysettings["PORTAGE_BIN_PATH"]
	if portage_bin_path not in mysplit:
		mysettings["PATH"] = portage_bin_path + ":" + mysettings["PATH"]

	# Sandbox needs cannonical paths.
	mysettings["PORTAGE_TMPDIR"] = os.path.realpath(
		mysettings["PORTAGE_TMPDIR"])
	mysettings["BUILD_PREFIX"] = mysettings["PORTAGE_TMPDIR"]+"/portage"
	mysettings["PKG_TMPDIR"] = mysettings["PORTAGE_TMPDIR"]+"/binpkgs"

	# Package {pre,post}inst and {pre,post}rm may overlap, so they must have separate
	# locations in order to prevent interference.
	if mydo in ("unmerge", "prerm", "postrm", "cleanrm"):
		mysettings["PORTAGE_BUILDDIR"] = os.path.join(
			mysettings["PKG_TMPDIR"],
			mysettings["CATEGORY"], mysettings["PF"])
	mysettings["PORTAGE_BUILDDIR"] = os.path.join(
		mysettings["BUILD_PREFIX"],
		mysettings["CATEGORY"], mysettings["PF"])

	# Standard build-directory layout used by the ebuild shell.
	mysettings["HOME"] = os.path.join(mysettings["PORTAGE_BUILDDIR"], "homedir")
	mysettings["WORKDIR"] = os.path.join(mysettings["PORTAGE_BUILDDIR"], "work")
	mysettings["D"] = os.path.join(mysettings["PORTAGE_BUILDDIR"], "image") + os.sep
	mysettings["T"] = os.path.join(mysettings["PORTAGE_BUILDDIR"], "temp")

	mysettings["PORTAGE_BASHRC"] = os.path.join(
		mysettings["PORTAGE_CONFIGROOT"], EBUILD_SH_ENV_FILE.lstrip(os.path.sep))
	# Marker file whose absence after a phase signals an abnormal exit
	# (see _doebuild_exit_status_check).
	mysettings["EBUILD_EXIT_STATUS_FILE"] = os.path.join(
		mysettings["PORTAGE_BUILDDIR"], ".exit_status")

	#set up KV variable -- DEP SPEEDUP :: Don't waste time. Keep var persistent.
	if mydo != "depend" and "KV" not in mysettings:
		mykv,err1=ExtractKernelVersion(os.path.join(myroot, "usr/src/linux"))
		# Regular source tree
		mysettings["KV"]=mykv
		mysettings.backup_changes("KV")

	# Allow color.map to control colors associated with einfo, ewarn, etc...
	# (mycolors initialization not visible in this view)
	for c in ("GOOD", "WARN", "BAD", "HILITE", "BRACKET"):
		mycolors.append("%s=$'%s'" % (c, portage.output.codes[c]))
	mysettings["PORTAGE_COLORMAP"] = "\n".join(mycolors)
def prepare_build_dirs(myroot, mysettings, cleanup):
	"""Create (and, when requested, clean) the build directory tree for
	the package described by ``mysettings``."""
	clean_dirs = [mysettings["HOME"]]

	# We enable cleanup when we want to make sure old cruft (such as the old
	# environment) doesn't interfere with the current phase.
	clean_dirs.append(mysettings["T"])

	for clean_dir in clean_dirs:
		shutil.rmtree(clean_dir)
		# ENOENT and EPERM from rmtree are handled specially
		# (surrounding try/except not visible in this view).
		if errno.ENOENT == oe.errno:
		elif errno.EPERM == oe.errno:
			writemsg("%s\n" % oe, noiselevel=-1)
			writemsg("Operation Not Permitted: rmtree('%s')\n" % \
				clean_dir, noiselevel=-1)

	def makedirs(dir_path):
		# Local helper: create dir_path, tolerating EEXIST and
		# reporting EPERM (try/except not visible in this view).
		os.makedirs(dir_path)
		if errno.EEXIST == oe.errno:
		elif errno.EPERM == oe.errno:
			writemsg("%s\n" % oe, noiselevel=-1)
			writemsg("Operation Not Permitted: makedirs('%s')\n" % \
				dir_path, noiselevel=-1)

	mysettings["PKG_LOGDIR"] = os.path.join(mysettings["T"], "logging")

	# Parent and grandparent of PORTAGE_BUILDDIR get portage-group perms.
	mydirs = [os.path.dirname(mysettings["PORTAGE_BUILDDIR"])]
	mydirs.append(os.path.dirname(mydirs[-1]))

	for mydir in mydirs:
		portage.util.ensure_dirs(mydir)
		portage.util.apply_secpass_permissions(mydir,
			gid=portage_gid, uid=portage_uid, mode=070, mask=0)
	for dir_key in ("PORTAGE_BUILDDIR", "HOME", "PKG_LOGDIR", "T"):
		"""These directories don't necessarily need to be group writable.
		However, the setup phase is commonly run as a privileged user prior
		to the other phases being run by an unprivileged user. Currently,
		we use the portage group to ensure that the unprivleged user still
		has write access to these directories in any case."""
		portage.util.ensure_dirs(mysettings[dir_key], mode=0775)
		portage.util.apply_secpass_permissions(mysettings[dir_key],
			uid=portage_uid, gid=portage_gid)
	except portage.exception.PermissionDenied, e:
		writemsg("Permission Denied: %s\n" % str(e), noiselevel=-1)
	except portage.exception.OperationNotPermitted, e:
		writemsg("Operation Not Permitted: %s\n" % str(e), noiselevel=-1)
	except portage.exception.FileNotFound, e:
		writemsg("File Not Found: '%s'\n" % str(e), noiselevel=-1)

	_prepare_workdir(mysettings)
	_prepare_features_dirs(mysettings)
def _adjust_perms_msg(settings, msg):
	"""Write ``msg`` to stderr, and additionally append it to the build
	log when running in background mode."""
	writemsg(msg, noiselevel=-1)
	background = settings.get("PORTAGE_BACKGROUND") == "1"
	log_path = settings.get("PORTAGE_LOG_FILE")
	if background and log_path is not None:
		log_file = open(log_path, 'a')
	if log_file is not None:
def _prepare_features_dirs(mysettings):
	"""Create and permission-fix the directories used by optional
	FEATURES (ccache, distcc); on failure, drop the feature from
	FEATURES instead of aborting."""
	# Per-feature directory descriptors (dict header lines not
	# visible in this view).
		"basedir_var":"CCACHE_DIR",
		"default_dir":os.path.join(mysettings["PORTAGE_TMPDIR"], "ccache"),
		"always_recurse":False},
		"basedir_var":"DISTCC_DIR",
		"default_dir":os.path.join(mysettings["BUILD_PREFIX"], ".distcc"),
		"subdirs":("lock", "state"),
		"always_recurse":True}
	restrict = mysettings.get("PORTAGE_RESTRICT","").split()
	from portage.data import secpass
	# userpriv applies only with sufficient privileges and when the
	# package does not RESTRICT it.
	droppriv = secpass >= 2 and \
		"userpriv" in mysettings.features and \
		"userpriv" not in restrict
	for myfeature, kwargs in features_dirs.iteritems():
		if myfeature in mysettings.features:
			basedir = mysettings[kwargs["basedir_var"]]
			# Fall back to the default location when unset.
			basedir = kwargs["default_dir"]
			mysettings[kwargs["basedir_var"]] = basedir
			mydirs = [mysettings[kwargs["basedir_var"]]]
			if "subdirs" in kwargs:
				for subdir in kwargs["subdirs"]:
					mydirs.append(os.path.join(basedir, subdir))
			for mydir in mydirs:
				modified = portage.util.ensure_dirs(mydir)
				# Generally, we only want to apply permissions for
				# initial creation. Otherwise, we don't know exactly what
				# permissions the user wants, so should leave them as-is.
				droppriv_fix = False
				if st.st_gid != portage_gid or \
					not dirmode == (stat.S_IMODE(st.st_mode) & dirmode):
				if not droppriv_fix:
					# Check permissions of files in the directory.
					for filename in os.listdir(mydir):
						subdir_st = os.lstat(
							os.path.join(mydir, filename))
						if subdir_st.st_gid != portage_gid or \
							((stat.S_ISDIR(subdir_st.st_mode) and \
							not dirmode == (stat.S_IMODE(subdir_st.st_mode) & dirmode))):
				_adjust_perms_msg(mysettings,
					colorize("WARN", " * ") + \
					"Adjusting permissions " + \
					"for FEATURES=userpriv: '%s'\n" % mydir)
				_adjust_perms_msg(mysettings,
					colorize("WARN", " * ") + \
					"Adjusting permissions " + \
					"for FEATURES=%s: '%s'\n" % (myfeature, mydir))
				if modified or kwargs["always_recurse"] or droppriv_fix:
					raise # The feature is disabled if a single error
					# occurs during permissions adjustment.
					if not apply_recursive_permissions(mydir,
						gid=portage_gid, dirmode=dirmode, dirmask=modemask,
						filemode=filemode, filemask=modemask, onerror=onerror):
						raise portage.exception.OperationNotPermitted(
							"Failed to apply recursive permissions for the portage group.")
			except portage.exception.PortageException, e:
				# Disable the feature rather than abort the build.
				mysettings.features.remove(myfeature)
				mysettings["FEATURES"] = " ".join(mysettings.features)
				writemsg("!!! %s\n" % str(e), noiselevel=-1)
				writemsg("!!! Failed resetting perms on %s='%s'\n" % \
					(kwargs["basedir_var"], basedir), noiselevel=-1)
				writemsg("!!! Disabled FEATURES='%s'\n" % myfeature,
def _prepare_workdir(mysettings):
	"""Apply PORTAGE_WORKDIR_MODE permissions to WORKDIR and set up
	build logging (PORT_LOGDIR-based, falling back to $T/build.log)."""
	mode = mysettings["PORTAGE_WORKDIR_MODE"]
	parsed_mode = int(mode, 8)  # PORTAGE_WORKDIR_MODE is octal text
	# Reject values outside the permission-bit range.
	if parsed_mode & 07777 != parsed_mode:
		raise ValueError("Invalid file mode: %s" % mode)
	workdir_mode = parsed_mode
	writemsg("!!! PORTAGE_WORKDIR_MODE is unset, using %s.\n" % oct(workdir_mode))
	except ValueError, e:
		# Fall back to the previously assigned default on parse failure.
		writemsg("%s\n" % e)
		writemsg("!!! Unable to parse PORTAGE_WORKDIR_MODE='%s', using %s.\n" % \
			(mysettings["PORTAGE_WORKDIR_MODE"], oct(workdir_mode)))
	# Normalize the setting back to canonical octal text.
	mysettings["PORTAGE_WORKDIR_MODE"] = oct(workdir_mode)
	apply_secpass_permissions(mysettings["WORKDIR"],
		uid=portage_uid, gid=portage_gid, mode=workdir_mode)
	except portage.exception.FileNotFound:
		pass # ebuild.sh will create it

	# An empty PORT_LOGDIR disables directory-based logging.
	if mysettings.get("PORT_LOGDIR", "") == "":
		while "PORT_LOGDIR" in mysettings:
			# NOTE(review): the key can apparently exist in multiple
			# config layers, hence the loop — confirm.
			del mysettings["PORT_LOGDIR"]
	if "PORT_LOGDIR" in mysettings:
		modified = portage.util.ensure_dirs(mysettings["PORT_LOGDIR"])
		apply_secpass_permissions(mysettings["PORT_LOGDIR"],
			uid=portage_uid, gid=portage_gid, mode=02770)
		except portage.exception.PortageException, e:
			# Logging directory unusable: report and disable logging.
			writemsg("!!! %s\n" % str(e), noiselevel=-1)
			writemsg("!!! Permission issues with PORT_LOGDIR='%s'\n" % \
				mysettings["PORT_LOGDIR"], noiselevel=-1)
			writemsg("!!! Disabling logging.\n", noiselevel=-1)
			while "PORT_LOGDIR" in mysettings:
				del mysettings["PORT_LOGDIR"]
	if "PORT_LOGDIR" in mysettings and \
		os.access(mysettings["PORT_LOGDIR"], os.W_OK):
		# A marker file's mtime provides a stable timestamp for this
		# build's log file name.
		logid_path = os.path.join(mysettings["PORTAGE_BUILDDIR"], ".logid")
		if not os.path.exists(logid_path):
			f = open(logid_path, "w")
		logid_time = time.strftime("%Y%m%d-%H%M%S",
			time.gmtime(os.stat(logid_path).st_mtime))
		mysettings["PORTAGE_LOG_FILE"] = os.path.join(
			mysettings["PORT_LOGDIR"], "%s:%s:%s.log" % \
			(mysettings["CATEGORY"], mysettings["PF"], logid_time))
		del logid_path, logid_time
	# When sesandbox is enabled, only log if PORT_LOGDIR is explicitly
	# enabled since it is possible that local SELinux security policies
	# do not allow ouput to be piped out of the sesandbox domain.
	if not (mysettings.selinux_enabled() and \
		"sesandbox" in mysettings.features):
		mysettings["PORTAGE_LOG_FILE"] = os.path.join(
			mysettings["T"], "build.log")
def _doebuild_exit_status_check(mydo, settings):
	"""
	Returns an error string if the shell appeared
	to exit unsuccessfully, None otherwise.
	"""
	exit_status_file = settings.get("EBUILD_EXIT_STATUS_FILE")
	# Presence of the marker file (or no marker configured) indicates a
	# normal exit — presumably the marker is written by the ebuild shell
	# on clean exit; confirm against ebuild.sh.
	if not exit_status_file or \
		os.path.exists(exit_status_file):
	msg = ("The ebuild phase '%s' has exited " % mydo) + \
		"unexpectedly. This type of behavior " + \
		"is known to be triggered " + \
		"by things such as failed variable " + \
		"assignments (bug #190128) or bad substitution " + \
		"errors (bug #200313). Normally, before exiting, bash should " + \
		"have displayed an error message above. If bash did not " + \
		"produce an error message above, it's possible " + \
		"that the ebuild has called `exit` when it " + \
		"should have called `die` instead. This behavior may also " + \
		"be triggered by a corrupt bash binary or a hardware " + \
		"problem such as memory or cpu malfunction. If the problem is not " + \
		"reproducible or it appears to occur randomly, then it is likely " + \
		"to be triggered by a hardware problem. " + \
		"If you suspect a hardware problem then you should " + \
		"try some basic hardware diagnostics such as memtest. " + \
		"Please do not report this as a bug unless it is consistently " + \
		"reproducible and you are sure that your bash binary and hardware " + \
		"are functioning properly."
def _doebuild_exit_status_check_and_log(settings, mydo, retval):
	"""Run _doebuild_exit_status_check() for a failed phase and report
	any abnormal shell exit through elog."""
	if retval != os.EX_OK:
		msg = _doebuild_exit_status_check(mydo, settings)
		from textwrap import wrap
		from portage.elog.messages import eerror
		for l in wrap(msg, 72):
			eerror(l, phase=mydo, key=settings.mycpv)
def _doebuild_exit_status_unlink(exit_status_file):
	"""
	Remove the exit-status marker file if present.
	Double check to make sure it really doesn't exist
	and raise an OSError if it still does (it shouldn't).
	"""
	if not exit_status_file:
	os.unlink(exit_status_file)
	# Double check; a second failing unlink here propagates the OSError.
	if os.path.exists(exit_status_file):
		os.unlink(exit_status_file)
# When nonzero, doebuild()'s Manifest verification is temporarily
# suspended (incremented around the digest/manifest/help phases).
_doebuild_manifest_exempt_depend = 0
# Most recently verified Manifest object, cached so the same package
# dir is not re-verified repeatedly (e.g. during a regen).
_doebuild_manifest_cache = None
# Ebuilds and Manifests that already failed verification, so each
# failure is only reported once.
_doebuild_broken_ebuilds = set()
_doebuild_broken_manifests = set()
5238 def doebuild(myebuild, mydo, myroot, mysettings, debug=0, listonly=0,
5239 fetchonly=0, cleanup=0, dbkey=None, use_cache=1, fetchall=0, tree=None,
5240 mydbapi=None, vartree=None, prev_mtimes=None,
5241 fd_pipes=None, returnpid=False):
5244 Wrapper function that invokes specific ebuild phases through the spawning
5247 @param myebuild: name of the ebuild to invoke the phase on (CPV)
5248 @type myebuild: String
5249 @param mydo: Phase to run
5251 @param myroot: $ROOT (usually '/', see man make.conf)
5252 @type myroot: String
5253 @param mysettings: Portage Configuration
5254 @type mysettings: instance of portage.config
5255 @param debug: Turns on various debug information (eg, debug for spawn)
5256 @type debug: Boolean
5257 @param listonly: Used to wrap fetch(); passed such that fetch only lists files required.
5258 @type listonly: Boolean
5259 @param fetchonly: Used to wrap fetch(); passed such that files are only fetched (no other actions)
5260 @type fetchonly: Boolean
5261 @param cleanup: Passed to prepare_build_dirs (TODO: what does it do?)
5262 @type cleanup: Boolean
5263 @param dbkey: A dict (usually keys and values from the depend phase, such as KEYWORDS, USE, etc..)
5264 @type dbkey: Dict or String
5265 @param use_cache: Enables the cache
5266 @type use_cache: Boolean
5267 @param fetchall: Used to wrap fetch(), fetches all URI's (even ones invalid due to USE conditionals)
5268 @type fetchall: Boolean
5269 @param tree: Which tree to use ('vartree','porttree','bintree', etc..), defaults to 'porttree'
5271 @param mydbapi: a dbapi instance to pass to various functions; this should be a portdbapi instance.
5272 @type mydbapi: portdbapi instance
5273 @param vartree: A instance of vartree; used for aux_get calls, defaults to db[myroot]['vartree']
5274 @type vartree: vartree instance
5275 @param prev_mtimes: A dict of { filename:mtime } keys used by merge() to do config_protection
5276 @type prev_mtimes: dictionary
5282 Most errors have an accompanying error message.
5284 listonly and fetchonly are only really necessary for operations involving 'fetch'
5285 prev_mtimes are only necessary for merge operations.
5286 Other variables may not be strictly required, many have defaults that are set inside of doebuild.
5291 writemsg("Warning: tree not specified to doebuild\n")
5295 # chunked out deps for each phase, so that ebuild binary can use it
5296 # to collapse targets down.
5299 "unpack": ["setup"],
5300 "prepare": ["unpack"],
5301 "configure": ["prepare"],
5302 "compile":["configure"],
5303 "test": ["compile"],
5306 "package":["install"],
5310 mydbapi = db[myroot][tree].dbapi
5312 if vartree is None and mydo in ("merge", "qmerge", "unmerge"):
5313 vartree = db[myroot]["vartree"]
5315 features = mysettings.features
5316 noauto = "noauto" in features
5317 from portage.data import secpass
5319 clean_phases = ("clean", "cleanrm")
5320 validcommands = ["help","clean","prerm","postrm","cleanrm","preinst","postinst",
5321 "config", "info", "setup", "depend",
5322 "fetch", "fetchall", "digest",
5323 "unpack", "prepare", "configure", "compile", "test",
5324 "install", "rpm", "qmerge", "merge",
5325 "package","unmerge", "manifest"]
5327 if mydo not in validcommands:
5328 validcommands.sort()
5329 writemsg("!!! doebuild: '%s' is not one of the following valid commands:" % mydo,
5331 for vcount in range(len(validcommands)):
5333 writemsg("\n!!! ", noiselevel=-1)
5334 writemsg(validcommands[vcount].ljust(11), noiselevel=-1)
5335 writemsg("\n", noiselevel=-1)
5338 if mydo == "fetchall":
5342 parallel_fetchonly = mydo in ("fetch", "fetchall") and \
5343 "PORTAGE_PARALLEL_FETCHONLY" in mysettings
5345 if mydo not in clean_phases and not os.path.exists(myebuild):
5346 writemsg("!!! doebuild: %s not found for %s\n" % (myebuild, mydo),
5350 global _doebuild_manifest_exempt_depend
5352 if "strict" in features and \
5353 "digest" not in features and \
5354 tree == "porttree" and \
5355 mydo not in ("digest", "manifest", "help") and \
5356 not _doebuild_manifest_exempt_depend:
5357 # Always verify the ebuild checksums before executing it.
5358 global _doebuild_manifest_cache, _doebuild_broken_ebuilds, \
5359 _doebuild_broken_ebuilds
5361 if myebuild in _doebuild_broken_ebuilds:
5364 pkgdir = os.path.dirname(myebuild)
5365 manifest_path = os.path.join(pkgdir, "Manifest")
5367 # Avoid checking the same Manifest several times in a row during a
5368 # regen with an empty cache.
5369 if _doebuild_manifest_cache is None or \
5370 _doebuild_manifest_cache.getFullname() != manifest_path:
5371 _doebuild_manifest_cache = None
5372 if not os.path.exists(manifest_path):
5373 out = portage.output.EOutput()
5374 out.eerror("Manifest not found for '%s'" % (myebuild,))
5375 _doebuild_broken_ebuilds.add(myebuild)
5377 mf = Manifest(pkgdir, mysettings["DISTDIR"])
5380 mf = _doebuild_manifest_cache
5383 mf.checkFileHashes("EBUILD", os.path.basename(myebuild))
5385 out = portage.output.EOutput()
5386 out.eerror("Missing digest for '%s'" % (myebuild,))
5387 _doebuild_broken_ebuilds.add(myebuild)
5389 except portage.exception.FileNotFound:
5390 out = portage.output.EOutput()
5391 out.eerror("A file listed in the Manifest " + \
5392 "could not be found: '%s'" % (myebuild,))
5393 _doebuild_broken_ebuilds.add(myebuild)
5395 except portage.exception.DigestException, e:
5396 out = portage.output.EOutput()
5397 out.eerror("Digest verification failed:")
5398 out.eerror("%s" % e.value[0])
5399 out.eerror("Reason: %s" % e.value[1])
5400 out.eerror("Got: %s" % e.value[2])
5401 out.eerror("Expected: %s" % e.value[3])
5402 _doebuild_broken_ebuilds.add(myebuild)
5405 if mf.getFullname() in _doebuild_broken_manifests:
5408 if mf is not _doebuild_manifest_cache:
5410 # Make sure that all of the ebuilds are
5411 # actually listed in the Manifest.
5412 for f in os.listdir(pkgdir):
5413 if f.endswith(".ebuild") and not mf.hasFile("EBUILD", f):
5414 f = os.path.join(pkgdir, f)
5415 if f not in _doebuild_broken_ebuilds:
5416 out = portage.output.EOutput()
5417 out.eerror("A file is not listed in the " + \
5418 "Manifest: '%s'" % (f,))
5419 _doebuild_broken_manifests.add(manifest_path)
5422 # Only cache it if the above stray files test succeeds.
5423 _doebuild_manifest_cache = mf
5425 def exit_status_check(retval):
5426 if retval != os.EX_OK:
5428 msg = _doebuild_exit_status_check(mydo, mysettings)
5431 from textwrap import wrap
5432 from portage.elog.messages import eerror
5433 for l in wrap(msg, 72):
5434 eerror(l, phase=mydo, key=mysettings.mycpv)
5437 # Note: PORTAGE_BIN_PATH may differ from the global
5438 # constant when portage is reinstalling itself.
5439 portage_bin_path = mysettings["PORTAGE_BIN_PATH"]
5440 ebuild_sh_binary = os.path.join(portage_bin_path,
5441 os.path.basename(EBUILD_SH_BINARY))
5442 misc_sh_binary = os.path.join(portage_bin_path,
5443 os.path.basename(MISC_SH_BINARY))
5446 builddir_lock = None
5451 if mydo in ("digest", "manifest", "help"):
5452 # Temporarily exempt the depend phase from manifest checks, in case
5453 # aux_get calls trigger cache generation.
5454 _doebuild_manifest_exempt_depend += 1
5456 # If we don't need much space and we don't need a constant location,
5457 # we can temporarily override PORTAGE_TMPDIR with a random temp dir
5458 # so that there's no need for locking and it can be used even if the
5459 # user isn't in the portage group.
5460 if mydo in ("info",):
5461 from tempfile import mkdtemp
5463 tmpdir_orig = mysettings["PORTAGE_TMPDIR"]
5464 mysettings["PORTAGE_TMPDIR"] = tmpdir
5466 doebuild_environment(myebuild, mydo, myroot, mysettings, debug,
5469 if mydo in clean_phases:
5470 retval = spawn(_shell_quote(ebuild_sh_binary) + " clean",
5471 mysettings, debug=debug, fd_pipes=fd_pipes, free=1,
5472 logfile=None, returnpid=returnpid)
5475 # get possible slot information from the deps file
5476 if mydo == "depend":
5477 writemsg("!!! DEBUG: dbkey: %s\n" % str(dbkey), 2)
5478 droppriv = "userpriv" in mysettings.features
5480 mypids = spawn(_shell_quote(ebuild_sh_binary) + " depend",
5481 mysettings, fd_pipes=fd_pipes, returnpid=True,
5484 elif isinstance(dbkey, dict):
5485 mysettings["dbkey"] = ""
5488 0:sys.stdin.fileno(),
5489 1:sys.stdout.fileno(),
5490 2:sys.stderr.fileno(),
5492 mypids = spawn(_shell_quote(ebuild_sh_binary) + " depend",
5494 fd_pipes=fd_pipes, returnpid=True, droppriv=droppriv)
5495 os.close(pw) # belongs exclusively to the child process now
5499 mybytes.append(os.read(pr, maxbytes))
5503 mybytes = "".join(mybytes)
5505 for k, v in izip(auxdbkeys, mybytes.splitlines()):
5507 retval = os.waitpid(mypids[0], 0)[1]
5508 portage.process.spawned_pids.remove(mypids[0])
5509 # If it got a signal, return the signal that was sent, but
5510 # shift in order to distinguish it from a return value. (just
5511 # like portage.process.spawn() would do).
5513 retval = (retval & 0xff) << 8
5515 # Otherwise, return its exit code.
5516 retval = retval >> 8
5517 if retval == os.EX_OK and len(dbkey) != len(auxdbkeys):
5518 # Don't trust bash's returncode if the
5519 # number of lines is incorrect.
5523 mysettings["dbkey"] = dbkey
5525 mysettings["dbkey"] = \
5526 os.path.join(mysettings.depcachedir, "aux_db_key_temp")
5528 return spawn(_shell_quote(ebuild_sh_binary) + " depend",
5532 # Validate dependency metadata here to ensure that ebuilds with invalid
5533 # data are never installed via the ebuild command. Don't bother when
5534 # returnpid == True since there's no need to do this every time emerge
5537 rval = _validate_deps(mysettings, myroot, mydo, mydbapi)
5538 if rval != os.EX_OK:
5541 if "PORTAGE_TMPDIR" not in mysettings or \
5542 not os.path.isdir(mysettings["PORTAGE_TMPDIR"]):
5543 writemsg("The directory specified in your " + \
5544 "PORTAGE_TMPDIR variable, '%s',\n" % \
5545 mysettings.get("PORTAGE_TMPDIR", ""), noiselevel=-1)
5546 writemsg("does not exist. Please create this directory or " + \
5547 "correct your PORTAGE_TMPDIR setting.\n", noiselevel=-1)
5550 # as some people use a separate PORTAGE_TMPDIR mount
5551 # we prefer that as the checks below would otherwise be pointless
5553 if os.path.exists(os.path.join(mysettings["PORTAGE_TMPDIR"], "portage")):
5554 checkdir = os.path.join(mysettings["PORTAGE_TMPDIR"], "portage")
5556 checkdir = mysettings["PORTAGE_TMPDIR"]
5558 if not os.access(checkdir, os.W_OK):
5559 writemsg("%s is not writable.\n" % checkdir + \
5560 "Likely cause is that you've mounted it as readonly.\n" \
5564 from tempfile import NamedTemporaryFile
5565 fd = NamedTemporaryFile(prefix="exectest-", dir=checkdir)
5566 os.chmod(fd.name, 0755)
5567 if not os.access(fd.name, os.X_OK):
5568 writemsg("Can not execute files in %s\n" % checkdir + \
5569 "Likely cause is that you've mounted it with one of the\n" + \
5570 "following mount options: 'noexec', 'user', 'users'\n\n" + \
5571 "Please make sure that portage can execute files in this directory.\n" \
5578 if mydo == "unmerge":
5579 return unmerge(mysettings["CATEGORY"],
5580 mysettings["PF"], myroot, mysettings, vartree=vartree)
5582 # Build directory creation isn't required for any of these.
5583 have_build_dirs = False
5584 if not parallel_fetchonly and mydo not in ("digest", "help", "manifest"):
5585 mystatus = prepare_build_dirs(myroot, mysettings, cleanup)
5588 have_build_dirs = True
5590 # emerge handles logging externally
5592 # PORTAGE_LOG_FILE is set by the
5593 # above prepare_build_dirs() call.
5594 logfile = mysettings.get("PORTAGE_LOG_FILE")
5597 env_file = os.path.join(mysettings["T"], "environment")
5601 env_stat = os.stat(env_file)
5603 if e.errno != errno.ENOENT:
5607 saved_env = os.path.join(
5608 os.path.dirname(myebuild), "environment.bz2")
5609 if not os.path.isfile(saved_env):
5613 "bzip2 -dc %s > %s" % \
5614 (_shell_quote(saved_env),
5615 _shell_quote(env_file)))
5617 env_stat = os.stat(env_file)
5619 if e.errno != errno.ENOENT:
5622 if os.WIFEXITED(retval) and \
5623 os.WEXITSTATUS(retval) == os.EX_OK and \
5624 env_stat and env_stat.st_size > 0:
5625 # This is a signal to ebuild.sh, so that it knows to filter
5626 # out things like SANDBOX_{DENY,PREDICT,READ,WRITE} that
5627 # would be preserved between normal phases.
5628 open(env_file + ".raw", "w")
5630 writemsg(("!!! Error extracting saved " + \
5631 "environment: '%s'\n") % \
5632 saved_env, noiselevel=-1)
5636 if e.errno != errno.ENOENT:
5643 for var in ("ARCH", ):
5644 value = mysettings.get(var)
5645 if value and value.strip():
5647 msg = ("%s is not set... " % var) + \
5648 ("Are you missing the '%setc/make.profile' symlink? " % \
5649 mysettings["PORTAGE_CONFIGROOT"]) + \
5650 "Is the symlink correct? " + \
5651 "Is your portage tree complete?"
5652 from portage.elog.messages import eerror
5653 from textwrap import wrap
5654 for line in wrap(msg, 70):
5655 eerror(line, phase="setup", key=mysettings.mycpv)
5656 from portage.elog import elog_process
5657 elog_process(mysettings.mycpv, mysettings)
5659 del env_file, env_stat, saved_env
5660 _doebuild_exit_status_unlink(
5661 mysettings.get("EBUILD_EXIT_STATUS_FILE"))
5663 mysettings.pop("EBUILD_EXIT_STATUS_FILE", None)
5665 # if any of these are being called, handle them -- running them out of
5666 # the sandbox -- and stop now.
5668 return spawn(_shell_quote(ebuild_sh_binary) + " " + mydo,
5669 mysettings, debug=debug, free=1, logfile=logfile)
5670 elif mydo == "setup":
5672 _shell_quote(ebuild_sh_binary) + " " + mydo, mysettings,
5673 debug=debug, free=1, logfile=logfile, fd_pipes=fd_pipes,
5674 returnpid=returnpid)
5677 retval = exit_status_check(retval)
5679 """ Privileged phases may have left files that need to be made
5680 writable to a less privileged user."""
5681 apply_recursive_permissions(mysettings["T"],
5682 uid=portage_uid, gid=portage_gid, dirmode=070, dirmask=0,
5683 filemode=060, filemask=0)
5685 elif mydo == "preinst":
5686 phase_retval = spawn(
5687 _shell_quote(ebuild_sh_binary) + " " + mydo,
5688 mysettings, debug=debug, free=1, logfile=logfile,
5689 fd_pipes=fd_pipes, returnpid=returnpid)
5694 phase_retval = exit_status_check(phase_retval)
5695 if phase_retval == os.EX_OK:
5696 _doebuild_exit_status_unlink(
5697 mysettings.get("EBUILD_EXIT_STATUS_FILE"))
5698 mysettings.pop("EBUILD_PHASE", None)
5699 phase_retval = spawn(
5700 " ".join(_post_pkg_preinst_cmd(mysettings)),
5701 mysettings, debug=debug, free=1, logfile=logfile)
5702 phase_retval = exit_status_check(phase_retval)
5703 if phase_retval != os.EX_OK:
5704 writemsg("!!! post preinst failed; exiting.\n",
5707 elif mydo == "postinst":
5708 phase_retval = spawn(
5709 _shell_quote(ebuild_sh_binary) + " " + mydo,
5710 mysettings, debug=debug, free=1, logfile=logfile,
5711 fd_pipes=fd_pipes, returnpid=returnpid)
5716 phase_retval = exit_status_check(phase_retval)
5717 if phase_retval == os.EX_OK:
5718 _doebuild_exit_status_unlink(
5719 mysettings.get("EBUILD_EXIT_STATUS_FILE"))
5720 mysettings.pop("EBUILD_PHASE", None)
5721 phase_retval = spawn(" ".join(_post_pkg_postinst_cmd(mysettings)),
5722 mysettings, debug=debug, free=1, logfile=logfile)
5723 phase_retval = exit_status_check(phase_retval)
5724 if phase_retval != os.EX_OK:
5725 writemsg("!!! post postinst failed; exiting.\n",
5728 elif mydo in ("prerm", "postrm", "config", "info"):
5730 _shell_quote(ebuild_sh_binary) + " " + mydo,
5731 mysettings, debug=debug, free=1, logfile=logfile,
5732 fd_pipes=fd_pipes, returnpid=returnpid)
5737 retval = exit_status_check(retval)
5740 mycpv = "/".join((mysettings["CATEGORY"], mysettings["PF"]))
5742 emerge_skip_distfiles = returnpid
5743 # Only try and fetch the files if we are going to need them ...
5744 # otherwise, if user has FEATURES=noauto and they run `ebuild clean
5745 # unpack compile install`, we will try and fetch 4 times :/
5746 need_distfiles = not emerge_skip_distfiles and \
5747 (mydo in ("fetch", "unpack") or \
5748 mydo not in ("digest", "manifest") and "noauto" not in features)
5749 alist = mysettings.configdict["pkg"].get("A")
5750 aalist = mysettings.configdict["pkg"].get("AA")
5751 if need_distfiles or alist is None or aalist is None:
5752 # Make sure we get the correct tree in case there are overlays.
5753 mytree = os.path.realpath(
5754 os.path.dirname(os.path.dirname(mysettings["O"])))
5755 useflags = mysettings["PORTAGE_USE"].split()
5757 alist = mydbapi.getFetchMap(mycpv, useflags=useflags,
5759 aalist = mydbapi.getFetchMap(mycpv, mytree=mytree)
5760 except portage.exception.InvalidDependString, e:
5761 writemsg("!!! %s\n" % str(e), noiselevel=-1)
5762 writemsg("!!! Invalid SRC_URI for '%s'.\n" % mycpv,
5766 mysettings.configdict["pkg"]["A"] = " ".join(alist)
5767 mysettings.configdict["pkg"]["AA"] = " ".join(aalist)
5769 alist = set(alist.split())
5770 aalist = set(aalist.split())
5771 if ("mirror" in features) or fetchall:
5779 # Files are already checked inside fetch(),
5780 # so do not check them again.
5784 if not emerge_skip_distfiles and \
5785 need_distfiles and not fetch(
5786 fetchme, mysettings, listonly=listonly, fetchonly=fetchonly):
5789 if mydo == "fetch" and listonly:
5793 if mydo == "manifest":
5794 return not digestgen(aalist, mysettings, overwrite=1,
5795 manifestonly=1, myportdb=mydbapi)
5796 elif mydo == "digest":
5797 return not digestgen(aalist, mysettings, overwrite=1,
5799 elif "digest" in mysettings.features:
5800 digestgen(aalist, mysettings, overwrite=0, myportdb=mydbapi)
5801 except portage.exception.PermissionDenied, e:
5802 writemsg("!!! Permission Denied: %s\n" % (e,), noiselevel=-1)
5803 if mydo in ("digest", "manifest"):
5806 # See above comment about fetching only when needed
5807 if not emerge_skip_distfiles and \
5808 not digestcheck(checkme, mysettings, "strict" in features):
5814 # remove PORTAGE_ACTUAL_DISTDIR once cvs/svn is supported via SRC_URI
5815 if (mydo != "setup" and "noauto" not in features) or mydo == "unpack":
5816 orig_distdir = mysettings["DISTDIR"]
5817 mysettings["PORTAGE_ACTUAL_DISTDIR"] = orig_distdir
5818 edpath = mysettings["DISTDIR"] = \
5819 os.path.join(mysettings["PORTAGE_BUILDDIR"], "distdir")
5820 portage.util.ensure_dirs(edpath, gid=portage_gid, mode=0755)
5822 # Remove any unexpected files or directories.
5823 for x in os.listdir(edpath):
5824 symlink_path = os.path.join(edpath, x)
5825 st = os.lstat(symlink_path)
5826 if x in alist and stat.S_ISLNK(st.st_mode):
5828 if stat.S_ISDIR(st.st_mode):
5829 shutil.rmtree(symlink_path)
5831 os.unlink(symlink_path)
5833 # Check for existing symlinks and recreate if necessary.
5835 symlink_path = os.path.join(edpath, x)
5836 target = os.path.join(orig_distdir, x)
5838 link_target = os.readlink(symlink_path)
5840 os.symlink(target, symlink_path)
5842 if link_target != target:
5843 os.unlink(symlink_path)
5844 os.symlink(target, symlink_path)
5846 #initial dep checks complete; time to process main commands
5848 restrict = mysettings["PORTAGE_RESTRICT"].split()
5849 nosandbox = (("userpriv" in features) and \
5850 ("usersandbox" not in features) and \
5851 "userpriv" not in restrict and \
5852 "nouserpriv" not in restrict)
5853 if nosandbox and ("userpriv" not in features or \
5854 "userpriv" in restrict or \
5855 "nouserpriv" in restrict):
5856 nosandbox = ("sandbox" not in features and \
5857 "usersandbox" not in features)
5859 sesandbox = mysettings.selinux_enabled() and \
5860 "sesandbox" in mysettings.features
5862 droppriv = "userpriv" in mysettings.features and \
5863 "userpriv" not in restrict and \
5866 fakeroot = "fakeroot" in mysettings.features
5868 ebuild_sh = _shell_quote(ebuild_sh_binary) + " %s"
5869 misc_sh = _shell_quote(misc_sh_binary) + " dyn_%s"
5871 # args are for the to spawn function
5873 "setup": {"cmd":ebuild_sh, "args":{"droppriv":0, "free":1, "sesandbox":0, "fakeroot":0}},
5874 "unpack": {"cmd":ebuild_sh, "args":{"droppriv":droppriv, "free":0, "sesandbox":sesandbox, "fakeroot":0}},
5875 "prepare": {"cmd":ebuild_sh, "args":{"droppriv":droppriv, "free":0, "sesandbox":sesandbox, "fakeroot":0}},
5876 "configure":{"cmd":ebuild_sh, "args":{"droppriv":droppriv, "free":nosandbox, "sesandbox":sesandbox, "fakeroot":0}},
5877 "compile": {"cmd":ebuild_sh, "args":{"droppriv":droppriv, "free":nosandbox, "sesandbox":sesandbox, "fakeroot":0}},
5878 "test": {"cmd":ebuild_sh, "args":{"droppriv":droppriv, "free":nosandbox, "sesandbox":sesandbox, "fakeroot":0}},
5879 "install": {"cmd":ebuild_sh, "args":{"droppriv":0, "free":0, "sesandbox":sesandbox, "fakeroot":fakeroot}},
5880 "rpm": {"cmd":misc_sh, "args":{"droppriv":0, "free":0, "sesandbox":0, "fakeroot":fakeroot}},
5881 "package": {"cmd":misc_sh, "args":{"droppriv":0, "free":0, "sesandbox":0, "fakeroot":fakeroot}},
5884 # merge the deps in so we have again a 'full' actionmap
5885 # be glad when this can die.
5887 if len(actionmap_deps.get(x, [])):
5888 actionmap[x]["dep"] = ' '.join(actionmap_deps[x])
5890 if mydo in actionmap:
5891 if mydo == "package":
5892 # Make sure the package directory exists before executing
5893 # this phase. This can raise PermissionDenied if
5894 # the current user doesn't have write access to $PKGDIR.
5895 parent_dir = os.path.join(mysettings["PKGDIR"],
5896 mysettings["CATEGORY"])
5897 portage.util.ensure_dirs(parent_dir)
5898 if not os.access(parent_dir, os.W_OK):
5899 raise portage.exception.PermissionDenied(
5900 "access('%s', os.W_OK)" % parent_dir)
5901 retval = spawnebuild(mydo,
5902 actionmap, mysettings, debug, logfile=logfile,
5903 fd_pipes=fd_pipes, returnpid=returnpid)
5904 elif mydo=="qmerge":
5905 # check to ensure install was run. this *only* pops up when users
5906 # forget it and are using ebuild
5907 if not os.path.exists(
5908 os.path.join(mysettings["PORTAGE_BUILDDIR"], ".installed")):
5909 writemsg("!!! mydo=qmerge, but the install phase has not been run\n",
5912 # qmerge is a special phase that implies noclean.
5913 if "noclean" not in mysettings.features:
5914 mysettings.features.append("noclean")
5915 #qmerge is specifically not supposed to do a runtime dep check
5917 mysettings["CATEGORY"], mysettings["PF"], mysettings["D"],
5918 os.path.join(mysettings["PORTAGE_BUILDDIR"], "build-info"),
5919 myroot, mysettings, myebuild=mysettings["EBUILD"], mytree=tree,
5920 mydbapi=mydbapi, vartree=vartree, prev_mtimes=prev_mtimes)
5922 retval = spawnebuild("install", actionmap, mysettings, debug,
5923 alwaysdep=1, logfile=logfile, fd_pipes=fd_pipes,
5924 returnpid=returnpid)
5925 retval = exit_status_check(retval)
5926 if retval != os.EX_OK:
5927 # The merge phase handles this already. Callers don't know how
5928 # far this function got, so we have to call elog_process() here
5929 # so that it's only called once.
5930 from portage.elog import elog_process
5931 elog_process(mysettings.mycpv, mysettings)
5932 if retval == os.EX_OK:
5933 retval = merge(mysettings["CATEGORY"], mysettings["PF"],
5934 mysettings["D"], os.path.join(mysettings["PORTAGE_BUILDDIR"],
5935 "build-info"), myroot, mysettings,
5936 myebuild=mysettings["EBUILD"], mytree=tree, mydbapi=mydbapi,
5937 vartree=vartree, prev_mtimes=prev_mtimes)
5939 print "!!! Unknown mydo:",mydo
5947 mysettings["PORTAGE_TMPDIR"] = tmpdir_orig
5948 shutil.rmtree(tmpdir)
5950 portage.locks.unlockdir(builddir_lock)
5952 # Make sure that DISTDIR is restored to it's normal value before we return!
5953 if "PORTAGE_ACTUAL_DISTDIR" in mysettings:
5954 mysettings["DISTDIR"] = mysettings["PORTAGE_ACTUAL_DISTDIR"]
5955 del mysettings["PORTAGE_ACTUAL_DISTDIR"]
5959 if os.stat(logfile).st_size == 0:
5964 if mydo in ("digest", "manifest", "help"):
5965 # If necessary, depend phase has been triggered by aux_get calls
5966 # and the exemption is no longer needed.
5967 _doebuild_manifest_exempt_depend -= 1
# Validate the dependency/metadata strings of the current ebuild (DEPEND,
# RDEPEND, PDEPEND, plus LICENSE/PROPERTIES/PROVIDE/RESTRICT/SRC_URI and
# SLOT) and report any parse errors before an install can proceed.
# NOTE(review): this region appears to be an incomplete excerpt -- several
# statements (e.g. the initialization of "msgs", the try: wrappers, and the
# final return value) are not visible here; confirm against upstream.
5969 def _validate_deps(mysettings, myroot, mydo, mydbapi):
# Phases for which invalid dependency strings are tolerated (cleanup/help
# phases must still be runnable even with broken metadata).
5971 invalid_dep_exempt_phases = \
5972 set(["clean", "cleanrm", "help", "prerm", "postrm"])
5973 dep_keys = ["DEPEND", "RDEPEND", "PDEPEND"]
5974 misc_keys = ["LICENSE", "PROPERTIES", "PROVIDE", "RESTRICT", "SRC_URI"]
5975 other_keys = ["SLOT"]
5976 all_keys = dep_keys + misc_keys + other_keys
# Fetch all metadata values for the current package in a single aux_get call.
5977 metadata = dict(izip(all_keys,
5978 mydbapi.aux_get(mysettings.mycpv, all_keys)))
# Minimal stand-in for a real tree object; dep_check() only needs a .dbapi
# attribute (presumably set in the __init__ body, which is missing here).
5980 class FakeTree(object):
5981 def __init__(self, mydb):
5983 dep_check_trees = {myroot:{}}
5984 dep_check_trees[myroot]["porttree"] = \
5985 FakeTree(fakedbapi(settings=mysettings))
# Syntax-check each *DEPEND string; failures are accumulated in msgs.
5988 for dep_type in dep_keys:
5989 mycheck = dep_check(metadata[dep_type], None, mysettings,
5990 myuse="all", myroot=myroot, trees=dep_check_trees)
5992 msgs.append(" %s: %s\n %s\n" % (
5993 dep_type, metadata[dep_type], mycheck[1]))
# The misc keys are only paren/use-reduced to detect syntax errors.
5997 portage.dep.use_reduce(
5998 portage.dep.paren_reduce(metadata[k]), matchall=True)
5999 except portage.exception.InvalidDependString, e:
6000 msgs.append(" %s: %s\n %s\n" % (
6001 k, metadata[k], str(e)))
# An empty SLOT is always a metadata error.
6003 if not metadata["SLOT"]:
6004 msgs.append(" SLOT is undefined\n")
# Report the accumulated problems through the logging-aware writer.
6007 portage.util.writemsg_level("Error(s) in metadata for '%s':\n" % \
6008 (mysettings.mycpv,), level=logging.ERROR, noiselevel=-1)
6010 portage.util.writemsg_level(x,
6011 level=logging.ERROR, noiselevel=-1)
# Only non-exempt phases treat metadata problems as fatal.
6012 if mydo not in invalid_dep_exempt_phases:
def _movefile(src, dest, **kwargs):
	"""Wrapper around movefile() that raises a PortageException when the
	move fails (movefile() signals failure by returning None)."""
	result = movefile(src, dest, **kwargs)
	if result is not None:
		return
	raise portage.exception.PortageException(
		"mv '%s' '%s'" % (src, dest))
# NOTE(review): this whole function appears to be an incomplete excerpt --
# many lines (try: headers, else: branches, sys.exit/raise bodies, the final
# "return newmtime") are missing.  Comments below describe only what the
# visible lines show; confirm against the upstream portage source.
6025 def movefile(src, dest, newmtime=None, sstat=None, mysettings=None,
6026 hardlink_candidates=None):
6027 """moves a file from src to dest, preserving all permissions and attributes; mtime will
6028 be preserved even when moving across filesystems. Returns true on success and false on
6029 failure. Move is atomic."""
6030 #print "movefile("+str(src)+","+str(dest)+","+str(newmtime)+","+str(sstat)+")"
# Fall back to the module-level settings object when no config is supplied.
6032 if mysettings is None:
6034 mysettings = settings
6035 selinux_enabled = mysettings.selinux_enabled()
6040 except SystemExit, e:
6042 except Exception, e:
6043 print "!!! Stating source file failed... movefile()"
# Stat the destination; if it doesn't exist, stat its parent directory
# instead (used below for the FreeBSD chflags handling).
6049 dstat=os.lstat(dest)
6050 except (OSError, IOError):
6051 dstat=os.lstat(os.path.dirname(dest))
# FreeBSD: clear any file flags on the destination and its parent so the
# move can proceed; pflags is restored at the end of the function.
6055 if destexists and dstat.st_flags != 0:
6056 bsd_chflags.lchflags(dest, 0)
6057 # Use normal stat/chflags for the parent since we want to
6058 # follow any symlinks to the real parent directory.
6059 pflags = os.stat(os.path.dirname(dest)).st_flags
6061 bsd_chflags.chflags(os.path.dirname(dest), 0)
6064 if stat.S_ISLNK(dstat[stat.ST_MODE]):
6068 except SystemExit, e:
6070 except Exception, e:
# Symlink source: recreate the link at dest rather than copying data.
6073 if stat.S_ISLNK(sstat[stat.ST_MODE]):
6075 target=os.readlink(src)
# Strip the image-directory prefix (${D}) from link targets that point
# inside the install image.
6076 if mysettings and mysettings["D"]:
6077 if target.find(mysettings["D"])==0:
6078 target=target[len(mysettings["D"]):]
6079 if destexists and not stat.S_ISDIR(dstat[stat.ST_MODE]):
6082 sid = selinux.get_lsid(src)
6083 selinux.secure_symlink(target,dest,sid)
6085 os.symlink(target,dest)
6086 lchown(dest,sstat[stat.ST_UID],sstat[stat.ST_GID])
6087 # utime() only works on the target of a symlink, so it's not
6088 # possible to perserve mtime on symlinks.
6089 return os.lstat(dest)[stat.ST_MTIME]
6090 except SystemExit, e:
6092 except Exception, e:
6093 print "!!! failed to properly create symlink:"
6094 print "!!!",dest,"->",target
6099 # Since identical files might be merged to multiple filesystems,
6100 # so os.link() calls might fail for some paths, so try them all.
6101 # For atomic replacement, first create the link as a temp file
6102 # and them use os.rename() to replace the destination.
6103 if hardlink_candidates:
6104 head, tail = os.path.split(dest)
6105 hardlink_tmp = os.path.join(head, ".%s._portage_merge_.%s" % \
6106 (tail, os.getpid()))
# Remove any stale temp link from a previous attempt (ENOENT is fine).
6108 os.unlink(hardlink_tmp)
6110 if e.errno != errno.ENOENT:
6111 writemsg("!!! Failed to remove hardlink temp file: %s\n" % \
6112 (hardlink_tmp,), noiselevel=-1)
6113 writemsg("!!! %s\n" % (e,), noiselevel=-1)
6116 for hardlink_src in hardlink_candidates:
6118 os.link(hardlink_src, hardlink_tmp)
6123 os.rename(hardlink_tmp, dest)
6125 writemsg("!!! Failed to rename %s to %s\n" % \
6126 (hardlink_tmp, dest), noiselevel=-1)
6127 writemsg("!!! %s\n" % (e,), noiselevel=-1)
# Same-device (or SELinux) fast path: a plain rename is atomic.
6134 renamefailed = False
6135 if not hardlinked and (selinux_enabled or sstat.st_dev == dstat.st_dev):
6138 ret=selinux.secure_rename(src,dest)
6140 ret=os.rename(src,dest)
6142 except SystemExit, e:
6144 except Exception, e:
# Anything other than a cross-device error is unexpected and fatal.
6145 if e[0]!=errno.EXDEV:
6146 # Some random error.
6147 print "!!! Failed to move",src,"to",dest
6150 # Invalid cross-device-link 'bind' mounted or actually Cross-Device
# Cross-device fallback: copy to "dest#new" then rename over dest so the
# replacement of dest itself remains atomic.
6153 if stat.S_ISREG(sstat[stat.ST_MODE]):
6154 try: # For safety copy then move it over.
6156 selinux.secure_copy(src,dest+"#new")
6157 selinux.secure_rename(dest+"#new",dest)
6159 shutil.copyfile(src,dest+"#new")
6160 os.rename(dest+"#new",dest)
6162 except SystemExit, e:
6164 except Exception, e:
6165 print '!!! copy',src,'->',dest,'failed.'
6169 #we don't yet handle special, so we need to fall back to /bin/mv
# Device nodes / fifos etc.: shell out to the external move binary.
6171 a=commands.getstatusoutput(MOVE_BINARY+" -c -f "+"'"+src+"' '"+dest+"'")
6173 a=commands.getstatusoutput(MOVE_BINARY+" -f "+"'"+src+"' '"+dest+"'")
6175 print "!!! Failed to move special file:"
6176 print "!!! '"+src+"' to '"+dest+"'"
6178 return None # failure
# Restore ownership/permissions after a copy-based move (rename preserves
# them already, hence the hardlinked/renamefailed guards in the original).
6181 if stat.S_ISLNK(sstat[stat.ST_MODE]):
6182 lchown(dest,sstat[stat.ST_UID],sstat[stat.ST_GID])
6184 os.chown(dest,sstat[stat.ST_UID],sstat[stat.ST_GID])
6185 os.chmod(dest, stat.S_IMODE(sstat[stat.ST_MODE])) # Sticky is reset on chown
6187 except SystemExit, e:
6189 except Exception, e:
6190 print "!!! Failed to chown/chmod/unlink in movefile()"
# Determine the mtime to report: either the caller-supplied newmtime or
# the source's original mtime, applied to dest via utime().
6197 newmtime = long(os.stat(dest).st_mtime)
6199 if newmtime is not None:
6200 os.utime(dest, (newmtime, newmtime))
6202 os.utime(dest, (sstat.st_atime, sstat.st_mtime))
6203 newmtime = long(sstat.st_mtime)
6205 # The utime can fail here with EPERM even though the move succeeded.
6206 # Instead of failing, use stat to return the mtime if possible.
6208 newmtime = long(os.stat(dest).st_mtime)
6210 writemsg("!!! Failed to stat in movefile()\n", noiselevel=-1)
6211 writemsg("!!! %s\n" % dest, noiselevel=-1)
6212 writemsg("!!! %s\n" % str(e), noiselevel=-1)
6216 # Restore the flags we saved before moving
6218 bsd_chflags.chflags(os.path.dirname(dest), pflags)
# Merge an already-built package image (pkgloc) into myroot by delegating
# to a dblink instance.  NOTE(review): excerpt appears truncated -- the
# tail of the parameter list and the error return after the permission
# check are not visible here.
6222 def merge(mycat, mypkg, pkgloc, infloc, myroot, mysettings, myebuild=None,
6223 mytree=None, mydbapi=None, vartree=None, prev_mtimes=None, blockers=None,
# Refuse to merge into a root the current user cannot write to.
6225 if not os.access(myroot, os.W_OK):
6226 writemsg("Permission denied: access('%s', W_OK)\n" % myroot,
# All real merge work happens inside dblink.merge().
6229 mylink = dblink(mycat, mypkg, myroot, mysettings, treetype=mytree,
6230 vartree=vartree, blockers=blockers, scheduler=scheduler)
6231 return mylink.merge(pkgloc, infloc, myroot, myebuild,
6232 mydbapi=mydbapi, prev_mtimes=prev_mtimes)
# Unmerge an installed package by delegating to dblink.unmerge().
# NOTE(review): excerpt appears truncated -- the try/finally structure,
# lock handling and return statement are not visible here.
6234 def unmerge(cat, pkg, myroot, mysettings, mytrimworld=1, vartree=None,
6235 ldpath_mtimes=None, scheduler=None):
6236 mylink = dblink(cat, pkg, myroot, mysettings, treetype="vartree",
6237 vartree=vartree, scheduler=scheduler)
# dblink may substitute its own vartree; use that from here on.
6238 vartree = mylink.vartree
# Refresh the preserved-libs registry before removing files.
6242 vartree.dbapi.plib_registry.load()
6243 vartree.dbapi.plib_registry.pruneNonExisting()
6244 retval = mylink.unmerge(trimworld=mytrimworld, cleanup=1,
6245 ldpath_mtimes=ldpath_mtimes)
6246 if retval == os.EX_OK:
# Drop the cached library-consumer map, which is stale after an unmerge.
6251 vartree.dbapi.linkmap._clear_cache()
def getCPFromCPV(mycpv):
	"""Return only the category/package ("cp") portion of a cpv string,
	as determined by pkgsplit()."""
	split_result = pkgsplit(mycpv)
	return split_result[0]
# Recursively rewrite old-style virtual atoms in a parsed dependency
# structure into their provider atoms (per the profile's virtuals map).
# NOTE(review): excerpt appears truncated -- the iteration header, the
# multi-choice "||" expansion and the return statement are missing.
6258 def dep_virtual(mysplit, mysettings):
6259 "Does virtual dependency conversion"
6261 myvirtuals = mysettings.getvirtuals()
# Nested groups are converted recursively.
6263 if isinstance(x, list):
6264 newsplit.append(dep_virtual(x, mysettings))
6267 mychoices = myvirtuals.get(mykey, None)
# A single provider can be substituted in place.
6269 if len(mychoices) == 1:
6270 a = x.replace(mykey, mychoices[0])
6273 # blocker needs "and" not "or(||)".
# Multiple providers: accumulate one substituted atom per choice.
6278 a.append(x.replace(mykey, y))
# NOTE(review): this function appears to be an incomplete excerpt -- the
# main iteration header, several if/else and try lines, the sort call and
# the return are not visible.  Comments describe only the visible lines.
6284 def _expand_new_virtuals(mysplit, edebug, mydbapi, mysettings, myroot="/",
6285 trees=None, use_mask=None, use_force=None, **kwargs):
6286 """Recursively expand new-style virtuals so as to collapse one or more
6287 levels of indirection. In dep_zapdeps, new-style virtuals will be assigned
6288 zero cost regardless of whether or not they are currently installed. Virtual
6289 blockers are supported but only when the virtual expands to a single
6290 atom because it wouldn't necessarily make sense to block all the components
6291 of a compound virtual. When more than one new-style virtual is matched,
6292 the matches are sorted from highest to lowest versions and the atom is
6293 expanded to || ( highest match ... lowest match )."""
6295 # According to GLEP 37, RDEPEND is the only dependency type that is valid
6296 # for new-style virtuals. Repoman should enforce this.
6297 dep_keys = ["RDEPEND", "DEPEND", "PDEPEND"]
6298 portdb = trees[myroot]["porttree"].dbapi
# repoman mode is detected by the type of the dbapi passed in.
6299 repoman = isinstance(mydbapi, portdbapi)
6300 if kwargs["use_binaries"]:
6301 portdb = trees[myroot]["bintree"].dbapi
6302 myvirtuals = mysettings.getvirtuals()
6303 myuse = kwargs["myuse"]
# Nested dependency groups are expanded recursively.
6308 elif isinstance(x, list):
6309 newsplit.append(_expand_new_virtuals(x, edebug, mydbapi,
6310 mysettings, myroot=myroot, trees=trees, use_mask=use_mask,
6311 use_force=use_force, **kwargs))
# Normalize the element to an Atom; in strict mode an unparsable atom
# is a fatal ParseError.
6314 if not isinstance(x, portage.dep.Atom):
6316 x = portage.dep.Atom(x)
6317 except portage.exception.InvalidAtom:
6318 if portage.dep._dep_check_strict:
6319 raise portage.exception.ParseError(
6320 "invalid atom: '%s'" % x)
# repoman: evaluate USE-conditional deps against the QA mask/force sets.
6322 if repoman and x.use and x.use.conditional:
6323 evaluated_atom = portage.dep.remove_slot(x)
6325 evaluated_atom += ":%s" % x.slot
6326 evaluated_atom += str(x.use._eval_qa_conditionals(
6327 use_mask, use_force))
6328 x = portage.dep.Atom(evaluated_atom)
# Normal mode: evaluate USE-conditional deps against the actual USE flags.
6330 if not repoman and \
6331 myuse is not None and isinstance(x, portage.dep.Atom) and x.use:
6332 if x.use.conditional:
6333 evaluated_atom = portage.dep.remove_slot(x)
6335 evaluated_atom += ":%s" % x.slot
6336 evaluated_atom += str(x.use.evaluate_conditionals(myuse))
6337 x = portage.dep.Atom(evaluated_atom)
# Only virtual/* atoms need expansion; others pass through (code missing).
6339 mykey = dep_getkey(x)
6340 if not mykey.startswith("virtual/"):
6343 mychoices = myvirtuals.get(mykey, [])
6344 isblocker = x.startswith("!")
6346 # Virtual blockers are no longer expanded here since
6347 # the un-expanded virtual atom is more useful for
6348 # maintaining a cache of blocker atoms.
# Collect new-style (ebuild-provided) matches for the virtual.
6355 matches = portdb.match(match_atom)
6356 # Use descending order to prefer higher versions.
6359 # only use new-style matches
6360 if cpv.startswith("virtual/"):
6361 pkgs.append((cpv, catpkgsplit(cpv)[1:], portdb))
6362 if not (pkgs or mychoices):
6363 # This one couldn't be expanded as a new-style virtual. Old-style
6364 # virtuals have already been expanded by dep_virtual, so this one
6365 # is unavailable and dep_zapdeps will identify it as such. The
6366 # atom is not eliminated here since it may still represent a
6367 # dependency that needs to be satisfied.
# Exactly one old-style provider and no new-style match: substitute it.
6370 if not pkgs and len(mychoices) == 1:
6371 newsplit.append(portage.dep.Atom(x.replace(mykey, mychoices[0])))
# For each new-style match, pull in its dependency string (recursively
# dep_check'ed) so the virtual collapses to its components.
6378 cpv, pv_split, db = y
6379 depstring = " ".join(db.aux_get(cpv, dep_keys))
6380 pkg_kwargs = kwargs.copy()
6381 if isinstance(db, portdbapi):
6386 use_split = db.aux_get(cpv, ["USE"])[0].split()
6387 pkg_kwargs["myuse"] = use_split
6389 print "Virtual Parent: ", y[0]
6390 print "Virtual Depstring:", depstring
6391 mycheck = dep_check(depstring, mydbapi, mysettings, myroot=myroot,
6392 trees=trees, **pkg_kwargs)
6394 raise portage.exception.ParseError(
6395 "%s: %s '%s'" % (y[0], mycheck[1], depstring))
# Blockers: only a single-atom expansion may be blocked.
6397 virtual_atoms = [atom for atom in mycheck[1] \
6398 if not atom.startswith("!")]
6399 if len(virtual_atoms) == 1:
6400 # It wouldn't make sense to block all the components of a
6401 # compound virtual, so only a single atom block is allowed.
6402 a.append(portage.dep.Atom("!" + virtual_atoms[0]))
6404 # pull in the new-style virtual
6405 mycheck[1].append(portage.dep.Atom("="+y[0]))
6406 a.append(mycheck[1])
6407 # Plain old-style virtuals. New-style virtuals are preferred.
6409 a.append(portage.dep.Atom(x.replace(mykey, y, 1)))
6410 if isblocker and not a:
6411 # Probably a compound virtual. Pass the atom through unprocessed.
def dep_eval(deplist):
	"""Evaluate a reduced dependency structure to 1 (satisfied) or 0.

	deplist is a (possibly nested) list as produced by dep_check /
	use_reduce: plain elements are per-atom satisfaction flags, a sublist
	is a nested group, and a leading "||" marks an any-of group.  In an
	all-of group an element equal to 0 or 2 counts as unsatisfied; in an
	any-of group only a value of 1 (or a satisfied sublist) counts as
	satisfied.

	The visible fragment of this function was truncated (missing base
	case, and-branch and returns); this is the completed implementation.
	"""
	if not deplist:
		# An empty dependency list is trivially satisfied.
		return 1
	if deplist[0]=="||":
		#or list; we just need one "1"
		for x in deplist[1:]:
			if isinstance(x, list):
				if dep_eval(x) == 1:
					return 1
			elif x == 1:
				return 1
		#XXX: unless there's no available atoms in the list
		#in which case we need to assume that everything is
		#okay as some ebuilds are relying on an old bug.
		if len(deplist) == 1:
			return 1
		return 0
	else:
		# and-list: every member must be satisfied.
		for x in deplist:
			if isinstance(x, list):
				if dep_eval(x) == 0:
					return 0
			elif x == 0 or x == 2:
				return 0
		return 1
# NOTE(review): this function appears to be an incomplete excerpt -- many
# lines (returns, else branches, loop headers, try/except wrappers, the
# initialization of preferred/other/versions/has_upgrade, break/continue
# statements) are not visible.  Comments describe only the visible lines.
6443 def dep_zapdeps(unreduced, reduced, myroot, use_binaries=0, trees=None):
6444 """Takes an unreduced and reduced deplist and removes satisfied dependencies.
6445 Returned deplist contains steps that must be taken to satisfy dependencies."""
6449 writemsg("ZapDeps -- %s\n" % (use_binaries), 2)
# Already satisfied (or degenerate) structures need no further action.
6450 if not reduced or unreduced == ["||"] or dep_eval(reduced):
# all-of group: recurse into sublists, keep unsatisfied plain atoms.
6453 if unreduced[0] != "||":
6455 for dep, satisfied in izip(unreduced, reduced):
6456 if isinstance(dep, list):
6457 unresolved += dep_zapdeps(dep, satisfied, myroot,
6458 use_binaries=use_binaries, trees=trees)
6460 unresolved.append(dep)
6463 # We're at a ( || atom ... ) type level and need to make a choice
6464 deps = unreduced[1:]
6465 satisfieds = reduced[1:]
6467 # Our preference order is for an the first item that:
6468 # a) contains all unmasked packages with the same key as installed packages
6469 # b) contains all unmasked packages
6470 # c) contains masked installed packages
6471 # d) is the first item
6474 preferred_not_installed = []
6475 preferred_any_slot = []
6476 possible_upgrades = []
6479 # Alias the trees we'll be checking availability against
6480 parent = trees[myroot].get("parent")
6481 graph_db = trees[myroot].get("graph_db")
6483 if "vartree" in trees[myroot]:
6484 vardb = trees[myroot]["vartree"].dbapi
# Availability is checked against the binary tree when requested,
# otherwise against the ports tree.
6486 mydbapi = trees[myroot]["bintree"].dbapi
6488 mydbapi = trees[myroot]["porttree"].dbapi
6490 # Sort the deps into preferred (installed) and other
6491 # with values of [[required_atom], availablility]
6492 for dep, satisfied in izip(deps, satisfieds):
6493 if isinstance(dep, list):
6494 atoms = dep_zapdeps(dep, satisfied, myroot,
6495 use_binaries=use_binaries, trees=trees)
# Choices whose availability could not be determined go to "other".
6501 other.append((atoms, None, False))
# Determine, per choice, whether every atom has an unmasked match and
# record the highest matching cpv per slot in "versions".
6504 all_available = True
6509 avail_pkg = mydbapi.match(atom)
6511 avail_pkg = avail_pkg[-1] # highest (ascending order)
6512 avail_slot = "%s:%s" % (dep_getkey(atom),
6513 mydbapi.aux_get(avail_pkg, ["SLOT"])[0])
6515 all_available = False
6518 versions[avail_slot] = avail_pkg
6520 this_choice = (atoms, versions, all_available)
6522 # The "all installed" criterion is not version or slot specific.
6523 # If any version of a package is installed then we assume that it
6524 # is preferred over other possible packages choices.
6525 all_installed = True
6526 for atom in set([dep_getkey(atom) for atom in atoms \
6527 if atom[:1] != "!"]):
6528 # New-style virtuals have zero cost to install.
6529 if not vardb.match(atom) and not atom.startswith("virtual/"):
6530 all_installed = False
6532 all_installed_slots = False
6534 all_installed_slots = True
6535 for slot_atom in versions:
6536 # New-style virtuals have zero cost to install.
6537 if not vardb.match(slot_atom) and \
6538 not slot_atom.startswith("virtual/"):
6539 all_installed_slots = False
# Bucket the choice per the a)-d) preference order described above.
6542 if all_installed_slots:
6543 preferred.append(this_choice)
6545 preferred_any_slot.append(this_choice)
6546 elif graph_db is None:
6547 possible_upgrades.append(this_choice)
# With a dependency graph available, prefer choices already in the graph.
6550 for slot_atom in versions:
6551 # New-style virtuals have zero cost to install.
6552 if not graph_db.match(slot_atom) and \
6553 not slot_atom.startswith("virtual/"):
6554 all_in_graph = False
6558 preferred_not_installed.append(this_choice)
6560 # Check if the atom would result in a direct circular
6561 # dependency and try to avoid that if it seems likely
6562 # to be unresolvable.
6563 cpv_slot_list = [parent]
6564 circular_atom = None
6568 if vardb.match(atom):
6569 # If the atom is satisfied by an installed
6570 # version then it's not a circular dep.
6572 if dep_getkey(atom) != parent.cp:
6574 if match_from_list(atom, cpv_slot_list):
6575 circular_atom = atom
6577 if circular_atom is None:
6578 preferred_not_installed.append(this_choice)
6580 other.append(this_choice)
6582 possible_upgrades.append(this_choice)
6584 other.append(this_choice)
6586 # Compare the "all_installed" choices against the "all_available" choices
6587 # for possible missed upgrades. The main purpose of this code is to find
6588 # upgrades of new-style virtuals since _expand_new_virtuals() expands them
6589 # into || ( highest version ... lowest version ). We want to prefer the
6590 # highest all_available version of the new-style virtual when there is a
6591 # lower all_installed version.
6592 preferred.extend(preferred_not_installed)
6593 preferred.extend(preferred_any_slot)
6594 preferred.extend(possible_upgrades)
6595 possible_upgrades = preferred[1:]
# Promote a later choice above an earlier one when, slot for slot, it is
# strictly an upgrade (per pkgcmp) with no downgrade.
6596 for possible_upgrade in possible_upgrades:
6597 atoms, versions, all_available = possible_upgrade
6598 myslots = set(versions)
6599 for other_choice in preferred:
6600 if possible_upgrade is other_choice:
6601 # possible_upgrade will not be promoted, so move on
6603 o_atoms, o_versions, o_all_available = other_choice
6604 intersecting_slots = myslots.intersection(o_versions)
6605 if not intersecting_slots:
6608 has_downgrade = False
6609 for myslot in intersecting_slots:
6610 myversion = versions[myslot]
6611 o_version = o_versions[myslot]
6612 difference = pkgcmp(catpkgsplit(myversion)[1:],
6613 catpkgsplit(o_version)[1:])
6618 has_downgrade = True
6620 if has_upgrade and not has_downgrade:
6621 preferred.remove(possible_upgrade)
6622 o_index = preferred.index(other_choice)
6623 preferred.insert(o_index, possible_upgrade)
6626 # preferred now contains a) and c) from the order above with
6627 # the masked flag differentiating the two. other contains b)
6628 # and d) so adding other to preferred will give us a suitable
6629 # list to iterate over.
6630 preferred.extend(other)
# First pass accepts only fully available choices; second pass (masked
# allowed) guarantees something is returned.
6632 for allow_masked in (False, True):
6633 for atoms, versions, all_available in preferred:
6634 if all_available or allow_masked:
6637 assert(False) # This point should not be reachable
# dep_expand: expand the bare package name inside a dependency atom into a
# full category/package form via cpv_expand(), preserving any operator
# prefix (e.g. ">=") and any version/slot postfix around the cpv.
# NOTE(review): the embedded line numbers jump (6641-6645, 6652, 6657
# missing), so some statements of this function are absent from this
# excerpt -- the visible flow cannot be fully verified here.
6640 def dep_expand(mydep, mydb=None, use_cache=1, settings=None):
6646 	mydep = dep_getcpv(orig_dep)
# Locate the bare cpv inside the original atom so the surrounding
# operator/version decorations can be re-attached after expansion.
6647 	myindex = orig_dep.index(mydep)
6648 	prefix = orig_dep[:myindex]
6649 	postfix = orig_dep[myindex+len(mydep):]
6650 	expanded = cpv_expand(mydep, mydb=mydb,
6651 		use_cache=use_cache, settings=settings)
6653 		return portage.dep.Atom(prefix + expanded + postfix)
6654 	except portage.exception.InvalidAtom:
6655 		# Missing '=' prefix is allowed for backward compatibility.
6656 		if not isvalidatom("=" + prefix + expanded + postfix):
6658 		return portage.dep.Atom("=" + prefix + expanded + postfix)
# dep_check: parse and evaluate a DEPEND-style string against the given
# dbapi/settings, returning [1, atom_list] on success or [0, error_msg]
# on failure (list-with-status is the historical calling convention).
# Pipeline visible here: paren_reduce -> use_reduce -> dep_opconvert ->
# _expand_new_virtuals -> dep_wordreduce -> dep_zapdeps -> flatten.
# NOTE(review): many source lines are missing from this excerpt (embedded
# numbering jumps, e.g. 6665, 6667-6669, 6675, 6678, 6680, ...), so
# branch structure between the visible statements is not fully shown.
6660 def dep_check(depstring, mydbapi, mysettings, use="yes", mode=None, myuse=None,
6661 	use_cache=1, use_binaries=0, myroot="/", trees=None):
6662 	"""Takes a depend string and parses the condition."""
6663 	edebug = mysettings.get("PORTAGE_DEBUG", None) == "1"
6664 	#check_config_instance(mysettings)
6666 		trees = globals()["db"]
6670 			myusesplit = mysettings["PORTAGE_USE"].split()
6673 		# We've been given useflags to use.
6674 		#print "USE FLAGS PASSED IN."
6676 		#if "bindist" in myusesplit:
6677 		#	print "BINDIST is set!"
6679 		#	print "BINDIST NOT set."
6681 		#we are being run by autouse(), don't consult USE vars yet.
6682 		# WE ALSO CANNOT USE SETTINGS
6685 	#convert parenthesis to sublists
6687 		mysplit = portage.dep.paren_reduce(depstring)
6688 	except portage.exception.InvalidDependString, e:
6693 		useforce.add(mysettings["ARCH"])
6695 		# This masking/forcing is only for repoman.  In other cases, relevant
6696 		# masking/forcing should have already been applied via
6697 		# config.regenerate().  Also, binary or installed packages may have
6698 		# been built with flags that are now masked, and it would be
6699 		# inconsistent to mask them now.  Additionally, myuse may consist of
6700 		# flags from a parent package that is being merged to a $ROOT that is
6701 		# different from the one that mysettings represents.
6702 		mymasks.update(mysettings.usemask)
6703 		mymasks.update(mysettings.archlist())
# ARCH itself must remain usable even though all arch keywords are masked.
6704 		mymasks.discard(mysettings["ARCH"])
6705 		useforce.update(mysettings.useforce)
6706 		useforce.difference_update(mymasks)
6708 		mysplit = portage.dep.use_reduce(mysplit, uselist=myusesplit,
6709 			masklist=mymasks, matchall=(use=="all"), excludeall=useforce)
6710 	except portage.exception.InvalidDependString, e:
6713 	# Do the || conversions
6714 	mysplit=portage.dep.dep_opconvert(mysplit)
6717 		#dependencies were reduced to nothing
6720 	# Recursively expand new-style virtuals so as to
6721 	# collapse one or more levels of indirection.
6723 		mysplit = _expand_new_virtuals(mysplit, edebug, mydbapi, mysettings,
6724 			use=use, mode=mode, myuse=myuse,
6725 			use_force=useforce, use_mask=mymasks, use_cache=use_cache,
6726 			use_binaries=use_binaries, myroot=myroot, trees=trees)
6727 	except portage.exception.ParseError, e:
6731 	mysplit2=dep_wordreduce(mysplit2,mysettings,mydbapi,mode,use_cache=use_cache)
6732 	if mysplit2 is None:
6733 		return [0,"Invalid token"]
6735 	writemsg("\n\n\n", 1)
6736 	writemsg("mysplit:  %s\n" % (mysplit), 1)
6737 	writemsg("mysplit2: %s\n" % (mysplit2), 1)
6740 		myzaps = dep_zapdeps(mysplit, mysplit2, myroot,
6741 			use_binaries=use_binaries, trees=trees)
6742 	except portage.exception.InvalidAtom, e:
6743 		if portage.dep._dep_check_strict:
6744 			raise # This shouldn't happen.
6745 		# dbapi.match() failed due to an invalid atom in
6746 		# the dependencies of an installed package.
6747 		return [0, "Invalid atom: '%s'" % (e,)]
6749 	mylist = flatten(myzaps)
6750 	writemsg("myzaps:   %s\n" % (myzaps), 1)
6751 	writemsg("mylist:   %s\n" % (mylist), 1)
6756 	writemsg("mydict:   %s\n" % (mydict), 1)
6757 	return [1,mydict.keys()]
# dep_wordreduce: walk a (possibly nested) dependency list and replace
# each atom with a truth value indicating whether it is currently
# satisfied (via package.provided, or by mydbapi matches); "||" markers
# are kept as-is and sublists are reduced recursively.
# NOTE(review): several source lines are missing from this excerpt
# (e.g. 6764, 6767, 6770, 6774, 6780-6781, 6784-6789, 6791-6792,
# 6794-6796, 6798-6799), including the True-assignment branches and the
# final return, so the full branch logic is not visible here.
6759 def dep_wordreduce(mydeplist,mysettings,mydbapi,mode,use_cache=1):
6760 	"Reduces the deplist to ones and zeros"
6761 	deplist=mydeplist[:]
6762 	for mypos, token in enumerate(deplist):
6763 		if isinstance(deplist[mypos], list):
# Recurse into nested any-of / all-of groups.
6765 			deplist[mypos]=dep_wordreduce(deplist[mypos],mysettings,mydbapi,mode,use_cache=use_cache)
6766 		elif deplist[mypos]=="||":
6768 		elif token[:1] == "!":
6769 			deplist[mypos] = False
6771 			mykey = dep_getkey(deplist[mypos])
6772 			if mysettings and mykey in mysettings.pprovideddict and \
6773 				match_from_list(deplist[mypos], mysettings.pprovideddict[mykey]):
6775 			elif mydbapi is None:
6776 				# Assume nothing is satisfied.  This forces dep_zapdeps to
6777 				# return all of the deps that have been selected
6778 				# (excluding those satisfied by package.provided).
6779 				deplist[mypos] = False
6782 					x = mydbapi.xmatch(mode, deplist[mypos])
6783 					if mode.startswith("minimum-"):
6790 					mydep=mydbapi.match(deplist[mypos],use_cache=use_cache)
6793 				if deplist[mypos][0]=="!":
6797 			#encountered invalid string
# cpv_getkey: reduce a full "cat/pkg-version" cpv string to its
# "cat/pkg" key by splitting off and pkgsplit()-ing the package part.
# NOTE(review): lines 6804-6805 are missing from this excerpt --
# presumably a guard for pkgsplit() returning None; confirm against the
# full source before relying on behavior for malformed input.
6801 def cpv_getkey(mycpv):
6802 	myslash=mycpv.split("/")
6803 	mysplit=pkgsplit(myslash[-1])
6806 	return myslash[0]+"/"+mysplit[0]
# key_expand: expand a possibly category-less package key ("foo" or
# "cat/foo") into a category-qualified key, consulting the given dbapi's
# categories and falling back to the virtuals maps; an unmatched bare
# name falls back to "null/<name>".
# NOTE(review): lines 6818, 6822, 6826 and 6831+ are missing from this
# excerpt, so the try/else structure around the visible statements is
# not fully shown.
6812 def key_expand(mykey, mydb=None, use_cache=1, settings=None):
6813 	mysplit=mykey.split("/")
6814 	if settings is None:
6815 		settings = globals()["settings"]
6816 	virts = settings.getvirtuals("/")
6817 	virts_p = settings.get_virts_p("/")
6819 		if hasattr(mydb, "cp_list"):
# Probe every known category for a cp entry matching the bare name.
6820 			for x in mydb.categories:
6821 				if mydb.cp_list(x+"/"+mykey,use_cache=use_cache):
6823 		if mykey in virts_p:
6824 			return(virts_p[mykey][0])
6825 		return "null/"+mykey
6827 		if hasattr(mydb, "cp_list"):
6828 			if not mydb.cp_list(mykey, use_cache=use_cache) and \
6829 				virts and mykey in virts:
6830 				return virts[mykey][0]
# NOTE(review): many source lines are missing from this excerpt (embedded
# numbering jumps, e.g. 6844, 6846-6847, 6849, 6851-6852, 6863, 6865,
# 6867, 6872-6880, ...), so parts of the control flow -- including the
# illegal-case handling and several loop bodies -- are not visible here.
6833 def cpv_expand(mycpv, mydb=None, use_cache=1, settings=None):
6834 	"""Given a string (packagename or virtual) expand it into a valid
6835 	cat/package string. Virtuals use the mydb to determine which provided
6836 	virtual is a valid choice and defaults to the first element when there
6837 	are no installed/available candidates."""
6838 	myslash=mycpv.split("/")
6839 	mysplit=pkgsplit(myslash[-1])
6840 	if settings is None:
6841 		settings = globals()["settings"]
6842 	virts = settings.getvirtuals("/")
6843 	virts_p = settings.get_virts_p("/")
6845 		# this is an illegal case (more than one '/').
6848 	elif len(myslash)==2:
6850 			mykey=myslash[0]+"/"+mysplit[0]
6853 		if mydb and virts and mykey in virts:
6854 			writemsg("mydb.__class__: %s\n" % (mydb.__class__), 1)
6855 			if hasattr(mydb, "cp_list"):
6856 				if not mydb.cp_list(mykey, use_cache=use_cache):
6857 					writemsg("virts[%s]: %s\n" % (str(mykey),virts[mykey]), 1)
6858 					mykey_orig = mykey[:]
6859 					for vkey in virts[mykey]:
6860 						# The virtuals file can contain a versioned atom, so
6861 						# it may be necessary to remove the operator and
6862 						# version from the atom before it is passed into
6864 						if mydb.cp_list(dep_getkey(vkey), use_cache=use_cache):
6866 							writemsg("virts chosen: %s\n" % (mykey), 1)
# No installed/available provider found; fall back to the first entry.
6868 					if mykey == mykey_orig:
6869 						mykey=virts[mykey][0]
6870 						writemsg("virts defaulted: %s\n" % (mykey), 1)
6871 			#we only perform virtual expansion if we are passed a dbapi
6873 		#specific cpv, no category, ie. "foo-1.0"
6881 		if mydb and hasattr(mydb, "categories"):
6882 			for x in mydb.categories:
6883 				if mydb.cp_list(x+"/"+myp,use_cache=use_cache):
6884 					matches.append(x+"/"+myp)
6885 		if len(matches) > 1:
6886 			virtual_name_collision = False
6887 			if len(matches) == 2:
6889 					if not x.startswith("virtual/"):
6890 						# Assume that the non-virtual is desired.  This helps
6891 						# avoid the ValueError for invalid deps that come from
6892 						# installed packages (during reverse blocker detection,
6896 						virtual_name_collision = True
6897 			if not virtual_name_collision:
6898 				# AmbiguousPackageName inherits from ValueError,
6899 				# for backward compatibility with calling code
6900 				# that already handles ValueError.
6901 				raise portage.exception.AmbiguousPackageName(matches)
6905 			if not mykey and not isinstance(mydb, list):
6907 					mykey=virts_p[myp][0]
6908 				#again, we only perform virtual expansion if we have a dbapi (not a list)
# "-r0" revisions are canonicalized away in the returned cpv.
6912 	if mysplit[2]=="r0":
6913 		return mykey+"-"+mysplit[1]
6915 		return mykey+"-"+mysplit[1]+"-"+mysplit[2]
# getmaskingreason: for a masked cpv, scan the profile/overlay/user
# package.mask files for the matching atom and return the comment block
# preceding it (optionally as a (comment, filename) tuple when
# return_location is true).
# NOTE(review): lines are missing from this excerpt (e.g. 6923, 6926,
# 6930, 6932, 6934, 6938, 6941, 6951, 6953, 6957-6959, 6964-6968, 6970,
# 6972-6973, 6975-6976, 6980-6985), including the atom-matching and
# comment-accumulation logic in the final loop.
6919 def getmaskingreason(mycpv, metadata=None, settings=None, portdb=None, return_location=False):
6920 	from portage.util import grablines
6921 	if settings is None:
6922 		settings = globals()["settings"]
6924 		portdb = globals()["portdb"]
6925 	mysplit = catpkgsplit(mycpv)
6927 		raise ValueError("invalid CPV: %s" % mycpv)
6928 	if metadata is None:
6929 		db_keys = list(portdb._aux_cache_keys)
6931 			metadata = dict(izip(db_keys, portdb.aux_get(mycpv, db_keys)))
6933 			if not portdb.cpv_exists(mycpv):
6935 	if metadata is None:
6936 		# Can't access SLOT due to corruption.
6937 		cpv_slot_list = [mycpv]
6939 		cpv_slot_list = ["%s:%s" % (mycpv, metadata["SLOT"])]
6940 	mycp=mysplit[0]+"/"+mysplit[1]
6942 	# XXX- This is a temporary duplicate of code from the config constructor.
6943 	locations = [os.path.join(settings["PORTDIR"], "profiles")]
6944 	locations.extend(settings.profiles)
6945 	for ov in settings["PORTDIR_OVERLAY"].split():
6946 		profdir = os.path.join(normalize_path(ov), "profiles")
6947 		if os.path.isdir(profdir):
6948 			locations.append(profdir)
6949 	locations.append(os.path.join(settings["PORTAGE_CONFIGROOT"],
6950 		USER_CONFIG_PATH.lstrip(os.path.sep)))
6952 	pmasklists = [(x, grablines(os.path.join(x, "package.mask"), recursive=1)) for x in locations]
6954 	if mycp in settings.pmaskdict:
6955 		for x in settings.pmaskdict[mycp]:
6956 			if match_from_list(x, cpv_slot_list):
# Scan each package.mask line-by-line, tracking where the comment
# block above the matching atom starts.
6960 		for pmask in pmasklists:
6961 			pmask_filename = os.path.join(pmask[0], "package.mask")
6962 			for i in xrange(len(pmask[1])):
6963 				l = pmask[1][i].strip()
6969 						comment_valid = i + 1
6971 						if comment_valid != i:
6974 							return (comment, pmask_filename)
6977 					elif comment_valid != -1:
6978 						# Apparently this comment applies to multiple masks, so
6979 						# it remains valid until a blank line is encountered.
6986 	return None
# getmaskingstatus: return a list of reasons why a cpv is masked
# (e.g. "profile", "package.mask", "EAPI x", "<keyword> keyword",
# "<license> license(s)"); an empty list means not masked.
# NOTE(review): many source lines are missing from this excerpt
# (e.g. 6989, 6991-6993, 6996-6997, 7000, 7002, 7006, 7008, 7010, 7015,
# 7018-7021, 7024, 7028-7029, 7035, 7046, 7048, 7051-7052, 7056,
# 7060-7091, ...), including most of the keyword-evaluation loop, so the
# visible flow is only a skeleton of the original function.
6986 def getmaskingstatus(mycpv, settings=None, portdb=None):
6987 	if settings is None:
# Clone to avoid mutating the caller's global config via setcpv() below.
6988 		settings = config(clone=globals()["settings"])
6990 		portdb = globals()["portdb"]
6994 	if not isinstance(mycpv, basestring):
6995 		# emerge passed in a Package instance
6998 		metadata = pkg.metadata
6999 		installed = pkg.installed
7001 		mysplit = catpkgsplit(mycpv)
7003 			raise ValueError("invalid CPV: %s" % mycpv)
7004 		if metadata is None:
7005 			db_keys = list(portdb._aux_cache_keys)
7007 				metadata = dict(izip(db_keys, portdb.aux_get(mycpv, db_keys)))
7009 				if not portdb.cpv_exists(mycpv):
7011 				return ["corruption"]
7012 		if "?" in metadata["LICENSE"]:
# USE-conditional licenses require USE to be computed for this cpv.
7013 			settings.setcpv(mycpv, mydb=metadata)
7014 			metadata["USE"] = settings["PORTAGE_USE"]
7016 			metadata["USE"] = ""
7017 	mycp=mysplit[0]+"/"+mysplit[1]
7022 	if settings._getProfileMaskAtom(mycpv, metadata):
7023 		rValue.append("profile")
7025 	# package.mask checking
7026 	if settings._getMaskAtom(mycpv, metadata):
7027 		rValue.append("package.mask")
7030 	eapi = metadata["EAPI"]
7031 	mygroups = settings._getKeywords(mycpv, metadata)
7032 	licenses = metadata["LICENSE"]
7033 	slot = metadata["SLOT"]
7034 	if eapi.startswith("-"):
7036 	if not eapi_is_supported(eapi):
7037 		return ["EAPI %s" % eapi]
7038 	elif _eapi_is_deprecated(eapi) and not installed:
7039 		return ["EAPI %s" % eapi]
7040 	egroups = settings.configdict["backupenv"].get(
7041 		"ACCEPT_KEYWORDS", "").split()
7042 	pgroups = settings["ACCEPT_KEYWORDS"].split()
7043 	myarch = settings["ARCH"]
7044 	if pgroups and myarch not in pgroups:
7045 		"""For operating systems other than Linux, ARCH is not necessarily a
7047 		myarch = pgroups[0].lstrip("~")
7049 	cp = dep_getkey(mycpv)
7050 	pkgdict = settings.pkeywordsdict.get(cp)
7053 		cpv_slot_list = ["%s:%s" % (mycpv, metadata["SLOT"])]
7054 		for atom, pkgkeywords in pkgdict.iteritems():
7055 			if match_from_list(atom, cpv_slot_list):
7057 				pgroups.extend(pkgkeywords)
7058 	if matches or egroups:
7059 		pgroups.extend(egroups)
7062 			if x.startswith("-"):
# "-keyword" entries remove a previously accepted keyword (incremental).
7066 					inc_pgroups.discard(x[1:])
7069 		pgroups = inc_pgroups
7074 	for keyword in pgroups:
7075 		if keyword in mygroups:
7084 			elif gp=="-"+myarch and myarch in pgroups:
7087 			elif gp=="~"+myarch and myarch in pgroups:
7092 		missing_licenses = settings._getMissingLicenses(mycpv, metadata)
7093 		if missing_licenses:
7094 			allowed_tokens = set(["||", "(", ")"])
7095 			allowed_tokens.update(missing_licenses)
7096 			license_split = licenses.split()
7097 			license_split = [x for x in license_split \
7098 				if x in allowed_tokens]
7099 			msg = license_split[:]
7100 			msg.append("license(s)")
7101 			rValue.append(" ".join(msg))
7102 	except portage.exception.InvalidDependString, e:
7103 		rValue.append("LICENSE: "+str(e))
7105 	# Only show KEYWORDS masks for installed packages
7106 	# if they're not masked for any other reason.
7107 	if kmask and (not installed or not rValue):
7108 		rValue.append(kmask+" keyword")
7114 'DEPEND', 'RDEPEND', 'SLOT', 'SRC_URI',
7115 'RESTRICT', 'HOMEPAGE', 'LICENSE', 'DESCRIPTION',
7116 'KEYWORDS', 'INHERITED', 'IUSE', 'CDEPEND',
7117 'PDEPEND', 'PROVIDE', 'EAPI',
7118 'PROPERTIES', 'DEFINED_PHASES', 'UNUSED_05', 'UNUSED_04',
7119 'UNUSED_03', 'UNUSED_02', 'UNUSED_01',
# Cached count of the auxdbkeys metadata-key tuple defined above; used
# by the flat metadata cache format, which stores one line per key.
7121 auxdbkeylen=len(auxdbkeys)
7123 from portage.dbapi import dbapi
7124 from portage.dbapi.virtual import fakedbapi
7125 from portage.dbapi.bintree import bindbapi, binarytree
7126 from portage.dbapi.vartree import vardbapi, vartree, dblink
7127 from portage.dbapi.porttree import close_portdbapi_caches, portdbapi, portagetree
# NOTE(review): lines 7149 (presumably "def keys(self):") and a few
# others are missing from this excerpt; the docstring at 7150 is
# orphaned here but belongs to that missing method definition.
7129 class FetchlistDict(UserDict.DictMixin):
7130 	"""This provide a mapping interface to retrieve fetch lists. It's used
7131 	to allow portage.manifest.Manifest to access fetch lists via a standard
7132 	mapping interface rather than use the dbapi directly."""
7133 	def __init__(self, pkgdir, settings, mydbapi):
7134 		"""pkgdir is a directory containing ebuilds and settings is passed into
7135 		portdbapi.getfetchlist for __getitem__ calls."""
# Derive "category/package" from the last two path components of pkgdir.
7136 		self.pkgdir = pkgdir
7137 		self.cp = os.sep.join(pkgdir.split(os.sep)[-2:])
7138 		self.settings = settings
# The repository root is two levels above the package directory.
7139 		self.mytree = os.path.realpath(os.path.dirname(os.path.dirname(pkgdir)))
7140 		self.portdb = mydbapi
7141 	def __getitem__(self, pkg_key):
7142 		"""Returns the complete fetch list for a given package."""
7143 		return self.portdb.getFetchMap(pkg_key, mytree=self.mytree).keys()
7144 	def __contains__(self, cpv):
7145 		return cpv in self.keys()
7146 	def has_key(self, pkg_key):
7147 		"""Returns true if the given package exists within pkgdir."""
7148 		return pkg_key in self
7150 		"""Returns keys for all packages within pkgdir"""
7151 		return self.portdb.cp_list(self.cp, mytree=self.mytree)
# NOTE(review): source lines are missing throughout this excerpt
# (e.g. 7157-7159, 7161, 7165-7169, 7171-7172, 7176, 7180, 7182-7183,
# 7185, 7199, 7203-7204, 7213-7215, 7221, 7227-7228, 7235-7238,
# 7245-7246, 7248, 7250, 7257-7258, 7262-7264), including the
# try/finally scaffolding around the merge; the visible flow is partial.
7153 def pkgmerge(mytbz2, myroot, mysettings, mydbapi=None,
7154 	vartree=None, prev_mtimes=None, blockers=None):
7155 	"""will merge a .tbz2 file, returning a list of runtime dependencies
7156 		that must be satisfied, or None if there was a merge error.  This
7157 		code assumes the package exists."""
7160 		mydbapi = db[myroot]["bintree"].dbapi
7162 		vartree = db[myroot]["vartree"]
7163 	if mytbz2[-5:]!=".tbz2":
7164 		print "!!! Not a .tbz2 file"
7170 	did_merge_phase = False
7173 		""" Don't lock the tbz2 file because the filesystem could be readonly or
7174 		shared by a cluster."""
7175 		#tbz2_lock = portage.locks.lockfile(mytbz2, wantnewlockfile=1)
7177 		mypkg = os.path.basename(mytbz2)[:-5]
7178 		xptbz2 = portage.xpak.tbz2(mytbz2)
7179 		mycat = xptbz2.getfile("CATEGORY")
7181 			writemsg("!!! CATEGORY info missing from info chunk, aborting...\n",
7184 		mycat = mycat.strip()
7186 		# These are the same directories that would be used at build time.
7187 		builddir = os.path.join(
7188 			mysettings["PORTAGE_TMPDIR"], "portage", mycat, mypkg)
7189 		catdir = os.path.dirname(builddir)
7190 		pkgloc = os.path.join(builddir, "image")
7191 		infloc = os.path.join(builddir, "build-info")
7192 		myebuild = os.path.join(
7193 			infloc, os.path.basename(mytbz2)[:-4] + "ebuild")
# Category dir is group-writable (070) so portage group members can merge.
7194 		portage.util.ensure_dirs(os.path.dirname(catdir),
7195 			uid=portage_uid, gid=portage_gid, mode=070, mask=0)
7196 		catdir_lock = portage.locks.lockdir(catdir)
7197 		portage.util.ensure_dirs(catdir,
7198 			uid=portage_uid, gid=portage_gid, mode=070, mask=0)
7200 			shutil.rmtree(builddir)
7201 		except (IOError, OSError), e:
7202 			if e.errno != errno.ENOENT:
7205 		for mydir in (builddir, pkgloc, infloc):
7206 			portage.util.ensure_dirs(mydir, uid=portage_uid,
7207 				gid=portage_gid, mode=0755)
7208 		writemsg_stdout(">>> Extracting info\n")
7209 		xptbz2.unpackinfo(infloc)
7210 		mysettings.setcpv(mycat + "/" + mypkg, mydb=mydbapi)
7211 		# Store the md5sum in the vdb.
7212 		fp = open(os.path.join(infloc, "BINPKGMD5"), "w")
7213 		fp.write(str(portage.checksum.perform_md5(mytbz2))+"\n")
7216 		# This gives bashrc users an opportunity to do various things
7217 		# such as remove binary packages after they're installed.
7218 		mysettings["PORTAGE_BINPKG_FILE"] = mytbz2
7219 		mysettings.backup_changes("PORTAGE_BINPKG_FILE")
7220 		debug = mysettings.get("PORTAGE_DEBUG", "") == "1"
7222 		# Eventually we'd like to pass in the saved ebuild env here.
7223 		retval = doebuild(myebuild, "setup", myroot, mysettings, debug=debug,
7224 			tree="bintree", mydbapi=mydbapi, vartree=vartree)
7225 		if retval != os.EX_OK:
7226 			writemsg("!!! Setup failed: %s\n" % retval, noiselevel=-1)
7229 		writemsg_stdout(">>> Extracting %s\n" % mypkg)
7230 		retval = portage.process.spawn_bash(
7231 			"bzip2 -dqc -- '%s' | tar -xp -C '%s' -f -" % (mytbz2, pkgloc),
7232 			env=mysettings.environ())
7233 		if retval != os.EX_OK:
7234 			writemsg("!!! Error Extracting '%s'\n" % mytbz2, noiselevel=-1)
7236 		#portage.locks.unlockfile(tbz2_lock)
7239 		mylink = dblink(mycat, mypkg, myroot, mysettings, vartree=vartree,
7240 			treetype="bintree", blockers=blockers)
7241 		retval = mylink.merge(pkgloc, infloc, myroot, myebuild, cleanup=0,
7242 			mydbapi=mydbapi, prev_mtimes=prev_mtimes)
7243 		did_merge_phase = True
7244 		success = retval == os.EX_OK
7247 		mysettings.pop("PORTAGE_BINPKG_FILE", None)
7249 			portage.locks.unlockfile(tbz2_lock)
7251 		if not did_merge_phase:
7252 			# The merge phase handles this already.  Callers don't know how
7253 			# far this function got, so we have to call elog_process() here
7254 			# so that it's only called once.
7255 			from portage.elog import elog_process
7256 			elog_process(mycat + "/" + mypkg, mysettings)
7259 			shutil.rmtree(builddir)
7260 		except (IOError, OSError), e:
7261 			if e.errno != errno.ENOENT:
# deprecated_profile_check: if the active profile contains a
# "deprecated" marker file, print a warning with the suggested
# replacement profile (first line of the file) and any upgrade
# instructions (remaining lines).
# NOTE(review): lines 7266, 7272, 7278, 7281, 7291-7292 are missing from
# this excerpt (likely the settings-is-None fallback and the returns).
7265 def deprecated_profile_check(settings=None):
7267 	if settings is not None:
7268 		config_root = settings["PORTAGE_CONFIGROOT"]
7269 	deprecated_profile_file = os.path.join(config_root,
7270 		DEPRECATED_PROFILE_FILE.lstrip(os.sep))
7271 	if not os.access(deprecated_profile_file, os.R_OK):
7273 	deprecatedfile = open(deprecated_profile_file, "r")
7274 	dcontent = deprecatedfile.readlines()
7275 	deprecatedfile.close()
7276 	writemsg(colorize("BAD", "\n!!! Your current profile is " + \
7277 		"deprecated and not supported anymore.") + "\n", noiselevel=-1)
7279 		writemsg(colorize("BAD","!!! Please refer to the " + \
7280 			"Gentoo Upgrading Guide.") + "\n", noiselevel=-1)
7282 		newprofile = dcontent[0]
7283 		writemsg(colorize("BAD", "!!! Please upgrade to the " + \
7284 			"following profile if possible:") + "\n", noiselevel=-1)
7285 		writemsg(8*" " + colorize("GOOD", newprofile) + "\n", noiselevel=-1)
7286 		if len(dcontent) > 1:
7287 			writemsg("To upgrade do the following steps:\n", noiselevel=-1)
7288 			for myline in dcontent[1:]:
7289 				writemsg(myline, noiselevel=-1)
7290 			writemsg("\n\n", noiselevel=-1)
7293 # gets virtual package settings
# Deprecated thin wrapper: warns, then delegates to the global
# settings.getvirtuals().  NOTE(review): line 7295 is missing from this
# excerpt -- confirm nothing else precedes the writemsg() call.
7294 def getvirtuals(myroot):
7296 	writemsg("--- DEPRECATED call to getvirtual\n")
7297 	return settings.getvirtuals(myroot)
# commit_mtimedb: atomically pickle the mtimedb dict to its file and
# apply portage-group permissions (0664) to the result.
# NOTE(review): lines 7300-7301, 7303-7305, 7307, 7311-7312, 7315 and
# 7318+ are missing from this excerpt, including the mydict fallback and
# the population of the plain dict "d" that gets pickled.
7299 def commit_mtimedb(mydict=None, filename=None):
7302 		if "mtimedb" not in globals() or mtimedb is None:
7306 	if filename is None:
7308 		filename = mtimedbfile
7309 	mydict["version"] = VERSION
7310 	d = {} # for full backward compat, pickle it as a plain dict object.
7313 		f = atomic_ofstream(filename)
# Protocol -1 == highest available pickle protocol.
7314 		pickle.dump(d, f, -1)
7316 		portage.util.apply_secpass_permissions(filename, uid=uid, gid=portage_gid, mode=0664)
7317 	except (IOError, OSError), e:
# NOTE(review): the enclosing "def portageexit():" line (7320) is absent
# from this excerpt; the two statements below are its body.  It flushes
# portdbapi caches at interpreter exit, skipped when running inside a
# sandbox or without sufficient privileges.
7321 	global uid,portage_gid,portdb,db
7322 	if secpass and os.environ.get("SANDBOX_ON") != "1":
7323 		close_portdbapi_caches()
# Register the exit hook so caches are persisted on normal shutdown.
7326 atexit_register(portageexit)
# NOTE(review): many source lines are missing from this excerpt
# (e.g. 7329, 7331, 7333, 7340, 7342, 7344-7345, 7348-7349, 7352,
# 7357-7358, 7361-7362, 7382-7383, 7385, 7395-7396, 7399, 7403-7404,
# 7410, 7414, 7416, 7418, 7422, 7424, 7426, 7428, 7438, 7443, 7446,
# 7448, 7451, 7453, 7456-7459, 7461, 7465-7467), so loop/conditional
# structure between the visible statements is only partially shown.
7328 def _global_updates(trees, prev_mtimes):
7330 	Perform new global updates if they exist in $PORTDIR/profiles/updates/.
7332 	@param trees: A dictionary containing portage trees.
7334 	@param prev_mtimes: A dictionary containing mtimes of files located in
7335 		$PORTDIR/profiles/updates/.
7336 	@type prev_mtimes: dict
7337 	@rtype: None or List
7338 	@return: None if there were no updates, otherwise a list of update
7339 		commands that have been performed.
7341 	# only do this if we're root and not running repoman/ebuild digest
7343 	if secpass < 2 or "SANDBOX_ACTIVE" in os.environ:
7346 	mysettings = trees["/"]["vartree"].settings
7347 	updpath = os.path.join(mysettings["PORTDIR"], "profiles", "updates")
# fixpackages re-reads everything; normal callers only process files
# whose mtimes changed since the last run.
7350 		if mysettings["PORTAGE_CALLER"] == "fixpackages":
7351 			update_data = grab_updates(updpath)
7353 			update_data = grab_updates(updpath, prev_mtimes)
7354 	except portage.exception.DirectoryNotFound:
7355 		writemsg("--- 'profiles/updates' is empty or " + \
7356 			"not available. Empty portage tree?\n", noiselevel=1)
7359 	if len(update_data) > 0:
7360 		do_upgrade_packagesmessage = 0
7363 		for mykey, mystat, mycontent in update_data:
7364 			writemsg_stdout("\n\n")
7365 			writemsg_stdout(colorize("GOOD",
7366 				"Performing Global Updates: ")+bold(mykey)+"\n")
7367 			writemsg_stdout("(Could take a couple of minutes if you have a lot of binary packages.)\n")
7368 			writemsg_stdout("  " + bold(".") + "='update pass'  " + \
7369 				bold("*") + "='binary update'  " + bold("#") + \
7370 				"='/var/db update'  " + bold("@") + "='/var/db move'\n" + \
7371 				"  " + bold("s") + "='/var/db SLOT move'  " + \
7372 				bold("%") + "='binary move'  " + bold("S") + \
7373 				"='binary SLOT move'\n  " + \
7374 				bold("p") + "='update /etc/portage/package.*'\n")
7375 			valid_updates, errors = parse_updates(mycontent)
7376 			myupd.extend(valid_updates)
7377 			writemsg_stdout(len(valid_updates) * "." + "\n")
7378 			if len(errors) == 0:
7379 				# Update our internal mtime since we
7380 				# processed all of our directives.
7381 				timestamps[mykey] = long(mystat.st_mtime)
7384 					writemsg("%s\n" % msg, noiselevel=-1)
7386 		world_file = os.path.join(root, WORLD_FILE)
7387 		world_list = grabfile(world_file)
7388 		world_modified = False
# Rewrite world-file atoms affected by move/slotmove commands.
7389 		for update_cmd in myupd:
7390 			for pos, atom in enumerate(world_list):
7391 				new_atom = update_dbentry(update_cmd, atom)
7392 				if atom != new_atom:
7393 					world_list[pos] = new_atom
7394 					world_modified = True
7397 			write_atomic(world_file,
7398 				"".join("%s\n" % (x,) for x in world_list))
7400 		update_config_files("/",
7401 			mysettings.get("CONFIG_PROTECT","").split(),
7402 			mysettings.get("CONFIG_PROTECT_MASK","").split(),
7405 		trees["/"]["bintree"] = binarytree("/", mysettings["PKGDIR"],
7406 			settings=mysettings)
7407 		vardb = trees["/"]["vartree"].dbapi
7408 		bindb = trees["/"]["bintree"].dbapi
7409 		if not os.access(bindb.bintree.pkgdir, os.W_OK):
7411 		for update_cmd in myupd:
7412 			if update_cmd[0] == "move":
7413 				moves = vardb.move_ent(update_cmd)
7415 					writemsg_stdout(moves * "@")
7417 					moves = bindb.move_ent(update_cmd)
7419 						writemsg_stdout(moves * "%")
7420 			elif update_cmd[0] == "slotmove":
7421 				moves = vardb.move_slot_ent(update_cmd)
7423 					writemsg_stdout(moves * "s")
7425 					moves = bindb.move_slot_ent(update_cmd)
7427 						writemsg_stdout(moves * "S")
7429 		# The above global updates proceed quickly, so they
7430 		# are considered a single mtimedb transaction.
7431 		if len(timestamps) > 0:
7432 			# We do not update the mtime in the mtimedb
7433 			# until after _all_ of the above updates have
7434 			# been processed because the mtimedb will
7435 			# automatically commit when killed by ctrl C.
7436 			for mykey, mtime in timestamps.iteritems():
7437 				prev_mtimes[mykey] = mtime
7439 		# We gotta do the brute force updates for these now.
7440 		if mysettings["PORTAGE_CALLER"] == "fixpackages" or \
7441 		"fixpackages" in mysettings.features:
7442 			def onUpdate(maxval, curval):
7444 					writemsg_stdout("#")
7445 			vardb.update_ents(myupd, onUpdate=onUpdate)
7447 				def onUpdate(maxval, curval):
7449 						writemsg_stdout("*")
7450 				bindb.update_ents(myupd, onUpdate=onUpdate)
7452 			do_upgrade_packagesmessage = 1
7454 		# Update progress above is indicated by characters written to stdout so
7455 		# we print a couple new lines here to separate the progress output from
7460 		if do_upgrade_packagesmessage and bindb and \
7462 			writemsg_stdout(" ** Skipping packages. Run 'fixpackages' or set it in FEATURES to fix the")
7463 			writemsg_stdout("\n    tbz2's in the packages directory. "+bold("Note: This can take a very long time."))
7464 			writemsg_stdout("\n")
7468 #continue setting up other trees
# MtimeDB: dict subclass backed by a pickled file; tracks a deep copy of
# the last-committed state so commit() only writes when data changed.
# NOTE(review): lines 7472, 7475, 7477-7478, 7481-7483, 7488-7491,
# 7493-7496, 7500-7501, 7504-7505, 7508-7509, 7511-7512, 7514-7516,
# 7521 are missing from this excerpt (incl. the commit() def line and
# the open/try scaffolding in _load), so method bodies are partial.
7470 class MtimeDB(dict):
7471 	def __init__(self, filename):
7473 		self.filename = filename
7474 		self._load(filename)
7476 	def _load(self, filename):
7479 			mypickle = pickle.Unpickler(f)
# Disable resolution of arbitrary globals while unpickling, as a
# hardening measure against malicious pickle payloads.
7480 			mypickle.find_global = None
7484 		except (IOError, OSError, EOFError, pickle.UnpicklingError), e:
7485 			if isinstance(e, pickle.UnpicklingError):
7486 				writemsg("!!! Error loading '%s': %s\n" % \
7487 					(filename, str(e)), noiselevel=-1)
# Migrate the legacy "old" key to its current name "updates".
7492 			d["updates"] = d["old"]
7497 		d.setdefault("starttime", 0)
7498 		d.setdefault("version", "")
7499 		for k in ("info", "ldpath", "updates"):
7502 		mtimedbkeys = set(("info", "ldpath", "resume", "resume_backup",
7503 			"starttime", "updates", "version"))
7506 			if k not in mtimedbkeys:
7507 				writemsg("Deleting invalid mtimedb key: %s\n" % str(k))
7510 		self._clean_data = copy.deepcopy(d)
7513 		if not self.filename:
7517 		# Only commit if the internal state has changed.
7518 		if d != self._clean_data:
7519 			commit_mtimedb(mydict=d, filename=self.filename)
7520 			self._clean_data = copy.deepcopy(d)
# create_trees: build (or refresh) the per-ROOT dict of lazily
# constructed portage trees (vartree/porttree/bintree/virtuals); when
# ROOT != "/" a second, sanitized config is built for the "/" root.
# NOTE(review): lines 7523-7525, 7532, 7535-7536, 7549, 7555-7556,
# 7560, 7562, 7573-7574 are missing from this excerpt, so the visible
# flow (including the return) is incomplete.
7522 def create_trees(config_root=None, target_root=None, trees=None):
7526 		# clean up any existing portdbapi instances
7527 		for myroot in trees:
7528 			portdb = trees[myroot]["porttree"].dbapi
7529 			portdb.close_caches()
7530 			portdbapi.portdbapi_instances.remove(portdb)
7531 			del trees[myroot]["porttree"], myroot, portdb
7533 	settings = config(config_root=config_root, target_root=target_root,
7534 		config_incrementals=portage.const.INCREMENTALS)
7537 	myroots = [(settings["ROOT"], settings)]
7538 	if settings["ROOT"] != "/":
7539 		settings = config(config_root=None, target_root="/",
7540 			config_incrementals=portage.const.INCREMENTALS)
7541 		# When ROOT != "/" we only want overrides from the calling
7542 		# environment to apply to the config that's associated
7543 		# with ROOT != "/", so we wipe out the "backupenv" for the
7544 		# config that is associated with ROOT == "/" and regenerate
7545 		# its incrementals.
7546 		# Preserve backupenv values that are initialized in the config
7547 		# constructor. Also, preserve XARGS since it is set by the
7548 		# portage.data module.
7550 		backupenv_whitelist = settings._environ_whitelist
7551 		backupenv = settings.configdict["backupenv"]
7552 		env_d = settings.configdict["env.d"]
7553 		for k, v in os.environ.iteritems():
7554 			if k in backupenv_whitelist:
7557 				v == backupenv.get(k):
7558 				backupenv.pop(k, None)
7559 		settings.regenerate()
7561 		myroots.append((settings["ROOT"], settings))
7563 	for myroot, mysettings in myroots:
# LazyItemsDict defers construction of each tree until first access.
7564 		trees[myroot] = portage.util.LazyItemsDict(trees.get(myroot, None))
7565 		trees[myroot].addLazySingleton("virtuals", mysettings.getvirtuals, myroot)
7566 		trees[myroot].addLazySingleton(
7567 			"vartree", vartree, myroot, categories=mysettings.categories,
7568 				settings=mysettings)
7569 		trees[myroot].addLazySingleton("porttree",
7570 			portagetree, myroot, settings=mysettings)
7571 		trees[myroot].addLazySingleton("bintree",
7572 			binarytree, myroot, mysettings["PKGDIR"], settings=mysettings)
# NOTE(review): the docstring delimiters (lines 7576, 7579) and line
# 7583 are missing from this excerpt; the text at 7577-7578 is the
# class docstring body.
7575 class _LegacyGlobalProxy(portage.util.ObjectProxy):
7577 	Instances of these serve as proxies to global variables
7578 	that are initialized on demand.
7580 	def __init__(self, name):
7581 		portage.util.ObjectProxy.__init__(self)
# Store the target global's name without triggering proxy dispatch.
7582 		object.__setattr__(self, '_name', name)
7584 	def _get_target(self):
# First access forces full legacy-global initialization, then resolves
# the real object from module globals.
7585 		init_legacy_globals()
7586 		name = object.__getattribute__(self, '_name')
7587 		return globals()[name]
# NOTE(review): docstring delimiters (7590, 7594) and lines 7595,
# 7602-7603 (including the return of the resolved portdb) are missing
# from this excerpt.
7589 class _PortdbProxy(portage.util.ObjectProxy):
7591 	The portdb is initialized separately from the rest
7592 	of the variables, since sometimes the other variables
7593 	are needed while the portdb is not.
7596 	def _get_target(self):
7597 		init_legacy_globals()
7598 		global db, portdb, root, _portdb_initialized
# Lazily bind the real portdbapi on first access, exactly once.
7599 		if not _portdb_initialized:
7600 			portdb = db[root]["porttree"].dbapi
7601 			_portdb_initialized = True
# NOTE(review): docstring delimiters (7605, 7607-7608) and line 7612
# are missing from this excerpt.
7604 class _MtimedbProxy(portage.util.ObjectProxy):
7606 	The mtimedb is independent from the portdb and other globals.
7609 	def __init__(self, name):
7610 		portage.util.ObjectProxy.__init__(self)
# Store the target global's name without triggering proxy dispatch.
7611 		object.__setattr__(self, '_name', name)
7613 	def _get_target(self):
7614 		global mtimedb, mtimedbfile, _mtimedb_initialized
# Lazily construct the MtimeDB (and its path) on first access.
7615 		if not _mtimedb_initialized:
7616 			mtimedbfile = os.path.join("/",
7617 				CACHE_PATH.lstrip(os.path.sep), "mtimedb")
7618 			mtimedb = MtimeDB(mtimedbfile)
7619 			_mtimedb_initialized = True
7620 		name = object.__getattribute__(self, '_name')
7621 		return globals()[name]
# Names of the deprecated module-level globals that are exposed through
# lazily-initializing ObjectProxy instances.
_legacy_global_var_names = ("archlist", "db", "features",
	"groups", "mtimedb", "mtimedbfile", "pkglines",
	"portdb", "profiledir", "root", "selinux_enabled",
	"settings", "thirdpartymirrors", "usedefaults")

def _disable_legacy_globals():
	"""
	This deletes the ObjectProxy instances that are used
	for lazy initialization of legacy global variables.
	The purpose of deleting them is to prevent new code
	from referencing these deprecated variables.
	"""
	module_globals = globals()
	for legacy_name in _legacy_global_var_names:
		module_globals.pop(legacy_name, None)
7639 # Initialization of legacy globals. No functions/classes below this point
7640 # please! When the above functions and classes become independent of the
7641 # below global variables, it will be possible to make the below code
7642 # conditional on a backward compatibility flag (backward compatibility could
7643 # be disabled via an environment variable, for example). This will enable new
7644 # code that is aware of this flag to import portage without the unnecessary
7645 # overhead (and other issues!) of initializing the legacy globals.
# init_legacy_globals: one-shot initializer that populates the legacy
# module-level globals (db, settings, root, archlist, ...) for backward
# compatibility; guarded by _globals_initialized so it runs only once.
# NOTE(review): lines 7650, 7652, 7656, 7658-7660, 7663, 7668,
# 7670-7672, 7674-7675, 7677-7678, 7680, 7691 are missing from this
# excerpt (incl. the early return and the umask handling the comment at
# 7657 refers to).
7647 def init_legacy_globals():
7648 	global _globals_initialized
7649 	if _globals_initialized:
7651 	_globals_initialized = True
7653 	global db, settings, root, portdb, selinux_enabled, mtimedbfile, mtimedb, \
7654 	archlist, features, groups, pkglines, thirdpartymirrors, usedefaults, \
7655 	profiledir, flushmtimedb
7657 	# Portage needs to ensure a sane umask for the files it creates.
7661 	for k, envvar in (("config_root", "PORTAGE_CONFIGROOT"), ("target_root", "ROOT")):
7662 		kwargs[k] = os.environ.get(envvar, "/")
7664 	global _initializing_globals
# Flag re-entrancy while create_trees() runs, since it may touch proxies.
7665 	_initializing_globals = True
7666 	db = create_trees(**kwargs)
7667 	del _initializing_globals
7669 	settings = db["/"]["vartree"].settings
7673 		settings = db[myroot]["vartree"].settings
7676 	root = settings["ROOT"]
7679 	# ========================================================================
7681 	# These attributes should not be used
7682 	# within Portage under any circumstances.
7683 	# ========================================================================
7684 	archlist    = settings.archlist()
7685 	features    = settings.features
7686 	groups      = settings["ACCEPT_KEYWORDS"].split()
7687 	pkglines    = settings.packages
7688 	selinux_enabled   = settings.selinux_enabled()
7689 	thirdpartymirrors = settings.thirdpartymirrors()
7690 	usedefaults       = settings.use_defs
7692 	if os.path.isdir(PROFILE_PATH):
7693 		profiledir = PROFILE_PATH
7694 	def flushmtimedb(record):
7695 		writemsg("portage.flushmtimedb() is DEPRECATED\n")
7696 	# ========================================================================
7698 	# These attributes should not be used
7699 	# within Portage under any circumstances.
7700 	# ========================================================================
# Module-level bootstrap: install lazy proxies for all legacy globals so
# that importing portage stays cheap until one of them is touched.
# NOTE(review): lines 7707, 7710, 7712, 7716 are missing from this
# excerpt -- 7716 presumably closes the tuple below (e.g. with
# "flushmtimedb") before the loop body at 7717.
7704 _mtimedb_initialized = False
7705 mtimedb = _MtimedbProxy("mtimedb")
7706 mtimedbfile = _MtimedbProxy("mtimedbfile")
7708 _portdb_initialized = False
7709 portdb = _PortdbProxy()
7711 _globals_initialized = False
7713 for k in ("db", "settings", "root", "selinux_enabled",
7714 	"archlist", "features", "groups",
7715 	"pkglines", "thirdpartymirrors", "usedefaults", "profiledir",
7717 	globals()[k] = _LegacyGlobalProxy(k)
7722 # ============================================================================
7723 # ============================================================================