1 # portage.py -- core Portage functionality
2 # Copyright 1998-2004 Gentoo Foundation
3 # Distributed under the terms of the GNU General Public License v2
# Portage version string: the SVN $Rev$ keyword (expanded by the repository
# at checkout time to e.g. "$Rev: 12345 $"); the [6:-2] slice strips the
# "$Rev: " prefix and trailing " $", and "-svn" marks this as a live checkout.
VERSION="$Rev$"[6:-2] + "-svn"
9 # ===========================================================================
10 # START OF IMPORTS -- START OF IMPORTS -- START OF IMPORTS -- START OF IMPORT
11 # ===========================================================================
23 import cPickle as pickle
29 from time import sleep
30 from random import shuffle
32 from itertools import chain, izip
35 except ImportError, e:
36 sys.stderr.write("\n\n")
37 sys.stderr.write("!!! Failed to complete python imports. These are internal modules for\n")
38 sys.stderr.write("!!! python and failure here indicates that you have a problem with python\n")
39 sys.stderr.write("!!! itself and thus portage is not able to continue processing.\n\n")
41 sys.stderr.write("!!! You might consider starting python with verbose flags to see what has\n")
42 sys.stderr.write("!!! gone wrong. Here is the information we got for this exception:\n")
43 sys.stderr.write(" "+str(e)+"\n\n");
47 if platform.system() in ["FreeBSD"]:
50 def _chflags(path, flags, opts=""):
51 cmd = "chflags %s %o '%s'" % (opts, flags, path)
52 status, output = commands.getstatusoutput(cmd)
53 if os.WIFEXITED(status) and os.WEXITSTATUS(status) == os.EX_OK:
55 # Try to generate an ENOENT error if appropriate.
60 # Make sure the binary exists.
61 if not portage.process.find_binary("chflags"):
62 raise portage.exception.CommandNotFound("chflags")
63 # Now we're not sure exactly why it failed or what
64 # the real errno was, so just report EPERM.
65 e = OSError(errno.EPERM, output)
	def _lchflags(path, flags):
		# Like _chflags() but passes -h so the flags are applied to the
		# symlink itself rather than its target, mirroring lchflags(2).
		return _chflags(path, flags, opts="-h")
	# Install the wrappers into the bsd_chflags module namespace so the rest
	# of portage calls these subprocess-based implementations.
	bsd_chflags.chflags = _chflags
	bsd_chflags.lchflags = _lchflags
76 from portage.cache.cache_errors import CacheError
77 import portage.cvstree
79 import portage.getbinpkg
81 from portage.dep import dep_getcpv, dep_getkey, get_operator, \
82 isjustname, isspecific, isvalidatom, \
83 match_from_list, match_to_list, best_match_to_list
85 # XXX: This needs to get cleaned up.
87 from portage.output import bold, colorize, green, red, yellow
90 from portage.const import VDB_PATH, PRIVATE_PATH, CACHE_PATH, DEPCACHE_PATH, \
91 USER_CONFIG_PATH, MODULES_FILE_PATH, CUSTOM_PROFILE_PATH, PORTAGE_BASE_PATH, \
92 PORTAGE_BIN_PATH, PORTAGE_PYM_PATH, PROFILE_PATH, LOCALE_DATA_PATH, \
93 EBUILD_SH_BINARY, SANDBOX_BINARY, BASH_BINARY, \
94 MOVE_BINARY, PRELINK_BINARY, WORLD_FILE, MAKE_CONF_FILE, MAKE_DEFAULTS_FILE, \
95 DEPRECATED_PROFILE_FILE, USER_VIRTUALS_FILE, EBUILD_SH_ENV_FILE, \
96 INVALID_ENV_FILE, CUSTOM_MIRRORS_FILE, CONFIG_MEMORY_FILE,\
97 INCREMENTALS, EAPI, MISC_SH_BINARY, REPO_NAME_LOC, REPO_NAME_FILE
99 from portage.data import ostype, lchown, userland, secpass, uid, wheelgid, \
100 portage_uid, portage_gid, userpriv_groups
101 from portage.manifest import Manifest
104 from portage.util import atomic_ofstream, apply_secpass_permissions, apply_recursive_permissions, \
105 dump_traceback, getconfig, grabdict, grabdict_package, grabfile, grabfile_package, \
106 map_dictlist_vals, new_protect_filename, normalize_path, \
107 pickle_read, pickle_write, stack_dictlist, stack_dicts, stack_lists, \
108 unique_array, varexpand, writedict, writemsg, writemsg_stdout, write_atomic
109 import portage.exception
111 import portage.process
112 from portage.process import atexit_register, run_exitfuncs
113 from portage.locks import unlockfile,unlockdir,lockfile,lockdir
114 import portage.checksum
115 from portage.checksum import perform_md5,perform_checksum,prelink_capable
116 import portage.eclass_cache
117 from portage.localization import _
118 from portage.update import dep_transform, fixdbentries, grab_updates, \
119 parse_updates, update_config_files, update_dbentries, update_dbentry
121 # Need these functions directly in portage namespace to not break every external tool in existence
122 from portage.versions import best, catpkgsplit, catsplit, pkgcmp, \
123 pkgsplit, vercmp, ververify
125 # endversion and endversion_keys are for backward compatibility only.
126 from portage.versions import endversion_keys
127 from portage.versions import suffix_value as endversion
129 except ImportError, e:
130 sys.stderr.write("\n\n")
131 sys.stderr.write("!!! Failed to complete portage imports. There are internal modules for\n")
132 sys.stderr.write("!!! portage and failure here indicates that you have a problem with your\n")
133 sys.stderr.write("!!! installation of portage. Please try a rescue portage located in the\n")
134 sys.stderr.write("!!! portage tree under '/usr/portage/sys-apps/portage/files/' (default).\n")
135 sys.stderr.write("!!! There is a README.RESCUE file that details the steps required to perform\n")
136 sys.stderr.write("!!! a recovery of portage.\n")
137 sys.stderr.write(" "+str(e)+"\n\n")
142 import portage._selinux as selinux
144 writemsg("!!! SELinux not loaded: %s\n" % str(e), noiselevel=-1)
149 # ===========================================================================
150 # END OF IMPORTS -- END OF IMPORTS -- END OF IMPORTS -- END OF IMPORTS -- END
151 # ===========================================================================
155 modname = ".".join(name.split(".")[:-1])
156 mod = __import__(modname)
157 components = name.split('.')
158 for comp in components[1:]:
159 mod = getattr(mod, comp)
162 def best_from_dict(key, top_dict, key_order, EmptyOnError=1, FullCopy=1, AllowEmpty=1):
164 if x in top_dict and key in top_dict[x]:
166 return copy.deepcopy(top_dict[x][key])
168 return top_dict[x][key]
172 raise KeyError("Key not found in list; '%s'" % key)
175 "this fixes situations where the current directory doesn't exist"
178 except OSError: #dir doesn't exist
183 def abssymlink(symlink):
184 "This reads symlinks, resolving the relative symlinks, and returning the absolute."
185 mylink=os.readlink(symlink)
187 mydir=os.path.dirname(symlink)
188 mylink=mydir+"/"+mylink
189 return os.path.normpath(mylink)
195 def cacheddir(my_original_path, ignorecvs, ignorelist, EmptyOnError, followSymlinks=True):
196 global cacheHit,cacheMiss,cacheStale
197 mypath = normalize_path(my_original_path)
198 if mypath in dircache:
200 cached_mtime, list, ftype = dircache[mypath]
203 cached_mtime, list, ftype = -1, [], []
205 pathstat = os.stat(mypath)
206 if stat.S_ISDIR(pathstat[stat.ST_MODE]):
207 mtime = pathstat.st_mtime
209 raise portage.exception.DirectoryNotFound(mypath)
210 except EnvironmentError, e:
211 if e.errno == portage.exception.PermissionDenied.errno:
212 raise portage.exception.PermissionDenied(mypath)
217 except portage.exception.PortageException:
221 # Python returns mtime in seconds, so if it was changed in the last few seconds, it could be invalid
222 if mtime != cached_mtime or time.time() - mtime < 4:
223 if mypath in dircache:
226 list = os.listdir(mypath)
227 except EnvironmentError, e:
228 if e.errno != errno.EACCES:
231 raise portage.exception.PermissionDenied(mypath)
236 pathstat = os.stat(mypath+"/"+x)
238 pathstat = os.lstat(mypath+"/"+x)
240 if stat.S_ISREG(pathstat[stat.ST_MODE]):
242 elif stat.S_ISDIR(pathstat[stat.ST_MODE]):
244 elif stat.S_ISLNK(pathstat[stat.ST_MODE]):
248 except (IOError, OSError):
250 dircache[mypath] = mtime, list, ftype
254 for x in range(0, len(list)):
255 if(ignorecvs and (len(list[x]) > 2) and (list[x][:2]!=".#")):
256 ret_list.append(list[x])
257 ret_ftype.append(ftype[x])
258 elif (list[x] not in ignorelist):
259 ret_list.append(list[x])
260 ret_ftype.append(ftype[x])
262 writemsg("cacheddirStats: H:%d/M:%d/S:%d\n" % (cacheHit, cacheMiss, cacheStale),10)
263 return ret_list, ret_ftype
265 def listdir(mypath, recursive=False, filesonly=False, ignorecvs=False, ignorelist=[], followSymlinks=True,
266 EmptyOnError=False, dirsonly=False):
268 Portage-specific implementation of os.listdir
270 @param mypath: Path whose contents you wish to list
272 @param recursive: Recursively scan directories contained within mypath
273 @type recursive: Boolean
274 	@param filesonly: Only return files, not more directories
275 @type filesonly: Boolean
276 @param ignorecvs: Ignore CVS directories ('CVS','.svn','SCCS')
277 @type ignorecvs: Boolean
278 @param ignorelist: List of filenames/directories to exclude
279 @type ignorelist: List
280 @param followSymlinks: Follow Symlink'd files and directories
281 @type followSymlinks: Boolean
282 @param EmptyOnError: Return [] if an error occurs.
283 @type EmptyOnError: Boolean
284 @param dirsonly: Only return directories.
285 @type dirsonly: Boolean
287 @returns: A list of files and directories (or just files or just directories) or an empty list.
290 list, ftype = cacheddir(mypath, ignorecvs, ignorelist, EmptyOnError, followSymlinks)
297 if not (filesonly or dirsonly or recursive):
303 if ftype[x]==1 and not (ignorecvs and os.path.basename(list[x]) in ('CVS','.svn','SCCS')):
304 l,f = cacheddir(mypath+"/"+list[x], ignorecvs, ignorelist, EmptyOnError,
308 for y in range(0,len(l)):
309 l[y]=list[x]+"/"+l[y]
315 for x in range(0,len(ftype)):
317 rlist=rlist+[list[x]]
320 for x in range(0, len(ftype)):
322 rlist = rlist + [list[x]]
def flatten(mytokens):
	"""Recursively flatten a nested list structure.

	Turns a list such as [1, [2, 3]] into [1, 2, 3] and returns the new
	list; non-list elements are appended unchanged, nested lists are
	flattened depth-first via recursion.

	@param mytokens: possibly nested list to flatten
	@rtype: list
	@return: a new, flat list containing all leaf elements in order
	"""
	newlist = []
	for x in mytokens:
		if isinstance(x, list):
			# Nested list: splice its (flattened) contents in place.
			newlist.extend(flatten(x))
		else:
			newlist.append(x)
	return newlist
339 #beautiful directed graph object
341 class digraph(object):
343 """Create an empty digraph"""
345 # { node : ( { child : priority } , { parent : priority } ) }
349 def add(self, node, parent, priority=0):
350 """Adds the specified node with the specified parent.
352 If the dep is a soft-dep and the node already has a hard
353 relationship to the parent, the relationship is left as hard."""
355 if node not in self.nodes:
356 self.nodes[node] = ({}, {}, node)
357 self.order.append(node)
362 if parent not in self.nodes:
363 self.nodes[parent] = ({}, {}, parent)
364 self.order.append(parent)
366 if parent in self.nodes[node][1]:
367 if priority > self.nodes[node][1][parent]:
368 self.nodes[node][1][parent] = priority
370 self.nodes[node][1][parent] = priority
372 if node in self.nodes[parent][0]:
373 if priority > self.nodes[parent][0][node]:
374 self.nodes[parent][0][node] = priority
376 self.nodes[parent][0][node] = priority
378 def remove(self, node):
379 """Removes the specified node from the digraph, also removing
380 	any ties to other nodes in the digraph. Raises KeyError if the
381 node doesn't exist."""
383 if node not in self.nodes:
386 for parent in self.nodes[node][1]:
387 del self.nodes[parent][0][node]
388 for child in self.nodes[node][0]:
389 del self.nodes[child][1][node]
392 self.order.remove(node)
394 def difference_update(self, t):
396 Remove all given nodes from node_set. This is more efficient
397 than multiple calls to the remove() method.
399 if isinstance(t, (list, tuple)) or \
400 not hasattr(t, "__contains__"):
403 for node in self.order:
407 for parent in self.nodes[node][1]:
408 del self.nodes[parent][0][node]
409 for child in self.nodes[node][0]:
410 del self.nodes[child][1][node]
414 def remove_edge(self, child, parent):
416 Remove edge in the direction from child to parent. Note that it is
417 possible for a remaining edge to exist in the opposite direction.
418 Any endpoint vertices that become isolated will remain in the graph.
421 # Nothing should be modified when a KeyError is raised.
422 for k in parent, child:
423 if k not in self.nodes:
426 # Make sure the edge exists.
427 if child not in self.nodes[parent][0]:
428 raise KeyError(child)
429 if parent not in self.nodes[child][1]:
430 raise KeyError(parent)
433 del self.nodes[child][1][parent]
434 del self.nodes[parent][0][child]
437 return iter(self.order)
	def contains(self, node):
		"""Return True if the given node is present in the digraph."""
		return node in self.nodes
443 def get(self, key, default=None):
444 node_data = self.nodes.get(key, self)
445 if node_data is self:
450 """Return a list of all nodes in the graph"""
453 def child_nodes(self, node, ignore_priority=None):
454 """Return all children of the specified node"""
455 if ignore_priority is None:
456 return self.nodes[node][0].keys()
458 for child, priority in self.nodes[node][0].iteritems():
459 if priority > ignore_priority:
460 children.append(child)
463 def parent_nodes(self, node):
464 """Return all parents of the specified node"""
465 return self.nodes[node][1].keys()
467 def leaf_nodes(self, ignore_priority=None):
468 """Return all nodes that have no children
470 If ignore_soft_deps is True, soft deps are not counted as
471 children in calculations."""
474 for node in self.order:
476 for child in self.nodes[node][0]:
477 if self.nodes[node][0][child] > ignore_priority:
481 leaf_nodes.append(node)
484 def root_nodes(self, ignore_priority=None):
485 """Return all nodes that have no parents.
487 If ignore_soft_deps is True, soft deps are not counted as
488 parents in calculations."""
491 for node in self.order:
493 for parent in self.nodes[node][1]:
494 if self.nodes[node][1][parent] > ignore_priority:
498 root_nodes.append(node)
502 """Checks if the digraph is empty"""
503 return len(self.nodes) == 0
508 for k, v in self.nodes.iteritems():
509 clone.nodes[k] = (v[0].copy(), v[1].copy(), v[2])
510 clone.order = self.order[:]
513 # Backward compatibility
516 allzeros = leaf_nodes
518 __contains__ = contains
522 def delnode(self, node):
529 leaf_nodes = self.leaf_nodes()
534 def hasallzeros(self, ignore_priority=None):
535 return len(self.leaf_nodes(ignore_priority=ignore_priority)) == \
538 def debug_print(self):
540 writemsg(s, noiselevel=-1)
541 for node in self.nodes:
542 output("%s " % (node,))
543 if self.nodes[node][0]:
544 output("depends on\n")
546 output("(no children)\n")
547 for child in self.nodes[node][0]:
548 output(" %s (%s)\n" % \
549 (child, self.nodes[node][0][child],))
551 #parse /etc/env.d and generate /etc/profile.env
553 def env_update(makelinks=1, target_root=None, prev_mtimes=None, contents=None,
554 env=None, writemsg_level=portage.util.writemsg_level):
555 if target_root is None:
557 target_root = settings["ROOT"]
558 if prev_mtimes is None:
560 prev_mtimes = mtimedb["ldpath"]
563 envd_dir = os.path.join(target_root, "etc", "env.d")
564 portage.util.ensure_dirs(envd_dir, mode=0755)
565 fns = listdir(envd_dir, EmptyOnError=1)
571 if not x[0].isdigit() or not x[1].isdigit():
573 if x.startswith(".") or x.endswith("~") or x.endswith(".bak"):
579 space_separated = set(["CONFIG_PROTECT", "CONFIG_PROTECT_MASK"])
580 colon_separated = set(["ADA_INCLUDE_PATH", "ADA_OBJECTS_PATH",
581 "CLASSPATH", "INFODIR", "INFOPATH", "KDEDIRS", "LDPATH", "MANPATH",
582 "PATH", "PKG_CONFIG_PATH", "PRELINK_PATH", "PRELINK_PATH_MASK",
583 "PYTHONPATH", "ROOTPATH"])
588 file_path = os.path.join(envd_dir, x)
590 myconfig = getconfig(file_path, expand=False)
591 except portage.exception.ParseError, e:
592 writemsg("!!! '%s'\n" % str(e), noiselevel=-1)
596 # broken symlink or file removed by a concurrent process
597 writemsg("!!! File Not Found: '%s'\n" % file_path, noiselevel=-1)
599 config_list.append(myconfig)
600 if "SPACE_SEPARATED" in myconfig:
601 space_separated.update(myconfig["SPACE_SEPARATED"].split())
602 del myconfig["SPACE_SEPARATED"]
603 if "COLON_SEPARATED" in myconfig:
604 colon_separated.update(myconfig["COLON_SEPARATED"].split())
605 del myconfig["COLON_SEPARATED"]
609 for var in space_separated:
611 for myconfig in config_list:
613 for item in myconfig[var].split():
614 if item and not item in mylist:
616 del myconfig[var] # prepare for env.update(myconfig)
618 env[var] = " ".join(mylist)
619 specials[var] = mylist
621 for var in colon_separated:
623 for myconfig in config_list:
625 for item in myconfig[var].split(":"):
626 if item and not item in mylist:
628 del myconfig[var] # prepare for env.update(myconfig)
630 env[var] = ":".join(mylist)
631 specials[var] = mylist
633 for myconfig in config_list:
634 """Cumulative variables have already been deleted from myconfig so that
635 they won't be overwritten by this dict.update call."""
638 ldsoconf_path = os.path.join(target_root, "etc", "ld.so.conf")
640 myld = open(ldsoconf_path)
641 myldlines=myld.readlines()
645 #each line has at least one char (a newline)
649 except (IOError, OSError), e:
650 if e.errno != errno.ENOENT:
654 ld_cache_update=False
656 newld = specials["LDPATH"]
658 #ld.so.conf needs updating and ldconfig needs to be run
659 myfd = atomic_ofstream(ldsoconf_path)
660 myfd.write("# ld.so.conf autogenerated by env-update; make all changes to\n")
661 myfd.write("# contents of /etc/env.d directory\n")
662 for x in specials["LDPATH"]:
667 # Update prelink.conf if we are prelink-enabled
669 newprelink = atomic_ofstream(
670 os.path.join(target_root, "etc", "prelink.conf"))
671 newprelink.write("# prelink.conf autogenerated by env-update; make all changes to\n")
672 newprelink.write("# contents of /etc/env.d directory\n")
674 for x in ["/bin","/sbin","/usr/bin","/usr/sbin","/lib","/usr/lib"]:
675 newprelink.write("-l "+x+"\n");
676 for x in specials["LDPATH"]+specials["PATH"]+specials["PRELINK_PATH"]:
682 for y in specials["PRELINK_PATH_MASK"]:
691 newprelink.write("-h "+x+"\n")
692 for x in specials["PRELINK_PATH_MASK"]:
693 newprelink.write("-b "+x+"\n")
696 # Portage stores mtimes with 1 second granularity but in >=python-2.5 finer
697 # granularity is possible. In order to avoid the potential ambiguity of
698 # mtimes that differ by less than 1 second, sleep here if any of the
699 # directories have been modified during the current second.
700 sleep_for_mtime_granularity = False
701 current_time = long(time.time())
702 mtime_changed = False
704 for lib_dir in portage.util.unique_array(specials["LDPATH"]+['usr/lib','usr/lib64','usr/lib32','lib','lib64','lib32']):
705 x = os.path.join(target_root, lib_dir.lstrip(os.sep))
707 newldpathtime = long(os.stat(x).st_mtime)
708 lib_dirs.add(normalize_path(x))
710 if oe.errno == errno.ENOENT:
715 # ignore this path because it doesn't exist
718 if newldpathtime == current_time:
719 sleep_for_mtime_granularity = True
721 if prev_mtimes[x] == newldpathtime:
724 prev_mtimes[x] = newldpathtime
727 prev_mtimes[x] = newldpathtime
731 ld_cache_update = True
734 not ld_cache_update and \
735 contents is not None:
736 libdir_contents_changed = False
737 for mypath, mydata in contents.iteritems():
738 if mydata[0] not in ("obj","sym"):
740 head, tail = os.path.split(mypath)
742 libdir_contents_changed = True
744 if not libdir_contents_changed:
747 ldconfig = "/sbin/ldconfig"
748 if "CHOST" in env and "CBUILD" in env and \
749 env["CHOST"] != env["CBUILD"]:
750 from portage.process import find_binary
751 ldconfig = find_binary("%s-ldconfig" % env["CHOST"])
753 # Only run ldconfig as needed
754 if (ld_cache_update or makelinks) and ldconfig:
755 # ldconfig has very different behaviour between FreeBSD and Linux
756 if ostype=="Linux" or ostype.lower().endswith("gnu"):
757 # We can't update links if we haven't cleaned other versions first, as
758 # an older package installed ON TOP of a newer version will cause ldconfig
759 # to overwrite the symlinks we just made. -X means no links. After 'clean'
760 # we can safely create links.
761 writemsg_level(">>> Regenerating %setc/ld.so.cache...\n" % \
764 os.system("cd / ; %s -r '%s'" % (ldconfig, target_root))
766 os.system("cd / ; %s -X -r '%s'" % (ldconfig, target_root))
767 elif ostype in ("FreeBSD","DragonFly"):
768 writemsg_level(">>> Regenerating %svar/run/ld-elf.so.hints...\n" % \
770 os.system(("cd / ; %s -elf -i " + \
771 "-f '%svar/run/ld-elf.so.hints' '%setc/ld.so.conf'") % \
772 (ldconfig, target_root, target_root))
774 del specials["LDPATH"]
776 penvnotice = "# THIS FILE IS AUTOMATICALLY GENERATED BY env-update.\n"
777 penvnotice += "# DO NOT EDIT THIS FILE. CHANGES TO STARTUP PROFILES\n"
778 cenvnotice = penvnotice[:]
779 penvnotice += "# GO INTO /etc/profile NOT /etc/profile.env\n\n"
780 cenvnotice += "# GO INTO /etc/csh.cshrc NOT /etc/csh.env\n\n"
782 #create /etc/profile.env for bash support
783 outfile = atomic_ofstream(os.path.join(target_root, "etc", "profile.env"))
784 outfile.write(penvnotice)
786 env_keys = [ x for x in env if x != "LDPATH" ]
790 if v.startswith('$') and not v.startswith('${'):
791 outfile.write("export %s=$'%s'\n" % (k, v[1:]))
793 outfile.write("export %s='%s'\n" % (k, v))
796 #create /etc/csh.env for (t)csh support
797 outfile = atomic_ofstream(os.path.join(target_root, "etc", "csh.env"))
798 outfile.write(cenvnotice)
800 outfile.write("setenv %s '%s'\n" % (x, env[x]))
803 if sleep_for_mtime_granularity:
804 while current_time == long(time.time()):
807 def ExtractKernelVersion(base_dir):
809 Try to figure out what kernel version we are running
810 @param base_dir: Path to sources (usually /usr/src/linux)
811 @type base_dir: string
812 @rtype: tuple( version[string], error[string])
814 1. tuple( version[string], error[string])
815 Either version or error is populated (but never both)
819 pathname = os.path.join(base_dir, 'Makefile')
821 f = open(pathname, 'r')
822 except OSError, details:
823 return (None, str(details))
824 except IOError, details:
825 return (None, str(details))
829 lines.append(f.readline())
830 except OSError, details:
831 return (None, str(details))
832 except IOError, details:
833 return (None, str(details))
835 lines = [l.strip() for l in lines]
839 #XXX: The following code relies on the ordering of vars within the Makefile
841 # split on the '=' then remove annoying whitespace
842 items = line.split("=")
843 items = [i.strip() for i in items]
844 if items[0] == 'VERSION' or \
845 items[0] == 'PATCHLEVEL':
848 elif items[0] == 'SUBLEVEL':
850 elif items[0] == 'EXTRAVERSION' and \
851 items[-1] != items[0]:
854 # Grab a list of files named localversion* and sort them
855 localversions = os.listdir(base_dir)
856 for x in range(len(localversions)-1,-1,-1):
857 if localversions[x][:12] != "localversion":
861 # Append the contents of each to the version string, stripping ALL whitespace
862 for lv in localversions:
863 version += "".join( " ".join( grabfile( base_dir+ "/" + lv ) ).split() )
865 # Check the .config for a CONFIG_LOCALVERSION and append that too, also stripping whitespace
866 kernelconfig = getconfig(base_dir+"/.config")
867 if kernelconfig and "CONFIG_LOCALVERSION" in kernelconfig:
868 version += "".join(kernelconfig["CONFIG_LOCALVERSION"].split())
870 return (version,None)
872 def autouse(myvartree, use_cache=1, mysettings=None):
874 	autouse returns a list of USE variables auto-enabled to packages being installed
876 @param myvartree: Instance of the vartree class (from /var/db/pkg...)
877 @type myvartree: vartree
878 @param use_cache: read values from cache
879 @type use_cache: Boolean
880 @param mysettings: Instance of config
881 @type mysettings: config
883 @returns: A string containing a list of USE variables that are enabled via use.defaults
885 if mysettings is None:
887 mysettings = settings
888 if mysettings.profile_path is None:
891 usedefaults = mysettings.use_defs
892 for myuse in usedefaults:
894 for mydep in usedefaults[myuse]:
895 if not myvartree.dep_match(mydep,use_cache=True):
899 myusevars += " "+myuse
def check_config_instance(test):
	"""Raise TypeError unless *test* is an instance of the config class."""
	if isinstance(test, config):
		return
	raise TypeError("Invalid type for config object: %s (should be %s)" % (test.__class__, config))
906 class config(object):
908 This class encompasses the main portage configuration. Data is pulled from
909 ROOT/PORTDIR/profiles/, from ROOT/etc/make.profile incrementally through all
910 parent profiles as well as from ROOT/PORTAGE_CONFIGROOT/* for user specified
913 Generally if you need data like USE flags, FEATURES, environment variables,
914 virtuals ...etc you look in here.
918 "A", "AA", "CATEGORY", "EBUILD_PHASE", "EMERGE_FROM",
919 "PF", "PKGUSE", "PORTAGE_CONFIGROOT", "PORTAGE_IUSE",
920 "PORTAGE_REPO_NAME", "PORTAGE_USE", "ROOT"
923 _environ_whitelist = []
925 # Whitelisted variables are always allowed to enter the ebuild
926 # environment. Generally, this only includes special portage
927 # variables. Ebuilds can unset variables that are not whitelisted
928 # and rely on them remaining unset for future phases, without them
929 # leaking back in from various locations (bug #189417). It's very
930 # important to set our special BASH_ENV variable in the ebuild
931 # environment in order to prevent sandbox from sourcing /etc/profile
932 	# in its bashrc (causing major leakage).
933 _environ_whitelist += [
934 "BASH_ENV", "BUILD_PREFIX", "D",
935 "DISTDIR", "DOC_SYMLINKS_DIR", "EBUILD",
936 "EBUILD_EXIT_STATUS_FILE", "EBUILD_FORCE_TEST",
937 "EBUILD_PHASE", "ECLASSDIR", "ECLASS_DEPTH", "EMERGE_FROM",
938 "FEATURES", "FILESDIR", "HOME", "PATH",
940 "PKGUSE", "PKG_LOGDIR", "PKG_TMPDIR",
941 "PORTAGE_ACTUAL_DISTDIR", "PORTAGE_ARCHLIST",
943 "PORTAGE_BINPKG_FILE", "PORTAGE_BINPKG_TAR_OPTS",
944 "PORTAGE_BINPKG_TMPFILE",
946 "PORTAGE_BUILDDIR", "PORTAGE_COLORMAP",
947 "PORTAGE_CONFIGROOT", "PORTAGE_DEBUG", "PORTAGE_DEPCACHEDIR",
948 "PORTAGE_GID", "PORTAGE_INST_GID", "PORTAGE_INST_UID",
950 "PORTAGE_LOG_FILE", "PORTAGE_MASTER_PID",
951 "PORTAGE_PYM_PATH", "PORTAGE_QUIET",
952 "PORTAGE_REPO_NAME", "PORTAGE_RESTRICT",
953 "PORTAGE_TMPDIR", "PORTAGE_UPDATE_ENV",
954 "PORTAGE_VERBOSE", "PORTAGE_WORKDIR_MODE",
955 "PORTDIR", "PORTDIR_OVERLAY", "PREROOTPATH", "PROFILE_PATHS",
956 "ROOT", "ROOTPATH", "STARTDIR", "T", "TMP", "TMPDIR",
957 "USE_EXPAND", "USE_ORDER", "WORKDIR",
961 # user config variables
962 _environ_whitelist += [
963 "DOC_SYMLINKS_DIR", "INSTALL_MASK", "PKG_INSTALL_MASK"
966 _environ_whitelist += [
967 "A", "AA", "CATEGORY", "P", "PF", "PN", "PR", "PV", "PVR"
970 # misc variables inherited from the calling environment
971 _environ_whitelist += [
972 "COLORTERM", "DISPLAY", "EDITOR", "LESS",
973 "LESSOPEN", "LOGNAME", "LS_COLORS", "PAGER",
974 "TERM", "TERMCAP", "USER",
977 # other variables inherited from the calling environment
978 _environ_whitelist += [
979 "CVS_RSH", "ECHANGELOG_USER",
981 "SSH_AGENT_PID", "SSH_AUTH_SOCK",
982 "STY", "WINDOW", "XAUTHORITY",
985 _environ_whitelist = frozenset(_environ_whitelist)
987 _environ_whitelist_re = re.compile(r'^(CCACHE_|DISTCC_).*')
989 # Filter selected variables in the config.environ() method so that
990 # they don't needlessly propagate down into the ebuild environment.
993 # misc variables inherited from the calling environment
995 "INFOPATH", "MANPATH",
998 # variables that break bash
1003 # portage config variables and variables set directly by portage
1004 _environ_filter += [
1005 "ACCEPT_KEYWORDS", "AUTOCLEAN",
1006 "CLEAN_DELAY", "COLLISION_IGNORE", "CONFIG_PROTECT",
1007 "CONFIG_PROTECT_MASK", "EMERGE_DEFAULT_OPTS",
1008 "EMERGE_WARNING_DELAY", "FETCHCOMMAND", "FETCHCOMMAND_FTP",
1009 "FETCHCOMMAND_HTTP", "FETCHCOMMAND_SFTP",
1010 "GENTOO_MIRRORS", "NOCONFMEM", "O",
1011 "PORTAGE_BACKGROUND",
1012 "PORTAGE_BINHOST_CHUNKSIZE", "PORTAGE_CALLER",
1013 "PORTAGE_COUNTER_HASH",
1014 "PORTAGE_ECLASS_WARNING_ENABLE", "PORTAGE_ELOG_CLASSES",
1015 "PORTAGE_ELOG_MAILFROM", "PORTAGE_ELOG_MAILSUBJECT",
1016 "PORTAGE_ELOG_MAILURI", "PORTAGE_ELOG_SYSTEM",
1017 "PORTAGE_FETCH_CHECKSUM_TRY_MIRRORS", "PORTAGE_FETCH_RESUME_MIN_SIZE",
1019 "PORTAGE_GPG_KEY", "PORTAGE_IONICE_COMMAND",
1020 "PORTAGE_PACKAGE_EMPTY_ABORT",
1021 "PORTAGE_RO_DISTDIRS",
1022 "PORTAGE_RSYNC_EXTRA_OPTS", "PORTAGE_RSYNC_OPTS",
1023 "PORTAGE_RSYNC_RETRIES", "PORTAGE_USE", "PORT_LOGDIR",
1024 "QUICKPKG_DEFAULT_OPTS",
1025 "RESUMECOMMAND", "RESUMECOMMAND_HTTP", "RESUMECOMMAND_HTTP",
1026 "RESUMECOMMAND_SFTP", "SYNC", "USE_EXPAND_HIDDEN", "USE_ORDER",
1029 _environ_filter = frozenset(_environ_filter)
1031 def __init__(self, clone=None, mycpv=None, config_profile_path=None,
1032 config_incrementals=None, config_root=None, target_root=None,
1035 @param clone: If provided, init will use deepcopy to copy by value the instance.
1036 @type clone: Instance of config class.
1037 @param mycpv: CPV to load up (see setcpv), this is the same as calling init with mycpv=None
1038 and then calling instance.setcpv(mycpv).
1040 @param config_profile_path: Configurable path to the profile (usually PROFILE_PATH from portage.const)
1041 @type config_profile_path: String
1042 @param config_incrementals: List of incremental variables (usually portage.const.INCREMENTALS)
1043 @type config_incrementals: List
1044 @param config_root: path to read local config from (defaults to "/", see PORTAGE_CONFIGROOT)
1045 @type config_root: String
1046 @param target_root: __init__ override of $ROOT env variable.
1047 @type target_root: String
1048 @param local_config: Enables loading of local config (/etc/portage); used most by repoman to
1049 ignore local config (keywording and unmasking)
1050 @type local_config: Boolean
1053 # When initializing the global portage.settings instance, avoid
1054 # raising exceptions whenever possible since exceptions thrown
1055 # from 'import portage' or 'import portage.exceptions' statements
1056 # can practically render the api unusable for api consumers.
1057 tolerant = "_initializing_globals" in globals()
1059 self.already_in_regenerate = 0
1064 self.modifiedkeys = []
1066 self._accept_chost_re = None
1070 self.dirVirtuals = None
1073 # Virtuals obtained from the vartree
1074 self.treeVirtuals = {}
1075 # Virtuals by user specification. Includes negatives.
1076 self.userVirtuals = {}
1077 # Virtual negatives from user specifications.
1078 self.negVirtuals = {}
1079 # Virtuals added by the depgraph via self.setinst().
1080 self._depgraphVirtuals = {}
1082 self.user_profile_dir = None
1083 self.local_config = local_config
1086 self.incrementals = copy.deepcopy(clone.incrementals)
1087 self.profile_path = copy.deepcopy(clone.profile_path)
1088 self.user_profile_dir = copy.deepcopy(clone.user_profile_dir)
1089 self.local_config = copy.deepcopy(clone.local_config)
1091 self.module_priority = copy.deepcopy(clone.module_priority)
1092 self.modules = copy.deepcopy(clone.modules)
1094 self.depcachedir = copy.deepcopy(clone.depcachedir)
1096 self.packages = copy.deepcopy(clone.packages)
1097 self.virtuals = copy.deepcopy(clone.virtuals)
1099 self.dirVirtuals = copy.deepcopy(clone.dirVirtuals)
1100 self.treeVirtuals = copy.deepcopy(clone.treeVirtuals)
1101 self.userVirtuals = copy.deepcopy(clone.userVirtuals)
1102 self.negVirtuals = copy.deepcopy(clone.negVirtuals)
1103 self._depgraphVirtuals = copy.deepcopy(clone._depgraphVirtuals)
1105 self.use_defs = copy.deepcopy(clone.use_defs)
1106 self.usemask = copy.deepcopy(clone.usemask)
1107 self.usemask_list = copy.deepcopy(clone.usemask_list)
1108 self.pusemask_list = copy.deepcopy(clone.pusemask_list)
1109 self.useforce = copy.deepcopy(clone.useforce)
1110 self.useforce_list = copy.deepcopy(clone.useforce_list)
1111 self.puseforce_list = copy.deepcopy(clone.puseforce_list)
1112 self.puse = copy.deepcopy(clone.puse)
1113 self.make_defaults_use = copy.deepcopy(clone.make_defaults_use)
1114 self.pkgprofileuse = copy.deepcopy(clone.pkgprofileuse)
1115 self.mycpv = copy.deepcopy(clone.mycpv)
1117 self.configlist = copy.deepcopy(clone.configlist)
1118 self.lookuplist = self.configlist[:]
1119 self.lookuplist.reverse()
1121 "env.d": self.configlist[0],
1122 "pkginternal": self.configlist[1],
1123 "globals": self.configlist[2],
1124 "defaults": self.configlist[3],
1125 "conf": self.configlist[4],
1126 "pkg": self.configlist[5],
1127 "auto": self.configlist[6],
1128 "backupenv": self.configlist[7],
1129 "env": self.configlist[8] }
1130 self.profiles = copy.deepcopy(clone.profiles)
1131 self.backupenv = self.configdict["backupenv"]
1132 self.pusedict = copy.deepcopy(clone.pusedict)
1133 self.categories = copy.deepcopy(clone.categories)
1134 self.pkeywordsdict = copy.deepcopy(clone.pkeywordsdict)
1135 self.pmaskdict = copy.deepcopy(clone.pmaskdict)
1136 self.punmaskdict = copy.deepcopy(clone.punmaskdict)
1137 self.prevmaskdict = copy.deepcopy(clone.prevmaskdict)
1138 self.pprovideddict = copy.deepcopy(clone.pprovideddict)
1139 self.features = copy.deepcopy(clone.features)
1141 self._accept_license = copy.deepcopy(clone._accept_license)
1142 self._plicensedict = copy.deepcopy(clone._plicensedict)
1145 def check_var_directory(varname, var):
1146 if not os.path.isdir(var):
1147 writemsg(("!!! Error: %s='%s' is not a directory. " + \
1148 "Please correct this.\n") % (varname, var),
1150 raise portage.exception.DirectoryNotFound(var)
1152 if config_root is None:
1155 config_root = normalize_path(os.path.abspath(
1156 config_root)).rstrip(os.path.sep) + os.path.sep
1158 check_var_directory("PORTAGE_CONFIGROOT", config_root)
1160 self.depcachedir = DEPCACHE_PATH
1162 if not config_profile_path:
1163 config_profile_path = \
1164 os.path.join(config_root, PROFILE_PATH.lstrip(os.path.sep))
1165 if os.path.isdir(config_profile_path):
1166 self.profile_path = config_profile_path
1168 self.profile_path = None
1170 self.profile_path = config_profile_path[:]
1172 if not config_incrementals:
1173 writemsg("incrementals not specified to class config\n")
1174 self.incrementals = copy.deepcopy(portage.const.INCREMENTALS)
1176 self.incrementals = copy.deepcopy(config_incrementals)
1178 self.module_priority = ["user","default"]
1180 self.modules["user"] = getconfig(
1181 os.path.join(config_root, MODULES_FILE_PATH.lstrip(os.path.sep)))
1182 if self.modules["user"] is None:
1183 self.modules["user"] = {}
1184 self.modules["default"] = {
1185 "portdbapi.metadbmodule": "portage.cache.metadata.database",
1186 "portdbapi.auxdbmodule": "portage.cache.flat_hash.database",
1192 # back up our incremental variables:
1194 # configlist will contain: [ env.d, globals, defaults, conf, pkg, auto, backupenv, env ]
1195 self.configlist.append({})
1196 self.configdict["env.d"] = self.configlist[-1]
1198 self.configlist.append({})
1199 self.configdict["pkginternal"] = self.configlist[-1]
1201 # The symlink might not exist or might not be a symlink.
1202 if self.profile_path is None:
1206 def addProfile(currentPath):
1207 parentsFile = os.path.join(currentPath, "parent")
1208 eapi_file = os.path.join(currentPath, "eapi")
1210 eapi = open(eapi_file).readline().strip()
1214 if not eapi_is_supported(eapi):
1215 raise portage.exception.ParseError(
1216 "Profile contains unsupported " + \
1217 "EAPI '%s': '%s'" % \
1218 (eapi, os.path.realpath(eapi_file),))
1219 if os.path.exists(parentsFile):
1220 parents = grabfile(parentsFile)
1222 raise portage.exception.ParseError(
1223 "Empty parent file: '%s'" % parentsFile)
1224 for parentPath in parents:
1225 parentPath = normalize_path(os.path.join(
1226 currentPath, parentPath))
1227 if os.path.exists(parentPath):
1228 addProfile(parentPath)
1230 raise portage.exception.ParseError(
1231 "Parent '%s' not found: '%s'" % \
1232 (parentPath, parentsFile))
1233 self.profiles.append(currentPath)
1235 addProfile(os.path.realpath(self.profile_path))
1236 except portage.exception.ParseError, e:
1237 writemsg("!!! Unable to parse profile: '%s'\n" % \
1238 self.profile_path, noiselevel=-1)
1239 writemsg("!!! ParseError: %s\n" % str(e), noiselevel=-1)
1242 if local_config and self.profiles:
1243 custom_prof = os.path.join(
1244 config_root, CUSTOM_PROFILE_PATH.lstrip(os.path.sep))
1245 if os.path.exists(custom_prof):
1246 self.user_profile_dir = custom_prof
1247 self.profiles.append(custom_prof)
1250 self.packages_list = [grabfile_package(os.path.join(x, "packages")) for x in self.profiles]
1251 self.packages = stack_lists(self.packages_list, incremental=1)
1252 del self.packages_list
1253 #self.packages = grab_stacked("packages", self.profiles, grabfile, incremental_lines=1)
1256 self.prevmaskdict={}
1257 for x in self.packages:
1258 mycatpkg=dep_getkey(x)
1259 if mycatpkg not in self.prevmaskdict:
1260 self.prevmaskdict[mycatpkg]=[x]
1262 self.prevmaskdict[mycatpkg].append(x)
1264 # get profile-masked use flags -- INCREMENTAL Child over parent
1265 self.usemask_list = [grabfile(os.path.join(x, "use.mask")) \
1266 for x in self.profiles]
1267 self.usemask = set(stack_lists(
1268 self.usemask_list, incremental=True))
1269 use_defs_lists = [grabdict(os.path.join(x, "use.defaults")) for x in self.profiles]
1270 self.use_defs = stack_dictlist(use_defs_lists, incremental=True)
1273 self.pusemask_list = []
1274 rawpusemask = [grabdict_package(
1275 os.path.join(x, "package.use.mask")) \
1276 for x in self.profiles]
1277 for i in xrange(len(self.profiles)):
1279 for k, v in rawpusemask[i].iteritems():
1280 cpdict.setdefault(dep_getkey(k), {})[k] = v
1281 self.pusemask_list.append(cpdict)
1284 self.pkgprofileuse = []
1285 rawprofileuse = [grabdict_package(
1286 os.path.join(x, "package.use"), juststrings=True) \
1287 for x in self.profiles]
1288 for i in xrange(len(self.profiles)):
1290 for k, v in rawprofileuse[i].iteritems():
1291 cpdict.setdefault(dep_getkey(k), {})[k] = v
1292 self.pkgprofileuse.append(cpdict)
1295 self.useforce_list = [grabfile(os.path.join(x, "use.force")) \
1296 for x in self.profiles]
1297 self.useforce = set(stack_lists(
1298 self.useforce_list, incremental=True))
1300 self.puseforce_list = []
1301 rawpuseforce = [grabdict_package(
1302 os.path.join(x, "package.use.force")) \
1303 for x in self.profiles]
1304 for i in xrange(len(self.profiles)):
1306 for k, v in rawpuseforce[i].iteritems():
1307 cpdict.setdefault(dep_getkey(k), {})[k] = v
1308 self.puseforce_list.append(cpdict)
1311 make_conf = getconfig(
1312 os.path.join(config_root, MAKE_CONF_FILE.lstrip(os.path.sep)),
1313 tolerant=tolerant, allow_sourcing=True)
1314 if make_conf is None:
1317 # Allow ROOT setting to come from make.conf if it's not overridden
1318 # by the constructor argument (from the calling environment).
1319 if target_root is None and "ROOT" in make_conf:
1320 target_root = make_conf["ROOT"]
1321 if not target_root.strip():
1323 if target_root is None:
1326 target_root = normalize_path(os.path.abspath(
1327 target_root)).rstrip(os.path.sep) + os.path.sep
1329 portage.util.ensure_dirs(target_root)
1330 check_var_directory("ROOT", target_root)
1332 # The expand_map is used for variable substitution
1333 # in getconfig() calls, and the getconfig() calls
1334 # update expand_map with the value of each variable
1335 # assignment that occurs. Variable substitution occurs
1336 # in the following order, which corresponds to the
1337 # order of appearance in self.lookuplist:
1344 # Notably absent is "env", since we want to avoid any
1345 # interaction with the calling environment that might
1346 # lead to unexpected results.
1349 env_d = getconfig(os.path.join(target_root, "etc", "profile.env"),
1351 # env_d will be None if profile.env doesn't exist.
1353 self.configdict["env.d"].update(env_d)
1354 expand_map.update(env_d)
1356 # backupenv is used for calculating incremental variables.
1357 self.backupenv = os.environ.copy()
1360 # Remove duplicate values so they don't override updated
1361 # profile.env values later (profile.env is reloaded in each
1362 # call to self.regenerate).
1363 for k, v in env_d.iteritems():
1365 if self.backupenv[k] == v:
1366 del self.backupenv[k]
1371 self.configdict["env"] = self.backupenv.copy()
1373 # make.globals should not be relative to config_root
1374 # because it only contains constants.
1375 for x in (portage.const.GLOBAL_CONFIG_PATH, "/etc"):
1376 self.mygcfg = getconfig(os.path.join(x, "make.globals"),
1381 if self.mygcfg is None:
1384 self.configlist.append(self.mygcfg)
1385 self.configdict["globals"]=self.configlist[-1]
1387 self.make_defaults_use = []
1390 mygcfg_dlists = [getconfig(os.path.join(x, "make.defaults"),
1391 expand=expand_map) for x in self.profiles]
1393 for cfg in mygcfg_dlists:
1395 self.make_defaults_use.append(cfg.get("USE", ""))
1397 self.make_defaults_use.append("")
1398 self.mygcfg = stack_dicts(mygcfg_dlists,
1399 incrementals=portage.const.INCREMENTALS, ignore_none=1)
1400 if self.mygcfg is None:
1402 self.configlist.append(self.mygcfg)
1403 self.configdict["defaults"]=self.configlist[-1]
1405 self.mygcfg = getconfig(
1406 os.path.join(config_root, MAKE_CONF_FILE.lstrip(os.path.sep)),
1407 tolerant=tolerant, allow_sourcing=True, expand=expand_map)
1408 if self.mygcfg is None:
1411 # Don't allow the user to override certain variables in make.conf
1412 profile_only_variables = self.configdict["defaults"].get(
1413 "PROFILE_ONLY_VARIABLES", "").split()
1414 for k in profile_only_variables:
1415 self.mygcfg.pop(k, None)
1417 self.configlist.append(self.mygcfg)
1418 self.configdict["conf"]=self.configlist[-1]
1420 self.configlist.append({})
1421 self.configdict["pkg"]=self.configlist[-1]
1424 self.configlist.append({})
1425 self.configdict["auto"]=self.configlist[-1]
1427 self.configlist.append(self.backupenv) # XXX Why though?
1428 self.configdict["backupenv"]=self.configlist[-1]
1430 # Don't allow the user to override certain variables in the env
1431 for k in profile_only_variables:
1432 self.backupenv.pop(k, None)
1434 self.configlist.append(self.configdict["env"])
1436 # make lookuplist for loading package.*
1437 self.lookuplist=self.configlist[:]
1438 self.lookuplist.reverse()
1440 # Blacklist vars that could interfere with portage internals.
1441 for blacklisted in self._env_blacklist:
1442 for cfg in self.lookuplist:
1443 cfg.pop(blacklisted, None)
1444 del blacklisted, cfg
1446 self["PORTAGE_CONFIGROOT"] = config_root
1447 self.backup_changes("PORTAGE_CONFIGROOT")
1448 self["ROOT"] = target_root
1449 self.backup_changes("ROOT")
1452 self.pkeywordsdict = {}
1453 self._plicensedict = {}
1454 self.punmaskdict = {}
1455 abs_user_config = os.path.join(config_root,
1456 USER_CONFIG_PATH.lstrip(os.path.sep))
1458 # locations for "categories" and "arch.list" files
1459 locations = [os.path.join(self["PORTDIR"], "profiles")]
1460 pmask_locations = [os.path.join(self["PORTDIR"], "profiles")]
1461 pmask_locations.extend(self.profiles)
1463 """ repoman controls PORTDIR_OVERLAY via the environment, so no
1464 special cases are needed here."""
1465 overlay_profiles = []
1466 for ov in self["PORTDIR_OVERLAY"].split():
1467 ov = normalize_path(ov)
1468 profiles_dir = os.path.join(ov, "profiles")
1469 if os.path.isdir(profiles_dir):
1470 overlay_profiles.append(profiles_dir)
1471 locations += overlay_profiles
1473 pmask_locations.extend(overlay_profiles)
1476 locations.append(abs_user_config)
1477 pmask_locations.append(abs_user_config)
1478 pusedict = grabdict_package(
1479 os.path.join(abs_user_config, "package.use"), recursive=1)
1480 for key in pusedict.keys():
1481 cp = dep_getkey(key)
1482 if cp not in self.pusedict:
1483 self.pusedict[cp] = {}
1484 self.pusedict[cp][key] = pusedict[key]
1487 pkgdict = grabdict_package(
1488 os.path.join(abs_user_config, "package.keywords"),
1490 for key in pkgdict.keys():
1491 # default to ~arch if no specific keyword is given
1492 if not pkgdict[key]:
1494 if self.configdict["defaults"] and \
1495 "ACCEPT_KEYWORDS" in self.configdict["defaults"]:
1496 groups = self.configdict["defaults"]["ACCEPT_KEYWORDS"].split()
1499 for keyword in groups:
1500 if not keyword[0] in "~-":
1501 mykeywordlist.append("~"+keyword)
1502 pkgdict[key] = mykeywordlist
1503 cp = dep_getkey(key)
1504 if cp not in self.pkeywordsdict:
1505 self.pkeywordsdict[cp] = {}
1506 self.pkeywordsdict[cp][key] = pkgdict[key]
1509 licdict = grabdict_package(os.path.join(
1510 abs_user_config, "package.license"), recursive=1)
1511 for k, v in licdict.iteritems():
1513 cp_dict = self._plicensedict.get(cp)
1516 self._plicensedict[cp] = cp_dict
1517 cp_dict[k] = self.expandLicenseTokens(v)
1520 pkgunmasklines = grabfile_package(
1521 os.path.join(abs_user_config, "package.unmask"),
1523 for x in pkgunmasklines:
1524 mycatpkg=dep_getkey(x)
1525 if mycatpkg in self.punmaskdict:
1526 self.punmaskdict[mycatpkg].append(x)
1528 self.punmaskdict[mycatpkg]=[x]
1530 #getting categories from an external file now
1531 categories = [grabfile(os.path.join(x, "categories")) for x in locations]
1532 self.categories = stack_lists(categories, incremental=1)
1535 archlist = [grabfile(os.path.join(x, "arch.list")) for x in locations]
1536 archlist = stack_lists(archlist, incremental=1)
1537 self.configdict["conf"]["PORTAGE_ARCHLIST"] = " ".join(archlist)
1541 for x in pmask_locations:
1542 pkgmasklines.append(grabfile_package(
1543 os.path.join(x, "package.mask"), recursive=1))
1544 pkgmasklines = stack_lists(pkgmasklines, incremental=1)
1547 for x in pkgmasklines:
1548 mycatpkg=dep_getkey(x)
1549 if mycatpkg in self.pmaskdict:
1550 self.pmaskdict[mycatpkg].append(x)
1552 self.pmaskdict[mycatpkg]=[x]
1554 pkgprovidedlines = [grabfile(os.path.join(x, "package.provided")) for x in self.profiles]
1555 pkgprovidedlines = stack_lists(pkgprovidedlines, incremental=1)
1556 has_invalid_data = False
1557 for x in range(len(pkgprovidedlines)-1, -1, -1):
1558 myline = pkgprovidedlines[x]
1559 if not isvalidatom("=" + myline):
1560 writemsg("Invalid package name in package.provided:" + \
1561 " %s\n" % myline, noiselevel=-1)
1562 has_invalid_data = True
1563 del pkgprovidedlines[x]
1565 cpvr = catpkgsplit(pkgprovidedlines[x])
1566 if not cpvr or cpvr[0] == "null":
1567 writemsg("Invalid package name in package.provided: "+pkgprovidedlines[x]+"\n",
1569 has_invalid_data = True
1570 del pkgprovidedlines[x]
1572 if cpvr[0] == "virtual":
1573 writemsg("Virtual package in package.provided: %s\n" % \
1574 myline, noiselevel=-1)
1575 has_invalid_data = True
1576 del pkgprovidedlines[x]
1578 if has_invalid_data:
1579 writemsg("See portage(5) for correct package.provided usage.\n",
1581 self.pprovideddict = {}
1582 for x in pkgprovidedlines:
1586 mycatpkg=dep_getkey(x)
1587 if mycatpkg in self.pprovideddict:
1588 self.pprovideddict[mycatpkg].append(x)
1590 self.pprovideddict[mycatpkg]=[x]
1592 # parse licensegroups
1593 self._license_groups = {}
1595 self._license_groups.update(
1596 grabdict(os.path.join(x, "license_groups")))
1598 # reasonable defaults; this is important as without USE_ORDER,
1599 # USE will always be "" (nothing set)!
1600 if "USE_ORDER" not in self:
1601 self.backupenv["USE_ORDER"] = "env:pkg:conf:defaults:pkginternal:env.d"
1603 self["PORTAGE_GID"] = str(portage_gid)
1604 self.backup_changes("PORTAGE_GID")
1606 if self.get("PORTAGE_DEPCACHEDIR", None):
1607 self.depcachedir = self["PORTAGE_DEPCACHEDIR"]
1608 self["PORTAGE_DEPCACHEDIR"] = self.depcachedir
1609 self.backup_changes("PORTAGE_DEPCACHEDIR")
1611 overlays = self.get("PORTDIR_OVERLAY","").split()
1615 ov = normalize_path(ov)
1616 if os.path.isdir(ov):
1619 writemsg("!!! Invalid PORTDIR_OVERLAY" + \
1620 " (not a dir): '%s'\n" % ov, noiselevel=-1)
1621 self["PORTDIR_OVERLAY"] = " ".join(new_ov)
1622 self.backup_changes("PORTDIR_OVERLAY")
1624 if "CBUILD" not in self and "CHOST" in self:
1625 self["CBUILD"] = self["CHOST"]
1626 self.backup_changes("CBUILD")
1628 self["PORTAGE_BIN_PATH"] = PORTAGE_BIN_PATH
1629 self.backup_changes("PORTAGE_BIN_PATH")
1630 self["PORTAGE_PYM_PATH"] = PORTAGE_PYM_PATH
1631 self.backup_changes("PORTAGE_PYM_PATH")
1633 # Expand license groups
1634 # This has to do be done for each config layer before regenerate()
1635 # in order for incremental negation to work properly.
1637 for c in self.configdict.itervalues():
1638 v = c.get("ACCEPT_LICENSE")
1641 v = " ".join(self.expandLicenseTokens(v.split()))
1642 c["ACCEPT_LICENSE"] = v
1645 for var in ("PORTAGE_INST_UID", "PORTAGE_INST_GID"):
1647 self[var] = str(int(self.get(var, "0")))
1649 writemsg(("!!! %s='%s' is not a valid integer. " + \
1650 "Falling back to '0'.\n") % (var, self[var]),
1653 self.backup_changes(var)
1655 # initialize self.features
1659 self._accept_license = \
1660 set(self.get("ACCEPT_LICENSE", "").split())
1661 # In order to enforce explicit acceptance for restrictive
1662 # licenses that require it, "*" will not be allowed in the
1663 # user config. Don't enforce this until license groups are
1664 # fully implemented in the tree.
1665 #self._accept_license.discard("*")
1666 if not self._accept_license:
1667 self._accept_license = set(["*"])
1669 # repoman will accept any license
1670 self._accept_license = set(["*"])
1672 if not portage.process.sandbox_capable and \
1673 ("sandbox" in self.features or "usersandbox" in self.features):
1674 if self.profile_path is not None and \
1675 os.path.realpath(self.profile_path) == \
1676 os.path.realpath(PROFILE_PATH):
1677 """ Don't show this warning when running repoman and the
1678 sandbox feature came from a profile that doesn't belong to
1680 writemsg(colorize("BAD", "!!! Problem with sandbox" + \
1681 " binary. Disabling...\n\n"), noiselevel=-1)
1682 if "sandbox" in self.features:
1683 self.features.remove("sandbox")
1684 if "usersandbox" in self.features:
1685 self.features.remove("usersandbox")
1687 self.features.sort()
1688 self["FEATURES"] = " ".join(self.features)
1689 self.backup_changes("FEATURES")
1696 def _init_dirs(self):
# Purpose: create a handful of directories that portage needs in order
# to operate (tmp dirs, PRIVATE_PATH, CACHE_PATH) under self["ROOT"].
# NOTE(review): this listing is elided (embedded line numbers skip), so
# some statements between the visible lines are not shown here.
1698 Create a few directories that are critical to portage operation
# If ROOT is not writable there is nothing we can create; the elided
# branch presumably returns early -- TODO confirm against full source.
1700 if not os.access(self["ROOT"], os.W_OK):
# Map of relative path -> (gid, mode, mask, preserve_perms).
# Modes are Python 2 octal literals (01777 = sticky, world-writable;
# 02750 = setgid, group-readable).
1703 # gid, mode, mask, preserve_perms
1705 "tmp" : ( -1, 01777, 0, True),
1706 "var/tmp" : ( -1, 01777, 0, True),
1707 PRIVATE_PATH : ( portage_gid, 02750, 02, False),
1708 CACHE_PATH.lstrip(os.path.sep) : (portage_gid, 0755, 02, False)
1711 for mypath, (gid, mode, modemask, preserve_perms) \
1712 in dir_mode_map.iteritems():
1713 mydir = os.path.join(self["ROOT"], mypath)
# Entries flagged preserve_perms keep their existing permissions when
# the directory already exists.
1714 if preserve_perms and os.path.isdir(mydir):
1715 # Only adjust permissions on some directories if
1716 # they don't exist yet. This gives freedom to the
1717 # user to adjust permissions to suit their taste.
1720 portage.util.ensure_dirs(mydir, gid=gid, mode=mode, mask=modemask)
# Creation/permission failures are reported via writemsg rather than
# being raised to the caller.
1721 except portage.exception.PortageException, e:
1722 writemsg("!!! Directory initialization failed: '%s'\n" % mydir,
1724 writemsg("!!! %s\n" % str(e),
1727 def expandLicenseTokens(self, tokens):
1728 """ Take a token from ACCEPT_LICENSE or package.license and expand it
1729 if it's a group token (indicated by @) or just return it if it's not a
1730 group. If a group is negated then negate all group elements."""
1731 expanded_tokens = []
# Delegates per-token expansion (including recursive @group handling)
# to _expandLicenseToken; the loop-header line is elided in this listing.
1733 expanded_tokens.extend(self._expandLicenseToken(x, None))
1734 return expanded_tokens
1736 def _expandLicenseToken(self, token, traversed_groups):
# Expand one ACCEPT_LICENSE / package.license token into a list of
# license names. A leading "-" negates the token; "@name" refers to a
# group from self._license_groups. traversed_groups (a set, lazily
# created) guards against infinite recursion on circular groups.
1739 if token.startswith("-"):
1741 license_name = token[1:]
1743 license_name = token
# Non-group tokens pass straight through unchanged.
1744 if not license_name.startswith("@"):
1745 rValue.append(token)
1747 group_name = license_name[1:]
1748 if not traversed_groups:
1749 traversed_groups = set()
1750 license_group = self._license_groups.get(group_name)
# Circular references are reported and the literal "@group" token is
# kept so the problem stays visible downstream.
1751 if group_name in traversed_groups:
1752 writemsg(("Circular license group reference" + \
1753 " detected in '%s'\n") % group_name, noiselevel=-1)
1754 rValue.append("@"+group_name)
1756 traversed_groups.add(group_name)
1757 for l in license_group:
# Negated entries are invalid inside a group definition itself.
1758 if l.startswith("-"):
1759 writemsg(("Skipping invalid element %s" + \
1760 " in license group '%s'\n") % (l, group_name),
1763 rValue.extend(self._expandLicenseToken(l, traversed_groups))
# Unknown groups are reported and kept literally as "@name".
1765 writemsg("Undefined license group '%s'\n" % group_name,
1767 rValue.append("@"+group_name)
# A negated group negates every element it expanded to.
1769 rValue = ["-" + token for token in rValue]
# NOTE(review): the enclosing "def" line is elided from this listing;
# in portage this body belongs to config.validate().
1773 """Validate miscellaneous settings and display warnings if necessary.
1774 (This code was previously in the global scope of portage.py)"""
1776 groups = self["ACCEPT_KEYWORDS"].split()
1777 archlist = self.archlist()
# Warn when no architectures are known (likely an empty portage tree).
1779 writemsg("--- 'profiles/arch.list' is empty or " + \
1780 "not available. Empty portage tree?\n", noiselevel=1)
# Each accepted keyword must be a known arch, a negated known arch,
# or one of the special tokens *, ~*, **.
1782 for group in groups:
1783 if group not in archlist and \
1784 not (group.startswith("-") and group[1:] in archlist) and \
1785 group not in ("*", "~*", "**"):
1786 writemsg("!!! INVALID ACCEPT_KEYWORDS: %s\n" % str(group),
# Warn when make.profile is neither a symlink nor a directory with a
# "parent" file, while $PORTDIR/profiles exists (i.e. a real tree).
1789 abs_profile_path = os.path.join(self["PORTAGE_CONFIGROOT"],
1790 PROFILE_PATH.lstrip(os.path.sep))
1791 if not self.profile_path or (not os.path.islink(abs_profile_path) and \
1792 not os.path.exists(os.path.join(abs_profile_path, "parent")) and \
1793 os.path.exists(os.path.join(self["PORTDIR"], "profiles"))):
1794 writemsg("\a\n\n!!! %s is not a symlink and will probably prevent most merges.\n" % abs_profile_path,
1796 writemsg("!!! It should point into a profile within %s/profiles/\n" % self["PORTDIR"])
1797 writemsg("!!! (You can safely ignore this message when syncing. It's harmless.)\n\n\n")
# The user virtuals file moved; nudge users toward the new location.
1799 abs_user_virtuals = os.path.join(self["PORTAGE_CONFIGROOT"],
1800 USER_VIRTUALS_FILE.lstrip(os.path.sep))
1801 if os.path.exists(abs_user_virtuals):
1802 writemsg("\n!!! /etc/portage/virtuals is deprecated in favor of\n")
1803 writemsg("!!! /etc/portage/profile/virtuals. Please move it to\n")
1804 writemsg("!!! this new location.\n\n")
# FEATURES=fakeroot is useless without the fakeroot binary installed.
1806 if "fakeroot" in self.features and \
1807 not portage.process.fakeroot_capable:
1808 writemsg("!!! FEATURES=fakeroot is enabled, but the " + \
1809 "fakeroot binary is not installed.\n", noiselevel=-1)
1811 def loadVirtuals(self,root):
1812 """Not currently used by portage."""
# Deprecated shim kept for API compatibility: warns, then delegates
# to self.getvirtuals(root) and discards the result.
1813 writemsg("DEPRECATED: portage.config.loadVirtuals\n")
1814 self.getvirtuals(root)
1816 def load_best_module(self,property_string):
# Resolve a module path for property_string from self.modules using
# self.module_priority ("user" before "default"), then import it with
# load_mod().
1817 best_mod = best_from_dict(property_string,self.modules,self.module_priority)
1820 mod = load_mod(best_mod)
# Backward compatibility: bare "cache.*" module names moved under the
# "portage." namespace, so retry with the prefix. The surrounding
# try/except lines are elided in this listing.
1822 if best_mod.startswith("cache."):
1823 best_mod = "portage." + best_mod
1825 mod = load_mod(best_mod)
1838 def modifying(self):
# Guard called by mutating methods: raises when this config has been
# locked (the lock-check condition line is elided in this listing).
1840 raise Exception("Configuration is locked.")
1842 def backup_changes(self,key=None):
# Persist the current value of `key` from the "env" layer into
# self.backupenv so the change survives a subsequent reset().
1844 if key and key in self.configdict["env"]:
1845 self.backupenv[key] = copy.deepcopy(self.configdict["env"][key])
# Missing/unknown keys raise instead of being silently ignored.
1847 raise KeyError("No such key defined in environment: %s" % key)
1849 def reset(self,keeping_pkg=0,use_cache=1):
1851 Restore environment from self.backupenv, call self.regenerate()
1852 @param keeping_pkg: Should we keep the set_cpv() data or delete it.
1853 @type keeping_pkg: Boolean
1854 @param use_cache: Should self.regenerate use the cache or not
1855 @type use_cache: Boolean
# Rebuild the "env" layer from the pristine backup copy.
1859 self.configdict["env"].clear()
1860 self.configdict["env"].update(self.backupenv)
1862 self.modifiedkeys = []
# Unless keeping_pkg, drop per-package layers and restore the stacked
# profile defaults for USE, use.mask and use.force.
1866 self.configdict["pkg"].clear()
1867 self.configdict["pkginternal"].clear()
1868 self.configdict["defaults"]["USE"] = \
1869 " ".join(self.make_defaults_use)
1870 self.usemask = set(stack_lists(
1871 self.usemask_list, incremental=True))
1872 self.useforce = set(stack_lists(
1873 self.useforce_list, incremental=True))
1874 self.regenerate(use_cache=use_cache)
1876 def load_infodir(self,infodir):
# Deprecated entry point: only emits a DeprecationWarning (the rest of
# the body is elided in this listing).
1877 warnings.warn("portage.config.load_infodir() is deprecated",
1881 def setcpv(self, mycpv, use_cache=1, mydb=None):
1883 Load a particular CPV into the config, this lets us see the
1884 Default USE flags for a particular ebuild as well as the USE
1885 flags from package.use.
1887 @param mycpv: A cpv to load
1889 @param use_cache: Enables caching
1890 @type use_cache: Boolean
1891 @param mydb: a dbapi instance that supports aux_get with the IUSE key.
1892 @type mydb: dbapi or derivative.
1899 if not isinstance(mycpv, basestring):
# Short-circuit when this CPV is already the loaded one.
1904 if self.mycpv == mycpv:
1908 cat, pf = catsplit(mycpv)
1909 cp = dep_getkey(mycpv)
1910 cpv_slot = self.mycpv
1913 env_configdict = self.configdict["env"]
1914 pkg_configdict = self.configdict["pkg"]
1915 previous_iuse = pkg_configdict.get("IUSE")
1916 pkg_configdict["CATEGORY"] = cat
1917 pkg_configdict["PF"] = pf
# mydb is either a plain metadata dict or a dbapi instance; dbapi
# instances are queried via aux_get for all non-UNUSED auxdb keys
# plus "repository".
1919 if not hasattr(mydb, "aux_get"):
1920 pkg_configdict.update(mydb)
1922 aux_keys = [k for k in auxdbkeys \
1923 if not k.startswith("UNUSED_")]
1924 aux_keys.append("repository")
1925 for k, v in izip(aux_keys, mydb.aux_get(self.mycpv, aux_keys)):
1926 pkg_configdict[k] = v
1927 repository = pkg_configdict.pop("repository", None)
1928 if repository is not None:
1929 pkg_configdict["PORTAGE_REPO_NAME"] = repository
# Package metadata shadows same-named variables from the env layer.
1930 for k in pkg_configdict:
1932 env_configdict.pop(k, None)
1933 slot = pkg_configdict["SLOT"]
1934 iuse = pkg_configdict["IUSE"]
1936 cpv_slot = "%s:%s" % (self.mycpv, slot)
# IUSE defaults: "+flag" enables by default, "-flag" disables.
1940 for x in iuse.split():
1941 if x.startswith("+"):
1942 pkginternaluse.append(x[1:])
1943 elif x.startswith("-"):
1944 pkginternaluse.append(x)
1945 pkginternaluse = " ".join(pkginternaluse)
1946 if pkginternaluse != self.configdict["pkginternal"].get("USE", ""):
1947 self.configdict["pkginternal"]["USE"] = pkginternaluse
# Stack per-profile package.use (best matching atom per profile)
# together with each profile's make.defaults USE.
1951 for i in xrange(len(self.profiles)):
1952 cpdict = self.pkgprofileuse[i].get(cp, None)
1954 keys = cpdict.keys()
1956 bestmatch = best_match_to_list(cpv_slot, keys)
1958 keys.remove(bestmatch)
1959 defaults.insert(pos, cpdict[bestmatch])
1963 if self.make_defaults_use[i]:
1964 defaults.insert(pos, self.make_defaults_use[i])
1966 defaults = " ".join(defaults)
1967 if defaults != self.configdict["defaults"].get("USE",""):
1968 self.configdict["defaults"]["USE"] = defaults
# Recompute per-package use.force / use.mask for this cpv:slot.
1971 useforce = self._getUseForce(cpv_slot)
1972 if useforce != self.useforce:
1973 self.useforce = useforce
1976 usemask = self._getUseMask(cpv_slot)
1977 if usemask != self.usemask:
1978 self.usemask = usemask
# User package.use: the best matching atom's flags are prepended to
# self.puse.
1982 cpdict = self.pusedict.get(cp)
1984 keys = cpdict.keys()
1986 self.pusekey = best_match_to_list(cpv_slot, keys)
1988 keys.remove(self.pusekey)
1989 self.puse = (" ".join(cpdict[self.pusekey])) + " " + self.puse
1993 if oldpuse != self.puse:
1995 self.configdict["pkg"]["PKGUSE"] = self.puse[:] # For saving to PUSE file
1996 self.configdict["pkg"]["USE"] = self.puse[:] # this gets appended to USE
# When anything above changed, reset() re-stacks all layers while
# keeping the per-package data just loaded.
1999 self.reset(keeping_pkg=1,use_cache=use_cache)
2001 # If reset() has not been called, it's safe to return
2002 # early if IUSE has not changed.
2003 if not has_changed and previous_iuse == iuse:
2006 # Filter out USE flags that aren't part of IUSE. This has to
2007 # be done for every setcpv() call since practically every
2008 # package has different IUSE.
2009 use = set(self["USE"].split())
2010 iuse_implicit = self._get_implicit_iuse()
2011 iuse_implicit.update(x.lstrip("+-") for x in iuse.split())
2013 # Escape anything except ".*" which is supposed
2014 # to pass through from _get_implicit_iuse()
2015 regex = sorted(re.escape(x) for x in iuse_implicit)
2016 regex = "^(%s)$" % "|".join(regex)
2017 regex = regex.replace("\\.\\*", ".*")
2018 self.configdict["pkg"]["PORTAGE_IUSE"] = regex
# FEATURES=test handling: EBUILD_FORCE_TEST=1 overrides a masked
# "test" flag; otherwise a masked "test" disables FEATURES=test for
# this package only.
2020 ebuild_force_test = self.get("EBUILD_FORCE_TEST") == "1"
2021 if ebuild_force_test and \
2022 not hasattr(self, "_ebuild_force_test_msg_shown"):
2023 self._ebuild_force_test_msg_shown = True
2024 writemsg("Forcing test.\n", noiselevel=-1)
2025 if "test" in self.features and "test" in iuse_implicit:
2026 if "test" in self.usemask and not ebuild_force_test:
2027 # "test" is in IUSE and USE=test is masked, so execution
2028 # of src_test() probably is not reliable. Therefore,
2029 # temporarily disable FEATURES=test just for this package.
2030 self["FEATURES"] = " ".join(x for x in self.features \
2035 if ebuild_force_test:
2036 self.usemask.discard("test")
2038 # Use the calculated USE flags to regenerate the USE_EXPAND flags so
2039 # that they are consistent. For optimal performance, use slice
2040 # comparison instead of startswith().
2041 use_expand = self.get("USE_EXPAND", "").split()
2042 for var in use_expand:
2043 prefix = var.lower() + "_"
2044 prefix_len = len(prefix)
2045 expand_flags = set([ x[prefix_len:] for x in use \
2046 if x[:prefix_len] == prefix ])
2047 var_split = self.get(var, "").split()
2048 # Preserve the order of var_split because it can matter for things
2050 var_split = [ x for x in var_split if x in expand_flags ]
2051 var_split.extend(expand_flags.difference(var_split))
2052 has_wildcard = "*" in var_split
2054 var_split = [ x for x in var_split if x != "*" ]
2056 for x in iuse_implicit:
2057 if x[:prefix_len] == prefix:
2058 has_iuse.add(x[prefix_len:])
2060 # * means to enable everything in IUSE that's not masked
2062 for x in iuse_implicit:
2063 if x[:prefix_len] == prefix and x not in self.usemask:
2064 suffix = x[prefix_len:]
2065 var_split.append(suffix)
2068 # If there is a wildcard and no matching flags in IUSE then
2069 # LINGUAS should be unset so that all .mo files are
2072 # Make the flags unique and filter them according to IUSE.
2073 # Also, continue to preserve order for things like LINGUAS
2074 # and filter any duplicates that variable may contain.
2075 filtered_var_split = []
2076 remaining = has_iuse.intersection(var_split)
2080 filtered_var_split.append(x)
2081 var_split = filtered_var_split
2084 self[var] = " ".join(var_split)
2086 # Don't export empty USE_EXPAND vars unless the user config
2087 # exports them as empty. This is required for vars such as
2088 # LINGUAS, where unset and empty have different meanings.
2090 # ebuild.sh will see this and unset the variable so
2091 # that things like LINGUAS work properly
2097 # It's not in IUSE, so just allow the variable content
2098 # to pass through if it is defined somewhere. This
2099 # allows packages that support LINGUAS but don't
2100 # declare it in IUSE to use the variable outside of the
2101 # USE_EXPAND context.
2104 # Filtered for the ebuild environment. Store this in a separate
2105 # attribute since we still want to be able to see global USE
2106 # settings for things like emerge --info.
2108 self.configdict["pkg"]["PORTAGE_USE"] = " ".join(sorted(
2110 x in iuse_implicit))
2112 def _get_implicit_iuse(self):
2114 Some flags are considered to
2115 be implicit members of IUSE:
2116 * Flags derived from ARCH
2117 * Flags derived from USE_EXPAND_HIDDEN variables
2118 * Masked flags, such as those from {,package}use.mask
2119 * Forced flags, such as those from {,package}use.force
2120 * build and bootstrap flags used by bootstrap.sh
2122 iuse_implicit = set()
2123 # Flags derived from ARCH.
2124 arch = self.configdict["defaults"].get("ARCH")
2126 iuse_implicit.add(arch)
2127 iuse_implicit.update(self.get("PORTAGE_ARCHLIST", "").split())
2129 # Flags derived from USE_EXPAND_HIDDEN variables
2130 # such as ELIBC, KERNEL, and USERLAND.
2131 use_expand_hidden = self.get("USE_EXPAND_HIDDEN", "").split()
2132 for x in use_expand_hidden:
# The "_.*" suffix is a regex wildcard consumed by the PORTAGE_IUSE
# pattern that setcpv() builds from this set.
2133 iuse_implicit.add(x.lower() + "_.*")
2135 # Flags that have been masked or forced.
2136 iuse_implicit.update(self.usemask)
2137 iuse_implicit.update(self.useforce)
2139 # build and bootstrap flags used by bootstrap.sh
2140 iuse_implicit.add("build")
2141 iuse_implicit.add("bootstrap")
2142 return iuse_implicit
2144 def _getUseMask(self, pkg):
# Compute the stacked use.mask for `pkg` (a cpv / "cpv:slot" string,
# or an object exposing .cp). Returns a set of masked flag names.
2145 cp = getattr(pkg, "cp", None)
2147 cp = dep_getkey(pkg)
# For each profile, best-matching package.use.mask atoms are inserted
# ahead of that profile's use.mask list so incremental stacking lets
# more specific entries override the general ones.
2150 for i in xrange(len(self.profiles)):
2151 cpdict = self.pusemask_list[i].get(cp, None)
2153 keys = cpdict.keys()
2155 best_match = best_match_to_list(pkg, keys)
2157 keys.remove(best_match)
2158 usemask.insert(pos, cpdict[best_match])
2162 if self.usemask_list[i]:
2163 usemask.insert(pos, self.usemask_list[i])
2165 return set(stack_lists(usemask, incremental=True))
2167 def _getUseForce(self, pkg):
# Compute the stacked use.force for `pkg`; mirror image of
# _getUseMask() but driven by puseforce_list / useforce_list.
2168 cp = getattr(pkg, "cp", None)
2170 cp = dep_getkey(pkg)
2173 for i in xrange(len(self.profiles)):
2174 cpdict = self.puseforce_list[i].get(cp, None)
2176 keys = cpdict.keys()
2178 best_match = best_match_to_list(pkg, keys)
2180 keys.remove(best_match)
2181 useforce.insert(pos, cpdict[best_match])
2185 if self.useforce_list[i]:
2186 useforce.insert(pos, self.useforce_list[i])
2188 return set(stack_lists(useforce, incremental=True))
2190 def _getMaskAtom(self, cpv, metadata):
2192 Take a package and return a matching package.mask atom, or None if no
2193 such atom exists or it has been cancelled by package.unmask. PROVIDE
2194 is not checked, so atoms will not be found for old-style virtuals.
2196 @param cpv: The package name
2198 @param metadata: A dictionary of raw package metadata
2199 @type metadata: dict
2201 @return: A matching atom string or None if one is not found.
2204 cp = cpv_getkey(cpv)
2205 mask_atoms = self.pmaskdict.get(cp)
# Matching is done against "cpv:SLOT" so slot-atoms also match.
2207 pkg_list = ["%s:%s" % (cpv, metadata["SLOT"])]
2208 unmask_atoms = self.punmaskdict.get(cp)
2209 for x in mask_atoms:
2210 if not match_from_list(x, pkg_list):
# A matching package.unmask atom cancels this mask atom.
2213 for y in unmask_atoms:
2214 if match_from_list(y, pkg_list):
2219 def _getProfileMaskAtom(self, cpv, metadata):
2221 Take a package and return a matching profile atom, or None if no
2222 such atom exists. Note that a profile atom may or may not have a "*"
2223 prefix. PROVIDE is not checked, so atoms will not be found for
2226 @param cpv: The package name
2228 @param metadata: A dictionary of raw package metadata
2229 @type metadata: dict
2231 @return: A matching profile atom string or None if one is not found.
2234 cp = cpv_getkey(cpv)
2235 profile_atoms = self.prevmaskdict.get(cp)
# Matching is done against "cpv:SLOT" so slot-atoms also match.
2237 pkg_list = ["%s:%s" % (cpv, metadata["SLOT"])]
2238 for x in profile_atoms:
# Strip any leading "*" prefix before matching; the original atom
# (with its prefix) is what gets returned on a match.
2239 if match_from_list(x.lstrip("*"), pkg_list):
2244 def _getMissingKeywords(self, cpv, metadata):
2246 Take a package and return a list of any KEYWORDS that the user
2247 may need to accept for the given package. If the KEYWORDS are empty
2248 and the ** keyword has not been accepted, the returned list will
2249 contain ** alone (in order to distinguish from the case of "none
2252 @param cpv: The package name (for package.keywords support)
2254 @param metadata: A dictionary of raw package metadata
2255 @type metadata: dict
2257 @return: A list of KEYWORDS that have not been accepted.
2260 # Hack: Need to check the env directly here as otherwise stacking
2261 # doesn't work properly as negative values are lost in the config
2262 # object (bug #139600)
2263 egroups = self.configdict["backupenv"].get(
2264 "ACCEPT_KEYWORDS", "").split()
2265 mygroups = metadata["KEYWORDS"].split()
2266 # Repoman may modify this attribute as necessary.
2267 pgroups = self["ACCEPT_KEYWORDS"].split()
2269 cp = dep_getkey(cpv)
# Per-package keyword acceptance (package.keywords).
2270 pkgdict = self.pkeywordsdict.get(cp)
2273 cpv_slot_list = ["%s:%s" % (cpv, metadata["SLOT"])]
2274 for atom, pkgkeywords in pkgdict.iteritems():
2275 if match_from_list(atom, cpv_slot_list):
2277 pgroups.extend(pkgkeywords)
# Re-apply env keywords last so they override everything else.
2278 if matches or egroups:
2279 pgroups.extend(egroups)
# Incremental handling: "-kw" removes a previously accepted keyword.
2282 if x.startswith("-"):
2286 inc_pgroups.discard(x[1:])
2289 pgroups = inc_pgroups
2294 if gp == "*" or (gp == "-*" and len(mygroups) == 1):
2295 writemsg(("--- WARNING: Package '%s' uses" + \
2296 " '%s' keyword.\n") % (cpv, gp), noiselevel=-1)
2303 elif gp.startswith("~"):
2305 elif not gp.startswith("-"):
# "~*" accepts any testing keyword, "*" any stable one, "**" anything.
2308 ((hastesting and "~*" in pgroups) or \
2309 (hasstable and "*" in pgroups) or "**" in pgroups):
2315 # If KEYWORDS is empty then we still have to return something
2316 # in order to distinguish from the case of "none missing".
2317 mygroups.append("**")
2321 def _getMissingLicenses(self, cpv, metadata):
2323 Take a LICENSE string and return a list of any licenses that the user
2324 may need to accept for the given package. The returned list will not
2325 contain any licenses that have already been accepted. This method
2326 can throw an InvalidDependString exception.
2328 @param cpv: The package name (for package.license support)
2330 @param metadata: A dictionary of raw package metadata
2331 @type metadata: dict
2333 @return: A list of licenses that have not been accepted.
# "*" in ACCEPT_LICENSE accepts everything — nothing can be missing.
2335 if "*" in self._accept_license:
2337 acceptable_licenses = self._accept_license
2338 cpdict = self._plicensedict.get(dep_getkey(cpv), None)
# Per-package entries (package.license) extend the acceptable set; copy
# first so the shared global set is not mutated.
2340 acceptable_licenses = self._accept_license.copy()
2341 cpv_slot = "%s:%s" % (cpv, metadata["SLOT"])
2342 for atom in match_to_list(cpv_slot, cpdict.keys()):
2343 acceptable_licenses.update(cpdict[atom])
2345 license_str = metadata["LICENSE"]
# USE conditionals in LICENSE require the package's USE settings.
2346 if "?" in license_str:
2347 use = metadata["USE"].split()
2351 license_struct = portage.dep.use_reduce(
2352 portage.dep.paren_reduce(license_str), uselist=use)
2353 license_struct = portage.dep.dep_opconvert(license_struct)
2354 return self._getMaskedLicenses(license_struct, acceptable_licenses)
# Recursively walk a dep_opconvert()ed LICENSE structure and collect the
# licenses not in acceptable_licenses. Nested lists are handled by
# recursion; a leading "||" marks an any-of group.
2356 def _getMaskedLicenses(self, license_struct, acceptable_licenses):
2357 if not license_struct:
2359 if license_struct[0] == "||":
2361 for element in license_struct[1:]:
2362 if isinstance(element, list):
2364 ret.append(self._getMaskedLicenses(
2365 element, acceptable_licenses))
# Any single acceptable alternative satisfies an "||" group.
2369 if element in acceptable_licenses:
2372 # Return all masked licenses, since we don't know which combination
2373 # (if any) the user will decide to unmask.
# All-of group: every unacceptable element is missing.
2377 for element in license_struct:
2378 if isinstance(element, list):
2380 ret.extend(self._getMaskedLicenses(element,
2381 acceptable_licenses))
2383 if element not in acceptable_licenses:
2387 def _accept_chost(self, pkg):
2389 @return True if pkg CHOST is accepted, False otherwise.
# Lazily compile and cache the acceptance regex on first use.
2391 if self._accept_chost_re is None:
2392 accept_chost = self.get("ACCEPT_CHOSTS", "").split()
# Default to the configured CHOST when ACCEPT_CHOSTS is unset.
2393 if not accept_chost:
2394 chost = self.get("CHOST")
2396 accept_chost.append(chost)
# Still empty: accept anything.
2397 if not accept_chost:
2398 self._accept_chost_re = re.compile(".*")
2399 elif len(accept_chost) == 1:
2401 self._accept_chost_re = re.compile(r'^%s$' % accept_chost[0])
# On an invalid pattern, warn and match nothing rather than raise.
2403 writemsg("!!! Invalid ACCEPT_CHOSTS value: '%s': %s\n" % \
2404 (accept_chost[0], e), noiselevel=-1)
2405 self._accept_chost_re = re.compile("^$")
# Multiple values become a single alternation pattern.
2408 self._accept_chost_re = re.compile(
2409 r'^(%s)$' % "|".join(accept_chost))
2411 writemsg("!!! Invalid ACCEPT_CHOSTS value: '%s': %s\n" % \
2412 (" ".join(accept_chost), e), noiselevel=-1)
2413 self._accept_chost_re = re.compile("^$")
2415 return self._accept_chost_re.match(
2416 pkg.metadata.get("CHOST", "")) is not None
2418 def setinst(self,mycpv,mydbapi):
2419 """This updates the preferences for old-style virtuals,
2420 affecting the behavior of dep_expand() and dep_check()
2421 calls. It can change dbapi.match() behavior since that
2422 calls dep_expand(). However, dbapi instances have
2423 internal match caches that are not invalidated when
2424 preferences are updated here. This can potentially
2425 lead to some inconsistency (relevant to bug #1343)."""
2427 if len(self.virtuals) == 0:
2429 # Grab the virtuals this package provides and add them into the tree virtuals.
# mydbapi may be a plain metadata dict or a dbapi with aux_get().
2430 if not hasattr(mydbapi, "aux_get"):
2431 provides = mydbapi["PROVIDE"]
2433 provides = mydbapi.aux_get(mycpv, ["PROVIDE"])[0]
# PROVIDE can contain USE conditionals, so the package's USE is needed.
2436 if isinstance(mydbapi, portdbapi):
2437 self.setcpv(mycpv, mydb=mydbapi)
2438 myuse = self["PORTAGE_USE"]
2439 elif not hasattr(mydbapi, "aux_get"):
2440 myuse = mydbapi["USE"]
2442 myuse = mydbapi.aux_get(mycpv, ["USE"])[0]
2443 virts = flatten(portage.dep.use_reduce(portage.dep.paren_reduce(provides), uselist=myuse.split()))
2446 cp = dep_getkey(mycpv)
2448 virt = dep_getkey(virt)
2449 providers = self.virtuals.get(virt)
# Already a known provider for this virtual — nothing to record.
2450 if providers and cp in providers:
2452 providers = self._depgraphVirtuals.get(virt)
2453 if providers is None:
2455 self._depgraphVirtuals[virt] = providers
2456 if cp not in providers:
2457 providers.append(cp)
# Recompile the combined virtuals mapping to include the new provider.
2461 self.virtuals = self.__getvirtuals_compile()
2464 """Reload things like /etc/profile.env that can change during runtime."""
2465 env_d_filename = os.path.join(self["ROOT"], "etc", "profile.env")
# Clear stale entries before re-reading; expand=False keeps raw values.
2466 self.configdict["env.d"].clear()
2467 env_d = getconfig(env_d_filename, expand=False)
2469 # env_d will be None if profile.env doesn't exist.
2470 self.configdict["env.d"].update(env_d)
2472 def regenerate(self,useonly=0,use_cache=1):
2475 This involves regenerating valid USE flags, re-expanding USE_EXPAND flags
2476 re-stacking USE flags (-flag and -*), as well as any other INCREMENTAL
2477 variables. This also updates the env.d configdict; useful in case an ebuild
2478 changes the environment.
2480 If FEATURES has already stacked, it is not stacked twice.
2482 @param useonly: Only regenerate USE flags (not any other incrementals)
2483 @type useonly: Boolean
2484 @param use_cache: Enable Caching (only for autouse)
2485 @type use_cache: Boolean
# Re-entrancy guard: autouse() can call back into regenerate().
2490 if self.already_in_regenerate:
2491 # XXX: THIS REALLY NEEDS TO GET FIXED. autouse() loops.
2492 writemsg("!!! Looping in regenerate.\n",1)
2495 self.already_in_regenerate = 1
2498 myincrementals=["USE"]
2500 myincrementals = self.incrementals
2501 myincrementals = set(myincrementals)
2502 # If self.features exists, it has already been stacked and may have
2503 # been mutated, so don't stack it again or else any mutations will be
2505 if "FEATURES" in myincrementals and hasattr(self, "features"):
2506 myincrementals.remove("FEATURES")
2508 if "USE" in myincrementals:
2509 # Process USE last because it depends on USE_EXPAND which is also
2511 myincrementals.remove("USE")
# Stack each incremental variable across all config layers but the last
# (the last layer is the original environment, used as the output slot).
2513 for mykey in myincrementals:
2515 mydbs=self.configlist[:-1]
2519 if mykey not in curdb:
2521 #variables are already expanded
2522 mysplit = curdb[mykey].split()
2526 # "-*" is a special "minus" var that means "unset all settings".
2527 # so USE="-* gnome" will have *just* gnome enabled.
2532 # Not legal. People assume too much. Complain.
2533 writemsg(colorize("BAD",
2534 "USE flags should not start with a '+': %s" % x) \
2535 + "\n", noiselevel=-1)
# "-flag" removes a previously accumulated flag.
2541 if (x[1:] in myflags):
2543 del myflags[myflags.index(x[1:])]
2546 # We got here, so add it now.
2547 if x not in myflags:
2551 #store setting in last element of configlist, the original environment:
2552 if myflags or mykey in self:
2553 self.configlist[-1][mykey] = " ".join(myflags)
2556 # Do the USE calculation last because it depends on USE_EXPAND.
2557 if "auto" in self["USE_ORDER"].split(":"):
2558 self.configdict["auto"]["USE"] = autouse(
2559 vartree(root=self["ROOT"], categories=self.categories,
2561 use_cache=use_cache, mysettings=self)
2563 self.configdict["auto"]["USE"] = ""
2565 use_expand = self.get("USE_EXPAND", "").split()
# Build the USE stacking order (reversed so later entries win).
2568 for x in self["USE_ORDER"].split(":"):
2569 if x in self.configdict:
2570 self.uvlist.append(self.configdict[x])
2571 self.uvlist.reverse()
2573 # For optimal performance, use slice
2574 # comparison instead of startswith().
2576 for curdb in self.uvlist:
2577 cur_use_expand = [x for x in use_expand if x in curdb]
2578 mysplit = curdb.get("USE", "").split()
2579 if not mysplit and not cur_use_expand:
2587 writemsg(colorize("BAD", "USE flags should not start " + \
2588 "with a '+': %s\n" % x), noiselevel=-1)
2594 myflags.discard(x[1:])
# Fold USE_EXPAND variables (e.g. LINGUAS) into USE as prefix_value flags.
2599 for var in cur_use_expand:
2600 var_lower = var.lower()
2601 is_not_incremental = var not in myincrementals
2602 if is_not_incremental:
# Non-incremental: each layer fully replaces earlier prefix_* flags.
2603 prefix = var_lower + "_"
2604 prefix_len = len(prefix)
2605 for x in list(myflags):
2606 if x[:prefix_len] == prefix:
2608 for x in curdb[var].split():
2610 if is_not_incremental:
2611 writemsg(colorize("BAD", "Invalid '+' " + \
2612 "operator in non-incremental variable " + \
2613 "'%s': '%s'\n" % (var, x)), noiselevel=-1)
2616 writemsg(colorize("BAD", "Invalid '+' " + \
2617 "operator in incremental variable " + \
2618 "'%s': '%s'\n" % (var, x)), noiselevel=-1)
2621 if is_not_incremental:
2622 writemsg(colorize("BAD", "Invalid '-' " + \
2623 "operator in non-incremental variable " + \
2624 "'%s': '%s'\n" % (var, x)), noiselevel=-1)
2626 myflags.discard(var_lower + "_" + x[1:])
2628 myflags.add(var_lower + "_" + x)
# Stack FEATURES only once (see comment above).
2630 if not hasattr(self, "features"):
2631 self.features = sorted(set(
2632 self.configlist[-1].get("FEATURES","").split()))
2633 self["FEATURES"] = " ".join(self.features)
# use.force wins over use.mask is applied here: force first, then mask.
2635 myflags.update(self.useforce)
2636 arch = self.configdict["defaults"].get("ARCH")
2640 myflags.difference_update(self.usemask)
2641 self.configlist[-1]["USE"]= " ".join(sorted(myflags))
2643 self.already_in_regenerate = 0
# Build (and presumably cache in self.virts_p) a mapping keyed by the
# package-name part of each virtual (e.g. "ssh" for "virtual/ssh").
2645 def get_virts_p(self, myroot=None):
2648 virts = self.getvirtuals()
2651 vkeysplit = x.split("/")
# Only the first virtual with a given package name wins.
2652 if vkeysplit[1] not in self.virts_p:
2653 self.virts_p[vkeysplit[1]] = virts[x]
2656 def getvirtuals(self, myroot=None):
2657 """myroot is now ignored because, due to caching, it has always been
2658 broken for all but the first call."""
2659 myroot = self["ROOT"]
# Cached after first computation.
2661 return self.virtuals
# Collect the "virtuals" file from each cascaded profile directory.
2664 for x in self.profiles:
2665 virtuals_file = os.path.join(x, "virtuals")
2666 virtuals_dict = grabdict(virtuals_file)
2667 for k in virtuals_dict.keys():
# Keys must be plain category/package atoms (no version/operator).
2668 if not isvalidatom(k) or dep_getkey(k) != k:
2669 writemsg("--- Invalid virtuals atom in %s: %s\n" % \
2670 (virtuals_file, k), noiselevel=-1)
2671 del virtuals_dict[k]
2673 myvalues = virtuals_dict[k]
2676 if x.startswith("-"):
2677 # allow incrementals
2679 if not isvalidatom(myatom):
2680 writemsg("--- Invalid atom in %s: %s\n" % \
2681 (virtuals_file, x), noiselevel=-1)
2684 del virtuals_dict[k]
2686 virtuals_list.append(virtuals_dict)
2688 self.dirVirtuals = stack_dictlist(virtuals_list, incremental=True)
2691 for virt in self.dirVirtuals:
2692 # Preference for virtuals decreases from left to right.
2693 self.dirVirtuals[virt].reverse()
2695 # Repoman does not use user or tree virtuals.
2696 if self.local_config and not self.treeVirtuals:
2697 temp_vartree = vartree(myroot, None,
2698 categories=self.categories, settings=self)
2699 # Reduce the provides into a list by CP.
2700 self.treeVirtuals = map_dictlist_vals(getCPFromCPV,temp_vartree.get_all_provides())
2702 self.virtuals = self.__getvirtuals_compile()
2703 return self.virtuals
2705 def __getvirtuals_compile(self):
2706 """Stack installed and profile virtuals. Preference for virtuals
2707 decreases from left to right.
2708 Order of preference:
2709 1. installed and in profile
2714 # Virtuals by profile+tree preferences.
# ptVirtuals collects providers that are both installed (treeVirtuals)
# and listed in the profile (dirVirtuals) — the highest-preference tier.
2717 for virt, installed_list in self.treeVirtuals.iteritems():
2718 profile_list = self.dirVirtuals.get(virt, None)
2719 if not profile_list:
2721 for cp in installed_list:
2722 if cp in profile_list:
2723 ptVirtuals.setdefault(virt, [])
2724 ptVirtuals[virt].append(cp)
# Stack tiers in decreasing preference order.
2726 virtuals = stack_dictlist([ptVirtuals, self.treeVirtuals,
2727 self.dirVirtuals, self._depgraphVirtuals])
# Remove mykey from every config layer in which it appears.
2730 def __delitem__(self,mykey):
2732 for x in self.lookuplist:
# Look mykey up through the layered config dicts, first hit wins.
2737 def __getitem__(self,mykey):
2738 for d in self.lookuplist:
2741 return '' # for backward compat, don't raise KeyError
# dict.get() equivalent over the layered lookup list.
2743 def get(self, k, x=None):
2744 for d in self.lookuplist:
# dict.pop() equivalent; removes the key from the layered dicts
# (iterated in reverse so earlier layers keep precedence).
2749 def pop(self, key, *args):
2752 "pop expected at most 2 arguments, got " + \
2753 repr(1 + len(args)))
2755 for d in reversed(self.lookuplist):
# Deprecated alias for the "in" operator; kept for API compatibility.
2763 def has_key(self,mykey):
2764 warnings.warn("portage.config.has_key() is deprecated, "
2765 "use the in operator instead",
2767 return mykey in self
2769 def __contains__(self, mykey):
2770 """Called to implement membership test operators (in and not in)."""
2771 for d in self.lookuplist:
# dict.setdefault() equivalent over the layered lookup list.
2776 def setdefault(self, k, x=None):
2789 for d in self.lookuplist:
# Iterate (key, value) pairs of the effective (stacked) configuration.
2796 def iteritems(self):
# items() — NOTE(review): the enclosing def line is not visible here.
2801 return list(self.iteritems())
2803 def __setitem__(self,mykey,myvalue):
2804 "set a value; will be thrown away at reset() time"
# Only plain strings are legal config values.
2805 if not isinstance(myvalue, str):
2806 raise ValueError("Invalid type being used as a value: '%s': '%s'" % (str(mykey),str(myvalue)))
# Track modified keys so reset() can discard them; store in the "env" layer.
2808 self.modifiedkeys += [mykey]
2809 self.configdict["env"][mykey]=myvalue
2812 "return our locally-maintained environment"
2814 environ_filter = self._environ_filter
# Once $T/environment exists, the saved ebuild environment is in charge
# and only whitelisted variables may leak in from the calling env.
2816 filter_calling_env = False
2817 temp_dir = self.get("T")
2818 if temp_dir is not None and \
2819 os.path.exists(os.path.join(temp_dir, "environment")):
2820 filter_calling_env = True
2822 environ_whitelist = self._environ_whitelist
2823 env_d = self.configdict["env.d"]
# Variables in the filter set are never exported to the environment.
2825 if x in environ_filter:
2828 if not isinstance(myvalue, basestring):
2829 writemsg("!!! Non-string value in config: %s=%s\n" % \
2830 (x, myvalue), noiselevel=-1)
2832 if filter_calling_env and \
2833 x not in environ_whitelist and \
2834 not self._environ_whitelist_re.match(x):
2835 # Do not allow anything to leak into the ebuild
2836 # environment unless it is explicitly whitelisted.
2837 # This ensures that variables unset by the ebuild
2841 if "HOME" not in mydict and "BUILD_PREFIX" in mydict:
2842 writemsg("*** HOME not set. Setting to "+mydict["BUILD_PREFIX"]+"\n")
2843 mydict["HOME"]=mydict["BUILD_PREFIX"][:]
2845 if filter_calling_env:
2846 phase = self.get("EBUILD_PHASE")
2850 whitelist.append("RPMDIR")
2856 # Filtered by IUSE and implicit IUSE.
2857 mydict["USE"] = self.get("PORTAGE_USE", "")
2859 # sandbox's bashrc sources /etc/profile which unsets ROOTPATH,
2860 # so we have to back it up and restore it.
2861 rootpath = mydict.get("ROOTPATH")
2863 mydict["PORTAGE_ROOTPATH"] = rootpath
# Lazily load and cache the stacked thirdpartymirrors dicts from
# $PORTDIR/profiles and every overlay's profiles directory (overlays
# take precedence since they are inserted at the front).
2867 def thirdpartymirrors(self):
2868 if getattr(self, "_thirdpartymirrors", None) is None:
2869 profileroots = [os.path.join(self["PORTDIR"], "profiles")]
2870 for x in self["PORTDIR_OVERLAY"].split():
2871 profileroots.insert(0, os.path.join(x, "profiles"))
2872 thirdparty_lists = [grabdict(os.path.join(x, "thirdpartymirrors")) for x in profileroots]
2873 self._thirdpartymirrors = stack_dictlist(thirdparty_lists, incremental=True)
2874 return self._thirdpartymirrors
# NOTE(review): enclosing def line not visible here — presumably an
# archlist()-style accessor. Returns each arch plus its "~arch"
# (testing) variant from PORTAGE_ARCHLIST.
2877 return flatten([[myarch, "~" + myarch] \
2878 for myarch in self["PORTAGE_ARCHLIST"].split()])
# Report (and cache) whether SELinux support is active: requires the
# "selinux" USE flag, an importable selinux module, and a kernel with
# SELinux enabled.
2880 def selinux_enabled(self):
2881 if getattr(self, "_selinux_enabled", None) is None:
2882 self._selinux_enabled = 0
2883 if "selinux" in self["USE"].split():
2884 if "selinux" in globals():
2885 if selinux.is_selinux_enabled() == 1:
2886 self._selinux_enabled = 1
2888 self._selinux_enabled = 0
2890 writemsg("!!! SELinux module not found. Please verify that it was installed.\n",
2892 self._selinux_enabled = 0
# Drop the module reference entirely when disabled.
2893 if self._selinux_enabled == 0:
2895 del sys.modules["selinux"]
2898 return self._selinux_enabled
2900 def _shell_quote(s):
2902 Quote a string in double-quotes and use backslashes to
2903 escape any backslashes, double-quotes, dollar signs, or
2904 backquotes in the string.
# Escape each shell-special character with a backslash.
2906 for letter in "\\\"$`":
2908 s = s.replace(letter, "\\" + letter)
2911 # In some cases, openpty can be slow when it fails. Therefore,
2912 # stop trying to use it after the first failure.
# Module-level latch read/set by _create_pty_or_pipe() below.
2913 _disable_openpty = False
2915 def _create_pty_or_pipe(copy_term_size=None):
2917 Try to create a pty and if that fails then create a normal
2920 @param copy_term_size: If a tty file descriptor is given
2921 then the term size will be copied to the pty.
2922 @type copy_term_size: int
2924 @returns: A tuple of (is_pty, master_fd, slave_fd) where
2925 is_pty is True if a pty was successfully allocated, and
2926 False if a normal pipe was allocated.
2931 global _disable_openpty
# After one openpty failure, always fall back to a plain pipe.
2932 if _disable_openpty:
2933 master_fd, slave_fd = os.pipe()
2935 from pty import openpty
2937 master_fd, slave_fd = openpty()
2939 except EnvironmentError, e:
2940 _disable_openpty = True
2941 writemsg("openpty failed: '%s'\n" % str(e),
2944 master_fd, slave_fd = os.pipe()
2947 # Disable post-processing of output since otherwise weird
2948 # things like \n -> \r\n transformations may occur.
2950 mode = termios.tcgetattr(slave_fd)
2951 mode[1] &= ~termios.OPOST
2952 termios.tcsetattr(slave_fd, termios.TCSANOW, mode)
# Propagate the terminal size from the given tty to the new pty.
2955 copy_term_size is not None and \
2956 os.isatty(copy_term_size):
2957 from portage.output import get_term_size, set_term_size
2958 rows, columns = get_term_size()
2959 set_term_size(rows, columns, slave_fd)
2961 return (got_pty, master_fd, slave_fd)
2963 # XXX This would be to replace getstatusoutput completely.
2964 # XXX Issue: cannot block execution. Deadlock condition.
2965 def spawn(mystring, mysettings, debug=0, free=0, droppriv=0, sesandbox=0, fakeroot=0, **keywords):
2967 Spawn a subprocess with extra portage-specific options.
2970 Sandbox: Sandbox means the spawned process will be limited in its ability to
2971 read and write files (normally this means it is restricted to ${IMAGE}/)
2972 SElinux Sandbox: Enables sandboxing on SElinux
2973 Reduced Privileges: Drops privileges such that the process runs as portage:portage
2976 Notes: os.system cannot be used because it messes with signal handling. Instead we
2977 use the portage.process spawn* family of functions.
2979 This function waits for the process to terminate.
2981 @param mystring: Command to run
2982 @type mystring: String
2983 @param mysettings: Either a Dict of Key,Value pairs or an instance of portage.config
2984 @type mysettings: Dictionary or config instance
2985 @param debug: Ignored
2986 @type debug: Boolean
2987 @param free: Enable sandboxing for this process
2989 @param droppriv: Drop to portage:portage when running this command
2990 @type droppriv: Boolean
2991 @param sesandbox: Enable SELinux Sandboxing (toggles a context switch)
2992 @type sesandbox: Boolean
2993 @param fakeroot: Run this command with faked root privileges
2994 @type fakeroot: Boolean
2995 @param keywords: Extra options encoded as a dict, to be passed to spawn
2996 @type keywords: Dictionary
2999 1. The return code of the spawned process.
# Accept either a plain dict or a full config instance for the env.
3002 if isinstance(mysettings, dict):
3004 keywords["opt_name"]="[ %s ]" % "portage"
3006 check_config_instance(mysettings)
3007 env=mysettings.environ()
3008 keywords["opt_name"]="[%s]" % mysettings["PF"]
3010 fd_pipes = keywords.get("fd_pipes")
3011 if fd_pipes is None:
3013 0:sys.stdin.fileno(),
3014 1:sys.stdout.fileno(),
3015 2:sys.stderr.fileno(),
3017 # In some cases the above print statements don't flush stdout, so
3018 # it needs to be flushed before allowing a child process to use it
3019 # so that output always shows in the correct order.
3020 stdout_filenos = (sys.stdout.fileno(), sys.stderr.fileno())
3021 for fd in fd_pipes.itervalues():
3022 if fd in stdout_filenos:
3027 # The default policy for the sesandbox domain only allows entry (via exec)
3028 # from shells and from binaries that belong to portage (the number of entry
3029 # points is minimized). The "tee" binary is not among the allowed entry
3030 # points, so it is spawned outside of the sesandbox domain and reads from a
3031 # pseudo-terminal that connects two domains.
3032 logfile = keywords.get("logfile")
3036 fd_pipes_orig = None
# Logging requested: route child stdout/stderr through a pty (or pipe)
# so this process can tee output to both the terminal and the log file.
3039 del keywords["logfile"]
3040 if 1 not in fd_pipes or 2 not in fd_pipes:
3041 raise ValueError(fd_pipes)
3043 fd_pipes.setdefault(0, sys.stdin.fileno())
3044 fd_pipes_orig = fd_pipes.copy()
3046 got_pty, master_fd, slave_fd = \
3047 _create_pty_or_pipe(copy_term_size=fd_pipes_orig[1])
3049 # We must set non-blocking mode before we close the slave_fd
3050 # since otherwise the fcntl call can fail on FreeBSD (the child
3051 # process might have already exited and closed slave_fd so we
3052 # have to keep it open in order to avoid FreeBSD potentially
3053 # generating an EAGAIN exception).
3055 fcntl.fcntl(master_fd, fcntl.F_SETFL,
3056 fcntl.fcntl(master_fd, fcntl.F_GETFL) | os.O_NONBLOCK)
3058 fd_pipes[0] = fd_pipes_orig[0]
3059 fd_pipes[1] = slave_fd
3060 fd_pipes[2] = slave_fd
3061 keywords["fd_pipes"] = fd_pipes
3063 features = mysettings.features
3064 # TODO: Enable fakeroot to be used together with droppriv. The
3065 # fake ownership/permissions will have to be converted to real
3066 # permissions in the merge phase.
3067 fakeroot = fakeroot and uid != 0 and portage.process.fakeroot_capable
3068 if droppriv and not uid and portage_gid and portage_uid:
3069 keywords.update({"uid":portage_uid,"gid":portage_gid,
3070 "groups":userpriv_groups,"umask":002})
# Decide whether any sandbox is needed at all, based on FEATURES.
3072 free=((droppriv and "usersandbox" not in features) or \
3073 (not droppriv and "sandbox" not in features and \
3074 "usersandbox" not in features and not fakeroot))
# Nested sandboxing is pointless: run bare bash if already sandboxed.
3076 if free or "SANDBOX_ACTIVE" in os.environ:
3077 keywords["opt_name"] += " bash"
3078 spawn_func = portage.process.spawn_bash
3080 keywords["opt_name"] += " fakeroot"
3081 keywords["fakeroot_state"] = os.path.join(mysettings["T"], "fakeroot.state")
3082 spawn_func = portage.process.spawn_fakeroot
3084 keywords["opt_name"] += " sandbox"
3085 spawn_func = portage.process.spawn_sandbox
# Switch into the SELinux sandbox execution context for the child.
3088 con = selinux.getcontext()
3089 con = con.replace(mysettings["PORTAGE_T"],
3090 mysettings["PORTAGE_SANDBOX_T"])
3091 selinux.setexec(con)
3093 returnpid = keywords.get("returnpid")
3094 keywords["returnpid"] = True
3096 mypids.extend(spawn_func(mystring, env=env, **keywords))
# Restore the default SELinux exec context after spawning.
3101 selinux.setexec(None)
# Tee loop: copy child output from the pty master to both the original
# stdout and the log file until the child closes its end.
3107 log_file = open(logfile, 'a')
3108 stdout_file = os.fdopen(os.dup(fd_pipes_orig[1]), 'w')
3109 master_file = os.fdopen(master_fd, 'r')
3110 iwtd = [master_file]
3113 import array, select
3117 events = select.select(iwtd, owtd, ewtd)
3119 # Use non-blocking mode to prevent read
3120 # calls from blocking indefinitely.
3121 buf = array.array('B')
3123 buf.fromfile(f, buffsize)
3129 if f is master_file:
3130 buf.tofile(stdout_file)
3132 buf.tofile(log_file)
# Reap the children and convert the wait status to an exit code.
3138 retval = os.waitpid(pid, 0)[1]
3139 portage.process.spawned_pids.remove(pid)
3140 if retval != os.EX_OK:
3142 return (retval & 0xff) << 8
# Keyword arguments applied when dropping privileges for fetch
# operations (consumed by _spawn_fetch below via kwargs.update()).
3146 _userpriv_spawn_kwargs = (
3147 ("uid", portage_uid),
3148 ("gid", portage_gid),
3149 ("groups", userpriv_groups),
3153 def _spawn_fetch(settings, args, **kwargs):
3155 Spawn a process with appropriate settings for fetching, including
3156 userfetch and selinux support.
3159 global _userpriv_spawn_kwargs
3161 # Redirect all output to stdout since some fetchers like
3162 # wget pollute stderr (if portage detects a problem then it
3163 # can send its own message to stderr).
3164 if "fd_pipes" not in kwargs:
3166 kwargs["fd_pipes"] = {
3167 0 : sys.stdin.fileno(),
3168 1 : sys.stdout.fileno(),
# fd 2 intentionally maps to stdout — see comment above.
3169 2 : sys.stdout.fileno(),
# Drop root privileges to portage:portage when userfetch is enabled.
3172 if "userfetch" in settings.features and \
3173 os.getuid() == 0 and portage_gid and portage_uid:
3174 kwargs.update(_userpriv_spawn_kwargs)
3178 if settings.selinux_enabled():
3179 con = selinux.getcontext()
3180 con = con.replace(settings["PORTAGE_T"], settings["PORTAGE_FETCH_T"])
3181 selinux.setexec(con)
3182 # bash is an allowed entrypoint, while most binaries are not
3183 if args[0] != BASH_BINARY:
3184 args = [BASH_BINARY, "-c", "exec \"$@\"", args[0]] + args
3186 rval = portage.process.spawn(args,
3187 env=dict(settings.iteritems()), **kwargs)
# Always restore the default SELinux exec context afterwards.
3190 if settings.selinux_enabled():
3191 selinux.setexec(None)
# Cache of file_path -> bool results for _userpriv_test_write_file().
3195 _userpriv_test_write_file_cache = {}
# Shell snippet: touch the file, record the result, clean up, and exit
# with touch's status so write access can be probed as another user.
3196 _userpriv_test_write_cmd_script = "touch %(file_path)s 2>/dev/null ; rval=$? ; " + \
3197 "rm -f %(file_path)s ; exit $rval"
3199 def _userpriv_test_write_file(settings, file_path):
3201 Drop privileges and try to open a file for writing. The file may or
3202 may not exist, and the parent directory is assumed to exist. The file
3203 is removed before returning.
3205 @param settings: A config instance which is passed to _spawn_fetch()
3206 @param file_path: A file path to open and write.
3207 @return: True if write succeeds, False otherwise.
3210 global _userpriv_test_write_file_cache, _userpriv_test_write_cmd_script
# Return the cached result for this path when available.
3211 rval = _userpriv_test_write_file_cache.get(file_path)
3212 if rval is not None:
3215 args = [BASH_BINARY, "-c", _userpriv_test_write_cmd_script % \
3216 {"file_path" : _shell_quote(file_path)}]
3218 returncode = _spawn_fetch(settings, args)
3220 rval = returncode == os.EX_OK
3221 _userpriv_test_write_file_cache[file_path] = rval
3224 def _checksum_failure_temp_file(distdir, basename):
3226 First try to find a duplicate temp file with the same checksum and return
3227 that filename if available. Otherwise, use mkstemp to create a new unique
3228 filename._checksum_failure_.$RANDOM, rename the given file, and return the
3229 new filename. In any case, filename will be renamed or removed before this
3230 function returns a temp filename.
3233 filename = os.path.join(distdir, basename)
3234 size = os.stat(filename).st_size
# Look for previously stashed copies of the same distfile.
3236 tempfile_re = re.compile(re.escape(basename) + r'\._checksum_failure_\..*')
3237 for temp_filename in os.listdir(distdir):
3238 if not tempfile_re.match(temp_filename):
3240 temp_filename = os.path.join(distdir, temp_filename)
# Cheap size check first; only hash files of identical size.
3242 if size != os.stat(temp_filename).st_size:
3247 temp_checksum = portage.checksum.perform_md5(temp_filename)
3248 except portage.exception.FileNotFound:
3249 # Apparently the temp file disappeared. Let it go.
# Hash the original lazily, only once a same-size candidate exists.
3251 if checksum is None:
3252 checksum = portage.checksum.perform_md5(filename)
3253 if checksum == temp_checksum:
3255 return temp_filename
# No duplicate found: stash the file under a fresh unique temp name.
3257 from tempfile import mkstemp
3258 fd, temp_filename = mkstemp("", basename + "._checksum_failure_.", distdir)
3260 os.rename(filename, temp_filename)
3261 return temp_filename
3263 def _check_digests(filename, digests, show_errors=1):
3265 Check digests and display a message if an error occurs.
3266 @return True if all digests match, False otherwise.
3268 verified_ok, reason = portage.checksum.verify_all(filename, digests)
# On failure, reason is a (summary, got, expected) triple.
3271 writemsg("!!! Previously fetched" + \
3272 " file: '%s'\n" % filename, noiselevel=-1)
3273 writemsg("!!! Reason: %s\n" % reason[0],
3275 writemsg(("!!! Got: %s\n" + \
3276 "!!! Expected: %s\n") % \
3277 (reason[1], reason[2]), noiselevel=-1)
3281 def _check_distfile(filename, digests, eout, show_errors=1):
3283 @return a tuple of (match, stat_obj) where match is True if filename
3284 matches all given digests (if any) and stat_obj is a stat result, or
3285 None if the file does not exist.
3289 size = digests.get("size")
# If "size" is the only digest, a size comparison alone is sufficient.
3290 if size is not None and len(digests) == 1:
3294 st = os.stat(filename)
3296 return (False, None)
3297 if size is not None and size != st.st_size:
3300 if size is not None:
3301 eout.ebegin("%s %s ;-)" % (os.path.basename(filename), "size"))
3303 elif st.st_size == 0:
3304 # Zero-byte distfiles are always invalid.
3307 if _check_digests(filename, digests, show_errors=show_errors):
3308 eout.ebegin("%s %s ;-)" % (os.path.basename(filename),
3309 " ".join(sorted(digests))))
# Parse PORTAGE_FETCH_RESUME_MIN_SIZE values like "350K": an integer
# followed by an optional SI-style size suffix.
3315 _fetch_resume_size_re = re.compile('(^[\d]+)([KMGTPEZY]?$)')
# Maps each suffix to its power-of-two exponent (used as 2 ** value).
3317 _size_suffix_map = {
3329 def fetch(myuris, mysettings, listonly=0, fetchonly=0, locks_in_subdir=".locks",use_locks=1, try_mirrors=1):
3330 "fetch files. Will use digest file if available."
3335 features = mysettings.features
3336 restrict = mysettings.get("PORTAGE_RESTRICT","").split()
3338 from portage.data import secpass
3339 userfetch = secpass >= 2 and "userfetch" in features
3340 userpriv = secpass >= 2 and "userpriv" in features
3342 # 'nomirror' is bad/negative logic. You Restrict mirroring, not no-mirroring.
3343 if "mirror" in restrict or \
3344 "nomirror" in restrict:
3345 if ("mirror" in features) and ("lmirror" not in features):
3346 # lmirror should allow you to bypass mirror restrictions.
3347 # XXX: This is not a good thing, and is temporary at best.
3348 print ">>> \"mirror\" mode desired and \"mirror\" restriction found; skipping fetch."
3351 # Generally, downloading the same file repeatedly from
3352 # every single available mirror is a waste of bandwidth
3353 # and time, so there needs to be a cap.
3354 checksum_failure_max_tries = 5
3355 v = checksum_failure_max_tries
3357 v = int(mysettings.get("PORTAGE_FETCH_CHECKSUM_TRY_MIRRORS",
3358 checksum_failure_max_tries))
3359 except (ValueError, OverflowError):
3360 writemsg("!!! Variable PORTAGE_FETCH_CHECKSUM_TRY_MIRRORS" + \
3361 " contains non-integer value: '%s'\n" % \
3362 mysettings["PORTAGE_FETCH_CHECKSUM_TRY_MIRRORS"], noiselevel=-1)
3363 writemsg("!!! Using PORTAGE_FETCH_CHECKSUM_TRY_MIRRORS " + \
3364 "default value: %s\n" % checksum_failure_max_tries,
3366 v = checksum_failure_max_tries
3368 writemsg("!!! Variable PORTAGE_FETCH_CHECKSUM_TRY_MIRRORS" + \
3369 " contains value less than 1: '%s'\n" % v, noiselevel=-1)
3370 writemsg("!!! Using PORTAGE_FETCH_CHECKSUM_TRY_MIRRORS " + \
3371 "default value: %s\n" % checksum_failure_max_tries,
3373 v = checksum_failure_max_tries
3374 checksum_failure_max_tries = v
3377 fetch_resume_size_default = "350K"
3378 fetch_resume_size = mysettings.get("PORTAGE_FETCH_RESUME_MIN_SIZE")
3379 if fetch_resume_size is not None:
3380 fetch_resume_size = "".join(fetch_resume_size.split())
3381 if not fetch_resume_size:
3382 # If it's undefined or empty, silently use the default.
3383 fetch_resume_size = fetch_resume_size_default
3384 match = _fetch_resume_size_re.match(fetch_resume_size)
3385 if match is None or \
3386 (match.group(2).upper() not in _size_suffix_map):
3387 writemsg("!!! Variable PORTAGE_FETCH_RESUME_MIN_SIZE" + \
3388 " contains an unrecognized format: '%s'\n" % \
3389 mysettings["PORTAGE_FETCH_RESUME_MIN_SIZE"], noiselevel=-1)
3390 writemsg("!!! Using PORTAGE_FETCH_RESUME_MIN_SIZE " + \
3391 "default value: %s\n" % fetch_resume_size_default,
3393 fetch_resume_size = None
3394 if fetch_resume_size is None:
3395 fetch_resume_size = fetch_resume_size_default
3396 match = _fetch_resume_size_re.match(fetch_resume_size)
3397 fetch_resume_size = int(match.group(1)) * \
3398 2 ** _size_suffix_map[match.group(2).upper()]
3400 # Behave like the package has RESTRICT="primaryuri" after a
3401 # couple of checksum failures, to increase the probablility
3402 # of success before checksum_failure_max_tries is reached.
3403 checksum_failure_primaryuri = 2
3404 thirdpartymirrors = mysettings.thirdpartymirrors()
3406 # In the background parallel-fetch process, it's safe to skip checksum
3407 # verification of pre-existing files in $DISTDIR that have the correct
3408 # file size. The parent process will verify their checksums prior to
3411 parallel_fetchonly = "PORTAGE_PARALLEL_FETCHONLY" in mysettings
3412 if parallel_fetchonly:
3415 check_config_instance(mysettings)
3417 custommirrors = grabdict(os.path.join(mysettings["PORTAGE_CONFIGROOT"],
3418 CUSTOM_MIRRORS_FILE.lstrip(os.path.sep)), recursive=1)
3422 if listonly or ("distlocks" not in features):
3426 if "skiprocheck" in features:
3429 if not os.access(mysettings["DISTDIR"],os.W_OK) and fetch_to_ro:
3431 writemsg(colorize("BAD",
3432 "!!! For fetching to a read-only filesystem, " + \
3433 "locking should be turned off.\n"), noiselevel=-1)
3434 writemsg("!!! This can be done by adding -distlocks to " + \
3435 "FEATURES in /etc/make.conf\n", noiselevel=-1)
3438 # local mirrors are always added
3439 if "local" in custommirrors:
3440 mymirrors += custommirrors["local"]
3442 if "nomirror" in restrict or \
3443 "mirror" in restrict:
3444 # We don't add any mirrors.
3448 mymirrors += [x.rstrip("/") for x in mysettings["GENTOO_MIRRORS"].split() if x]
3450 skip_manifest = mysettings.get("EBUILD_SKIP_MANIFEST") == "1"
3451 pkgdir = mysettings.get("O")
3452 if not (pkgdir is None or skip_manifest):
3453 mydigests = Manifest(
3454 pkgdir, mysettings["DISTDIR"]).getTypeDigests("DIST")
3456 # no digests because fetch was not called for a specific package
3460 ro_distdirs = [x for x in \
3461 shlex.split(mysettings.get("PORTAGE_RO_DISTDIRS", "")) \
3462 if os.path.isdir(x)]
3465 for x in range(len(mymirrors)-1,-1,-1):
3466 if mymirrors[x] and mymirrors[x][0]=='/':
3467 fsmirrors += [mymirrors[x]]
3470 restrict_fetch = "fetch" in restrict
3471 custom_local_mirrors = custommirrors.get("local", [])
3473 # With fetch restriction, a normal uri may only be fetched from
3474 # custom local mirrors (if available). A mirror:// uri may also
3475 # be fetched from specific mirrors (effectively overriding fetch
3476 # restriction, but only for specific mirrors).
3477 locations = custom_local_mirrors
3479 locations = mymirrors
3481 file_uri_tuples = []
3482 if isinstance(myuris, dict):
3483 for myfile, uri_set in myuris.iteritems():
3484 for myuri in uri_set:
3485 file_uri_tuples.append((myfile, myuri))
3487 for myuri in myuris:
3488 file_uri_tuples.append((os.path.basename(myuri), myuri))
3491 primaryuri_indexes={}
3492 primaryuri_dict = {}
3493 thirdpartymirror_uris = {}
3494 for myfile, myuri in file_uri_tuples:
3495 if myfile not in filedict:
3497 for y in range(0,len(locations)):
3498 filedict[myfile].append(locations[y]+"/distfiles/"+myfile)
3499 if myuri[:9]=="mirror://":
3500 eidx = myuri.find("/", 9)
3502 mirrorname = myuri[9:eidx]
3503 path = myuri[eidx+1:]
3505 # Try user-defined mirrors first
3506 if mirrorname in custommirrors:
3507 for cmirr in custommirrors[mirrorname]:
3508 filedict[myfile].append(
3509 cmirr.rstrip("/") + "/" + path)
3511 # now try the official mirrors
3512 if mirrorname in thirdpartymirrors:
3513 shuffle(thirdpartymirrors[mirrorname])
3515 uris = [locmirr.rstrip("/") + "/" + path \
3516 for locmirr in thirdpartymirrors[mirrorname]]
3517 filedict[myfile].extend(uris)
3518 thirdpartymirror_uris.setdefault(myfile, []).extend(uris)
3520 if not filedict[myfile]:
3521 writemsg("No known mirror by the name: %s\n" % (mirrorname))
3523 writemsg("Invalid mirror definition in SRC_URI:\n", noiselevel=-1)
3524 writemsg(" %s\n" % (myuri), noiselevel=-1)
3527 # Only fetch from specific mirrors is allowed.
3529 if "primaryuri" in restrict:
3530 # Use the source site first.
3531 if myfile in primaryuri_indexes:
3532 primaryuri_indexes[myfile] += 1
3534 primaryuri_indexes[myfile] = 0
3535 filedict[myfile].insert(primaryuri_indexes[myfile], myuri)
3537 filedict[myfile].append(myuri)
3538 primaryuris = primaryuri_dict.get(myfile)
3539 if primaryuris is None:
3541 primaryuri_dict[myfile] = primaryuris
3542 primaryuris.append(myuri)
3544 # Prefer thirdpartymirrors over normal mirrors in cases when
3545 # the file does not yet exist on the normal mirrors.
3546 for myfile, uris in thirdpartymirror_uris.iteritems():
3547 primaryuri_dict.setdefault(myfile, []).extend(uris)
3554 for var_name in ("FETCHCOMMAND", "RESUMECOMMAND"):
3555 if not mysettings.get(var_name, None):
3558 if can_fetch and not fetch_to_ro:
3559 global _userpriv_test_write_file_cache
3563 dir_gid = portage_gid
3564 if "FAKED_MODE" in mysettings:
3565 # When inside fakeroot, directories with portage's gid appear
3566 # to have root's gid. Therefore, use root's gid instead of
3567 # portage's gid to avoid spurious permissions adjustments
3568 # when inside fakeroot.
3571 if "distlocks" in features:
3572 distdir_dirs.append(".locks")
3575 for x in distdir_dirs:
3576 mydir = os.path.join(mysettings["DISTDIR"], x)
3577 write_test_file = os.path.join(
3578 mydir, ".__portage_test_write__")
3585 if st is not None and stat.S_ISDIR(st.st_mode):
3586 if not (userfetch or userpriv):
3588 if _userpriv_test_write_file(mysettings, write_test_file):
3591 _userpriv_test_write_file_cache.pop(write_test_file, None)
3592 if portage.util.ensure_dirs(mydir, gid=dir_gid, mode=dirmode, mask=modemask):
3594 # The directory has just been created
3595 # and therefore it must be empty.
3597 writemsg("Adjusting permissions recursively: '%s'\n" % mydir,
3600 raise # bail out on the first error that occurs during recursion
3601 if not apply_recursive_permissions(mydir,
3602 gid=dir_gid, dirmode=dirmode, dirmask=modemask,
3603 filemode=filemode, filemask=modemask, onerror=onerror):
3604 raise portage.exception.OperationNotPermitted(
3605 "Failed to apply recursive permissions for the portage group.")
3606 except portage.exception.PortageException, e:
3607 if not os.path.isdir(mysettings["DISTDIR"]):
3608 writemsg("!!! %s\n" % str(e), noiselevel=-1)
3609 writemsg("!!! Directory Not Found: DISTDIR='%s'\n" % mysettings["DISTDIR"], noiselevel=-1)
3610 writemsg("!!! Fetching will fail!\n", noiselevel=-1)
3613 not fetch_to_ro and \
3614 not os.access(mysettings["DISTDIR"], os.W_OK):
3615 writemsg("!!! No write access to '%s'\n" % mysettings["DISTDIR"],
3619 if can_fetch and use_locks and locks_in_subdir:
3620 distlocks_subdir = os.path.join(mysettings["DISTDIR"], locks_in_subdir)
3621 if not os.access(distlocks_subdir, os.W_OK):
3622 writemsg("!!! No write access to write to %s. Aborting.\n" % distlocks_subdir,
3625 del distlocks_subdir
3627 distdir_writable = can_fetch and not fetch_to_ro
3628 failed_files = set()
3629 restrict_fetch_msg = False
3631 for myfile in filedict:
3635 1 partially downloaded
3636 2 completely downloaded
3640 orig_digests = mydigests.get(myfile, {})
3641 size = orig_digests.get("size")
3643 # Zero-byte distfiles are always invalid, so discard their digests.
3644 del mydigests[myfile]
3645 orig_digests.clear()
3647 pruned_digests = orig_digests
3648 if parallel_fetchonly:
3650 if size is not None:
3651 pruned_digests["size"] = size
3653 myfile_path = os.path.join(mysettings["DISTDIR"], myfile)
3657 writemsg_stdout("\n", noiselevel=-1)
3659 # check if there is enough space in DISTDIR to completely store myfile
3660 # overestimate the filesize so we aren't bitten by FS overhead
3661 if size is not None and hasattr(os, "statvfs"):
3662 vfs_stat = os.statvfs(mysettings["DISTDIR"])
3664 mysize = os.stat(myfile_path).st_size
3666 if e.errno != errno.ENOENT:
3670 if (size - mysize + vfs_stat.f_bsize) >= \
3671 (vfs_stat.f_bsize * vfs_stat.f_bavail):
3672 writemsg("!!! Insufficient space to store %s in %s\n" % (myfile, mysettings["DISTDIR"]), noiselevel=-1)
3675 if distdir_writable and use_locks:
3677 if not parallel_fetchonly and "parallel-fetch" in features:
3678 waiting_msg = ("Fetching '%s' " + \
3679 "in the background. " + \
3680 "To view fetch progress, run `tail -f " + \
3681 "/var/log/emerge-fetch.log` in another " + \
3682 "terminal.") % myfile
3683 msg_prefix = colorize("GOOD", " * ")
3684 from textwrap import wrap
3685 waiting_msg = "\n".join(msg_prefix + line \
3686 for line in wrap(waiting_msg, 65))
3689 lock_file = os.path.join(mysettings["DISTDIR"],
3690 locks_in_subdir, myfile)
3692 lock_file = myfile_path
3696 lock_kwargs["flags"] = os.O_NONBLOCK
3698 lock_kwargs["waiting_msg"] = waiting_msg
3701 file_lock = portage.locks.lockfile(myfile_path,
3702 wantnewlockfile=1, **lock_kwargs)
3703 except portage.exception.TryAgain:
3704 writemsg((">>> File '%s' is already locked by " + \
3705 "another fetcher. Continuing...\n") % myfile,
3711 eout = portage.output.EOutput()
3712 eout.quiet = mysettings.get("PORTAGE_QUIET") == "1"
3713 match, mystat = _check_distfile(
3714 myfile_path, pruned_digests, eout)
3716 if distdir_writable:
3718 apply_secpass_permissions(myfile_path,
3719 gid=portage_gid, mode=0664, mask=02,
3721 except portage.exception.PortageException, e:
3722 if not os.access(myfile_path, os.R_OK):
3723 writemsg("!!! Failed to adjust permissions:" + \
3724 " %s\n" % str(e), noiselevel=-1)
3728 if distdir_writable and mystat is None:
3729 # Remove broken symlinks if necessary.
3731 os.unlink(myfile_path)
3735 if mystat is not None:
3736 if mystat.st_size == 0:
3737 if distdir_writable:
3739 os.unlink(myfile_path)
3742 elif distdir_writable:
3743 if mystat.st_size < fetch_resume_size and \
3744 mystat.st_size < size:
3745 writemsg((">>> Deleting distfile with size " + \
3746 "%d (smaller than " "PORTAGE_FETCH_RESU" + \
3747 "ME_MIN_SIZE)\n") % mystat.st_size)
3749 os.unlink(myfile_path)
3751 if e.errno != errno.ENOENT:
3754 elif mystat.st_size >= size:
3756 _checksum_failure_temp_file(
3757 mysettings["DISTDIR"], myfile)
3758 writemsg_stdout("Refetching... " + \
3759 "File renamed to '%s'\n\n" % \
3760 temp_filename, noiselevel=-1)
3762 if distdir_writable and ro_distdirs:
3763 readonly_file = None
3764 for x in ro_distdirs:
3765 filename = os.path.join(x, myfile)
3766 match, mystat = _check_distfile(
3767 filename, pruned_digests, eout)
3769 readonly_file = filename
3771 if readonly_file is not None:
3773 os.unlink(myfile_path)
3775 if e.errno != errno.ENOENT:
3778 os.symlink(readonly_file, myfile_path)
3781 if fsmirrors and not os.path.exists(myfile_path) and has_space:
3782 for mydir in fsmirrors:
3783 mirror_file = os.path.join(mydir, myfile)
3785 shutil.copyfile(mirror_file, myfile_path)
3786 writemsg(_("Local mirror has file:" + \
3787 " %(file)s\n" % {"file":myfile}))
3789 except (IOError, OSError), e:
3790 if e.errno != errno.ENOENT:
3795 mystat = os.stat(myfile_path)
3797 if e.errno != errno.ENOENT:
3802 apply_secpass_permissions(
3803 myfile_path, gid=portage_gid, mode=0664, mask=02,
3805 except portage.exception.PortageException, e:
3806 if not os.access(myfile_path, os.R_OK):
3807 writemsg("!!! Failed to adjust permissions:" + \
3808 " %s\n" % str(e), noiselevel=-1)
3810 # If the file is empty then it's obviously invalid. Remove
3811 # the empty file and try to download if possible.
3812 if mystat.st_size == 0:
3813 if distdir_writable:
3815 os.unlink(myfile_path)
3816 except EnvironmentError:
3818 elif myfile not in mydigests:
3819 # We don't have a digest, but the file exists. We must
3820 # assume that it is fully downloaded.
3823 if mystat.st_size < mydigests[myfile]["size"] and \
3825 fetched = 1 # Try to resume this download.
3826 elif parallel_fetchonly and \
3827 mystat.st_size == mydigests[myfile]["size"]:
3828 eout = portage.output.EOutput()
3830 mysettings.get("PORTAGE_QUIET") == "1"
3832 "%s size ;-)" % (myfile, ))
3836 verified_ok, reason = portage.checksum.verify_all(
3837 myfile_path, mydigests[myfile])
3839 writemsg("!!! Previously fetched" + \
3840 " file: '%s'\n" % myfile, noiselevel=-1)
3841 writemsg("!!! Reason: %s\n" % reason[0],
3843 writemsg(("!!! Got: %s\n" + \
3844 "!!! Expected: %s\n") % \
3845 (reason[1], reason[2]), noiselevel=-1)
3846 if reason[0] == "Insufficient data for checksum verification":
3848 if distdir_writable:
3850 _checksum_failure_temp_file(
3851 mysettings["DISTDIR"], myfile)
3852 writemsg_stdout("Refetching... " + \
3853 "File renamed to '%s'\n\n" % \
3854 temp_filename, noiselevel=-1)
3856 eout = portage.output.EOutput()
3858 mysettings.get("PORTAGE_QUIET", None) == "1"
3859 digests = mydigests.get(myfile)
3861 digests = digests.keys()
3864 "%s %s ;-)" % (myfile, " ".join(digests)))
3866 continue # fetch any remaining files
3868 # Create a reversed list since that is optimal for list.pop().
3869 uri_list = filedict[myfile][:]
3871 checksum_failure_count = 0
3872 tried_locations = set()
3874 loc = uri_list.pop()
3875 # Eliminate duplicates here in case we've switched to
3876 # "primaryuri" mode on the fly due to a checksum failure.
3877 if loc in tried_locations:
3879 tried_locations.add(loc)
3881 writemsg_stdout(loc+" ", noiselevel=-1)
3883 # allow different fetchcommands per protocol
3884 protocol = loc[0:loc.find("://")]
3885 if "FETCHCOMMAND_" + protocol.upper() in mysettings:
3886 fetchcommand=mysettings["FETCHCOMMAND_"+protocol.upper()]
3888 fetchcommand=mysettings["FETCHCOMMAND"]
3889 if "RESUMECOMMAND_" + protocol.upper() in mysettings:
3890 resumecommand=mysettings["RESUMECOMMAND_"+protocol.upper()]
3892 resumecommand=mysettings["RESUMECOMMAND"]
3897 writemsg("!!! File %s isn't fetched but unable to get it.\n" % myfile,
3900 writemsg("!!! File %s isn't fully fetched, but unable to complete it\n" % myfile,
3902 for var_name in ("FETCHCOMMAND", "RESUMECOMMAND"):
3903 if not mysettings.get(var_name, None):
3904 writemsg(("!!! %s is unset. It should " + \
3905 "have been defined in /etc/make.globals.\n") \
3906 % var_name, noiselevel=-1)
3911 if fetched != 2 and has_space:
3912 #we either need to resume or start the download
3915 mystat = os.stat(myfile_path)
3917 if e.errno != errno.ENOENT:
3922 if mystat.st_size < fetch_resume_size:
3923 writemsg((">>> Deleting distfile with size " + \
3924 "%d (smaller than " "PORTAGE_FETCH_RESU" + \
3925 "ME_MIN_SIZE)\n") % mystat.st_size)
3927 os.unlink(myfile_path)
3929 if e.errno != errno.ENOENT:
3935 writemsg(">>> Resuming download...\n")
3936 locfetch=resumecommand
3939 locfetch=fetchcommand
3940 writemsg_stdout(">>> Downloading '%s'\n" % \
3941 re.sub(r'//(.+):.+@(.+)/',r'//\1:*password*@\2/', loc))
3943 "DISTDIR": mysettings["DISTDIR"],
3947 import shlex, StringIO
3948 lexer = shlex.shlex(StringIO.StringIO(locfetch), posix=True)
3949 lexer.whitespace_split = True
3950 myfetch = [varexpand(x, mydict=variables) for x in lexer]
3954 myret = _spawn_fetch(mysettings, myfetch)
3958 apply_secpass_permissions(myfile_path,
3959 gid=portage_gid, mode=0664, mask=02)
3960 except portage.exception.FileNotFound, e:
3962 except portage.exception.PortageException, e:
3963 if not os.access(myfile_path, os.R_OK):
3964 writemsg("!!! Failed to adjust permissions:" + \
3965 " %s\n" % str(e), noiselevel=-1)
3967 # If the file is empty then it's obviously invalid. Don't
3968 # trust the return value from the fetcher. Remove the
3969 # empty file and try to download again.
3971 if os.stat(myfile_path).st_size == 0:
3972 os.unlink(myfile_path)
3975 except EnvironmentError:
3978 if mydigests is not None and myfile in mydigests:
3980 mystat = os.stat(myfile_path)
3982 if e.errno != errno.ENOENT:
3987 # no exception? file exists. let digestcheck() report
3988 # an appropriate error for size or checksum errors
3990 # If the fetcher reported success and the file is
3991 # too small, it's probably because the digest is
3992 # bad (upstream changed the distfile). In this
3993 # case we don't want to attempt to resume. Show a
3994 # digest verification failure so that the user gets
3995 # a clue about what just happened.
3996 if myret != os.EX_OK and \
3997 mystat.st_size < mydigests[myfile]["size"]:
3998 # Fetch failed... Try the next one... Kill 404 files though.
3999 if (mystat[stat.ST_SIZE]<100000) and (len(myfile)>4) and not ((myfile[-5:]==".html") or (myfile[-4:]==".htm")):
4000 html404=re.compile("<title>.*(not found|404).*</title>",re.I|re.M)
4001 if html404.search(open(mysettings["DISTDIR"]+"/"+myfile).read()):
4003 os.unlink(mysettings["DISTDIR"]+"/"+myfile)
4004 writemsg(">>> Deleting invalid distfile. (Improper 404 redirect from server.)\n")
4007 except (IOError, OSError):
4012 # File is the correct size--check the checksums for the fetched
4013 # file NOW, for those users who don't have a stable/continuous
4014 # net connection. This way we have a chance to try to download
4015 # from another mirror...
4016 verified_ok,reason = portage.checksum.verify_all(mysettings["DISTDIR"]+"/"+myfile, mydigests[myfile])
4019 writemsg("!!! Fetched file: "+str(myfile)+" VERIFY FAILED!\n",
4021 writemsg("!!! Reason: "+reason[0]+"\n",
4023 writemsg("!!! Got: %s\n!!! Expected: %s\n" % \
4024 (reason[1], reason[2]), noiselevel=-1)
4025 if reason[0] == "Insufficient data for checksum verification":
4028 _checksum_failure_temp_file(
4029 mysettings["DISTDIR"], myfile)
4030 writemsg_stdout("Refetching... " + \
4031 "File renamed to '%s'\n\n" % \
4032 temp_filename, noiselevel=-1)
4034 checksum_failure_count += 1
4035 if checksum_failure_count == \
4036 checksum_failure_primaryuri:
4037 # Switch to "primaryuri" mode in order
4038 # to increase the probability of
4041 primaryuri_dict.get(myfile)
4044 reversed(primaryuris))
4045 if checksum_failure_count >= \
4046 checksum_failure_max_tries:
4049 eout = portage.output.EOutput()
4050 eout.quiet = mysettings.get("PORTAGE_QUIET", None) == "1"
4051 digests = mydigests.get(myfile)
4053 eout.ebegin("%s %s ;-)" % \
4054 (myfile, " ".join(sorted(digests))))
4062 elif mydigests!=None:
4063 writemsg("No digest file available and download failed.\n\n",
4066 if use_locks and file_lock:
4067 portage.locks.unlockfile(file_lock)
4070 writemsg_stdout("\n", noiselevel=-1)
4072 if restrict_fetch and not restrict_fetch_msg:
4073 restrict_fetch_msg = True
4074 msg = ("\n!!! %s/%s" + \
4075 " has fetch restriction turned on.\n" + \
4076 "!!! This probably means that this " + \
4077 "ebuild's files must be downloaded\n" + \
4078 "!!! manually. See the comments in" + \
4079 " the ebuild for more information.\n\n") % \
4080 (mysettings["CATEGORY"], mysettings["PF"])
4081 portage.util.writemsg_level(msg,
4082 level=logging.ERROR, noiselevel=-1)
4083 have_builddir = "PORTAGE_BUILDDIR" in mysettings and \
4084 os.path.isdir(mysettings["PORTAGE_BUILDDIR"])
4086 global_tmpdir = mysettings["PORTAGE_TMPDIR"]
4087 private_tmpdir = None
4088 if not parallel_fetchonly and not have_builddir:
4089 # When called by digestgen(), it's normal that
4090 # PORTAGE_BUILDDIR doesn't exist. It's helpful
4091 # to show the pkg_nofetch output though, so go
4092 # ahead and create a temporary PORTAGE_BUILDDIR.
4093 # Use a temporary config instance to avoid altering
4094 # the state of the one that's been passed in.
4095 mysettings = config(clone=mysettings)
4096 from tempfile import mkdtemp
4098 private_tmpdir = mkdtemp("", "._portage_fetch_.",
4101 if e.errno != portage.exception.PermissionDenied.errno:
4103 raise portage.exception.PermissionDenied(global_tmpdir)
4104 mysettings["PORTAGE_TMPDIR"] = private_tmpdir
4105 mysettings.backup_changes("PORTAGE_TMPDIR")
4106 debug = mysettings.get("PORTAGE_DEBUG") == "1"
4107 portage.doebuild_environment(mysettings["EBUILD"], "fetch",
4108 mysettings["ROOT"], mysettings, debug, 1, None)
4109 prepare_build_dirs(mysettings["ROOT"], mysettings, 0)
4110 have_builddir = True
4112 if not parallel_fetchonly and have_builddir:
4113 # To spawn pkg_nofetch requires PORTAGE_BUILDDIR for
4114 # ensuring sane $PWD (bug #239560) and storing elog
4115 # messages. Therefore, calling code needs to ensure that
4116 # PORTAGE_BUILDDIR is already clean and locked here.
4118 # All the pkg_nofetch goes to stderr since it's considered
4119 # to be an error message.
4121 0 : sys.stdin.fileno(),
4122 1 : sys.stderr.fileno(),
4123 2 : sys.stderr.fileno(),
4126 ebuild_phase = mysettings.get("EBUILD_PHASE")
4128 mysettings["EBUILD_PHASE"] = "nofetch"
4129 spawn(_shell_quote(EBUILD_SH_BINARY) + \
4130 " nofetch", mysettings, fd_pipes=fd_pipes)
4132 if ebuild_phase is None:
4133 mysettings.pop("EBUILD_PHASE", None)
4135 mysettings["EBUILD_PHASE"] = ebuild_phase
4136 if private_tmpdir is not None:
4137 shutil.rmtree(private_tmpdir)
4139 elif restrict_fetch:
4143 elif not filedict[myfile]:
4144 writemsg("Warning: No mirrors available for file" + \
4145 " '%s'\n" % (myfile), noiselevel=-1)
4147 writemsg("!!! Couldn't download '%s'. Aborting.\n" % myfile,
4153 failed_files.add(myfile)
4160 def digestgen(myarchives, mysettings, overwrite=1, manifestonly=0, myportdb=None):
4162 Generates a digest file if missing. Assumes all files are available.
4163 DEPRECATED: this now only is a compability wrapper for
4164 portage.manifest.Manifest()
4165 NOTE: manifestonly and overwrite are useless with manifest2 and
4166 are therefore ignored."""
4167 if myportdb is None:
4168 writemsg("Warning: myportdb not specified to digestgen\n")
4171 global _doebuild_manifest_exempt_depend
4173 _doebuild_manifest_exempt_depend += 1
4175 fetchlist_dict = FetchlistDict(mysettings["O"], mysettings, myportdb)
4176 for cpv in fetchlist_dict:
4178 for myfile in fetchlist_dict[cpv]:
4179 distfiles_map.setdefault(myfile, []).append(cpv)
4180 except portage.exception.InvalidDependString, e:
4181 writemsg("!!! %s\n" % str(e), noiselevel=-1)
4184 mytree = os.path.dirname(os.path.dirname(mysettings["O"]))
4185 manifest1_compat = False
4186 mf = Manifest(mysettings["O"], mysettings["DISTDIR"],
4187 fetchlist_dict=fetchlist_dict, manifest1_compat=manifest1_compat)
4188 # Don't require all hashes since that can trigger excessive
4189 # fetches when sufficient digests already exist. To ease transition
4190 # while Manifest 1 is being removed, only require hashes that will
4191 # exist before and after the transition.
4192 required_hash_types = set()
4193 required_hash_types.add("size")
4194 required_hash_types.add(portage.const.MANIFEST2_REQUIRED_HASH)
4195 dist_hashes = mf.fhashdict.get("DIST", {})
4197 # To avoid accidental regeneration of digests with the incorrect
4198 # files (such as partially downloaded files), trigger the fetch
4199 # code if the file exists and it's size doesn't match the current
4200 # manifest entry. If there really is a legitimate reason for the
4201 # digest to change, `ebuild --force digest` can be used to avoid
4202 # triggering this code (or else the old digests can be manually
4203 # removed from the Manifest).
4205 for myfile in distfiles_map:
4206 myhashes = dist_hashes.get(myfile)
4208 missing_files.append(myfile)
4210 size = myhashes.get("size")
4213 st = os.stat(os.path.join(mysettings["DISTDIR"], myfile))
4215 if e.errno != errno.ENOENT:
4219 missing_files.append(myfile)
4221 if required_hash_types.difference(myhashes):
4222 missing_files.append(myfile)
4225 if st.st_size == 0 or size is not None and size != st.st_size:
4226 missing_files.append(myfile)
4230 mytree = os.path.realpath(os.path.dirname(
4231 os.path.dirname(mysettings["O"])))
4232 fetch_settings = config(clone=mysettings)
4233 debug = mysettings.get("PORTAGE_DEBUG") == "1"
4234 for myfile in missing_files:
4236 for cpv in distfiles_map[myfile]:
4237 myebuild = os.path.join(mysettings["O"],
4238 catsplit(cpv)[1] + ".ebuild")
4239 # for RESTRICT=fetch, mirror, etc...
4240 doebuild_environment(myebuild, "fetch",
4241 mysettings["ROOT"], fetch_settings,
4243 uri_map = myportdb.getFetchMap(cpv, mytree=mytree)
4244 myuris = {myfile:uri_map[myfile]}
4245 fetch_settings["A"] = myfile # for use by pkg_nofetch()
4246 if fetch(myuris, fetch_settings):
4250 writemsg(("!!! File %s doesn't exist, can't update " + \
4251 "Manifest\n") % myfile, noiselevel=-1)
4253 writemsg_stdout(">>> Creating Manifest for %s\n" % mysettings["O"])
4255 mf.create(requiredDistfiles=myarchives,
4256 assumeDistHashesSometimes=True,
4257 assumeDistHashesAlways=(
4258 "assume-digests" in mysettings.features))
4259 except portage.exception.FileNotFound, e:
4260 writemsg(("!!! File %s doesn't exist, can't update " + \
4261 "Manifest\n") % e, noiselevel=-1)
4263 except portage.exception.PortagePackageException, e:
4264 writemsg(("!!! %s\n") % (e,), noiselevel=-1)
4267 mf.write(sign=False)
4268 except portage.exception.PermissionDenied, e:
4269 writemsg("!!! Permission Denied: %s\n" % (e,), noiselevel=-1)
4271 if "assume-digests" not in mysettings.features:
4272 distlist = mf.fhashdict.get("DIST", {}).keys()
4275 for filename in distlist:
4276 if not os.path.exists(
4277 os.path.join(mysettings["DISTDIR"], filename)):
4278 auto_assumed.append(filename)
4280 mytree = os.path.realpath(
4281 os.path.dirname(os.path.dirname(mysettings["O"])))
4282 cp = os.path.sep.join(mysettings["O"].split(os.path.sep)[-2:])
4283 pkgs = myportdb.cp_list(cp, mytree=mytree)
4285 writemsg_stdout(" digest.assumed" + portage.output.colorize("WARN",
4286 str(len(auto_assumed)).rjust(18)) + "\n")
4287 for pkg_key in pkgs:
4288 fetchlist = myportdb.getFetchMap(pkg_key, mytree=mytree)
4289 pv = pkg_key.split("/")[1]
4290 for filename in auto_assumed:
4291 if filename in fetchlist:
4293 " %s::%s\n" % (pv, filename))
4296 _doebuild_manifest_exempt_depend -= 1
4298 def digestParseFile(myfilename, mysettings=None):
4299 """(filename) -- Parses a given file for entries matching:
4300 <checksumkey> <checksum_hex_string> <filename> <filesize>
4301 Ignores lines that don't start with a valid checksum identifier
4302 and returns a dict with the filenames as keys and {checksumkey:checksum}
4304 DEPRECATED: this function is now only a compability wrapper for
4305 portage.manifest.Manifest()."""
4307 mysplit = myfilename.split(os.sep)
4308 if mysplit[-2] == "files" and mysplit[-1].startswith("digest-"):
4309 pkgdir = os.sep + os.sep.join(mysplit[:-2]).strip(os.sep)
4310 elif mysplit[-1] == "Manifest":
4311 pkgdir = os.sep + os.sep.join(mysplit[:-1]).strip(os.sep)
4313 if mysettings is None:
4315 mysettings = config(clone=settings)
4317 return Manifest(pkgdir, mysettings["DISTDIR"]).getDigests()
4319 def digestcheck(myfiles, mysettings, strict=0, justmanifest=0):
4320 """Verifies checksums. Assumes all files have been downloaded.
4321 DEPRECATED: this is now only a compability wrapper for
4322 portage.manifest.Manifest()."""
4323 if mysettings.get("EBUILD_SKIP_MANIFEST") == "1":
4325 pkgdir = mysettings["O"]
4326 manifest_path = os.path.join(pkgdir, "Manifest")
4327 if not os.path.exists(manifest_path):
4328 writemsg("!!! Manifest file not found: '%s'\n" % manifest_path,
4334 mf = Manifest(pkgdir, mysettings["DISTDIR"])
4335 manifest_empty = True
4336 for d in mf.fhashdict.itervalues():
4338 manifest_empty = False
4341 writemsg("!!! Manifest is empty: '%s'\n" % manifest_path,
4347 eout = portage.output.EOutput()
4348 eout.quiet = mysettings.get("PORTAGE_QUIET", None) == "1"
4350 if strict and "PORTAGE_PARALLEL_FETCHONLY" not in mysettings:
4351 eout.ebegin("checking ebuild checksums ;-)")
4352 mf.checkTypeHashes("EBUILD")
4354 eout.ebegin("checking auxfile checksums ;-)")
4355 mf.checkTypeHashes("AUX")
4357 eout.ebegin("checking miscfile checksums ;-)")
4358 mf.checkTypeHashes("MISC", ignoreMissingFiles=True)
4361 eout.ebegin("checking %s ;-)" % f)
4362 mf.checkFileHashes(mf.findFile(f), f)
4366 writemsg("\n!!! Missing digest for %s\n" % str(e), noiselevel=-1)
4368 except portage.exception.FileNotFound, e:
4370 writemsg("\n!!! A file listed in the Manifest could not be found: %s\n" % str(e),
4373 except portage.exception.DigestException, e:
4375 writemsg("\n!!! Digest verification failed:\n", noiselevel=-1)
4376 writemsg("!!! %s\n" % e.value[0], noiselevel=-1)
4377 writemsg("!!! Reason: %s\n" % e.value[1], noiselevel=-1)
4378 writemsg("!!! Got: %s\n" % e.value[2], noiselevel=-1)
4379 writemsg("!!! Expected: %s\n" % e.value[3], noiselevel=-1)
4381 # Make sure that all of the ebuilds are actually listed in the Manifest.
4382 for f in os.listdir(pkgdir):
4383 if f.endswith(".ebuild") and not mf.hasFile("EBUILD", f):
4384 writemsg("!!! A file is not listed in the Manifest: '%s'\n" % \
4385 os.path.join(pkgdir, f), noiselevel=-1)
4388 """ epatch will just grab all the patches out of a directory, so we have to
4389 make sure there aren't any foreign files that it might grab."""
4390 filesdir = os.path.join(pkgdir, "files")
4391 for parent, dirs, files in os.walk(filesdir):
4393 if d.startswith(".") or d == "CVS":
4396 if f.startswith("."):
4398 f = os.path.join(parent, f)[len(filesdir) + 1:]
4399 file_type = mf.findFile(f)
4400 if file_type != "AUX" and not f.startswith("digest-"):
4401 writemsg("!!! A file is not listed in the Manifest: '%s'\n" % \
4402 os.path.join(filesdir, f), noiselevel=-1)
4407 # parse actionmap to spawn ebuild with the appropriate args
4408 def spawnebuild(mydo, actionmap, mysettings, debug, alwaysdep=0,
4409 logfile=None, fd_pipes=None, returnpid=False):
4410 if not returnpid and \
4411 (alwaysdep or "noauto" not in mysettings.features):
4412 # process dependency first
4413 if "dep" in actionmap[mydo]:
4414 retval = spawnebuild(actionmap[mydo]["dep"], actionmap,
4415 mysettings, debug, alwaysdep=alwaysdep, logfile=logfile,
4416 fd_pipes=fd_pipes, returnpid=returnpid)
4420 eapi = mysettings["EAPI"]
4422 if mydo == "configure" and eapi in ("0", "1", "2_pre1"):
4425 if mydo == "prepare" and eapi in ("0", "1", "2_pre1", "2_pre2"):
4428 kwargs = actionmap[mydo]["args"]
4429 mysettings["EBUILD_PHASE"] = mydo
4430 _doebuild_exit_status_unlink(
4431 mysettings.get("EBUILD_EXIT_STATUS_FILE"))
4434 phase_retval = spawn(actionmap[mydo]["cmd"] % mydo,
4435 mysettings, debug=debug, logfile=logfile,
4436 fd_pipes=fd_pipes, returnpid=returnpid, **kwargs)
4438 mysettings["EBUILD_PHASE"] = ""
4442 msg = _doebuild_exit_status_check(mydo, mysettings)
4445 from textwrap import wrap
4446 from portage.elog.messages import eerror
4447 for l in wrap(msg, 72):
4448 eerror(l, phase=mydo, key=mysettings.mycpv)
4450 _post_phase_userpriv_perms(mysettings)
4451 if mydo == "install":
4452 _check_build_log(mysettings)
4453 if phase_retval == os.EX_OK:
4454 phase_retval = _post_src_install_checks(mysettings)
4456 if mydo == "test" and phase_retval != os.EX_OK and \
4457 "test-fail-continue" in mysettings.features:
4458 phase_retval = os.EX_OK
4462 _post_phase_cmds = {
4466 "install_symlink_html_docs"],
4471 "preinst_selinux_labels",
4472 "preinst_suid_scan",
4476 "postinst_bsdflags"]
4479 def _post_phase_userpriv_perms(mysettings):
4480 if "userpriv" in mysettings.features and secpass >= 2:
4481 """ Privileged phases may have left files that need to be made
4482 writable to a less privileged user."""
4483 apply_recursive_permissions(mysettings["T"],
4484 uid=portage_uid, gid=portage_gid, dirmode=070, dirmask=0,
4485 filemode=060, filemask=0)
4487 def _post_src_install_checks(mysettings):
4488 _post_src_install_uid_fix(mysettings)
4489 global _post_phase_cmds
4490 retval = _spawn_misc_sh(mysettings, _post_phase_cmds["install"])
4491 if retval != os.EX_OK:
4492 writemsg("!!! install_qa_check failed; exiting.\n",
def _check_build_log(mysettings, out=None):
	"""
	Search the content of $PORTAGE_LOG_FILE if it exists
	and generate the following QA Notices when appropriate:

	  * Automake "maintainer mode"
	  * bash "command not found"
	  * helper (do*/new*) called on a file that does not exist
	  * Unrecognized configure options
	  * make jobserver unavailable
	"""
	logfile = mysettings.get("PORTAGE_LOG_FILE")
		f = open(logfile, 'rb')
	except EnvironmentError:

	am_maintainer_mode = []
	bash_command_not_found = []
	bash_command_not_found_re = re.compile(
		r'(.*): line (\d*): (.*): command not found$')
	# /configure scripts legitimately probe for missing commands.
	command_not_found_exclude_re = re.compile(r'/configure: line ')
	helper_missing_file = []
	helper_missing_file_re = re.compile(
		r'^!!! (do|new).*: .* does not exist$')

	configure_opts_warn = []
	configure_opts_warn_re = re.compile(
		r'^configure: WARNING: [Uu]nrecognized options: ')
	am_maintainer_mode_re = re.compile(r'/missing --run ')
	# autoheader/makeinfo reruns are harmless, so don't flag them.
	am_maintainer_mode_exclude_re = \
		re.compile(r'/missing --run (autoheader|makeinfo)')

	make_jobserver_re = \
		re.compile(r'g?make\[\d+\]: warning: jobserver unavailable:')

		if am_maintainer_mode_re.search(line) is not None and \
			am_maintainer_mode_exclude_re.search(line) is None:
			am_maintainer_mode.append(line.rstrip("\n"))

		if bash_command_not_found_re.match(line) is not None and \
			command_not_found_exclude_re.search(line) is None:
			bash_command_not_found.append(line.rstrip("\n"))

		if helper_missing_file_re.match(line) is not None:
			helper_missing_file.append(line.rstrip("\n"))

		if configure_opts_warn_re.match(line) is not None:
			configure_opts_warn.append(line.rstrip("\n"))

		if make_jobserver_re.match(line) is not None:
			make_jobserver.append(line.rstrip("\n"))

	from portage.elog.messages import eqawarn
	def _eqawarn(lines):
		# Emit each collected line as a QA warning for the install phase.
			eqawarn(line, phase="install", key=mysettings.mycpv, out=out)
	from textwrap import wrap

	if am_maintainer_mode:
		msg = ["QA Notice: Automake \"maintainer mode\" detected:"]
		msg.extend("\t" + line for line in am_maintainer_mode)
			"If you patch Makefile.am, " + \
			"configure.in, or configure.ac then you " + \
			"should use autotools.eclass and " + \
			"eautomake or eautoreconf. Exceptions " + \
			"are limited to system packages " + \
			"for which it is impossible to run " + \
			"autotools during stage building. " + \
			"See http://www.gentoo.org/p" + \
			"roj/en/qa/autofailure.xml for more information.",

	if bash_command_not_found:
		msg = ["QA Notice: command not found:"]
		msg.extend("\t" + line for line in bash_command_not_found)

	if helper_missing_file:
		msg = ["QA Notice: file does not exist:"]
		# Strip the leading "!!! " prefix from each matched line.
		msg.extend("\t" + line[4:] for line in helper_missing_file)

	if configure_opts_warn:
		msg = ["QA Notice: Unrecognized configure options:"]
		msg.extend("\t" + line for line in configure_opts_warn)

		msg = ["QA Notice: make jobserver unavailable:"]
		msg.extend("\t" + line for line in make_jobserver)
def _post_src_install_uid_fix(mysettings):
	"""
	Files in $D with user and group bits that match the "portage"
	user or group are automatically mapped to PORTAGE_INST_UID and
	PORTAGE_INST_GID if necessary. The chown system call may clear
	S_ISUID and S_ISGID bits, so those bits are restored if
	necessary.
	"""
	inst_uid = int(mysettings["PORTAGE_INST_UID"])
	inst_gid = int(mysettings["PORTAGE_INST_GID"])
	# Walk the entire image directory, examining both dirs and files.
	for parent, dirs, files in os.walk(mysettings["D"]):
		for fname in chain(dirs, files):
			fpath = os.path.join(parent, fname)
			mystat = os.lstat(fpath)
			# Nothing to do unless owner or group is "portage".
			if mystat.st_uid != portage_uid and \
				mystat.st_gid != portage_gid:
			if mystat.st_uid == portage_uid:
			if mystat.st_gid == portage_gid:
			# stat_cached avoids a redundant lstat; mode is passed so
			# SUID/SGID bits cleared by chown get restored.
			apply_secpass_permissions(fpath, uid=myuid, gid=mygid,
				mode=mystat.st_mode, stat_cached=mystat,
def _post_pkg_preinst_cmd(mysettings):
	"""
	Post phase logic and tasks that have been factored out of
	ebuild.sh. Call preinst_mask last so that INSTALL_MASK can
	be used to wipe out any gmon.out files created during
	previous functions (in case any tools were built with -pg
	in CFLAGS).
	"""
	portage_bin_path = mysettings["PORTAGE_BIN_PATH"]
	misc_sh_binary = os.path.join(portage_bin_path,
		os.path.basename(MISC_SH_BINARY))

	# Clear the phase so misc-functions.sh runs outside any phase context.
	mysettings["EBUILD_PHASE"] = ""
	global _post_phase_cmds
	myargs = [_shell_quote(misc_sh_binary)] + _post_phase_cmds["preinst"]
def _post_pkg_postinst_cmd(mysettings):
	"""
	Post phase logic and tasks that have been factored out of
	ebuild.sh.
	"""
	portage_bin_path = mysettings["PORTAGE_BIN_PATH"]
	misc_sh_binary = os.path.join(portage_bin_path,
		os.path.basename(MISC_SH_BINARY))

	# Clear the phase so misc-functions.sh runs outside any phase context.
	mysettings["EBUILD_PHASE"] = ""
	global _post_phase_cmds
	myargs = [_shell_quote(misc_sh_binary)] + _post_phase_cmds["postinst"]
def _spawn_misc_sh(mysettings, commands, **kwargs):
	"""
	Spawn misc-functions.sh with the given commands, logging output to
	PORTAGE_LOG_FILE when set and reporting abnormal shell exits.

	@param mysettings: the ebuild config
	@type mysettings: config
	@param commands: a list of function names to call in misc-functions.sh
	@type commands: list
	@returns: the return value from the spawn() call
	"""
	# Note: PORTAGE_BIN_PATH may differ from the global
	# constant when portage is reinstalling itself.
	portage_bin_path = mysettings["PORTAGE_BIN_PATH"]
	misc_sh_binary = os.path.join(portage_bin_path,
		os.path.basename(MISC_SH_BINARY))
	mycommand = " ".join([_shell_quote(misc_sh_binary)] + commands)
	# Remove any stale exit status file before spawning, so a leftover
	# file can't mask an abnormal exit of this invocation.
	_doebuild_exit_status_unlink(
		mysettings.get("EBUILD_EXIT_STATUS_FILE"))
	debug = mysettings.get("PORTAGE_DEBUG") == "1"
	logfile = mysettings.get("PORTAGE_LOG_FILE")
	mydo = mysettings["EBUILD_PHASE"]
		rval = spawn(mycommand, mysettings, debug=debug,
			logfile=logfile, **kwargs)

	msg = _doebuild_exit_status_check(mydo, mysettings)
		from textwrap import wrap
		from portage.elog.messages import eerror
		# Report the abnormal-exit diagnosis, wrapped to 72 columns.
		for l in wrap(msg, 72):
			eerror(l, phase=mydo, key=mysettings.mycpv)
# EAPIs that are still recognized but slated for removal.
_deprecated_eapis = frozenset(("2_pre1", "2_pre2", "2_pre3"))

def _eapi_is_deprecated(eapi):
	"""Return True if the given EAPI string is deprecated."""
	return eapi in _deprecated_eapis
def eapi_is_supported(eapi):
	# Normalize: callers may pass ints or strings with whitespace.
	eapi = str(eapi).strip()

	if _eapi_is_deprecated(eapi):

	# NOTE(review): this compares against portage.const.EAPI; the
	# conversion/validation performed by the elided lines above is not
	# visible here — verify eapi is comparable at this point.
	return eapi <= portage.const.EAPI
def doebuild_environment(myebuild, mydo, myroot, mysettings, debug, use_cache, mydbapi):
	"""
	Populate mysettings with the variables an ebuild phase needs
	(CATEGORY, P/PN/PV/PR/PVR, WORKDIR, D, T, PATH adjustments,
	PORTAGE_BUILDDIR, KV, color map, etc.) for the given ebuild path
	and phase name (mydo).
	"""
	ebuild_path = os.path.abspath(myebuild)
	pkg_dir = os.path.dirname(ebuild_path)

	if "CATEGORY" in mysettings.configdict["pkg"]:
		cat = mysettings.configdict["pkg"]["CATEGORY"]
		# Fall back to deriving the category from the directory layout.
		cat = os.path.basename(normalize_path(os.path.join(pkg_dir, "..")))
	# Strip the ".ebuild" suffix (7 characters) to get ${PF}.
	mypv = os.path.basename(ebuild_path)[:-7]
	mycpv = cat+"/"+mypv
	mysplit=pkgsplit(mypv,silent=0)
		raise portage.exception.IncorrectParameter(
			"Invalid ebuild path: '%s'" % myebuild)

	# Make a backup of PORTAGE_TMPDIR prior to calling config.reset()
	# so that the caller can override it.
	tmpdir = mysettings["PORTAGE_TMPDIR"]

	if mydo != "depend" and mycpv != mysettings.mycpv:
		"""For performance reasons, setcpv only triggers reset when it
		detects a package-specific change in config. For the ebuild
		environment, a reset call is forced in order to ensure that the
		latest env.d variables are used."""
		mysettings.reset(use_cache=use_cache)
		mysettings.setcpv(mycpv, use_cache=use_cache, mydb=mydbapi)

	# config.reset() might have reverted a change made by the caller,
	# so restore it to it's original value.
	mysettings["PORTAGE_TMPDIR"] = tmpdir

	mysettings.pop("EBUILD_PHASE", None) # remove from backupenv
	mysettings["EBUILD_PHASE"] = mydo

	mysettings["PORTAGE_MASTER_PID"] = str(os.getpid())

	# We are disabling user-specific bashrc files.
	mysettings["BASH_ENV"] = INVALID_ENV_FILE

	if debug: # Otherwise it overrides emerge's settings.
		# We have no other way to set debug... debug can't be passed in
		# due to how it's coded... Don't overwrite this so we can use it.
		mysettings["PORTAGE_DEBUG"] = "1"

	mysettings["ROOT"] = myroot
	mysettings["STARTDIR"] = getcwd()
	mysettings["EBUILD"] = ebuild_path
	mysettings["O"] = pkg_dir
	mysettings.configdict["pkg"]["CATEGORY"] = cat
	mysettings["FILESDIR"] = pkg_dir+"/files"
	mysettings["PF"] = mypv

	# Resolve symlinks so sandbox and path comparisons see real paths.
	mysettings["PORTDIR"] = os.path.realpath(mysettings["PORTDIR"])
	mysettings["DISTDIR"] = os.path.realpath(mysettings["DISTDIR"])
	mysettings["RPMDIR"] = os.path.realpath(mysettings["RPMDIR"])

	mysettings["ECLASSDIR"] = mysettings["PORTDIR"]+"/eclass"
	mysettings["SANDBOX_LOG"] = mycpv.replace("/", "_-_")

	mysettings["PROFILE_PATHS"] = "\n".join(mysettings.profiles)
	mysettings["P"] = mysplit[0]+"-"+mysplit[1]
	mysettings["PN"] = mysplit[0]
	mysettings["PV"] = mysplit[1]
	mysettings["PR"] = mysplit[2]

	if portage.util.noiselimit < 0:
		mysettings["PORTAGE_QUIET"] = "1"

	if mydo != "depend":
		# Metadata vars such as EAPI and RESTRICT are
		# set by the above config.setcpv() call.
		eapi = mysettings["EAPI"]
		if not eapi_is_supported(eapi):
			# can't do anything with this.
			raise portage.exception.UnsupportedAPIException(mycpv, eapi)
			mysettings["PORTAGE_RESTRICT"] = " ".join(flatten(
				portage.dep.use_reduce(portage.dep.paren_reduce(
				mysettings["RESTRICT"]),
				uselist=mysettings["PORTAGE_USE"].split())))
		except portage.exception.InvalidDependString:
			# RESTRICT is validated again inside doebuild, so let this go
			mysettings["PORTAGE_RESTRICT"] = ""

	if mysplit[2] == "r0":
		mysettings["PVR"]=mysplit[1]
		mysettings["PVR"]=mysplit[1]+"-"+mysplit[2]

	if "PATH" in mysettings:
		mysplit=mysettings["PATH"].split(":")

	# Note: PORTAGE_BIN_PATH may differ from the global constant
	# when portage is reinstalling itself.
	portage_bin_path = mysettings["PORTAGE_BIN_PATH"]
	if portage_bin_path not in mysplit:
		mysettings["PATH"] = portage_bin_path + ":" + mysettings["PATH"]

	# Sandbox needs cannonical paths.
	mysettings["PORTAGE_TMPDIR"] = os.path.realpath(
		mysettings["PORTAGE_TMPDIR"])
	mysettings["BUILD_PREFIX"] = mysettings["PORTAGE_TMPDIR"]+"/portage"
	mysettings["PKG_TMPDIR"] = mysettings["PORTAGE_TMPDIR"]+"/binpkgs"

	# Package {pre,post}inst and {pre,post}rm may overlap, so they must have separate
	# locations in order to prevent interference.
	if mydo in ("unmerge", "prerm", "postrm", "cleanrm"):
		mysettings["PORTAGE_BUILDDIR"] = os.path.join(
			mysettings["PKG_TMPDIR"],
			mysettings["CATEGORY"], mysettings["PF"])
		mysettings["PORTAGE_BUILDDIR"] = os.path.join(
			mysettings["BUILD_PREFIX"],
			mysettings["CATEGORY"], mysettings["PF"])

	mysettings["HOME"] = os.path.join(mysettings["PORTAGE_BUILDDIR"], "homedir")
	mysettings["WORKDIR"] = os.path.join(mysettings["PORTAGE_BUILDDIR"], "work")
	mysettings["D"] = os.path.join(mysettings["PORTAGE_BUILDDIR"], "image") + os.sep
	mysettings["T"] = os.path.join(mysettings["PORTAGE_BUILDDIR"], "temp")

	mysettings["PORTAGE_BASHRC"] = os.path.join(
		mysettings["PORTAGE_CONFIGROOT"], EBUILD_SH_ENV_FILE.lstrip(os.path.sep))
	mysettings["EBUILD_EXIT_STATUS_FILE"] = os.path.join(
		mysettings["PORTAGE_BUILDDIR"], ".exit_status")

	#set up KV variable -- DEP SPEEDUP :: Don't waste time. Keep var persistent.
	if mydo != "depend" and "KV" not in mysettings:
		mykv,err1=ExtractKernelVersion(os.path.join(myroot, "usr/src/linux"))
			# Regular source tree
			mysettings["KV"]=mykv
		mysettings.backup_changes("KV")

	# Allow color.map to control colors associated with einfo, ewarn, etc...
	for c in ("GOOD", "WARN", "BAD", "HILITE", "BRACKET"):
		mycolors.append("%s=$'%s'" % (c, portage.output.codes[c]))
	mysettings["PORTAGE_COLORMAP"] = "\n".join(mycolors)
def prepare_build_dirs(myroot, mysettings, cleanup):
	"""
	Create (and optionally clean) the build directory hierarchy for the
	package configured in mysettings, applying portage user/group
	ownership so unprivileged phases can still write to it. Delegates
	WORKDIR and FEATURES-specific dirs to _prepare_workdir and
	_prepare_features_dirs.
	"""
	clean_dirs = [mysettings["HOME"]]

	# We enable cleanup when we want to make sure old cruft (such as the old
	# environment) doesn't interfere with the current phase.
		clean_dirs.append(mysettings["T"])

	for clean_dir in clean_dirs:
			shutil.rmtree(clean_dir)
			# A missing directory is fine; anything else is reported.
			if errno.ENOENT == oe.errno:
			elif errno.EPERM == oe.errno:
				writemsg("%s\n" % oe, noiselevel=-1)
				writemsg("Operation Not Permitted: rmtree('%s')\n" % \
					clean_dir, noiselevel=-1)

	def makedirs(dir_path):
		# Best-effort mkdir -p: EEXIST is fine, EPERM is reported.
			os.makedirs(dir_path)
			if errno.EEXIST == oe.errno:
			elif errno.EPERM == oe.errno:
				writemsg("%s\n" % oe, noiselevel=-1)
				writemsg("Operation Not Permitted: makedirs('%s')\n" % \
					dir_path, noiselevel=-1)

	mysettings["PKG_LOGDIR"] = os.path.join(mysettings["T"], "logging")

	# Parent and grandparent of PORTAGE_BUILDDIR (category dir and prefix).
	mydirs = [os.path.dirname(mysettings["PORTAGE_BUILDDIR"])]
	mydirs.append(os.path.dirname(mydirs[-1]))

		for mydir in mydirs:
			portage.util.ensure_dirs(mydir)
			portage.util.apply_secpass_permissions(mydir,
				gid=portage_gid, uid=portage_uid, mode=070, mask=0)
		for dir_key in ("PORTAGE_BUILDDIR", "HOME", "PKG_LOGDIR", "T"):
			"""These directories don't necessarily need to be group writable.
			However, the setup phase is commonly run as a privileged user prior
			to the other phases being run by an unprivileged user. Currently,
			we use the portage group to ensure that the unprivleged user still
			has write access to these directories in any case."""
			portage.util.ensure_dirs(mysettings[dir_key], mode=0775)
			portage.util.apply_secpass_permissions(mysettings[dir_key],
				uid=portage_uid, gid=portage_gid)
	except portage.exception.PermissionDenied, e:
		writemsg("Permission Denied: %s\n" % str(e), noiselevel=-1)
	except portage.exception.OperationNotPermitted, e:
		writemsg("Operation Not Permitted: %s\n" % str(e), noiselevel=-1)
	except portage.exception.FileNotFound, e:
		writemsg("File Not Found: '%s'\n" % str(e), noiselevel=-1)

	_prepare_workdir(mysettings)
	_prepare_features_dirs(mysettings)
def _adjust_perms_msg(settings, msg):
	# Report a permissions-adjustment warning on stderr and, when
	# running in the background, also append it to PORTAGE_LOG_FILE.
		writemsg(msg, noiselevel=-1)

	background = settings.get("PORTAGE_BACKGROUND") == "1"
	log_path = settings.get("PORTAGE_LOG_FILE")

	if background and log_path is not None:
			log_file = open(log_path, 'a')

			if log_file is not None:
def _prepare_features_dirs(mysettings):
	# Create and (when newly created, or when FEATURES=userpriv demands
	# it) fix permissions on the per-FEATURE directories configured
	# below (ccache, distcc). On failure the feature is disabled.

		"basedir_var":"CCACHE_DIR",
		"default_dir":os.path.join(mysettings["PORTAGE_TMPDIR"], "ccache"),
		"always_recurse":False},
		"basedir_var":"DISTCC_DIR",
		"default_dir":os.path.join(mysettings["BUILD_PREFIX"], ".distcc"),
		"subdirs":("lock", "state"),
		"always_recurse":True}

	restrict = mysettings.get("PORTAGE_RESTRICT","").split()
	from portage.data import secpass
	# droppriv: build will run as the unprivileged portage user.
	droppriv = secpass >= 2 and \
		"userpriv" in mysettings.features and \
		"userpriv" not in restrict
	for myfeature, kwargs in features_dirs.iteritems():
		if myfeature in mysettings.features:
			basedir = mysettings[kwargs["basedir_var"]]
				basedir = kwargs["default_dir"]
				mysettings[kwargs["basedir_var"]] = basedir
				mydirs = [mysettings[kwargs["basedir_var"]]]
				if "subdirs" in kwargs:
					for subdir in kwargs["subdirs"]:
						mydirs.append(os.path.join(basedir, subdir))
				for mydir in mydirs:
					modified = portage.util.ensure_dirs(mydir)
					# Generally, we only want to apply permissions for
					# initial creation. Otherwise, we don't know exactly what
					# permissions the user wants, so should leave them as-is.
					droppriv_fix = False
						if st.st_gid != portage_gid or \
							not dirmode == (stat.S_IMODE(st.st_mode) & dirmode):
						if not droppriv_fix:
							# Check permissions of files in the directory.
							for filename in os.listdir(mydir):
									subdir_st = os.lstat(
										os.path.join(mydir, filename))
									if subdir_st.st_gid != portage_gid or \
										((stat.S_ISDIR(subdir_st.st_mode) and \
										not dirmode == (stat.S_IMODE(subdir_st.st_mode) & dirmode))):
						_adjust_perms_msg(mysettings,
							colorize("WARN", " * ") + \
							"Adjusting permissions " + \
							"for FEATURES=userpriv: '%s'\n" % mydir)
						_adjust_perms_msg(mysettings,
							colorize("WARN", " * ") + \
							"Adjusting permissions " + \
							"for FEATURES=%s: '%s'\n" % (myfeature, mydir))
					if modified or kwargs["always_recurse"] or droppriv_fix:
							raise # The feature is disabled if a single error
							# occurs during permissions adjustment.
						if not apply_recursive_permissions(mydir,
							gid=portage_gid, dirmode=dirmode, dirmask=modemask,
							filemode=filemode, filemask=modemask, onerror=onerror):
							raise portage.exception.OperationNotPermitted(
								"Failed to apply recursive permissions for the portage group.")
			except portage.exception.PortageException, e:
				# Disable the feature rather than failing the build.
				mysettings.features.remove(myfeature)
				mysettings["FEATURES"] = " ".join(mysettings.features)
				writemsg("!!! %s\n" % str(e), noiselevel=-1)
				writemsg("!!! Failed resetting perms on %s='%s'\n" % \
					(kwargs["basedir_var"], basedir), noiselevel=-1)
				writemsg("!!! Disabled FEATURES='%s'\n" % myfeature,
def _prepare_workdir(mysettings):
	# Apply PORTAGE_WORKDIR_MODE to $WORKDIR and set up PORT_LOGDIR /
	# PORTAGE_LOG_FILE for this build.
	mode = mysettings["PORTAGE_WORKDIR_MODE"]
		parsed_mode = int(mode, 8)

		# Reject modes with bits outside the permission mask.
		if parsed_mode & 07777 != parsed_mode:
			raise ValueError("Invalid file mode: %s" % mode)

		workdir_mode = parsed_mode
		writemsg("!!! PORTAGE_WORKDIR_MODE is unset, using %s.\n" % oct(workdir_mode))
	except ValueError, e:
		writemsg("%s\n" % e)
		writemsg("!!! Unable to parse PORTAGE_WORKDIR_MODE='%s', using %s.\n" % \
			(mysettings["PORTAGE_WORKDIR_MODE"], oct(workdir_mode)))
	mysettings["PORTAGE_WORKDIR_MODE"] = oct(workdir_mode)
		apply_secpass_permissions(mysettings["WORKDIR"],
			uid=portage_uid, gid=portage_gid, mode=workdir_mode)
	except portage.exception.FileNotFound:
		pass # ebuild.sh will create it

	# Treat an empty PORT_LOGDIR the same as unset.
	if mysettings.get("PORT_LOGDIR", "") == "":
		while "PORT_LOGDIR" in mysettings:
			del mysettings["PORT_LOGDIR"]
	if "PORT_LOGDIR" in mysettings:
			modified = portage.util.ensure_dirs(mysettings["PORT_LOGDIR"])
				apply_secpass_permissions(mysettings["PORT_LOGDIR"],
					uid=portage_uid, gid=portage_gid, mode=02770)
		except portage.exception.PortageException, e:
			# Logging is disabled (not fatal) when the log dir is unusable.
			writemsg("!!! %s\n" % str(e), noiselevel=-1)
			writemsg("!!! Permission issues with PORT_LOGDIR='%s'\n" % \
				mysettings["PORT_LOGDIR"], noiselevel=-1)
			writemsg("!!! Disabling logging.\n", noiselevel=-1)
			while "PORT_LOGDIR" in mysettings:
				del mysettings["PORT_LOGDIR"]
	if "PORT_LOGDIR" in mysettings and \
		os.access(mysettings["PORT_LOGDIR"], os.W_OK):
		# .logid pins a stable timestamp for the log file name across
		# phases of the same build.
		logid_path = os.path.join(mysettings["PORTAGE_BUILDDIR"], ".logid")
		if not os.path.exists(logid_path):
			f = open(logid_path, "w")
		logid_time = time.strftime("%Y%m%d-%H%M%S",
			time.gmtime(os.stat(logid_path).st_mtime))
		mysettings["PORTAGE_LOG_FILE"] = os.path.join(
			mysettings["PORT_LOGDIR"], "%s:%s:%s.log" % \
			(mysettings["CATEGORY"], mysettings["PF"], logid_time))
		del logid_path, logid_time
		# When sesandbox is enabled, only log if PORT_LOGDIR is explicitly
		# enabled since it is possible that local SELinux security policies
		# do not allow ouput to be piped out of the sesandbox domain.
		if not (mysettings.selinux_enabled() and \
			"sesandbox" in mysettings.features):
			mysettings["PORTAGE_LOG_FILE"] = os.path.join(
				mysettings["T"], "build.log")
def _doebuild_exit_status_check(mydo, settings):
	"""
	Returns an error string if the shell appeared
	to exit unsuccessfully, None otherwise.
	"""
	# ebuild.sh creates EBUILD_EXIT_STATUS_FILE on a clean exit; its
	# absence means the shell died before finishing the phase.
	exit_status_file = settings.get("EBUILD_EXIT_STATUS_FILE")
	if not exit_status_file or \
		os.path.exists(exit_status_file):

	msg = ("The ebuild phase '%s' has exited " % mydo) + \
	"unexpectedly. This type of behavior " + \
	"is known to be triggered " + \
	"by things such as failed variable " + \
	"assignments (bug #190128) or bad substitution " + \
	"errors (bug #200313). Normally, before exiting, bash should " + \
	"have displayed an error message above. If bash did not " + \
	"produce an error message above, it's possible " + \
	"that the ebuild has called `exit` when it " + \
	"should have called `die` instead. This behavior may also " + \
	"be triggered by a corrupt bash binary or a hardware " + \
	"problem such as memory or cpu malfunction. If the problem is not " + \
	"reproducible or it appears to occur randomly, then it is likely " + \
	"to be triggered by a hardware problem. " + \
	"If you suspect a hardware problem then you should " + \
	"try some basic hardware diagnostics such as memtest. " + \
	"Please do not report this as a bug unless it is consistently " + \
	"reproducible and you are sure that your bash binary and hardware " + \
	"are functioning properly."
def _doebuild_exit_status_check_and_log(settings, mydo, retval):
	# On phase failure, diagnose an abnormal shell exit and report the
	# message (wrapped to 72 columns) through the elog eerror channel.
	if retval != os.EX_OK:
		msg = _doebuild_exit_status_check(mydo, settings)
			from textwrap import wrap
			from portage.elog.messages import eerror
			for l in wrap(msg, 72):
				eerror(l, phase=mydo, key=settings.mycpv)
def _doebuild_exit_status_unlink(exit_status_file):
	"""
	Remove the exit status file if it is set, then double check to make
	sure it really doesn't exist and raise an OSError if it still does
	(it shouldn't).
	"""
	if not exit_status_file:
		os.unlink(exit_status_file)

	# Second unlink is deliberately unguarded: a file that survived the
	# first attempt indicates a real problem and should raise.
	if os.path.exists(exit_status_file):
		os.unlink(exit_status_file)
# Non-zero while digest/manifest/help phases run, temporarily exempting
# Manifest verification (incremented inside doebuild).
_doebuild_manifest_exempt_depend = 0

# Most recently verified Manifest object, reused to avoid re-checking
# the same Manifest repeatedly during a regen.
_doebuild_manifest_cache = None
# Ebuild paths that failed checksum / Manifest verification.
_doebuild_broken_ebuilds = set()
# Manifest paths found inconsistent with the on-disk ebuilds.
_doebuild_broken_manifests = set()
5175 def doebuild(myebuild, mydo, myroot, mysettings, debug=0, listonly=0,
5176 fetchonly=0, cleanup=0, dbkey=None, use_cache=1, fetchall=0, tree=None,
5177 mydbapi=None, vartree=None, prev_mtimes=None,
5178 fd_pipes=None, returnpid=False):
5181 Wrapper function that invokes specific ebuild phases through the spawning
5184 @param myebuild: name of the ebuild to invoke the phase on (CPV)
5185 @type myebuild: String
5186 @param mydo: Phase to run
5188 @param myroot: $ROOT (usually '/', see man make.conf)
5189 @type myroot: String
5190 @param mysettings: Portage Configuration
5191 @type mysettings: instance of portage.config
5192 @param debug: Turns on various debug information (eg, debug for spawn)
5193 @type debug: Boolean
5194 @param listonly: Used to wrap fetch(); passed such that fetch only lists files required.
5195 @type listonly: Boolean
5196 @param fetchonly: Used to wrap fetch(); passed such that files are only fetched (no other actions)
5197 @type fetchonly: Boolean
5198 @param cleanup: Passed to prepare_build_dirs (TODO: what does it do?)
5199 @type cleanup: Boolean
5200 @param dbkey: A dict (usually keys and values from the depend phase, such as KEYWORDS, USE, etc..)
5201 @type dbkey: Dict or String
5202 @param use_cache: Enables the cache
5203 @type use_cache: Boolean
5204 @param fetchall: Used to wrap fetch(), fetches all URI's (even ones invalid due to USE conditionals)
5205 @type fetchall: Boolean
5206 @param tree: Which tree to use ('vartree','porttree','bintree', etc..), defaults to 'porttree'
5208 @param mydbapi: a dbapi instance to pass to various functions; this should be a portdbapi instance.
5209 @type mydbapi: portdbapi instance
5210 @param vartree: A instance of vartree; used for aux_get calls, defaults to db[myroot]['vartree']
5211 @type vartree: vartree instance
5212 @param prev_mtimes: A dict of { filename:mtime } keys used by merge() to do config_protection
5213 @type prev_mtimes: dictionary
5219 Most errors have an accompanying error message.
5221 listonly and fetchonly are only really necessary for operations involving 'fetch'
5222 prev_mtimes are only necessary for merge operations.
5223 Other variables may not be strictly required, many have defaults that are set inside of doebuild.
5228 writemsg("Warning: tree not specified to doebuild\n")
5232 # chunked out deps for each phase, so that ebuild binary can use it
5233 # to collapse targets down.
5236 "unpack": ["setup"],
5237 "prepare": ["unpack"],
5238 "configure": ["prepare"],
5239 "compile":["configure"],
5240 "test": ["compile"],
5243 "package":["install"],
5247 mydbapi = db[myroot][tree].dbapi
5249 if vartree is None and mydo in ("merge", "qmerge", "unmerge"):
5250 vartree = db[myroot]["vartree"]
5252 features = mysettings.features
5253 noauto = "noauto" in features
5254 from portage.data import secpass
5256 clean_phases = ("clean", "cleanrm")
5257 validcommands = ["help","clean","prerm","postrm","cleanrm","preinst","postinst",
5258 "config", "info", "setup", "depend",
5259 "fetch", "fetchall", "digest",
5260 "unpack", "prepare", "configure", "compile", "test",
5261 "install", "rpm", "qmerge", "merge",
5262 "package","unmerge", "manifest"]
5264 if mydo not in validcommands:
5265 validcommands.sort()
5266 writemsg("!!! doebuild: '%s' is not one of the following valid commands:" % mydo,
5268 for vcount in range(len(validcommands)):
5270 writemsg("\n!!! ", noiselevel=-1)
5271 writemsg(validcommands[vcount].ljust(11), noiselevel=-1)
5272 writemsg("\n", noiselevel=-1)
5275 if mydo == "fetchall":
5279 parallel_fetchonly = mydo in ("fetch", "fetchall") and \
5280 "PORTAGE_PARALLEL_FETCHONLY" in mysettings
5282 if mydo not in clean_phases and not os.path.exists(myebuild):
5283 writemsg("!!! doebuild: %s not found for %s\n" % (myebuild, mydo),
5287 global _doebuild_manifest_exempt_depend
5289 if "strict" in features and \
5290 "digest" not in features and \
5291 tree == "porttree" and \
5292 mydo not in ("digest", "manifest", "help") and \
5293 not _doebuild_manifest_exempt_depend:
5294 # Always verify the ebuild checksums before executing it.
5295 global _doebuild_manifest_cache, _doebuild_broken_ebuilds, \
5296 _doebuild_broken_ebuilds
5298 if myebuild in _doebuild_broken_ebuilds:
5301 pkgdir = os.path.dirname(myebuild)
5302 manifest_path = os.path.join(pkgdir, "Manifest")
5304 # Avoid checking the same Manifest several times in a row during a
5305 # regen with an empty cache.
5306 if _doebuild_manifest_cache is None or \
5307 _doebuild_manifest_cache.getFullname() != manifest_path:
5308 _doebuild_manifest_cache = None
5309 if not os.path.exists(manifest_path):
5310 out = portage.output.EOutput()
5311 out.eerror("Manifest not found for '%s'" % (myebuild,))
5312 _doebuild_broken_ebuilds.add(myebuild)
5314 mf = Manifest(pkgdir, mysettings["DISTDIR"])
5317 mf = _doebuild_manifest_cache
5320 mf.checkFileHashes("EBUILD", os.path.basename(myebuild))
5322 out = portage.output.EOutput()
5323 out.eerror("Missing digest for '%s'" % (myebuild,))
5324 _doebuild_broken_ebuilds.add(myebuild)
5326 except portage.exception.FileNotFound:
5327 out = portage.output.EOutput()
5328 out.eerror("A file listed in the Manifest " + \
5329 "could not be found: '%s'" % (myebuild,))
5330 _doebuild_broken_ebuilds.add(myebuild)
5332 except portage.exception.DigestException, e:
5333 out = portage.output.EOutput()
5334 out.eerror("Digest verification failed:")
5335 out.eerror("%s" % e.value[0])
5336 out.eerror("Reason: %s" % e.value[1])
5337 out.eerror("Got: %s" % e.value[2])
5338 out.eerror("Expected: %s" % e.value[3])
5339 _doebuild_broken_ebuilds.add(myebuild)
5342 if mf.getFullname() in _doebuild_broken_manifests:
5345 if mf is not _doebuild_manifest_cache:
5347 # Make sure that all of the ebuilds are
5348 # actually listed in the Manifest.
5349 for f in os.listdir(pkgdir):
5350 if f.endswith(".ebuild") and not mf.hasFile("EBUILD", f):
5351 f = os.path.join(pkgdir, f)
5352 if f not in _doebuild_broken_ebuilds:
5353 out = portage.output.EOutput()
5354 out.eerror("A file is not listed in the " + \
5355 "Manifest: '%s'" % (f,))
5356 _doebuild_broken_manifests.add(manifest_path)
5359 # Only cache it if the above stray files test succeeds.
5360 _doebuild_manifest_cache = mf
5362 def exit_status_check(retval):
5363 if retval != os.EX_OK:
5365 msg = _doebuild_exit_status_check(mydo, mysettings)
5368 from textwrap import wrap
5369 from portage.elog.messages import eerror
5370 for l in wrap(msg, 72):
5371 eerror(l, phase=mydo, key=mysettings.mycpv)
5374 # Note: PORTAGE_BIN_PATH may differ from the global
5375 # constant when portage is reinstalling itself.
5376 portage_bin_path = mysettings["PORTAGE_BIN_PATH"]
5377 ebuild_sh_binary = os.path.join(portage_bin_path,
5378 os.path.basename(EBUILD_SH_BINARY))
5379 misc_sh_binary = os.path.join(portage_bin_path,
5380 os.path.basename(MISC_SH_BINARY))
5383 builddir_lock = None
5388 if mydo in ("digest", "manifest", "help"):
5389 # Temporarily exempt the depend phase from manifest checks, in case
5390 # aux_get calls trigger cache generation.
5391 _doebuild_manifest_exempt_depend += 1
5393 # If we don't need much space and we don't need a constant location,
5394 # we can temporarily override PORTAGE_TMPDIR with a random temp dir
5395 # so that there's no need for locking and it can be used even if the
5396 # user isn't in the portage group.
5397 if mydo in ("info",):
5398 from tempfile import mkdtemp
5400 tmpdir_orig = mysettings["PORTAGE_TMPDIR"]
5401 mysettings["PORTAGE_TMPDIR"] = tmpdir
5403 doebuild_environment(myebuild, mydo, myroot, mysettings, debug,
5406 if mydo in clean_phases:
5407 retval = spawn(_shell_quote(ebuild_sh_binary) + " clean",
5408 mysettings, debug=debug, fd_pipes=fd_pipes, free=1,
5409 logfile=None, returnpid=returnpid)
5412 # get possible slot information from the deps file
5413 if mydo == "depend":
5414 writemsg("!!! DEBUG: dbkey: %s\n" % str(dbkey), 2)
5415 droppriv = "userpriv" in mysettings.features
5417 mypids = spawn(_shell_quote(ebuild_sh_binary) + " depend",
5418 mysettings, fd_pipes=fd_pipes, returnpid=True,
5421 elif isinstance(dbkey, dict):
5422 mysettings["dbkey"] = ""
5425 0:sys.stdin.fileno(),
5426 1:sys.stdout.fileno(),
5427 2:sys.stderr.fileno(),
5429 mypids = spawn(_shell_quote(ebuild_sh_binary) + " depend",
5431 fd_pipes=fd_pipes, returnpid=True, droppriv=droppriv)
5432 os.close(pw) # belongs exclusively to the child process now
5436 mybytes.append(os.read(pr, maxbytes))
5440 mybytes = "".join(mybytes)
5442 for k, v in izip(auxdbkeys, mybytes.splitlines()):
5444 retval = os.waitpid(mypids[0], 0)[1]
5445 portage.process.spawned_pids.remove(mypids[0])
5446 # If it got a signal, return the signal that was sent, but
5447 # shift in order to distinguish it from a return value. (just
5448 # like portage.process.spawn() would do).
5450 retval = (retval & 0xff) << 8
5452 # Otherwise, return its exit code.
5453 retval = retval >> 8
5454 if retval == os.EX_OK and len(dbkey) != len(auxdbkeys):
5455 # Don't trust bash's returncode if the
5456 # number of lines is incorrect.
5460 mysettings["dbkey"] = dbkey
5462 mysettings["dbkey"] = \
5463 os.path.join(mysettings.depcachedir, "aux_db_key_temp")
5465 return spawn(_shell_quote(ebuild_sh_binary) + " depend",
5469 # Validate dependency metadata here to ensure that ebuilds with invalid
5470 # data are never installed via the ebuild command. Don't bother when
5471 # returnpid == True since there's no need to do this every time emerge
5474 rval = _validate_deps(mysettings, myroot, mydo, mydbapi)
5475 if rval != os.EX_OK:
5478 if "PORTAGE_TMPDIR" not in mysettings or \
5479 not os.path.isdir(mysettings["PORTAGE_TMPDIR"]):
5480 writemsg("The directory specified in your " + \
5481 "PORTAGE_TMPDIR variable, '%s',\n" % \
5482 mysettings.get("PORTAGE_TMPDIR", ""), noiselevel=-1)
5483 writemsg("does not exist. Please create this directory or " + \
5484 "correct your PORTAGE_TMPDIR setting.\n", noiselevel=-1)
5487 # as some people use a separate PORTAGE_TMPDIR mount
5488 # we prefer that as the checks below would otherwise be pointless
5490 if os.path.exists(os.path.join(mysettings["PORTAGE_TMPDIR"], "portage")):
5491 checkdir = os.path.join(mysettings["PORTAGE_TMPDIR"], "portage")
5493 checkdir = mysettings["PORTAGE_TMPDIR"]
5495 if not os.access(checkdir, os.W_OK):
5496 writemsg("%s is not writable.\n" % checkdir + \
5497 "Likely cause is that you've mounted it as readonly.\n" \
5501 from tempfile import NamedTemporaryFile
5502 fd = NamedTemporaryFile(prefix="exectest-", dir=checkdir)
5503 os.chmod(fd.name, 0755)
5504 if not os.access(fd.name, os.X_OK):
5505 writemsg("Can not execute files in %s\n" % checkdir + \
5506 "Likely cause is that you've mounted it with one of the\n" + \
5507 "following mount options: 'noexec', 'user', 'users'\n\n" + \
5508 "Please make sure that portage can execute files in this directory.\n" \
5515 if mydo == "unmerge":
5516 return unmerge(mysettings["CATEGORY"],
5517 mysettings["PF"], myroot, mysettings, vartree=vartree)
5519 # Build directory creation isn't required for any of these.
5520 have_build_dirs = False
5521 if not parallel_fetchonly and mydo not in ("digest", "help", "manifest"):
5522 mystatus = prepare_build_dirs(myroot, mysettings, cleanup)
5525 have_build_dirs = True
5527 # emerge handles logging externally
5529 # PORTAGE_LOG_FILE is set by the
5530 # above prepare_build_dirs() call.
5531 logfile = mysettings.get("PORTAGE_LOG_FILE")
5534 env_file = os.path.join(mysettings["T"], "environment")
5538 env_stat = os.stat(env_file)
5540 if e.errno != errno.ENOENT:
5544 saved_env = os.path.join(
5545 os.path.dirname(myebuild), "environment.bz2")
5546 if not os.path.isfile(saved_env):
5550 "bzip2 -dc %s > %s" % \
5551 (_shell_quote(saved_env),
5552 _shell_quote(env_file)))
5554 env_stat = os.stat(env_file)
5556 if e.errno != errno.ENOENT:
5559 if os.WIFEXITED(retval) and \
5560 os.WEXITSTATUS(retval) == os.EX_OK and \
5561 env_stat and env_stat.st_size > 0:
5562 # This is a signal to ebuild.sh, so that it knows to filter
5563 # out things like SANDBOX_{DENY,PREDICT,READ,WRITE} that
5564 # would be preserved between normal phases.
5565 open(env_file + ".raw", "w")
5567 writemsg(("!!! Error extracting saved " + \
5568 "environment: '%s'\n") % \
5569 saved_env, noiselevel=-1)
5573 if e.errno != errno.ENOENT:
5580 for var in ("ARCH", ):
5581 value = mysettings.get(var)
5582 if value and value.strip():
5584 msg = ("%s is not set... " % var) + \
5585 ("Are you missing the '%setc/make.profile' symlink? " % \
5586 mysettings["PORTAGE_CONFIGROOT"]) + \
5587 "Is the symlink correct? " + \
5588 "Is your portage tree complete?"
5589 from portage.elog.messages import eerror
5590 from textwrap import wrap
5591 for line in wrap(msg, 70):
5592 eerror(line, phase="setup", key=mysettings.mycpv)
5593 from portage.elog import elog_process
5594 elog_process(mysettings.mycpv, mysettings)
5596 del env_file, env_stat, saved_env
5597 _doebuild_exit_status_unlink(
5598 mysettings.get("EBUILD_EXIT_STATUS_FILE"))
5600 mysettings.pop("EBUILD_EXIT_STATUS_FILE", None)
5602 # if any of these are being called, handle them -- running them out of
5603 # the sandbox -- and stop now.
5605 return spawn(_shell_quote(ebuild_sh_binary) + " " + mydo,
5606 mysettings, debug=debug, free=1, logfile=logfile)
5607 elif mydo == "setup":
5609 _shell_quote(ebuild_sh_binary) + " " + mydo, mysettings,
5610 debug=debug, free=1, logfile=logfile, fd_pipes=fd_pipes,
5611 returnpid=returnpid)
5614 retval = exit_status_check(retval)
5616 """ Privileged phases may have left files that need to be made
5617 writable to a less privileged user."""
5618 apply_recursive_permissions(mysettings["T"],
5619 uid=portage_uid, gid=portage_gid, dirmode=070, dirmask=0,
5620 filemode=060, filemask=0)
5622 elif mydo == "preinst":
5623 phase_retval = spawn(
5624 _shell_quote(ebuild_sh_binary) + " " + mydo,
5625 mysettings, debug=debug, free=1, logfile=logfile,
5626 fd_pipes=fd_pipes, returnpid=returnpid)
5631 phase_retval = exit_status_check(phase_retval)
5632 if phase_retval == os.EX_OK:
5633 _doebuild_exit_status_unlink(
5634 mysettings.get("EBUILD_EXIT_STATUS_FILE"))
5635 mysettings.pop("EBUILD_PHASE", None)
5636 phase_retval = spawn(
5637 " ".join(_post_pkg_preinst_cmd(mysettings)),
5638 mysettings, debug=debug, free=1, logfile=logfile)
5639 phase_retval = exit_status_check(phase_retval)
5640 if phase_retval != os.EX_OK:
5641 writemsg("!!! post preinst failed; exiting.\n",
5644 elif mydo == "postinst":
5645 phase_retval = spawn(
5646 _shell_quote(ebuild_sh_binary) + " " + mydo,
5647 mysettings, debug=debug, free=1, logfile=logfile,
5648 fd_pipes=fd_pipes, returnpid=returnpid)
5653 phase_retval = exit_status_check(phase_retval)
5654 if phase_retval == os.EX_OK:
5655 _doebuild_exit_status_unlink(
5656 mysettings.get("EBUILD_EXIT_STATUS_FILE"))
5657 mysettings.pop("EBUILD_PHASE", None)
5658 phase_retval = spawn(" ".join(_post_pkg_postinst_cmd(mysettings)),
5659 mysettings, debug=debug, free=1, logfile=logfile)
5660 phase_retval = exit_status_check(phase_retval)
5661 if phase_retval != os.EX_OK:
5662 writemsg("!!! post postinst failed; exiting.\n",
5665 elif mydo in ("prerm", "postrm", "config", "info"):
5667 _shell_quote(ebuild_sh_binary) + " " + mydo,
5668 mysettings, debug=debug, free=1, logfile=logfile,
5669 fd_pipes=fd_pipes, returnpid=returnpid)
5674 retval = exit_status_check(retval)
5677 mycpv = "/".join((mysettings["CATEGORY"], mysettings["PF"]))
5679 emerge_skip_distfiles = returnpid
5680 # Only try and fetch the files if we are going to need them ...
5681 # otherwise, if user has FEATURES=noauto and they run `ebuild clean
5682 # unpack compile install`, we will try and fetch 4 times :/
5683 need_distfiles = not emerge_skip_distfiles and \
5684 (mydo in ("fetch", "unpack") or \
5685 mydo not in ("digest", "manifest") and "noauto" not in features)
5686 alist = mysettings.configdict["pkg"].get("A")
5687 aalist = mysettings.configdict["pkg"].get("AA")
5688 if need_distfiles or alist is None or aalist is None:
5689 # Make sure we get the correct tree in case there are overlays.
5690 mytree = os.path.realpath(
5691 os.path.dirname(os.path.dirname(mysettings["O"])))
5692 useflags = mysettings["PORTAGE_USE"].split()
5694 alist = mydbapi.getFetchMap(mycpv, useflags=useflags,
5696 aalist = mydbapi.getFetchMap(mycpv, mytree=mytree)
5697 except portage.exception.InvalidDependString, e:
5698 writemsg("!!! %s\n" % str(e), noiselevel=-1)
5699 writemsg("!!! Invalid SRC_URI for '%s'.\n" % mycpv,
5703 mysettings.configdict["pkg"]["A"] = " ".join(alist)
5704 mysettings.configdict["pkg"]["AA"] = " ".join(aalist)
5706 alist = set(alist.split())
5707 aalist = set(aalist.split())
5708 if ("mirror" in features) or fetchall:
5716 # Files are already checked inside fetch(),
5717 # so do not check them again.
5721 if not emerge_skip_distfiles and \
5722 need_distfiles and not fetch(
5723 fetchme, mysettings, listonly=listonly, fetchonly=fetchonly):
5726 if mydo == "fetch" and listonly:
5730 if mydo == "manifest":
5731 return not digestgen(aalist, mysettings, overwrite=1,
5732 manifestonly=1, myportdb=mydbapi)
5733 elif mydo == "digest":
5734 return not digestgen(aalist, mysettings, overwrite=1,
5736 elif "digest" in mysettings.features:
5737 digestgen(aalist, mysettings, overwrite=0, myportdb=mydbapi)
5738 except portage.exception.PermissionDenied, e:
5739 writemsg("!!! Permission Denied: %s\n" % (e,), noiselevel=-1)
5740 if mydo in ("digest", "manifest"):
5743 # See above comment about fetching only when needed
5744 if not emerge_skip_distfiles and \
5745 not digestcheck(checkme, mysettings, "strict" in features):
5751 # remove PORTAGE_ACTUAL_DISTDIR once cvs/svn is supported via SRC_URI
5752 if (mydo != "setup" and "noauto" not in features) or mydo == "unpack":
5753 orig_distdir = mysettings["DISTDIR"]
5754 mysettings["PORTAGE_ACTUAL_DISTDIR"] = orig_distdir
5755 edpath = mysettings["DISTDIR"] = \
5756 os.path.join(mysettings["PORTAGE_BUILDDIR"], "distdir")
5757 portage.util.ensure_dirs(edpath, gid=portage_gid, mode=0755)
5759 # Remove any unexpected files or directories.
5760 for x in os.listdir(edpath):
5761 symlink_path = os.path.join(edpath, x)
5762 st = os.lstat(symlink_path)
5763 if x in alist and stat.S_ISLNK(st.st_mode):
5765 if stat.S_ISDIR(st.st_mode):
5766 shutil.rmtree(symlink_path)
5768 os.unlink(symlink_path)
5770 # Check for existing symlinks and recreate if necessary.
5772 symlink_path = os.path.join(edpath, x)
5773 target = os.path.join(orig_distdir, x)
5775 link_target = os.readlink(symlink_path)
5777 os.symlink(target, symlink_path)
5779 if link_target != target:
5780 os.unlink(symlink_path)
5781 os.symlink(target, symlink_path)
5783 #initial dep checks complete; time to process main commands
5785 restrict = mysettings["PORTAGE_RESTRICT"].split()
5786 nosandbox = (("userpriv" in features) and \
5787 ("usersandbox" not in features) and \
5788 "userpriv" not in restrict and \
5789 "nouserpriv" not in restrict)
5790 if nosandbox and ("userpriv" not in features or \
5791 "userpriv" in restrict or \
5792 "nouserpriv" in restrict):
5793 nosandbox = ("sandbox" not in features and \
5794 "usersandbox" not in features)
5796 sesandbox = mysettings.selinux_enabled() and \
5797 "sesandbox" in mysettings.features
5799 droppriv = "userpriv" in mysettings.features and \
5800 "userpriv" not in restrict and \
5803 fakeroot = "fakeroot" in mysettings.features
5805 ebuild_sh = _shell_quote(ebuild_sh_binary) + " %s"
5806 misc_sh = _shell_quote(misc_sh_binary) + " dyn_%s"
5808 # args are for the to spawn function
5810 "setup": {"cmd":ebuild_sh, "args":{"droppriv":0, "free":1, "sesandbox":0, "fakeroot":0}},
5811 "unpack": {"cmd":ebuild_sh, "args":{"droppriv":droppriv, "free":0, "sesandbox":sesandbox, "fakeroot":0}},
5812 "prepare": {"cmd":ebuild_sh, "args":{"droppriv":droppriv, "free":0, "sesandbox":sesandbox, "fakeroot":0}},
5813 "configure":{"cmd":ebuild_sh, "args":{"droppriv":droppriv, "free":nosandbox, "sesandbox":sesandbox, "fakeroot":0}},
5814 "compile": {"cmd":ebuild_sh, "args":{"droppriv":droppriv, "free":nosandbox, "sesandbox":sesandbox, "fakeroot":0}},
5815 "test": {"cmd":ebuild_sh, "args":{"droppriv":droppriv, "free":nosandbox, "sesandbox":sesandbox, "fakeroot":0}},
5816 "install": {"cmd":ebuild_sh, "args":{"droppriv":0, "free":0, "sesandbox":sesandbox, "fakeroot":fakeroot}},
5817 "rpm": {"cmd":misc_sh, "args":{"droppriv":0, "free":0, "sesandbox":0, "fakeroot":fakeroot}},
5818 "package": {"cmd":misc_sh, "args":{"droppriv":0, "free":0, "sesandbox":0, "fakeroot":fakeroot}},
5821 # merge the deps in so we have again a 'full' actionmap
5822 # be glad when this can die.
5824 if len(actionmap_deps.get(x, [])):
5825 actionmap[x]["dep"] = ' '.join(actionmap_deps[x])
5827 if mydo in actionmap:
5828 if mydo == "package":
5829 # Make sure the package directory exists before executing
5830 # this phase. This can raise PermissionDenied if
5831 # the current user doesn't have write access to $PKGDIR.
5832 parent_dir = os.path.join(mysettings["PKGDIR"],
5833 mysettings["CATEGORY"])
5834 portage.util.ensure_dirs(parent_dir)
5835 if not os.access(parent_dir, os.W_OK):
5836 raise portage.exception.PermissionDenied(
5837 "access('%s', os.W_OK)" % parent_dir)
5838 retval = spawnebuild(mydo,
5839 actionmap, mysettings, debug, logfile=logfile,
5840 fd_pipes=fd_pipes, returnpid=returnpid)
5841 elif mydo=="qmerge":
5842 # check to ensure install was run. this *only* pops up when users
5843 # forget it and are using ebuild
5844 if not os.path.exists(
5845 os.path.join(mysettings["PORTAGE_BUILDDIR"], ".installed")):
5846 writemsg("!!! mydo=qmerge, but the install phase has not been run\n",
5849 # qmerge is a special phase that implies noclean.
5850 if "noclean" not in mysettings.features:
5851 mysettings.features.append("noclean")
5852 #qmerge is specifically not supposed to do a runtime dep check
5854 mysettings["CATEGORY"], mysettings["PF"], mysettings["D"],
5855 os.path.join(mysettings["PORTAGE_BUILDDIR"], "build-info"),
5856 myroot, mysettings, myebuild=mysettings["EBUILD"], mytree=tree,
5857 mydbapi=mydbapi, vartree=vartree, prev_mtimes=prev_mtimes)
5859 retval = spawnebuild("install", actionmap, mysettings, debug,
5860 alwaysdep=1, logfile=logfile, fd_pipes=fd_pipes,
5861 returnpid=returnpid)
5862 retval = exit_status_check(retval)
5863 if retval != os.EX_OK:
5864 # The merge phase handles this already. Callers don't know how
5865 # far this function got, so we have to call elog_process() here
5866 # so that it's only called once.
5867 from portage.elog import elog_process
5868 elog_process(mysettings.mycpv, mysettings)
5869 if retval == os.EX_OK:
5870 retval = merge(mysettings["CATEGORY"], mysettings["PF"],
5871 mysettings["D"], os.path.join(mysettings["PORTAGE_BUILDDIR"],
5872 "build-info"), myroot, mysettings,
5873 myebuild=mysettings["EBUILD"], mytree=tree, mydbapi=mydbapi,
5874 vartree=vartree, prev_mtimes=prev_mtimes)
5876 print "!!! Unknown mydo:",mydo
5884 mysettings["PORTAGE_TMPDIR"] = tmpdir_orig
5885 shutil.rmtree(tmpdir)
5887 portage.locks.unlockdir(builddir_lock)
5889 # Make sure that DISTDIR is restored to it's normal value before we return!
5890 if "PORTAGE_ACTUAL_DISTDIR" in mysettings:
5891 mysettings["DISTDIR"] = mysettings["PORTAGE_ACTUAL_DISTDIR"]
5892 del mysettings["PORTAGE_ACTUAL_DISTDIR"]
5896 if os.stat(logfile).st_size == 0:
5901 if mydo in ("digest", "manifest", "help"):
5902 # If necessary, depend phase has been triggered by aux_get calls
5903 # and the exemption is no longer needed.
5904 _doebuild_manifest_exempt_depend -= 1
# _validate_deps(mysettings, myroot, mydo, mydbapi)
# Validates the dependency-related metadata of mysettings.mycpv: it pulls
# DEPEND/RDEPEND/PDEPEND plus LICENSE/PROPERTIES/PROVIDE/RESTRICT/SRC_URI and
# SLOT from mydbapi.aux_get(), runs dep_check() over the *DEPEND values and
# portage.dep.use_reduce() over the misc keys, and accumulates human-readable
# error messages.  The caller compares the result against os.EX_OK, so the
# function presumably returns os.EX_OK on success and an error status when
# mydo is not in invalid_dep_exempt_phases -- the actual return statements
# fall on lines missing from this listing; TODO confirm against the full file.
# NOTE(review): this listing has gaps in the original line numbering (5907,
# 5916, 5919, 5923-5924, 5928, 5931-5933, 5939, 5942-5943, 5946, 5950-5955),
# so several statements (e.g. the msgs list initialization, the FakeTree
# __init__ body, the try:/for loops wrapping use_reduce) are not visible here.
5906 def _validate_deps(mysettings, myroot, mydo, mydbapi):
# Phases for which invalid dependency metadata is tolerated (cleanup/removal
# phases must be able to proceed even on broken ebuilds).
5908 invalid_dep_exempt_phases = \
5909 set(["clean", "cleanrm", "help", "prerm", "postrm"])
5910 dep_keys = ["DEPEND", "RDEPEND", "PDEPEND"]
5911 misc_keys = ["LICENSE", "PROPERTIES", "PROVIDE", "RESTRICT", "SRC_URI"]
5912 other_keys = ["SLOT"]
5913 all_keys = dep_keys + misc_keys + other_keys
# Fetch all metadata values in a single aux_get() call and zip them back
# into a key -> value dict (izip is the Python 2 lazy zip from itertools).
5914 metadata = dict(izip(all_keys,
5915 mydbapi.aux_get(mysettings.mycpv, all_keys)))
# Minimal stand-in for a real tree object: dep_check() only needs an object
# exposing a dbapi, so a fakedbapi-backed FakeTree is enough here.
# (The __init__ body is on a line missing from this listing.)
5917 class FakeTree(object):
5918 def __init__(self, mydb):
5920 dep_check_trees = {myroot:{}}
5921 dep_check_trees[myroot]["porttree"] = \
5922 FakeTree(fakedbapi(settings=mysettings))
# Syntax-check each *DEPEND string; myuse="all" makes dep_check evaluate
# every USE conditional so all branches are parsed.
5925 for dep_type in dep_keys:
5926 mycheck = dep_check(metadata[dep_type], None, mysettings,
5927 myuse="all", myroot=myroot, trees=dep_check_trees)
5929 msgs.append(" %s: %s\n %s\n" % (
5930 dep_type, metadata[dep_type], mycheck[1]))
# Misc keys only need to survive paren_reduce/use_reduce; InvalidDependString
# indicates malformed syntax and is reported rather than raised.
5934 portage.dep.use_reduce(
5935 portage.dep.paren_reduce(metadata[k]), matchall=True)
5936 except portage.exception.InvalidDependString, e:
5937 msgs.append(" %s: %s\n %s\n" % (
5938 k, metadata[k], str(e)))
# An empty SLOT is always an error for ebuild metadata.
5940 if not metadata["SLOT"]:
5941 msgs.append(" SLOT is undefined\n")
# Report every accumulated problem through the logging-aware writemsg_level.
5944 portage.util.writemsg_level("Error(s) in metadata for '%s':\n" % \
5945 (mysettings.mycpv,), level=logging.ERROR, noiselevel=-1)
5947 portage.util.writemsg_level(x,
5948 level=logging.ERROR, noiselevel=-1)
# Only non-exempt phases turn metadata errors into a failure status.
5949 if mydo not in invalid_dep_exempt_phases:
def _movefile(src, dest, **kwargs):
	"""Move src to dest via movefile(), converting a failure (None
	return value) into a raised PortageException."""
	result = movefile(src, dest, **kwargs)
	if result is None:
		raise portage.exception.PortageException(
			"mv '%s' '%s'" % (src, dest))
# movefile(src, dest, ...) -> mtime of dest on success, None on failure.
# Moves a single filesystem entry while preserving ownership, mode and mtime,
# handling symlinks, optional hardlink-based merging, SELinux contexts,
# FreeBSD file flags (bsd_chflags), cross-device moves (copy+rename on EXDEV)
# and special files (fallback to the external MOVE_BINARY).
# NOTE(review): this listing is missing many original lines (gaps such as
# 5968, 5970, 5973-5976, 5978, 5981-5985, 5989-5991, 5997, 5999-6000,
# 6002-6004, 6006, 6008-6009, 6011, 6017-6018, 6021, 6028, 6032-6035, 6044,
# 6046, 6051-6052, 6054, 6056-6059, 6061, 6065-6070, 6073-6074, 6076, 6078,
# 6080, 6085-6086, 6088-6089, 6092, 6095, 6098, 6100, 6103-6105, 6107,
# 6109, 6111, 6114, 6116-6117, 6120, 6123, 6125, 6128-6133, 6135, 6138,
# 6141, 6144, 6146, 6150-6152, 6154, 6156-6158) -- several try: headers,
# returns and the hardlinked/renamefailed bookkeeping are therefore not
# visible; the comments below describe only what the visible lines show.
5962 def movefile(src, dest, newmtime=None, sstat=None, mysettings=None,
5963 hardlink_candidates=None):
5964 """moves a file from src to dest, preserving all permissions and attributes; mtime will
5965 be preserved even when moving across filesystems. Returns true on success and false on
5966 failure. Move is atomic."""
5967 #print "movefile("+str(src)+","+str(dest)+","+str(newmtime)+","+str(sstat)+")"
# Fall back to the global settings object when no config was passed in.
5969 if mysettings is None:
5971 mysettings = settings
5972 selinux_enabled = mysettings.selinux_enabled()
# (The try: that lstat()s src when sstat is None is on missing lines.)
5977 except SystemExit, e:
5979 except Exception, e:
5980 print "!!! Stating source file failed... movefile()"
# Determine whether dest already exists; on failure stat its parent so that
# FreeBSD flag handling below still has something to inspect.
5986 dstat=os.lstat(dest)
5987 except (OSError, IOError):
5988 dstat=os.lstat(os.path.dirname(dest))
# FreeBSD: clear immutable/append-only style flags on dest and its parent
# before the move; pflags is restored at the end of the function.
5992 if destexists and dstat.st_flags != 0:
5993 bsd_chflags.lchflags(dest, 0)
5994 # Use normal stat/chflags for the parent since we want to
5995 # follow any symlinks to the real parent directory.
5996 pflags = os.stat(os.path.dirname(dest)).st_flags
5998 bsd_chflags.chflags(os.path.dirname(dest), 0)
# If dest is itself a symlink it is presumably unlinked first (the
# surrounding try body is on missing lines).
6001 if stat.S_ISLNK(dstat[stat.ST_MODE]):
6005 except SystemExit, e:
6007 except Exception, e:
# src is a symlink: recreate it at dest instead of moving data.
6010 if stat.S_ISLNK(sstat[stat.ST_MODE]):
6012 target=os.readlink(src)
# Strip the image-directory prefix ${D} so links merged from the build
# image point at the live filesystem location.
6013 if mysettings and mysettings["D"]:
6014 if target.find(mysettings["D"])==0:
6015 target=target[len(mysettings["D"]):]
6016 if destexists and not stat.S_ISDIR(dstat[stat.ST_MODE]):
6019 sid = selinux.get_lsid(src)
6020 selinux.secure_symlink(target,dest,sid)
6022 os.symlink(target,dest)
6023 lchown(dest,sstat[stat.ST_UID],sstat[stat.ST_GID])
6024 # utime() only works on the target of a symlink, so it's not
6025 # possible to perserve mtime on symlinks.
6026 return os.lstat(dest)[stat.ST_MTIME]
6027 except SystemExit, e:
6029 except Exception, e:
6030 print "!!! failed to properly create symlink:"
6031 print "!!!",dest,"->",target
6036 # Since identical files might be merged to multiple filesystems,
6037 # so os.link() calls might fail for some paths, so try them all.
6038 # For atomic replacement, first create the link as a temp file
6039 # and them use os.rename() to replace the destination.
6040 if hardlink_candidates:
6041 head, tail = os.path.split(dest)
# Per-pid temp name keeps concurrent merges from colliding.
6042 hardlink_tmp = os.path.join(head, ".%s._portage_merge_.%s" % \
6043 (tail, os.getpid()))
6045 os.unlink(hardlink_tmp)
6047 if e.errno != errno.ENOENT:
6048 writemsg("!!! Failed to remove hardlink temp file: %s\n" % \
6049 (hardlink_tmp,), noiselevel=-1)
6050 writemsg("!!! %s\n" % (e,), noiselevel=-1)
# Try each candidate until one os.link() succeeds, then atomically
# rename the temp link over dest.
6053 for hardlink_src in hardlink_candidates:
6055 os.link(hardlink_src, hardlink_tmp)
6060 os.rename(hardlink_tmp, dest)
6062 writemsg("!!! Failed to rename %s to %s\n" % \
6063 (hardlink_tmp, dest), noiselevel=-1)
6064 writemsg("!!! %s\n" % (e,), noiselevel=-1)
# Fast path: same device (or SELinux rename helper) -> plain rename.
6071 renamefailed = False
6072 if not hardlinked and (selinux_enabled or sstat.st_dev == dstat.st_dev):
6075 ret=selinux.secure_rename(src,dest)
6077 ret=os.rename(src,dest)
6079 except SystemExit, e:
6081 except Exception, e:
# Python 2 idiom: e[0] is the OSError errno; anything other than
# EXDEV (cross-device) is a real failure.
6082 if e[0]!=errno.EXDEV:
6083 # Some random error.
6084 print "!!! Failed to move",src,"to",dest
6087 # Invalid cross-device-link 'bind' mounted or actually Cross-Device
# Cross-device regular file: copy to dest+"#new", then rename over dest
# so the visible replacement is still atomic.
6090 if stat.S_ISREG(sstat[stat.ST_MODE]):
6091 try: # For safety copy then move it over.
6093 selinux.secure_copy(src,dest+"#new")
6094 selinux.secure_rename(dest+"#new",dest)
6096 shutil.copyfile(src,dest+"#new")
6097 os.rename(dest+"#new",dest)
6099 except SystemExit, e:
6101 except Exception, e:
6102 print '!!! copy',src,'->',dest,'failed.'
6106 #we don't yet handle special, so we need to fall back to /bin/mv
# Device nodes / fifos etc.: shell out to MOVE_BINARY; "-c" variant is
# presumably the SELinux-aware branch -- TODO confirm, the branch
# condition is on a missing line.
6108 a=commands.getstatusoutput(MOVE_BINARY+" -c -f "+"'"+src+"' '"+dest+"'")
6110 a=commands.getstatusoutput(MOVE_BINARY+" -f "+"'"+src+"' '"+dest+"'")
6112 print "!!! Failed to move special file:"
6113 print "!!! '"+src+"' to '"+dest+"'"
6115 return None # failure
# After a copy-based move, ownership/permissions must be restored by hand
# (a rename would have preserved them for free).
6118 if stat.S_ISLNK(sstat[stat.ST_MODE]):
6119 lchown(dest,sstat[stat.ST_UID],sstat[stat.ST_GID])
6121 os.chown(dest,sstat[stat.ST_UID],sstat[stat.ST_GID])
6122 os.chmod(dest, stat.S_IMODE(sstat[stat.ST_MODE])) # Sticky is reset on chown
6124 except SystemExit, e:
6126 except Exception, e:
6127 print "!!! Failed to chown/chmod/unlink in movefile()"
6134 newmtime = long(os.stat(dest).st_mtime)
# Apply the requested mtime, or carry over the source's atime/mtime.
6136 if newmtime is not None:
6137 os.utime(dest, (newmtime, newmtime))
6139 os.utime(dest, (sstat.st_atime, sstat.st_mtime))
6140 newmtime = long(sstat.st_mtime)
6142 # The utime can fail here with EPERM even though the move succeeded.
6143 # Instead of failing, use stat to return the mtime if possible.
6145 newmtime = long(os.stat(dest).st_mtime)
6147 writemsg("!!! Failed to stat in movefile()\n", noiselevel=-1)
6148 writemsg("!!! %s\n" % dest, noiselevel=-1)
6149 writemsg("!!! %s\n" % str(e), noiselevel=-1)
6153 # Restore the flags we saved before moving
6155 bsd_chflags.chflags(os.path.dirname(dest), pflags)
# merge(mycat, mypkg, pkgloc, infloc, myroot, mysettings, ...)
# Thin convenience wrapper: verifies that myroot is writable, builds a
# dblink for category/package and delegates to dblink.merge().
# NOTE(review): line 6161 of the original (the end of the parameter list) is
# missing from this listing; since the body references "scheduler", the
# signature presumably ends with "scheduler=None):" -- TODO confirm.  Lines
# 6164-6165 (the error return after the permission message) are also absent.
6159 def merge(mycat, mypkg, pkgloc, infloc, myroot, mysettings, myebuild=None,
6160 mytree=None, mydbapi=None, vartree=None, prev_mtimes=None, blockers=None,
# Refuse early when the merge root itself is not writable.
6162 if not os.access(myroot, os.W_OK):
6163 writemsg("Permission denied: access('%s', W_OK)\n" % myroot,
6166 mylink = dblink(mycat, mypkg, myroot, mysettings, treetype=mytree,
6167 vartree=vartree, blockers=blockers, scheduler=scheduler)
# dblink.merge() performs the actual image -> live-filesystem merge.
6168 return mylink.merge(pkgloc, infloc, myroot, myebuild,
6169 mydbapi=mydbapi, prev_mtimes=prev_mtimes)
# unmerge(cat, pkg, myroot, mysettings, ...)
# Wrapper around dblink.unmerge(): refreshes the preserved-libs registry,
# performs the unmerge, and (per the visible EX_OK branch) invalidates the
# vartree linkmap cache afterwards.
# NOTE(review): original lines 6176-6178 and 6184-6187 are missing from this
# listing (presumably a try:/finally or lock around the registry work and
# the function's return handling) -- TODO confirm against the full file.
6171 def unmerge(cat, pkg, myroot, mysettings, mytrimworld=1, vartree=None,
6172 ldpath_mtimes=None, scheduler=None):
6173 mylink = dblink(cat, pkg, myroot, mysettings, treetype="vartree",
6174 vartree=vartree, scheduler=scheduler)
# dblink may substitute its own vartree; use that one from here on.
6175 vartree = mylink.vartree
# Refresh the preserved-libraries registry and drop entries whose files
# no longer exist before unmerging.
6179 vartree.dbapi.plib_registry.load()
6180 vartree.dbapi.plib_registry.pruneNonExisting()
6181 retval = mylink.unmerge(trimworld=mytrimworld, cleanup=1,
6182 ldpath_mtimes=ldpath_mtimes)
6183 if retval == os.EX_OK:
# The unmerge changed the set of installed files, so any cached
# soname/consumer mapping is stale.
6188 vartree.dbapi.linkmap._clear_cache()
def getCPFromCPV(mycpv):
	"""Given a cpv ("cat/pkg-ver"), return just the cp ("cat/pkg")
	component as produced by pkgsplit()."""
	mysplit = pkgsplit(mycpv)
	return mysplit[0]
# dep_virtual(mysplit, mysettings)
# Recursively rewrites old-style virtual atoms in a paren_reduce'd dependency
# list: each virtual/* key is replaced by its provider(s) from
# mysettings.getvirtuals().  A single provider is substituted in place; from
# the visible blocker comment, multiple providers for a blocker are expanded
# as an "and" list rather than an || group.  Presumably returns the rewritten
# newsplit list -- the return statement is on a missing line; TODO confirm.
# NOTE(review): original lines 6197, 6199, 6202-6203, 6205, 6208-6209,
# 6211-6214 and 6216-6220 are absent from this listing, including the
# newsplit initialization, the loop header over mysplit, the mykey
# extraction, and the non-blocker multi-provider (||) expansion.
6195 def dep_virtual(mysplit, mysettings):
6196 "Does virtual dependency conversion"
6198 myvirtuals = mysettings.getvirtuals()
# Nested lists (parenthesized groups) are converted recursively.
6200 if isinstance(x, list):
6201 newsplit.append(dep_virtual(x, mysettings))
6204 mychoices = myvirtuals.get(mykey, None)
# Exactly one provider: simple in-place key substitution.
6206 if len(mychoices) == 1:
6207 a = x.replace(mykey, mychoices[0])
6210 # blocker needs "and" not "or(||)".
# Blocker with several providers: block each provider individually.
6215 a.append(x.replace(mykey, y))
# NOTE(review): this listing is missing a number of original lines (6231,
# 6241-6244, 6249-6250, 6252, 6258, 6261, 6266, 6271, 6275, 6278-6279, 6282,
# 6286-6291, 6294-6295, 6305-6306, 6309-6314, 6319-6322, 6325, 6330, 6333,
# 6340, 6344, 6348-6352), including the newsplit initialization, the loop
# header over mysplit, the match-atom construction, sorting of matches, and
# the final return -- the comments below cover only what is visible.
6221 def _expand_new_virtuals(mysplit, edebug, mydbapi, mysettings, myroot="/",
6222 trees=None, use_mask=None, use_force=None, **kwargs):
6223 """Recursively expand new-style virtuals so as to collapse one or more
6224 levels of indirection. In dep_zapdeps, new-style virtuals will be assigned
6225 zero cost regardless of whether or not they are currently installed. Virtual
6226 blockers are supported but only when the virtual expands to a single
6227 atom because it wouldn't necessarily make sense to block all the components
6228 of a compound virtual. When more than one new-style virtual is matched,
6229 the matches are sorted from highest to lowest versions and the atom is
6230 expanded to || ( highest match ... lowest match )."""
6232 # According to GLEP 37, RDEPEND is the only dependency type that is valid
6233 # for new-style virtuals. Repoman should enforce this.
6234 dep_keys = ["RDEPEND", "DEPEND", "PDEPEND"]
6235 portdb = trees[myroot]["porttree"].dbapi
# "repoman mode" is detected by mydbapi being a raw portdbapi instance;
# it switches USE-conditional handling to the QA evaluation below.
6236 repoman = isinstance(mydbapi, portdbapi)
6237 if kwargs["use_binaries"]:
6238 portdb = trees[myroot]["bintree"].dbapi
6239 myvirtuals = mysettings.getvirtuals()
6240 myuse = kwargs["myuse"]
# Parenthesized sub-lists are expanded recursively with identical settings.
6245 elif isinstance(x, list):
6246 newsplit.append(_expand_new_virtuals(x, edebug, mydbapi,
6247 mysettings, myroot=myroot, trees=trees, use_mask=use_mask,
6248 use_force=use_force, **kwargs))
# Coerce plain strings into Atom objects; in strict mode an unparsable
# token aborts the whole expansion with a ParseError.
6251 if not isinstance(x, portage.dep.Atom):
6253 x = portage.dep.Atom(x)
6254 except portage.exception.InvalidAtom:
6255 if portage.dep._dep_check_strict:
6256 raise portage.exception.ParseError(
6257 "invalid atom: '%s'" % x)
# Repoman: evaluate USE conditionals against the mask/force sets for QA
# purposes, rebuilding the atom without its slot then re-appending it.
6259 if repoman and x.use and x.use.conditional:
6260 evaluated_atom = portage.dep.remove_slot(x)
6262 evaluated_atom += ":%s" % x.slot
6263 evaluated_atom += str(x.use._eval_qa_conditionals(
6264 use_mask, use_force))
6265 x = portage.dep.Atom(evaluated_atom)
# Normal operation: resolve USE conditionals against the caller's USE list.
6267 if not repoman and \
6268 myuse is not None and isinstance(x, portage.dep.Atom) and x.use:
6269 if x.use.conditional:
6270 evaluated_atom = portage.dep.remove_slot(x)
6272 evaluated_atom += ":%s" % x.slot
6273 evaluated_atom += str(x.use.evaluate_conditionals(myuse))
6274 x = portage.dep.Atom(evaluated_atom)
# Non-virtual atoms pass through unchanged (append is on a missing line).
6276 mykey = dep_getkey(x)
6277 if not mykey.startswith("virtual/"):
6280 mychoices = myvirtuals.get(mykey, [])
6281 isblocker = x.startswith("!")
6283 # Virtual blockers are no longer expanded here since
6284 # the un-expanded virtual atom is more useful for
6285 # maintaining a cache of blocker atoms.
6292 matches = portdb.match(match_atom)
6293 # Use descending order to prefer higher versions.
6296 # only use new-style matches
6297 if cpv.startswith("virtual/"):
6298 pkgs.append((cpv, catpkgsplit(cpv)[1:], portdb))
6299 if not (pkgs or mychoices):
6300 # This one couldn't be expanded as a new-style virtual. Old-style
6301 # virtuals have already been expanded by dep_virtual, so this one
6302 # is unavailable and dep_zapdeps will identify it as such. The
6303 # atom is not eliminated here since it may still represent a
6304 # dependency that needs to be satisfied.
# No new-style match but exactly one old-style provider: substitute it.
6307 if not pkgs and len(mychoices) == 1:
6308 newsplit.append(x.replace(mykey, mychoices[0]))
# For each new-style virtual match, pull in its own dependency strings
# and recurse through dep_check so the indirection is collapsed.
6315 cpv, pv_split, db = y
6316 depstring = " ".join(db.aux_get(cpv, dep_keys))
6317 pkg_kwargs = kwargs.copy()
6318 if isinstance(db, portdbapi):
6323 use_split = db.aux_get(cpv, ["USE"])[0].split()
6324 pkg_kwargs["myuse"] = use_split
6326 print "Virtual Parent: ", y[0]
6327 print "Virtual Depstring:", depstring
6328 mycheck = dep_check(depstring, mydbapi, mysettings, myroot=myroot,
6329 trees=trees, **pkg_kwargs)
6331 raise portage.exception.ParseError(
6332 "%s: %s '%s'" % (y[0], mycheck[1], depstring))
# Blocker handling: only a virtual that reduces to a single non-blocker
# atom may be blocked (see the docstring above).
6334 virtual_atoms = [atom for atom in mycheck[1] \
6335 if not atom.startswith("!")]
6336 if len(virtual_atoms) == 1:
6337 # It wouldn't make sense to block all the components of a
6338 # compound virtual, so only a single atom block is allowed.
6339 a.append("!" + virtual_atoms[0])
6341 mycheck[1].append("="+y[0]) # pull in the new-style virtual
6342 a.append(mycheck[1])
6343 # Plain old-style virtuals. New-style virtuals are preferred.
6345 a.append(x.replace(mykey, y))
6346 if isblocker and not a:
6347 # Probably a compound virtual. Pass the atom through unprocessed.
# dep_eval(deplist)
# Recursively evaluates a reduced dependency list to a truth value: an
# "||"-headed list is satisfied if any element evaluates true (with the
# noted legacy exception for an empty || group), otherwise all elements
# must evaluate true.  The actual return statements fall on lines missing
# from this listing (6354-6355, 6360-6363, 6368-6371, 6373-6378); the
# presumed returns are 1/0 truth values -- TODO confirm.
6353 def dep_eval(deplist):
6356 if deplist[0]=="||":
6357 #or list; we just need one "1"
6358 for x in deplist[1:]:
# Nested groups are evaluated recursively.
6359 if isinstance(x, list):
6364 #XXX: unless there's no available atoms in the list
6365 #in which case we need to assume that everything is
6366 #okay as some ebuilds are relying on an old bug.
# A bare "||" with no alternatives is treated as satisfied (legacy bug
# compatibility, per the comment above).
6367 if len(deplist) == 1:
# AND semantics: every element (recursing into sub-lists) must hold.
6372 if isinstance(x, list):
# NOTE(review): this listing is missing many original lines (6382-6384,
# 6387-6388, 6390, 6395, 6397-6398, 6402, 6408-6409, 6413-6414, 6418, 6421,
# 6423, 6425, 6432-6436, 6438-6439, 6441-6444, 6446, 6450, 6452-6453, 6455,
# 6457, 6467, 6469, 6476-6477, 6480, 6484-6485, 6491-6493, 6495, 6501-6503,
# 6507, 6509, 6512, 6515, 6517, 6519, 6521, 6538, 6542-6543, 6550-6553,
# 6555, 6560-6561, 6567, 6571-6572, 6574-6575) -- list initializations
# (preferred, other, versions, atoms), several loop/try headers and the
# final return are not visible; comments describe only the visible logic.
6379 def dep_zapdeps(unreduced, reduced, myroot, use_binaries=0, trees=None):
6380 """Takes an unreduced and reduced deplist and removes satisfied dependencies.
6381 Returned deplist contains steps that must be taken to satisfy dependencies."""
6385 writemsg("ZapDeps -- %s\n" % (use_binaries), 2)
# Fully satisfied (or trivially empty) lists need no further action.
6386 if not reduced or unreduced == ["||"] or dep_eval(reduced):
# Non-|| level: walk children pairwise and collect what is still unresolved.
6389 if unreduced[0] != "||":
6391 for dep, satisfied in izip(unreduced, reduced):
6392 if isinstance(dep, list):
6393 unresolved += dep_zapdeps(dep, satisfied, myroot,
6394 use_binaries=use_binaries, trees=trees)
6396 unresolved.append(dep)
6399 # We're at a ( || atom ... ) type level and need to make a choice
6400 deps = unreduced[1:]
6401 satisfieds = reduced[1:]
6403 # Our preference order is for an the first item that:
6404 # a) contains all unmasked packages with the same key as installed packages
6405 # b) contains all unmasked packages
6406 # c) contains masked installed packages
6407 # d) is the first item
6410 preferred_not_installed = []
6411 preferred_any_slot = []
6412 possible_upgrades = []
6415 # Alias the trees we'll be checking availability against
# "parent" and "graph_db" are optionally injected by the resolver to let
# this function consider the in-progress dependency graph.
6416 parent = trees[myroot].get("parent")
6417 graph_db = trees[myroot].get("graph_db")
6419 if "vartree" in trees[myroot]:
6420 vardb = trees[myroot]["vartree"].dbapi
# Availability db: binary packages when use_binaries, else the porttree.
6422 mydbapi = trees[myroot]["bintree"].dbapi
6424 mydbapi = trees[myroot]["porttree"].dbapi
6426 # Sort the deps into preferred (installed) and other
6427 # with values of [[required_atom], availablility]
6428 for dep, satisfied in izip(deps, satisfieds):
6429 if isinstance(dep, list):
6430 atoms = dep_zapdeps(dep, satisfied, myroot,
6431 use_binaries=use_binaries, trees=trees)
6437 other.append((atoms, None, False))
# For each atom of this choice, record the highest available match per
# cp:SLOT; any miss marks the whole choice as not all_available.
6440 all_available = True
6445 avail_pkg = mydbapi.match(atom)
6447 avail_pkg = avail_pkg[-1] # highest (ascending order)
6448 avail_slot = "%s:%s" % (dep_getkey(atom),
6449 mydbapi.aux_get(avail_pkg, ["SLOT"])[0])
6451 all_available = False
6454 versions[avail_slot] = avail_pkg
# A "choice" is the triple used for ranking below.
6456 this_choice = (atoms, versions, all_available)
6458 # The "all installed" criterion is not version or slot specific.
6459 # If any version of a package is installed then we assume that it
6460 # is preferred over other possible packages choices.
6461 all_installed = True
6462 for atom in set([dep_getkey(atom) for atom in atoms \
6463 if atom[:1] != "!"]):
6464 # New-style virtuals have zero cost to install.
6465 if not vardb.match(atom) and not atom.startswith("virtual/"):
6466 all_installed = False
6468 all_installed_slots = False
# Slot-specific refinement of the installed check.
6470 all_installed_slots = True
6471 for slot_atom in versions:
6472 # New-style virtuals have zero cost to install.
6473 if not vardb.match(slot_atom) and \
6474 not slot_atom.startswith("virtual/"):
6475 all_installed_slots = False
# Rank the choice into one of the preference buckets (a-d above).
6478 if all_installed_slots:
6479 preferred.append(this_choice)
6481 preferred_any_slot.append(this_choice)
6482 elif graph_db is None:
6483 possible_upgrades.append(this_choice)
# With a graph_db available, prefer choices already in the dep graph.
6486 for slot_atom in versions:
6487 # New-style virtuals have zero cost to install.
6488 if not graph_db.match(slot_atom) and \
6489 not slot_atom.startswith("virtual/"):
6490 all_in_graph = False
6494 preferred_not_installed.append(this_choice)
6496 # Check if the atom would result in a direct circular
6497 # dependency and try to avoid that if it seems likely
6498 # to be unresolvable.
6499 cpv_slot_list = [parent]
6500 circular_atom = None
6504 if vardb.match(atom):
6505 # If the atom is satisfied by an installed
6506 # version then it's not a circular dep.
6508 if dep_getkey(atom) != parent.cp:
6510 if match_from_list(atom, cpv_slot_list):
6511 circular_atom = atom
6513 if circular_atom is None:
6514 preferred_not_installed.append(this_choice)
6516 other.append(this_choice)
6518 possible_upgrades.append(this_choice)
6520 other.append(this_choice)
6522 # Compare the "all_installed" choices against the "all_available" choices
6523 # for possible missed upgrades. The main purpose of this code is to find
6524 # upgrades of new-style virtuals since _expand_new_virtuals() expands them
6525 # into || ( highest version ... lowest version ). We want to prefer the
6526 # highest all_available version of the new-style virtual when there is a
6527 # lower all_installed version.
6528 preferred.extend(preferred_not_installed)
6529 preferred.extend(preferred_any_slot)
6530 preferred.extend(possible_upgrades)
6531 possible_upgrades = preferred[1:]
# Promote a later choice above an earlier one when, for every slot they
# share, the later one is a strict upgrade (pkgcmp > 0) with no downgrade.
6532 for possible_upgrade in possible_upgrades:
6533 atoms, versions, all_available = possible_upgrade
6534 myslots = set(versions)
6535 for other_choice in preferred:
6536 if possible_upgrade is other_choice:
6537 # possible_upgrade will not be promoted, so move on
6539 o_atoms, o_versions, o_all_available = other_choice
6540 intersecting_slots = myslots.intersection(o_versions)
6541 if not intersecting_slots:
6544 has_downgrade = False
6545 for myslot in intersecting_slots:
6546 myversion = versions[myslot]
6547 o_version = o_versions[myslot]
6548 difference = pkgcmp(catpkgsplit(myversion)[1:],
6549 catpkgsplit(o_version)[1:])
6554 has_downgrade = True
6556 if has_upgrade and not has_downgrade:
6557 preferred.remove(possible_upgrade)
6558 o_index = preferred.index(other_choice)
6559 preferred.insert(o_index, possible_upgrade)
6562 # preferred now contains a) and c) from the order above with
6563 # the masked flag differentiating the two. other contains b)
6564 # and d) so adding other to preferred will give us a suitable
6565 # list to iterate over.
6566 preferred.extend(other)
# First pass only accepts fully available choices; second pass accepts
# masked ones too, so something is always returned.
6568 for allow_masked in (False, True):
6569 for atoms, versions, all_available in preferred:
6570 if all_available or allow_masked:
6573 assert(False) # This point should not be reachable
# dep_expand(mydep, mydb=None, use_cache=1, settings=None)
# Expands a possibly category-less dependency atom into a full
# portage.dep.Atom by running its cpv part through cpv_expand(), preserving
# any operator prefix (>=, =, !, ...) and slot/version postfix around it.
# NOTE(review): original lines 6577-6581 are missing from this listing --
# presumably a docstring and the assignment of orig_dep (the body uses
# orig_dep before any visible assignment); lines 6588 and 6593 (the try:
# header and the re-raise/fallthrough for a still-invalid atom) are also
# absent.  TODO confirm against the full file.
6576 def dep_expand(mydep, mydb=None, use_cache=1, settings=None):
# Split the atom into operator prefix, cpv core, and postfix (e.g. slot).
6582 mydep = dep_getcpv(orig_dep)
6583 myindex = orig_dep.index(mydep)
6584 prefix = orig_dep[:myindex]
6585 postfix = orig_dep[myindex+len(mydep):]
# cpv_expand() resolves a bare package name to "category/package".
6586 expanded = cpv_expand(mydep, mydb=mydb,
6587 use_cache=use_cache, settings=settings)
6589 return portage.dep.Atom(prefix + expanded + postfix)
6590 except portage.exception.InvalidAtom:
6591 # Missing '=' prefix is allowed for backward compatibility.
6592 if not isvalidatom("=" + prefix + expanded + postfix):
6594 return portage.dep.Atom("=" + prefix + expanded + postfix)
def dep_check(depstring, mydbapi, mysettings, use="yes", mode=None, myuse=None,
	use_cache=1, use_binaries=0, myroot="/", trees=None):
	"""Parse a dependency string and reduce it against the current
	configuration.

	Returns a two-element list: [1, atoms] on success (atoms is the list of
	unsatisfied dependency atoms that were selected), or [0, error-message]
	when the dependency string is invalid.

	NOTE(review): this chunk appears to have lines elided (orphan 'except'
	clauses without their 'try:' lines) — verify against upstream before
	relying on the exact control flow shown here.
	"""
	edebug = mysettings.get("PORTAGE_DEBUG", None) == "1"
	#check_config_instance(mysettings)
	trees = globals()["db"]
	myusesplit = mysettings["PORTAGE_USE"].split()
	# We've been given useflags to use.
	#print "USE FLAGS PASSED IN."
	#if "bindist" in myusesplit:
	#	print "BINDIST is set!"
	#	print "BINDIST NOT set."
	#we are being run by autouse(), don't consult USE vars yet.
	# WE ALSO CANNOT USE SETTINGS
	#convert parenthesis to sublists
	mysplit = portage.dep.paren_reduce(depstring)
	except portage.exception.InvalidDependString, e:
	useforce.add(mysettings["ARCH"])
	# This masking/forcing is only for repoman.  In other cases, relevant
	# masking/forcing should have already been applied via
	# config.regenerate().  Also, binary or installed packages may have
	# been built with flags that are now masked, and it would be
	# inconsistent to mask them now.  Additionally, myuse may consist of
	# flags from a parent package that is being merged to a $ROOT that is
	# different from the one that mysettings represents.
	mymasks.update(mysettings.usemask)
	mymasks.update(mysettings.archlist())
	mymasks.discard(mysettings["ARCH"])
	useforce.update(mysettings.useforce)
	useforce.difference_update(mymasks)
	# Reduce USE-conditional groups using the effective flag sets.
	mysplit = portage.dep.use_reduce(mysplit, uselist=myusesplit,
		masklist=mymasks, matchall=(use=="all"), excludeall=useforce)
	except portage.exception.InvalidDependString, e:
	# Do the || conversions
	mysplit=portage.dep.dep_opconvert(mysplit)
	#dependencies were reduced to nothing
	# Recursively expand new-style virtuals so as to
	# collapse one or more levels of indirection.
	mysplit = _expand_new_virtuals(mysplit, edebug, mydbapi, mysettings,
		use=use, mode=mode, myuse=myuse,
		use_force=useforce, use_mask=mymasks, use_cache=use_cache,
		use_binaries=use_binaries, myroot=myroot, trees=trees)
	except portage.exception.ParseError, e:
	# Replace satisfied atoms with True/False markers.
	mysplit2=dep_wordreduce(mysplit2,mysettings,mydbapi,mode,use_cache=use_cache)
	if mysplit2 is None:
		return [0,"Invalid token"]
	writemsg("\n\n\n", 1)
	writemsg("mysplit:  %s\n" % (mysplit), 1)
	writemsg("mysplit2: %s\n" % (mysplit2), 1)
	# Choose which branch of each || group to satisfy.
	myzaps = dep_zapdeps(mysplit, mysplit2, myroot,
		use_binaries=use_binaries, trees=trees)
	except portage.exception.InvalidAtom, e:
		if portage.dep._dep_check_strict:
			raise # This shouldn't happen.
		# dbapi.match() failed due to an invalid atom in
		# the dependencies of an installed package.
		return [0, "Invalid atom: '%s'" % (e,)]
	mylist = flatten(myzaps)
	writemsg("myzaps:   %s\n" % (myzaps), 1)
	writemsg("mylist:   %s\n" % (mylist), 1)
	writemsg("mydict:   %s\n" % (mydict), 1)
	return [1,mydict.keys()]
def dep_wordreduce(mydeplist,mysettings,mydbapi,mode,use_cache=1):
	"""Reduce each atom in the (possibly nested) dep list to True/False
	according to whether it is currently satisfied.  Returns a new list
	with the same nesting structure; the input list is not modified.

	NOTE(review): several branch bodies appear elided in this chunk —
	verify against upstream.
	"""
	deplist=mydeplist[:]  # shallow copy; sublists are replaced, not mutated
	for mypos, token in enumerate(deplist):
		if isinstance(deplist[mypos], list):
			# Recurse into nested groups.
			deplist[mypos]=dep_wordreduce(deplist[mypos],mysettings,mydbapi,mode,use_cache=use_cache)
		elif deplist[mypos]=="||":
		elif token[:1] == "!":
			deplist[mypos] = False
			mykey = dep_getkey(deplist[mypos])
			# package.provided entries satisfy atoms without a db lookup.
			if mysettings and mykey in mysettings.pprovideddict and \
				match_from_list(deplist[mypos], mysettings.pprovideddict[mykey]):
			elif mydbapi is None:
				# Assume nothing is satisfied. This forces dep_zapdeps to
				# return all of the deps that have been selected
				# (excluding those satisfied by package.provided).
				deplist[mypos] = False
				x = mydbapi.xmatch(mode, deplist[mypos])
				if mode.startswith("minimum-"):
					mydep=mydbapi.match(deplist[mypos],use_cache=use_cache)
					if deplist[mypos][0]=="!":
	#encountered invalid string
def cpv_getkey(mycpv):
	"""Return the category/package key for a versioned cpv string,
	e.g. "sys-apps/foo-1.0" -> "sys-apps/foo".

	NOTE(review): the guard for pkgsplit() returning None appears elided
	in this chunk — verify against upstream.
	"""
	myslash=mycpv.split("/")
	mysplit=pkgsplit(myslash[-1])  # (pkgname, version, revision) or None
	return myslash[0]+"/"+mysplit[0]
def key_expand(mykey, mydb=None, use_cache=1, settings=None):
	"""Expand a package name (possibly without a category) into a
	category/package key, consulting mydb's categories and the configured
	virtuals.  Falls back to "null/<name>" when nothing matches.

	NOTE(review): some branches appear elided in this chunk — verify
	against upstream.
	"""
	mysplit=mykey.split("/")
	if settings is None:
		settings = globals()["settings"]
	virts = settings.getvirtuals("/")
	virts_p = settings.get_virts_p("/")  # virtuals keyed by short name
	if hasattr(mydb, "cp_list"):
		# Search every known category for a package with this name.
		for x in mydb.categories:
			if mydb.cp_list(x+"/"+mykey,use_cache=use_cache):
	if mykey in virts_p:
		return(virts_p[mykey][0])
	return "null/"+mykey
	if hasattr(mydb, "cp_list"):
		# Prefer a virtual provider when the literal key has no ebuilds.
		if not mydb.cp_list(mykey, use_cache=use_cache) and \
			virts and mykey in virts:
			return virts[mykey][0]
def cpv_expand(mycpv, mydb=None, use_cache=1, settings=None):
	"""Given a string (packagename or virtual) expand it into a valid
	cat/package string. Virtuals use the mydb to determine which provided
	virtual is a valid choice and defaults to the first element when there
	are no installed/available candidates.

	Raises portage.exception.AmbiguousPackageName (a ValueError subclass)
	when a category-less name matches multiple non-virtual categories.

	NOTE(review): several lines appear elided in this chunk (e.g. the
	initial len(myslash) checks) — verify against upstream.
	"""
	myslash=mycpv.split("/")
	mysplit=pkgsplit(myslash[-1])
	if settings is None:
		settings = globals()["settings"]
	virts = settings.getvirtuals("/")
	virts_p = settings.get_virts_p("/")
	# this is illegal case.
	elif len(myslash)==2:
		mykey=myslash[0]+"/"+mysplit[0]
	if mydb and virts and mykey in virts:
		writemsg("mydb.__class__: %s\n" % (mydb.__class__), 1)
		if hasattr(mydb, "cp_list"):
			if not mydb.cp_list(mykey, use_cache=use_cache):
				writemsg("virts[%s]: %s\n" % (str(mykey),virts[mykey]), 1)
				mykey_orig = mykey[:]
				for vkey in virts[mykey]:
					# The virtuals file can contain a versioned atom, so
					# it may be necessary to remove the operator and
					# version from the atom before it is passed into
					if mydb.cp_list(dep_getkey(vkey), use_cache=use_cache):
						writemsg("virts chosen: %s\n" % (mykey), 1)
				if mykey == mykey_orig:
					# No provider had ebuilds; default to the first entry.
					mykey=virts[mykey][0]
					writemsg("virts defaulted: %s\n" % (mykey), 1)
	#we only perform virtual expansion if we are passed a dbapi
	#specific cpv, no category, ie. "foo-1.0"
	if mydb and hasattr(mydb, "categories"):
		for x in mydb.categories:
			if mydb.cp_list(x+"/"+myp,use_cache=use_cache):
				matches.append(x+"/"+myp)
	if len(matches) > 1:
		virtual_name_collision = False
		if len(matches) == 2:
			if not x.startswith("virtual/"):
				# Assume that the non-virtual is desired.  This helps
				# avoid the ValueError for invalid deps that come from
				# installed packages (during reverse blocker detection,
				virtual_name_collision = True
		if not virtual_name_collision:
			# AmbiguousPackageName inherits from ValueError,
			# for backward compatibility with calling code
			# that already handles ValueError.
			raise portage.exception.AmbiguousPackageName(matches)
	if not mykey and not isinstance(mydb, list):
		mykey=virts_p[myp][0]
	#again, we only perform virtual expansion if we have a dbapi (not a list)
	# Drop the "-r0" revision suffix for canonical form.
	if mysplit[2]=="r0":
		return mykey+"-"+mysplit[1]
	return mykey+"-"+mysplit[1]+"-"+mysplit[2]
def getmaskingreason(mycpv, metadata=None, settings=None, portdb=None, return_location=False):
	"""Return the package.mask comment that applies to mycpv (or a
	(comment, filename) tuple when return_location is True), by scanning
	package.mask files from $PORTDIR, the profile stack, overlays and the
	user config directory.

	Raises ValueError for an invalid CPV.

	NOTE(review): several lines appear elided in this chunk — verify the
	comment-accumulation loop against upstream.
	"""
	from portage.util import grablines
	if settings is None:
		settings = globals()["settings"]
	portdb = globals()["portdb"]
	mysplit = catpkgsplit(mycpv)
	raise ValueError("invalid CPV: %s" % mycpv)
	if metadata is None:
		db_keys = list(portdb._aux_cache_keys)
		metadata = dict(izip(db_keys, portdb.aux_get(mycpv, db_keys)))
	if not portdb.cpv_exists(mycpv):
	if metadata is None:
		# Can't access SLOT due to corruption.
		cpv_slot_list = [mycpv]
	cpv_slot_list = ["%s:%s" % (mycpv, metadata["SLOT"])]
	mycp=mysplit[0]+"/"+mysplit[1]
	# XXX- This is a temporary duplicate of code from the config constructor.
	locations = [os.path.join(settings["PORTDIR"], "profiles")]
	locations.extend(settings.profiles)
	for ov in settings["PORTDIR_OVERLAY"].split():
		profdir = os.path.join(normalize_path(ov), "profiles")
		if os.path.isdir(profdir):
			locations.append(profdir)
	locations.append(os.path.join(settings["PORTAGE_CONFIGROOT"],
		USER_CONFIG_PATH.lstrip(os.path.sep)))
	pmasklists = [(x, grablines(os.path.join(x, "package.mask"), recursive=1)) for x in locations]
	if mycp in settings.pmaskdict:
		for x in settings.pmaskdict[mycp]:
			if match_from_list(x, cpv_slot_list):
				for pmask in pmasklists:
					pmask_filename = os.path.join(pmask[0], "package.mask")
					for i in xrange(len(pmask[1])):
						l = pmask[1][i].strip()
						comment_valid = i + 1
						if comment_valid != i:
						return (comment, pmask_filename)
					elif comment_valid != -1:
						# Apparently this comment applies to multiple masks, so
						# it remains valid until a blank line is encountered.
def getmaskingstatus(mycpv, settings=None, portdb=None):
	"""Return a list of strings naming every reason mycpv is masked
	(e.g. "profile", "package.mask", "EAPI ...", "~arch keyword",
	"... license(s)").  An empty list means the package is not masked.

	Raises ValueError for an invalid CPV.

	NOTE(review): this chunk appears to have lines elided (rValue/kmask
	initialization, several branch bodies) — verify against upstream.
	"""
	if settings is None:
		settings = config(clone=globals()["settings"])
	portdb = globals()["portdb"]
	if not isinstance(mycpv, basestring):
		# emerge passed in a Package instance
		metadata = pkg.metadata
		installed = pkg.installed
	mysplit = catpkgsplit(mycpv)
	raise ValueError("invalid CPV: %s" % mycpv)
	if metadata is None:
		db_keys = list(portdb._aux_cache_keys)
		metadata = dict(izip(db_keys, portdb.aux_get(mycpv, db_keys)))
	if not portdb.cpv_exists(mycpv):
		return ["corruption"]
	if "?" in metadata["LICENSE"]:
		# LICENSE has USE conditionals; compute USE for this cpv.
		settings.setcpv(mycpv, mydb=metadata)
		metadata["USE"] = settings["PORTAGE_USE"]
	metadata["USE"] = ""
	mycp=mysplit[0]+"/"+mysplit[1]
	if settings._getProfileMaskAtom(mycpv, metadata):
		rValue.append("profile")
	# package.mask checking
	if settings._getMaskAtom(mycpv, metadata):
		rValue.append("package.mask")
	eapi = metadata["EAPI"]
	mygroups = metadata["KEYWORDS"]
	licenses = metadata["LICENSE"]
	slot = metadata["SLOT"]
	if eapi.startswith("-"):
	if not eapi_is_supported(eapi):
		return ["EAPI %s" % eapi]
	elif _eapi_is_deprecated(eapi) and not installed:
		return ["EAPI %s" % eapi]
	egroups = settings.configdict["backupenv"].get(
		"ACCEPT_KEYWORDS", "").split()
	mygroups = mygroups.split()
	pgroups = settings["ACCEPT_KEYWORDS"].split()
	myarch = settings["ARCH"]
	if pgroups and myarch not in pgroups:
		"""For operating systems other than Linux, ARCH is not necessarily a
		valid keyword."""
		myarch = pgroups[0].lstrip("~")
	cp = dep_getkey(mycpv)
	pkgdict = settings.pkeywordsdict.get(cp)
	cpv_slot_list = ["%s:%s" % (mycpv, metadata["SLOT"])]
	for atom, pkgkeywords in pkgdict.iteritems():
		if match_from_list(atom, cpv_slot_list):
			pgroups.extend(pkgkeywords)
	if matches or egroups:
		pgroups.extend(egroups)
	if x.startswith("-"):
		inc_pgroups.discard(x[1:])
	pgroups = inc_pgroups
	for keyword in pgroups:
		if keyword in mygroups:
	elif gp=="-"+myarch and myarch in pgroups:
	elif gp=="~"+myarch and myarch in pgroups:
	missing_licenses = settings._getMissingLicenses(mycpv, metadata)
	if missing_licenses:
		allowed_tokens = set(["||", "(", ")"])
		allowed_tokens.update(missing_licenses)
		license_split = licenses.split()
		license_split = [x for x in license_split \
			if x in allowed_tokens]
		msg = license_split[:]
		msg.append("license(s)")
		rValue.append(" ".join(msg))
	except portage.exception.InvalidDependString, e:
		rValue.append("LICENSE: "+str(e))
	# Only show KEYWORDS masks for installed packages
	# if they're not masked for any other reason.
	if kmask and (not installed or not rValue):
		rValue.append(kmask+" keyword")
# Metadata cache key schema; the order of these names defines the line
# layout of the flat metadata cache.  UNUSED_* entries are reserved slots.
# NOTE(review): the opening "auxdbkeys = (" line appears elided in this chunk.
	'DEPEND',    'RDEPEND',   'SLOT',      'SRC_URI',
	'RESTRICT',  'HOMEPAGE',  'LICENSE',   'DESCRIPTION',
	'KEYWORDS',  'INHERITED', 'IUSE',      'CDEPEND',
	'PDEPEND',   'PROVIDE',   'EAPI',
	'PROPERTIES', 'DEFINED_PHASES', 'UNUSED_05', 'UNUSED_04',
	'UNUSED_03', 'UNUSED_02', 'UNUSED_01',
auxdbkeylen=len(auxdbkeys)
7060 from portage.dbapi import dbapi
7061 from portage.dbapi.virtual import fakedbapi
7062 from portage.dbapi.bintree import bindbapi, binarytree
7063 from portage.dbapi.vartree import vardbapi, vartree, dblink
7064 from portage.dbapi.porttree import close_portdbapi_caches, portdbapi, portagetree
class FetchlistDict(UserDict.DictMixin):
	"""This provides a mapping interface to retrieve fetch lists. It's used
	to allow portage.manifest.Manifest to access fetch lists via a standard
	mapping interface rather than use the dbapi directly."""
	def __init__(self, pkgdir, settings, mydbapi):
		"""pkgdir is a directory containing ebuilds and settings is passed into
		portdbapi.getfetchlist for __getitem__ calls."""
		self.pkgdir = pkgdir
		# category/package derived from the last two path components
		self.cp = os.sep.join(pkgdir.split(os.sep)[-2:])
		self.settings = settings
		# repository root: two levels above the package directory
		self.mytree = os.path.realpath(os.path.dirname(os.path.dirname(pkgdir)))
		self.portdb = mydbapi
	def __getitem__(self, pkg_key):
		"""Returns the complete fetch list for a given package."""
		return self.portdb.getFetchMap(pkg_key, mytree=self.mytree).keys()
	def __contains__(self, cpv):
		return cpv in self.keys()
	def has_key(self, pkg_key):
		"""Returns true if the given package exists within pkgdir."""
		return pkg_key in self
	# NOTE(review): the "def keys(self):" line appears elided in this chunk.
		"""Returns keys for all packages within pkgdir"""
		return self.portdb.cp_list(self.cp, mytree=self.mytree)
def pkgmerge(mytbz2, myroot, mysettings, mydbapi=None,
	vartree=None, prev_mtimes=None, blockers=None):
	"""will merge a .tbz2 file, returning a list of runtime dependencies
	that must be satisfied, or None if there was a merge error. This
	code assumes the package exists.

	NOTE(review): several lines appear elided in this chunk (try: lines,
	early returns, finally: blocks) — verify against upstream.
	"""
	mydbapi = db[myroot]["bintree"].dbapi
	vartree = db[myroot]["vartree"]
	if mytbz2[-5:]!=".tbz2":
		print "!!! Not a .tbz2 file"
	did_merge_phase = False
	""" Don't lock the tbz2 file because the filesytem could be readonly or
	shared by a cluster."""
	#tbz2_lock = portage.locks.lockfile(mytbz2, wantnewlockfile=1)
	mypkg = os.path.basename(mytbz2)[:-5]  # strip ".tbz2"
	xptbz2 = portage.xpak.tbz2(mytbz2)
	mycat = xptbz2.getfile("CATEGORY")
	writemsg("!!! CATEGORY info missing from info chunk, aborting...\n",
	mycat = mycat.strip()
	# These are the same directories that would be used at build time.
	builddir = os.path.join(
		mysettings["PORTAGE_TMPDIR"], "portage", mycat, mypkg)
	catdir = os.path.dirname(builddir)
	pkgloc = os.path.join(builddir, "image")
	infloc = os.path.join(builddir, "build-info")
	myebuild = os.path.join(
		infloc, os.path.basename(mytbz2)[:-4] + "ebuild")
	portage.util.ensure_dirs(os.path.dirname(catdir),
		uid=portage_uid, gid=portage_gid, mode=070, mask=0)
	catdir_lock = portage.locks.lockdir(catdir)
	portage.util.ensure_dirs(catdir,
		uid=portage_uid, gid=portage_gid, mode=070, mask=0)
	# Start from a clean build directory.
	shutil.rmtree(builddir)
	except (IOError, OSError), e:
		if e.errno != errno.ENOENT:
	for mydir in (builddir, pkgloc, infloc):
		portage.util.ensure_dirs(mydir, uid=portage_uid,
			gid=portage_gid, mode=0755)
	writemsg_stdout(">>> Extracting info\n")
	xptbz2.unpackinfo(infloc)
	mysettings.setcpv(mycat + "/" + mypkg, mydb=mydbapi)
	# Store the md5sum in the vdb.
	fp = open(os.path.join(infloc, "BINPKGMD5"), "w")
	fp.write(str(portage.checksum.perform_md5(mytbz2))+"\n")
	# This gives bashrc users an opportunity to do various things
	# such as remove binary packages after they're installed.
	mysettings["PORTAGE_BINPKG_FILE"] = mytbz2
	mysettings.backup_changes("PORTAGE_BINPKG_FILE")
	debug = mysettings.get("PORTAGE_DEBUG", "") == "1"
	# Eventually we'd like to pass in the saved ebuild env here.
	retval = doebuild(myebuild, "setup", myroot, mysettings, debug=debug,
		tree="bintree", mydbapi=mydbapi, vartree=vartree)
	if retval != os.EX_OK:
		writemsg("!!! Setup failed: %s\n" % retval, noiselevel=-1)
	writemsg_stdout(">>> Extracting %s\n" % mypkg)
	retval = portage.process.spawn_bash(
		"bzip2 -dqc -- '%s' | tar -xp -C '%s' -f -" % (mytbz2, pkgloc),
		env=mysettings.environ())
	if retval != os.EX_OK:
		writemsg("!!! Error Extracting '%s'\n" % mytbz2, noiselevel=-1)
	#portage.locks.unlockfile(tbz2_lock)
	mylink = dblink(mycat, mypkg, myroot, mysettings, vartree=vartree,
		treetype="bintree", blockers=blockers)
	retval = mylink.merge(pkgloc, infloc, myroot, myebuild, cleanup=0,
		mydbapi=mydbapi, prev_mtimes=prev_mtimes)
	did_merge_phase = True
	success = retval == os.EX_OK
	mysettings.pop("PORTAGE_BINPKG_FILE", None)
	portage.locks.unlockfile(tbz2_lock)
	if not did_merge_phase:
		# The merge phase handles this already.  Callers don't know how
		# far this function got, so we have to call elog_process() here
		# so that it's only called once.
		from portage.elog import elog_process
		elog_process(mycat + "/" + mypkg, mysettings)
	shutil.rmtree(builddir)
	except (IOError, OSError), e:
		if e.errno != errno.ENOENT:
def deprecated_profile_check(settings=None):
	"""Warn the user when the currently selected profile is listed as
	deprecated, printing the replacement profile and any upgrade
	instructions found in the deprecation file.

	NOTE(review): early returns appear elided in this chunk — verify
	against upstream.
	"""
	if settings is not None:
		config_root = settings["PORTAGE_CONFIGROOT"]
	deprecated_profile_file = os.path.join(config_root,
		DEPRECATED_PROFILE_FILE.lstrip(os.sep))
	if not os.access(deprecated_profile_file, os.R_OK):
	deprecatedfile = open(deprecated_profile_file, "r")
	dcontent = deprecatedfile.readlines()
	deprecatedfile.close()
	writemsg(colorize("BAD", "\n!!! Your current profile is " + \
		"deprecated and not supported anymore.") + "\n", noiselevel=-1)
	writemsg(colorize("BAD","!!! Please refer to the " + \
		"Gentoo Upgrading Guide.") + "\n", noiselevel=-1)
	# First line of the file names the replacement profile.
	newprofile = dcontent[0]
	writemsg(colorize("BAD", "!!! Please upgrade to the " + \
		"following profile if possible:") + "\n", noiselevel=-1)
	writemsg(8*" " + colorize("GOOD", newprofile) + "\n", noiselevel=-1)
	if len(dcontent) > 1:
		# Remaining lines are free-form upgrade instructions.
		writemsg("To upgrade do the following steps:\n", noiselevel=-1)
		for myline in dcontent[1:]:
			writemsg(myline, noiselevel=-1)
		writemsg("\n\n", noiselevel=-1)
def getvirtuals(myroot):
	"""Deprecated shim: return the virtual package settings for myroot.

	Callers should use settings.getvirtuals() directly instead.
	"""
	writemsg("--- DEPRECATED call to getvirtual\n")
	result = settings.getvirtuals(myroot)
	return result
def commit_mtimedb(mydict=None, filename=None):
	"""Atomically pickle the mtimedb dict to disk and restore secure
	permissions on the file.  Defaults to the global mtimedb and its
	standard filename when arguments are omitted.

	NOTE(review): several lines appear elided in this chunk (mydict
	defaulting, d.update, try:) — verify against upstream.
	"""
	if "mtimedb" not in globals() or mtimedb is None:
	if filename is None:
		filename = mtimedbfile
	mydict["version"] = VERSION
	d = {} # for full backward compat, pickle it as a plain dict object.
	f = atomic_ofstream(filename)
	pickle.dump(d, f, -1)  # -1: use the highest available pickle protocol
	portage.util.apply_secpass_permissions(filename, uid=uid, gid=portage_gid, mode=0664)
	except (IOError, OSError), e:
# NOTE(review): the "def portageexit():" line appears elided in this chunk;
# the following body is registered below as an atexit handler.
	global uid,portage_gid,portdb,db
	# Skip cache writes when sandboxed or when we lack privileges.
	if secpass and os.environ.get("SANDBOX_ON") != "1":
		close_portdbapi_caches()

atexit_register(portageexit)
def _global_updates(trees, prev_mtimes):
	"""
	Perform new global updates if they exist in $PORTDIR/profiles/updates/.

	@param trees: A dictionary containing portage trees.
	@type trees: dict
	@param prev_mtimes: A dictionary containing mtimes of files located in
		$PORTDIR/profiles/updates/.
	@type prev_mtimes: dict
	@rtype: None or List
	@return: None if there were no updates, otherwise a list of update
		commands that have been performed.

	NOTE(review): several lines appear elided in this chunk (try: lines,
	early returns, myupd/timestamps initialization) — verify against
	upstream.
	"""
	# only do this if we're root and not running repoman/ebuild digest
	if secpass < 2 or "SANDBOX_ACTIVE" in os.environ:
	mysettings = trees["/"]["vartree"].settings
	updpath = os.path.join(mysettings["PORTDIR"], "profiles", "updates")
	if mysettings["PORTAGE_CALLER"] == "fixpackages":
		# fixpackages reprocesses everything, ignoring saved mtimes.
		update_data = grab_updates(updpath)
	update_data = grab_updates(updpath, prev_mtimes)
	except portage.exception.DirectoryNotFound:
		writemsg("--- 'profiles/updates' is empty or " + \
			"not available. Empty portage tree?\n", noiselevel=1)
	if len(update_data) > 0:
		do_upgrade_packagesmessage = 0
		for mykey, mystat, mycontent in update_data:
			writemsg_stdout("\n\n")
			writemsg_stdout(colorize("GOOD",
				"Performing Global Updates: ")+bold(mykey)+"\n")
			writemsg_stdout("(Could take a couple of minutes if you have a lot of binary packages.)\n")
			writemsg_stdout(" " + bold(".") + "='update pass' " + \
				bold("*") + "='binary update' " + bold("#") + \
				"='/var/db update' " + bold("@") + "='/var/db move'\n" + \
				" " + bold("s") + "='/var/db SLOT move' " + \
				bold("%") + "='binary move' " + bold("S") + \
				"='binary SLOT move'\n " + \
				bold("p") + "='update /etc/portage/package.*'\n")
			valid_updates, errors = parse_updates(mycontent)
			myupd.extend(valid_updates)
			writemsg_stdout(len(valid_updates) * "." + "\n")
			if len(errors) == 0:
				# Update our internal mtime since we
				# processed all of our directives.
				timestamps[mykey] = long(mystat.st_mtime)
				writemsg("%s\n" % msg, noiselevel=-1)
		# Apply the moves to the world file.
		world_file = os.path.join(root, WORLD_FILE)
		world_list = grabfile(world_file)
		world_modified = False
		for update_cmd in myupd:
			for pos, atom in enumerate(world_list):
				new_atom = update_dbentry(update_cmd, atom)
				if atom != new_atom:
					world_list[pos] = new_atom
					world_modified = True
		write_atomic(world_file,
			"".join("%s\n" % (x,) for x in world_list))
		update_config_files("/",
			mysettings.get("CONFIG_PROTECT","").split(),
			mysettings.get("CONFIG_PROTECT_MASK","").split(),
		trees["/"]["bintree"] = binarytree("/", mysettings["PKGDIR"],
			settings=mysettings)
		vardb = trees["/"]["vartree"].dbapi
		bindb = trees["/"]["bintree"].dbapi
		if not os.access(bindb.bintree.pkgdir, os.W_OK):
		# Apply each move/slotmove to the installed-package and binary dbs.
		for update_cmd in myupd:
			if update_cmd[0] == "move":
				moves = vardb.move_ent(update_cmd)
				writemsg_stdout(moves * "@")
				moves = bindb.move_ent(update_cmd)
				writemsg_stdout(moves * "%")
			elif update_cmd[0] == "slotmove":
				moves = vardb.move_slot_ent(update_cmd)
				writemsg_stdout(moves * "s")
				moves = bindb.move_slot_ent(update_cmd)
				writemsg_stdout(moves * "S")
		# The above global updates proceed quickly, so they
		# are considered a single mtimedb transaction.
		if len(timestamps) > 0:
			# We do not update the mtime in the mtimedb
			# until after _all_ of the above updates have
			# been processed because the mtimedb will
			# automatically commit when killed by ctrl C.
			for mykey, mtime in timestamps.iteritems():
				prev_mtimes[mykey] = mtime
		# We gotta do the brute force updates for these now.
		if mysettings["PORTAGE_CALLER"] == "fixpackages" or \
			"fixpackages" in mysettings.features:
			def onUpdate(maxval, curval):
				writemsg_stdout("#")
			vardb.update_ents(myupd, onUpdate=onUpdate)
			def onUpdate(maxval, curval):
				writemsg_stdout("*")
			bindb.update_ents(myupd, onUpdate=onUpdate)
			do_upgrade_packagesmessage = 1
		# Update progress above is indicated by characters written to stdout so
		# we print a couple new lines here to separate the progress output from
		if do_upgrade_packagesmessage and bindb and \
			writemsg_stdout(" ** Skipping packages. Run 'fixpackages' or set it in FEATURES to fix the")
			writemsg_stdout("\n tbz2's in the packages directory. "+bold("Note: This can take a very long time."))
			writemsg_stdout("\n")
7405 #continue setting up other trees
class MtimeDB(dict):
	"""A dict-backed store for the on-disk pickled mtimedb.  Loads from
	the given filename on construction and commits back (via
	commit_mtimedb) only when the data has actually changed.

	NOTE(review): several lines appear elided in this chunk (try/open in
	_load, the "def commit(self):" line) — verify against upstream.
	"""
	def __init__(self, filename):
		self.filename = filename
		self._load(filename)
	def _load(self, filename):
		mypickle = pickle.Unpickler(f)
		# Disallow resolving arbitrary globals while unpickling
		# (defense against a tampered mtimedb file).
		mypickle.find_global = None
		except (IOError, OSError, EOFError, pickle.UnpicklingError), e:
			if isinstance(e, pickle.UnpicklingError):
				writemsg("!!! Error loading '%s': %s\n" % \
					(filename, str(e)), noiselevel=-1)
		# Migrate the legacy "old" key to "updates".
		d["updates"] = d["old"]
		d.setdefault("starttime", 0)
		d.setdefault("version", "")
		for k in ("info", "ldpath", "updates"):
		mtimedbkeys = set(("info", "ldpath", "resume", "resume_backup",
			"starttime", "updates", "version"))
		if k not in mtimedbkeys:
			writemsg("Deleting invalid mtimedb key: %s\n" % str(k))
		# Snapshot for change detection in commit().
		self._clean_data = copy.deepcopy(d)
		if not self.filename:
		# Only commit if the internal state has changed.
		if d != self._clean_data:
			commit_mtimedb(mydict=d, filename=self.filename)
			self._clean_data = copy.deepcopy(d)
def create_trees(config_root=None, target_root=None, trees=None):
	"""Build (or rebuild) the per-ROOT trees mapping of lazily-constructed
	vartree/porttree/bintree/virtuals objects.  When ROOT != "/" a second
	config for "/" is created with environment overrides stripped, so that
	caller-environment settings only affect the target ROOT.

	NOTE(review): some lines appear elided in this chunk (trees
	defaulting, parts of the backupenv filter condition) — verify
	against upstream.
	"""
	# clean up any existing portdbapi instances
	for myroot in trees:
		portdb = trees[myroot]["porttree"].dbapi
		portdb.close_caches()
		portdbapi.portdbapi_instances.remove(portdb)
		del trees[myroot]["porttree"], myroot, portdb
	settings = config(config_root=config_root, target_root=target_root,
		config_incrementals=portage.const.INCREMENTALS)
	myroots = [(settings["ROOT"], settings)]
	if settings["ROOT"] != "/":
		settings = config(config_root=None, target_root="/",
			config_incrementals=portage.const.INCREMENTALS)
		# When ROOT != "/" we only want overrides from the calling
		# environment to apply to the config that's associated
		# with ROOT != "/", so we wipe out the "backupenv" for the
		# config that is associated with ROOT == "/" and regenerate
		# it's incrementals.
		# Preserve backupenv values that are initialized in the config
		# constructor. Also, preserve XARGS since it is set by the
		# portage.data module.
		backupenv_whitelist = settings._environ_whitelist
		backupenv = settings.configdict["backupenv"]
		env_d = settings.configdict["env.d"]
		for k, v in os.environ.iteritems():
			if k in backupenv_whitelist:
				v == backupenv.get(k):
				backupenv.pop(k, None)
		settings.regenerate()
		myroots.append((settings["ROOT"], settings))
	for myroot, mysettings in myroots:
		# Each tree is constructed lazily, on first access.
		trees[myroot] = portage.util.LazyItemsDict(trees.get(myroot, None))
		trees[myroot].addLazySingleton("virtuals", mysettings.getvirtuals, myroot)
		trees[myroot].addLazySingleton(
			"vartree", vartree, myroot, categories=mysettings.categories,
			settings=mysettings)
		trees[myroot].addLazySingleton("porttree",
			portagetree, myroot, settings=mysettings)
		trees[myroot].addLazySingleton("bintree",
			binarytree, myroot, mysettings["PKGDIR"], settings=mysettings)
class _LegacyGlobalProxy(portage.util.ObjectProxy):
	"""
	Instances of these serve as proxies to global variables
	that are initialized on demand.
	"""

	def __init__(self, name):
		# name: the module-level global this proxy stands in for
		portage.util.ObjectProxy.__init__(self)
		object.__setattr__(self, '_name', name)

	def _get_target(self):
		# Trigger full legacy-global initialization, then resolve
		# the real object from this module's namespace.
		init_legacy_globals()
		name = object.__getattribute__(self, '_name')
		return globals()[name]
class _PortdbProxy(portage.util.ObjectProxy):
	"""
	The portdb is initialized separately from the rest
	of the variables, since sometimes the other variables
	are needed while the portdb is not.
	"""

	def _get_target(self):
		init_legacy_globals()
		global db, portdb, root, _portdb_initialized
		if not _portdb_initialized:
			# Replace this proxy with the real porttree dbapi.
			portdb = db[root]["porttree"].dbapi
			_portdb_initialized = True
		# NOTE(review): the trailing "return portdb" appears elided
		# in this chunk — verify against upstream.
class _MtimedbProxy(portage.util.ObjectProxy):
	"""
	The mtimedb is independent from the portdb and other globals.
	"""

	def __init__(self, name):
		# name: either "mtimedb" or "mtimedbfile"
		portage.util.ObjectProxy.__init__(self)
		object.__setattr__(self, '_name', name)

	def _get_target(self):
		global mtimedb, mtimedbfile, _mtimedb_initialized
		if not _mtimedb_initialized:
			# Lazily load the mtimedb from its standard cache location.
			mtimedbfile = os.path.join("/",
				CACHE_PATH.lstrip(os.path.sep), "mtimedb")
			mtimedb = MtimeDB(mtimedbfile)
			_mtimedb_initialized = True
		name = object.__getattribute__(self, '_name')
		return globals()[name]
# Names of the deprecated module-level globals that are lazily initialized
# via proxies and removed by _disable_legacy_globals().
_legacy_global_var_names = ("archlist", "db", "features",
	"groups", "mtimedb", "mtimedbfile", "pkglines",
	"portdb", "profiledir", "root", "selinux_enabled",
	"settings", "thirdpartymirrors", "usedefaults")
def _disable_legacy_globals():
	"""
	This deletes the ObjectProxy instances that are used
	for lazy initialization of legacy global variables.
	The purpose of deleting them is to prevent new code
	from referencing these deprecated variables.
	"""
	module_ns = globals()
	for var_name in _legacy_global_var_names:
		module_ns.pop(var_name, None)
7576 # Initialization of legacy globals. No functions/classes below this point
7577 # please! When the above functions and classes become independent of the
7578 # below global variables, it will be possible to make the below code
7579 # conditional on a backward compatibility flag (backward compatibility could
7580 # be disabled via an environment variable, for example). This will enable new
7581 # code that is aware of this flag to import portage without the unnecessary
7582 # overhead (and other issues!) of initializing the legacy globals.
def init_legacy_globals():
	"""Initialize all deprecated module-level globals (settings, db,
	portdb, root, etc.) exactly once, replacing their lazy proxies with
	real objects.  Safe to call repeatedly.

	NOTE(review): several lines appear elided in this chunk (umask setup,
	kwargs initialization, early return) — verify against upstream.
	"""
	global _globals_initialized
	if _globals_initialized:
	_globals_initialized = True

	global db, settings, root, portdb, selinux_enabled, mtimedbfile, mtimedb, \
		archlist, features, groups, pkglines, thirdpartymirrors, usedefaults, \
		profiledir, flushmtimedb

	# Portage needs to ensure a sane umask for the files it creates.
	for k, envvar in (("config_root", "PORTAGE_CONFIGROOT"), ("target_root", "ROOT")):
		kwargs[k] = os.environ.get(envvar, "/")

	global _initializing_globals
	_initializing_globals = True
	db = create_trees(**kwargs)
	del _initializing_globals

	settings = db["/"]["vartree"].settings
	settings = db[myroot]["vartree"].settings
	root = settings["ROOT"]

	# ========================================================================
	# DEPRECATED: These attributes should not be used
	# within Portage under any circumstances.
	# ========================================================================
	archlist = settings.archlist()
	features = settings.features
	groups = settings["ACCEPT_KEYWORDS"].split()
	pkglines = settings.packages
	selinux_enabled = settings.selinux_enabled()
	thirdpartymirrors = settings.thirdpartymirrors()
	usedefaults = settings.use_defs
	if os.path.isdir(PROFILE_PATH):
		profiledir = PROFILE_PATH
	def flushmtimedb(record):
		writemsg("portage.flushmtimedb() is DEPRECATED\n")
	# ========================================================================
	# END DEPRECATED: These attributes should not be used
	# within Portage under any circumstances.
	# ========================================================================
# Install lazy proxies for the deprecated legacy globals; each proxy
# resolves to the real object on first access.
_mtimedb_initialized = False
mtimedb = _MtimedbProxy("mtimedb")
mtimedbfile = _MtimedbProxy("mtimedbfile")

_portdb_initialized = False
portdb = _PortdbProxy()

_globals_initialized = False

# NOTE(review): the tail of this tuple appears elided in this chunk.
for k in ("db", "settings", "root", "selinux_enabled",
	"archlist", "features", "groups",
	"pkglines", "thirdpartymirrors", "usedefaults", "profiledir",
	globals()[k] = _LegacyGlobalProxy(k)
7659 # ============================================================================
7660 # ============================================================================