1 # portage.py -- core Portage functionality
2 # Copyright 1998-2009 Gentoo Foundation
3 # Distributed under the terms of the GNU General Public License v2
7 VERSION="$Rev$"[6:-2] + "-svn"
9 # ===========================================================================
10 # START OF IMPORTS -- START OF IMPORTS -- START OF IMPORTS -- START OF IMPORT
11 # ===========================================================================
24 import cPickle as pickle
30 from time import sleep
31 from random import shuffle
32 from itertools import chain, izip
35 except ImportError, e:
36 sys.stderr.write("\n\n")
37 sys.stderr.write("!!! Failed to complete python imports. These are internal modules for\n")
38 sys.stderr.write("!!! python and failure here indicates that you have a problem with python\n")
39 sys.stderr.write("!!! itself and thus portage is not able to continue processing.\n\n")
41 sys.stderr.write("!!! You might consider starting python with verbose flags to see what has\n")
42 sys.stderr.write("!!! gone wrong. Here is the information we got for this exception:\n")
43 sys.stderr.write(" "+str(e)+"\n\n");
47 if platform.system() in ["FreeBSD"]:
def _chflags(path, flags, opts=""):
	# Wrapper around the FreeBSD chflags(1) command that emulates the
	# error behavior of os.chflags(), translating command failure into
	# an OSError-style exception for the caller.
	# NOTE(review): the success return and the ENOENT-probing/raise lines
	# are elided in this view of the file.
	# NOTE(review): path is single-quoted in the shell command; a path
	# containing a ' character would break the quoting.
	cmd = "chflags %s %o '%s'" % (opts, flags, path)
	status, output = commands.getstatusoutput(cmd)
	if os.WIFEXITED(status) and os.WEXITSTATUS(status) == os.EX_OK:
	# Try to generate an ENOENT error if appropriate.
	# Make sure the binary exists.
	if not portage.process.find_binary("chflags"):
		raise portage.exception.CommandNotFound("chflags")
	# Now we're not sure exactly why it failed or what
	# the real errno was, so just report EPERM.
	e = OSError(errno.EPERM, output)
def _lchflags(path, flags):
	"""Apply the given file flags to a symlink itself rather than to
	the file the link points to, by delegating to _chflags() with the
	-h option of chflags(1)."""
	lchflags_opts = "-h"
	return _chflags(path, flags, opts=lchflags_opts)
# Publish the wrappers through the bsd_chflags module namespace so the
# rest of portage can call bsd_chflags.chflags()/lchflags() uniformly.
bsd_chflags.chflags = _chflags
bsd_chflags.lchflags = _lchflags
76 from portage.cache.cache_errors import CacheError
77 import portage.proxy.lazyimport
78 import portage.proxy as proxy
79 proxy.lazyimport.lazyimport(globals(),
81 'portage.checksum:perform_checksum,perform_md5,prelink_capable',
84 'portage.dep:best_match_to_list,dep_getcpv,dep_getkey,' + \
85 'get_operator,isjustname,isspecific,isvalidatom,' + \
86 'match_from_list,match_to_list',
87 'portage.eclass_cache',
90 'portage.locks:lockdir,lockfile,unlockdir,unlockfile',
92 'portage.output:bold,colorize',
94 'portage.process:atexit_register,run_exitfuncs',
95 'portage.update:dep_transform,fixdbentries,grab_updates,' + \
96 'parse_updates,update_config_files,update_dbentries,' + \
99 'portage.util:atomic_ofstream,apply_secpass_permissions,' + \
100 'apply_recursive_permissions,dump_traceback,getconfig,' + \
101 'grabdict,grabdict_package,grabfile,grabfile_package,' + \
102 'map_dictlist_vals,new_protect_filename,normalize_path,' + \
103 'pickle_read,pickle_write,stack_dictlist,stack_dicts,' + \
104 'stack_lists,unique_array,varexpand,writedict,writemsg,' + \
105 'writemsg_stdout,write_atomic',
107 'portage.versions:best,catpkgsplit,catsplit,endversion_keys,' + \
108 'suffix_value@endversion,pkgcmp,pkgsplit,vercmp,ververify',
113 from portage.const import VDB_PATH, PRIVATE_PATH, CACHE_PATH, DEPCACHE_PATH, \
114 USER_CONFIG_PATH, MODULES_FILE_PATH, CUSTOM_PROFILE_PATH, PORTAGE_BASE_PATH, \
115 PORTAGE_BIN_PATH, PORTAGE_PYM_PATH, PROFILE_PATH, LOCALE_DATA_PATH, \
116 EBUILD_SH_BINARY, SANDBOX_BINARY, BASH_BINARY, \
117 MOVE_BINARY, PRELINK_BINARY, WORLD_FILE, MAKE_CONF_FILE, MAKE_DEFAULTS_FILE, \
118 DEPRECATED_PROFILE_FILE, USER_VIRTUALS_FILE, EBUILD_SH_ENV_FILE, \
119 INVALID_ENV_FILE, CUSTOM_MIRRORS_FILE, CONFIG_MEMORY_FILE,\
120 INCREMENTALS, EAPI, MISC_SH_BINARY, REPO_NAME_LOC, REPO_NAME_FILE
122 from portage.data import ostype, lchown, userland, secpass, uid, wheelgid, \
123 portage_uid, portage_gid, userpriv_groups
124 from portage.manifest import Manifest
125 import portage.exception
126 from portage.localization import _
128 except ImportError, e:
129 sys.stderr.write("\n\n")
130 sys.stderr.write("!!! Failed to complete portage imports. There are internal modules for\n")
131 sys.stderr.write("!!! portage and failure here indicates that you have a problem with your\n")
132 sys.stderr.write("!!! installation of portage. Please try a rescue portage located in the\n")
133 sys.stderr.write("!!! portage tree under '/usr/portage/sys-apps/portage/files/' (default).\n")
134 sys.stderr.write("!!! There is a README.RESCUE file that details the steps required to perform\n")
135 sys.stderr.write("!!! a recovery of portage.\n")
136 sys.stderr.write(" "+str(e)+"\n\n")
141 import portage._selinux as selinux
143 writemsg("!!! SELinux not loaded: %s\n" % str(e), noiselevel=-1)
148 # ===========================================================================
149 # END OF IMPORTS -- END OF IMPORTS -- END OF IMPORTS -- END OF IMPORTS -- END
150 # ===========================================================================
154 modname = ".".join(name.split(".")[:-1])
155 mod = __import__(modname)
156 components = name.split('.')
157 for comp in components[1:]:
158 mod = getattr(mod, comp)
def best_from_dict(key, top_dict, key_order, EmptyOnError=1, FullCopy=1, AllowEmpty=1):
	# Look key up across the layered dicts in top_dict, honoring the
	# priority given by key_order; FullCopy selects whether the caller
	# receives a deepcopy or a direct reference.
	# NOTE(review): the loop over key_order, the FullCopy branch and the
	# EmptyOnError fallback are elided in this view.
	if x in top_dict and key in top_dict[x]:
		return copy.deepcopy(top_dict[x][key])
		return top_dict[x][key]
	raise KeyError("Key not found in list; '%s'" % key)
174 "this fixes situations where the current directory doesn't exist"
177 except OSError: #dir doesn't exist
def abssymlink(symlink):
	"This reads symlinks, resolving the relative symlinks, and returning the absolute."
	mylink=os.readlink(symlink)
	# A relative target is made absolute by prefixing the directory that
	# contains the symlink (the guard condition for this is elided in
	# this view).
	mydir=os.path.dirname(symlink)
	mylink=mydir+"/"+mylink
	return os.path.normpath(mylink)
def cacheddir(my_original_path, ignorecvs, ignorelist, EmptyOnError, followSymlinks=True):
	"""Memoizing variant of os.listdir() that also records a type code for
	each entry (regular file / directory / symlink).  Results are cached
	in the module-level dircache, keyed on the normalized path, and
	validated against the directory's mtime; cacheHit/cacheMiss/cacheStale
	count cache effectiveness."""
	global cacheHit,cacheMiss,cacheStale
	mypath = normalize_path(my_original_path)
	if mypath in dircache:
		cached_mtime, list, ftype = dircache[mypath]
		# Cache miss: start from an invalid mtime and empty lists.
		cached_mtime, list, ftype = -1, [], []
	pathstat = os.stat(mypath)
	if stat.S_ISDIR(pathstat[stat.ST_MODE]):
		mtime = pathstat.st_mtime
	raise portage.exception.DirectoryNotFound(mypath)
	except EnvironmentError, e:
		if e.errno == portage.exception.PermissionDenied.errno:
			raise portage.exception.PermissionDenied(mypath)
	except portage.exception.PortageException:
	# Python returns mtime in seconds, so if it was changed in the last few seconds, it could be invalid
	if mtime != cached_mtime or time.time() - mtime < 4:
		if mypath in dircache:
		list = os.listdir(mypath)
		except EnvironmentError, e:
			if e.errno != errno.EACCES:
			raise portage.exception.PermissionDenied(mypath)
		# Classify each entry; lstat is used instead of stat when
		# followSymlinks is disabled (branch selection elided here).
		pathstat = os.stat(mypath+"/"+x)
		pathstat = os.lstat(mypath+"/"+x)
		if stat.S_ISREG(pathstat[stat.ST_MODE]):
		elif stat.S_ISDIR(pathstat[stat.ST_MODE]):
		elif stat.S_ISLNK(pathstat[stat.ST_MODE]):
		except (IOError, OSError):
		dircache[mypath] = mtime, list, ftype
	# Filter out ignored names and editor lock files (".#" prefix).
	for x in range(0, len(list)):
		if list[x] in ignorelist:
		if list[x][:2] != ".#":
			ret_list.append(list[x])
			ret_ftype.append(ftype[x])
			ret_list.append(list[x])
			ret_ftype.append(ftype[x])
	writemsg("cacheddirStats: H:%d/M:%d/S:%d\n" % (cacheHit, cacheMiss, cacheStale),10)
	return ret_list, ret_ftype
# Directory names holding version-control metadata; skipped by listdir()
# when its ignorecvs flag is enabled.
_ignorecvs_dirs = ('CVS', 'SCCS', '.svn', '.git')
def listdir(mypath, recursive=False, filesonly=False, ignorecvs=False, ignorelist=[], followSymlinks=True,
	EmptyOnError=False, dirsonly=False):
	"""
	Portage-specific implementation of os.listdir

	@param mypath: Path whose contents you wish to list
	@param recursive: Recursively scan directories contained within mypath
	@type recursive: Boolean
	@param filesonly: Only return files, not more directories
	@type filesonly: Boolean
	@param ignorecvs: Ignore CVS directories ('CVS','SCCS','.svn','.git')
	@type ignorecvs: Boolean
	@param ignorelist: List of filenames/directories to exclude
	@type ignorelist: List
	@param followSymlinks: Follow Symlink'd files and directories
	@type followSymlinks: Boolean
	@param EmptyOnError: Return [] if an error occurs.
	@type EmptyOnError: Boolean
	@param dirsonly: Only return directories.
	@type dirsonly: Boolean
	@returns: A list of files and directories (or just files or just directories) or an empty list.
	"""
	# NOTE(review): ignorelist uses a mutable default argument; this is
	# only safe while every code path treats it as read-only.
	list, ftype = cacheddir(mypath, ignorecvs, ignorelist, EmptyOnError, followSymlinks)
	# Fast path: no filtering or recursion requested.
	if not (filesonly or dirsonly or recursive):
	# Recursive walk: descend into child directories (ftype code 1),
	# skipping VCS metadata dirs when ignorecvs is set, and prefix the
	# child entries with their parent directory name.
	if ftype[x] == 1 and not \
		(ignorecvs and os.path.basename(list[x]) in _ignorecvs_dirs):
		l,f = cacheddir(mypath+"/"+list[x], ignorecvs, ignorelist, EmptyOnError,
		for y in range(0,len(l)):
			l[y]=list[x]+"/"+l[y]
	# Final selection passes for filesonly / dirsonly results.
	for x in range(0,len(ftype)):
		rlist=rlist+[list[x]]
	for x in range(0, len(ftype)):
		rlist = rlist + [list[x]]
def flatten(mytokens):
	"""this function now turns a [1,[2,3]] list into
	a [1,2,3] list and returns it."""
	# Recursively splice nested lists into a single flat list; the loop
	# header, non-list branch and return are elided in this view.
	if isinstance(x, list):
		newlist.extend(flatten(x))
344 #beautiful directed graph object
class digraph(object):
	# Directed graph with prioritized parent/child edges, used by the
	# dependency resolver.  Each node maps to a tuple of
	# (children-dict, parents-dict, node); a single priority list object
	# is shared between the two adjacency maps for a given edge.
	# NOTE(review): the __init__ def line is elided; the docstring and
	# layout comment below belong to it.
	"""Create an empty digraph"""
	# { node : ( { child : priority } , { parent : priority } ) }

	def add(self, node, parent, priority=0):
		"""Adds the specified node with the specified parent.

		If the dep is a soft-dep and the node already has a hard
		relationship to the parent, the relationship is left as hard."""
		if node not in self.nodes:
			self.nodes[node] = ({}, {}, node)
			self.order.append(node)
		if parent not in self.nodes:
			self.nodes[parent] = ({}, {}, parent)
			self.order.append(parent)
		# One priority list is shared by the parent map of the child and
		# the child map of the parent, so both views stay in sync.
		priorities = self.nodes[node][1].get(parent)
		if priorities is None:
			self.nodes[node][1][parent] = priorities
			self.nodes[parent][0][node] = priorities
		priorities.append(priority)

	def remove(self, node):
		"""Removes the specified node from the digraph, also removing
		any ties to other nodes in the digraph. Raises KeyError if the
		node doesn't exist."""
		if node not in self.nodes:
		# Drop this node from the adjacency maps of every neighbor.
		for parent in self.nodes[node][1]:
			del self.nodes[parent][0][node]
		for child in self.nodes[node][0]:
			del self.nodes[child][1][node]
		self.order.remove(node)

	def difference_update(self, t):
		"""
		Remove all given nodes from node_set. This is more efficient
		than multiple calls to the remove() method.
		"""
		# Normalize t into a container with fast membership testing.
		if isinstance(t, (list, tuple)) or \
			not hasattr(t, "__contains__"):
		for node in self.order:
			for parent in self.nodes[node][1]:
				del self.nodes[parent][0][node]
			for child in self.nodes[node][0]:
				del self.nodes[child][1][node]

	def remove_edge(self, child, parent):
		"""
		Remove edge in the direction from child to parent. Note that it is
		possible for a remaining edge to exist in the opposite direction.
		Any endpoint vertices that become isolated will remain in the graph.
		"""
		# Nothing should be modified when a KeyError is raised.
		for k in parent, child:
			if k not in self.nodes:
		# Make sure the edge exists.
		if child not in self.nodes[parent][0]:
			raise KeyError(child)
		if parent not in self.nodes[child][1]:
			raise KeyError(parent)
		# Remove the edge from both adjacency maps.
		del self.nodes[child][1][parent]
		del self.nodes[parent][0][child]

		# NOTE(review): the enclosing "def __iter__(self):" line is
		# elided; iteration yields nodes in insertion order.
		return iter(self.order)

	def contains(self, node):
		"""Checks if the digraph contains mynode"""
		return node in self.nodes

	def get(self, key, default=None):
		# self is used as a sentinel so None can be stored as node data.
		node_data = self.nodes.get(key, self)
		if node_data is self:

		# NOTE(review): the enclosing all_nodes() def line is elided.
		"""Return a list of all nodes in the graph"""

	def child_nodes(self, node, ignore_priority=None):
		"""Return all children of the specified node"""
		if ignore_priority is None:
			return list(self.nodes[node][0])
		if hasattr(ignore_priority, '__call__'):
			# ignore_priority is a filter callable over edge priorities.
			for child, priorities in self.nodes[node][0].iteritems():
				for priority in priorities:
					if not ignore_priority(priority):
						children.append(child)
			# Otherwise ignore_priority acts as a threshold compared to
			# the highest priority of the edge.
			for child, priorities in self.nodes[node][0].iteritems():
				if ignore_priority < priorities[-1]:
					children.append(child)

	def parent_nodes(self, node, ignore_priority=None):
		"""Return all parents of the specified node"""
		if ignore_priority is None:
			return list(self.nodes[node][1])
		if hasattr(ignore_priority, '__call__'):
			for parent, priorities in self.nodes[node][1].iteritems():
				for priority in priorities:
					if not ignore_priority(priority):
						parents.append(parent)
			for parent, priorities in self.nodes[node][1].iteritems():
				if ignore_priority < priorities[-1]:
					parents.append(parent)

	def leaf_nodes(self, ignore_priority=None):
		"""Return all nodes that have no children

		If ignore_soft_deps is True, soft deps are not counted as
		children in calculations."""
		if ignore_priority is None:
			for node in self.order:
				if not self.nodes[node][0]:
					leaf_nodes.append(node)
		elif hasattr(ignore_priority, '__call__'):
			for node in self.order:
				for child, priorities in self.nodes[node][0].iteritems():
					for priority in priorities:
						if not ignore_priority(priority):
				leaf_nodes.append(node)
			for node in self.order:
				for child, priorities in self.nodes[node][0].iteritems():
					if ignore_priority < priorities[-1]:
				leaf_nodes.append(node)

	def root_nodes(self, ignore_priority=None):
		"""Return all nodes that have no parents.

		If ignore_soft_deps is True, soft deps are not counted as
		parents in calculations."""
		if ignore_priority is None:
			for node in self.order:
				if not self.nodes[node][1]:
					root_nodes.append(node)
		elif hasattr(ignore_priority, '__call__'):
			for node in self.order:
				for parent, priorities in self.nodes[node][1].iteritems():
					for priority in priorities:
						if not ignore_priority(priority):
				root_nodes.append(node)
			for node in self.order:
				for parent, priorities in self.nodes[node][1].iteritems():
					if ignore_priority < priorities[-1]:
				root_nodes.append(node)

		# NOTE(review): the enclosing def line is elided.
		"""Checks if the digraph is empty"""
		return len(self.nodes) == 0

		# NOTE(review): enclosing clone/copy method header is elided.
		# Deep-copies every node entry while sharing cloned priority
		# lists between the two adjacency maps via the memo table.
		for children, parents, node in self.nodes.itervalues():
			for child, priorities in children.iteritems():
				priorities_clone = memo.get(id(priorities))
				if priorities_clone is None:
					priorities_clone = priorities[:]
					memo[id(priorities)] = priorities_clone
				children_clone[child] = priorities_clone
			for parent, priorities in parents.iteritems():
				priorities_clone = memo.get(id(priorities))
				if priorities_clone is None:
					priorities_clone = priorities[:]
					memo[id(priorities)] = priorities_clone
				parents_clone[parent] = priorities_clone
			clone.nodes[node] = (children_clone, parents_clone, node)
		clone.order = self.order[:]

	# Backward compatibility
	allzeros = leaf_nodes
	__contains__ = contains

	def delnode(self, node):
		# NOTE(review): intervening lines are elided; the statement below
		# appears to belong to a different (elided) method.
		leaf_nodes = self.leaf_nodes()

	def hasallzeros(self, ignore_priority=None):
		# True when every node is currently a leaf (continuation of the
		# comparison is elided in this view).
		return len(self.leaf_nodes(ignore_priority=ignore_priority)) == \

	def debug_print(self):
		# Emit a human-readable dump of the graph through writemsg();
		# the nested output() helper's def line is elided here.
			writemsg(s, noiselevel=-1)
		for node in self.nodes:
			output("%s " % (node,))
			if self.nodes[node][0]:
				output("depends on\n")
				output("(no children)\n")
			for child, priorities in self.nodes[node][0].iteritems():
				output(" %s (%s)\n" % (child, priorities[-1],))
620 #parse /etc/env.d and generate /etc/profile.env
def env_update(makelinks=1, target_root=None, prev_mtimes=None, contents=None,
	env=None, writemsg_level=None):
	"""Regenerate /etc/profile.env, /etc/csh.env, ld.so.conf and
	prelink.conf from the files under ROOT/etc/env.d, then run ldconfig
	when needed.  Library-directory mtimes are tracked in prev_mtimes to
	decide whether the ld cache must be rebuilt."""
	if writemsg_level is None:
		writemsg_level = portage.util.writemsg_level
	if target_root is None:
		target_root = settings["ROOT"]
	if prev_mtimes is None:
		prev_mtimes = mtimedb["ldpath"]
	envd_dir = os.path.join(target_root, "etc", "env.d")
	portage.util.ensure_dirs(envd_dir, mode=0755)
	fns = listdir(envd_dir, EmptyOnError=1)
	# Only process files whose names start with two digits; skip hidden
	# files and editor backups.
	if not x[0].isdigit() or not x[1].isdigit():
	if x.startswith(".") or x.endswith("~") or x.endswith(".bak"):
	# Variables that are merged cumulatively (with space or colon
	# separators) instead of being overridden by later env.d files.
	space_separated = set(["CONFIG_PROTECT", "CONFIG_PROTECT_MASK"])
	colon_separated = set(["ADA_INCLUDE_PATH", "ADA_OBJECTS_PATH",
		"CLASSPATH", "INFODIR", "INFOPATH", "KDEDIRS", "LDPATH", "MANPATH",
		"PATH", "PKG_CONFIG_PATH", "PRELINK_PATH", "PRELINK_PATH_MASK",
		"PYTHONPATH", "ROOTPATH"])
	file_path = os.path.join(envd_dir, x)
	myconfig = getconfig(file_path, expand=False)
	except portage.exception.ParseError, e:
		writemsg("!!! '%s'\n" % str(e), noiselevel=-1)
		# broken symlink or file removed by a concurrent process
		writemsg("!!! File Not Found: '%s'\n" % file_path, noiselevel=-1)
	config_list.append(myconfig)
	# env.d files may extend the separator sets themselves.
	if "SPACE_SEPARATED" in myconfig:
		space_separated.update(myconfig["SPACE_SEPARATED"].split())
		del myconfig["SPACE_SEPARATED"]
	if "COLON_SEPARATED" in myconfig:
		colon_separated.update(myconfig["COLON_SEPARATED"].split())
		del myconfig["COLON_SEPARATED"]
	# Merge cumulative variables across all env.d files, preserving
	# order and dropping duplicates.
	for var in space_separated:
		for myconfig in config_list:
			for item in myconfig[var].split():
				if item and not item in mylist:
			del myconfig[var] # prepare for env.update(myconfig)
		env[var] = " ".join(mylist)
		specials[var] = mylist
	for var in colon_separated:
		for myconfig in config_list:
			for item in myconfig[var].split(":"):
				if item and not item in mylist:
			del myconfig[var] # prepare for env.update(myconfig)
		env[var] = ":".join(mylist)
		specials[var] = mylist
	for myconfig in config_list:
		"""Cumulative variables have already been deleted from myconfig so that
		they won't be overwritten by this dict.update call."""
	# Compare the existing ld.so.conf against the freshly computed LDPATH.
	ldsoconf_path = os.path.join(target_root, "etc", "ld.so.conf")
	myld = open(ldsoconf_path)
	myldlines=myld.readlines()
	#each line has at least one char (a newline)
	except (IOError, OSError), e:
		if e.errno != errno.ENOENT:
	ld_cache_update=False
	newld = specials["LDPATH"]
	#ld.so.conf needs updating and ldconfig needs to be run
	myfd = atomic_ofstream(ldsoconf_path)
	myfd.write("# ld.so.conf autogenerated by env-update; make all changes to\n")
	myfd.write("# contents of /etc/env.d directory\n")
	for x in specials["LDPATH"]:
	# Update prelink.conf if we are prelink-enabled
	newprelink = atomic_ofstream(
		os.path.join(target_root, "etc", "prelink.conf"))
	newprelink.write("# prelink.conf autogenerated by env-update; make all changes to\n")
	newprelink.write("# contents of /etc/env.d directory\n")
	for x in ["/bin","/sbin","/usr/bin","/usr/sbin","/lib","/usr/lib"]:
		newprelink.write("-l "+x+"\n");
	for x in specials["LDPATH"]+specials["PATH"]+specials["PRELINK_PATH"]:
		for y in specials["PRELINK_PATH_MASK"]:
		newprelink.write("-h "+x+"\n")
	for x in specials["PRELINK_PATH_MASK"]:
		newprelink.write("-b "+x+"\n")
	# Portage stores mtimes with 1 second granularity but in >=python-2.5 finer
	# granularity is possible. In order to avoid the potential ambiguity of
	# mtimes that differ by less than 1 second, sleep here if any of the
	# directories have been modified during the current second.
	sleep_for_mtime_granularity = False
	current_time = long(time.time())
	mtime_changed = False
	for lib_dir in portage.util.unique_array(specials["LDPATH"]+['usr/lib','usr/lib64','usr/lib32','lib','lib64','lib32']):
		x = os.path.join(target_root, lib_dir.lstrip(os.sep))
		newldpathtime = long(os.stat(x).st_mtime)
		lib_dirs.add(normalize_path(x))
		if oe.errno == errno.ENOENT:
			# ignore this path because it doesn't exist
		if newldpathtime == current_time:
			sleep_for_mtime_granularity = True
		if prev_mtimes[x] == newldpathtime:
		prev_mtimes[x] = newldpathtime
		prev_mtimes[x] = newldpathtime
	ld_cache_update = True
	# When merging a single package (contents given) and no ld path
	# changed, skip ldconfig unless the package actually installed
	# something into a library directory.
	not ld_cache_update and \
		contents is not None:
		libdir_contents_changed = False
		for mypath, mydata in contents.iteritems():
			if mydata[0] not in ("obj","sym"):
			head, tail = os.path.split(mypath)
			libdir_contents_changed = True
		if not libdir_contents_changed:
	ldconfig = "/sbin/ldconfig"
	# When cross-compiling, prefer a CHOST-prefixed ldconfig binary.
	if "CHOST" in env and "CBUILD" in env and \
		env["CHOST"] != env["CBUILD"]:
		from portage.process import find_binary
		ldconfig = find_binary("%s-ldconfig" % env["CHOST"])
	# Only run ldconfig as needed
	if (ld_cache_update or makelinks) and ldconfig:
		# ldconfig has very different behaviour between FreeBSD and Linux
		if ostype=="Linux" or ostype.lower().endswith("gnu"):
			# We can't update links if we haven't cleaned other versions first, as
			# an older package installed ON TOP of a newer version will cause ldconfig
			# to overwrite the symlinks we just made. -X means no links. After 'clean'
			# we can safely create links.
			writemsg_level(">>> Regenerating %setc/ld.so.cache...\n" % \
			os.system("cd / ; %s -r '%s'" % (ldconfig, target_root))
			os.system("cd / ; %s -X -r '%s'" % (ldconfig, target_root))
		elif ostype in ("FreeBSD","DragonFly"):
			writemsg_level(">>> Regenerating %svar/run/ld-elf.so.hints...\n" % \
			os.system(("cd / ; %s -elf -i " + \
				"-f '%svar/run/ld-elf.so.hints' '%setc/ld.so.conf'") % \
				(ldconfig, target_root, target_root))
	del specials["LDPATH"]
	penvnotice = "# THIS FILE IS AUTOMATICALLY GENERATED BY env-update.\n"
	penvnotice += "# DO NOT EDIT THIS FILE. CHANGES TO STARTUP PROFILES\n"
	cenvnotice = penvnotice[:]
	penvnotice += "# GO INTO /etc/profile NOT /etc/profile.env\n\n"
	cenvnotice += "# GO INTO /etc/csh.cshrc NOT /etc/csh.env\n\n"
	#create /etc/profile.env for bash support
	outfile = atomic_ofstream(os.path.join(target_root, "etc", "profile.env"))
	outfile.write(penvnotice)
	env_keys = [ x for x in env if x != "LDPATH" ]
	# Values starting with a literal $ use bash $'...' quoting so the
	# dollar sign survives sourcing.
	if v.startswith('$') and not v.startswith('${'):
		outfile.write("export %s=$'%s'\n" % (k, v[1:]))
		outfile.write("export %s='%s'\n" % (k, v))
	#create /etc/csh.env for (t)csh support
	outfile = atomic_ofstream(os.path.join(target_root, "etc", "csh.env"))
	outfile.write(cenvnotice)
	outfile.write("setenv %s '%s'\n" % (x, env[x]))
	if sleep_for_mtime_granularity:
		# Wait out the current second so later mtime comparisons are
		# unambiguous.
		while current_time == long(time.time()):
def ExtractKernelVersion(base_dir):
	"""
	Try to figure out what kernel version we are running
	@param base_dir: Path to sources (usually /usr/src/linux)
	@type base_dir: string
	@rtype: tuple( version[string], error[string])
	1. tuple( version[string], error[string])
	Either version or error is populated (but never both)
	"""
	pathname = os.path.join(base_dir, 'Makefile')
	f = open(pathname, 'r')
	except OSError, details:
		return (None, str(details))
	except IOError, details:
		return (None, str(details))
	# Read the leading lines of the Makefile, where the kernel version
	# components are declared.
	lines.append(f.readline())
	except OSError, details:
		return (None, str(details))
	except IOError, details:
		return (None, str(details))
	lines = [l.strip() for l in lines]
	#XXX: The following code relies on the ordering of vars within the Makefile
	# split on the '=' then remove annoying whitespace
	items = line.split("=")
	items = [i.strip() for i in items]
	if items[0] == 'VERSION' or \
		items[0] == 'PATCHLEVEL':
	elif items[0] == 'SUBLEVEL':
	elif items[0] == 'EXTRAVERSION' and \
		items[-1] != items[0]:
	# Grab a list of files named localversion* and sort them
	localversions = os.listdir(base_dir)
	for x in range(len(localversions)-1,-1,-1):
		if localversions[x][:12] != "localversion":
	# Append the contents of each to the version string, stripping ALL whitespace
	for lv in localversions:
		version += "".join( " ".join( grabfile( base_dir+ "/" + lv ) ).split() )
	# Check the .config for a CONFIG_LOCALVERSION and append that too, also stripping whitespace
	kernelconfig = getconfig(base_dir+"/.config")
	if kernelconfig and "CONFIG_LOCALVERSION" in kernelconfig:
		version += "".join(kernelconfig["CONFIG_LOCALVERSION"].split())
	return (version,None)
def autouse(myvartree, use_cache=1, mysettings=None):
	"""
	autouse returns a list of USE variables auto-enabled to packages being installed

	@param myvartree: Instance of the vartree class (from /var/db/pkg...)
	@type myvartree: vartree
	@param use_cache: read values from cache
	@type use_cache: Boolean
	@param mysettings: Instance of config
	@type mysettings: config
	@returns: A string containing a list of USE variables that are enabled via use.defaults
	"""
	if mysettings is None:
		mysettings = settings
	if mysettings.profile_path is None:
	usedefaults = mysettings.use_defs
	for myuse in usedefaults:
		for mydep in usedefaults[myuse]:
			# A use.defaults flag is only enabled when its dependency
			# atoms match installed packages.
			if not myvartree.dep_match(mydep,use_cache=True):
		myusevars += " "+myuse
def check_config_instance(test):
	"""Validate that *test* is an instance of the portage config class.

	Returns None when the argument is acceptable; otherwise raises
	TypeError naming the offending class.
	"""
	if isinstance(test, config):
		return
	raise TypeError("Invalid type for config object: %s (should be %s)" % (test.__class__, config))
def _lazy_iuse_regex(iuse_implicit):
	"""
	The PORTAGE_IUSE value is lazily evaluated since re.escape() is slow
	and the value is only used when an ebuild phase needs to be executed
	(it's used only to generate QA notices).
	"""
	# Escape anything except ".*" which is supposed to pass through from
	# _get_implicit_iuse().
	regex = sorted(re.escape(x) for x in iuse_implicit)
	regex = "^(%s)$" % "|".join(regex)
	# Undo the escaping of the ".*" wildcard entries after re.escape().
	regex = regex.replace("\\.\\*", ".*")
990 class config(object):
992 This class encompasses the main portage configuration. Data is pulled from
993 ROOT/PORTDIR/profiles/, from ROOT/etc/make.profile incrementally through all
994 parent profiles as well as from ROOT/PORTAGE_CONFIGROOT/* for user specified
997 Generally if you need data like USE flags, FEATURES, environment variables,
998 virtuals ...etc you look in here.
1002 "A", "AA", "CATEGORY", "DEPEND", "DESCRIPTION", "EAPI",
1003 "EBUILD_PHASE", "EMERGE_FROM", "HOMEPAGE", "INHERITED", "IUSE",
1004 "KEYWORDS", "LICENSE", "PDEPEND", "PF", "PKGUSE",
1005 "PORTAGE_CONFIGROOT", "PORTAGE_IUSE", "PORTAGE_REPO_NAME",
1006 "PORTAGE_USE", "PROPERTIES", "PROVIDE", "RDEPEND", "RESTRICT",
1007 "ROOT", "SLOT", "SRC_URI"
1010 _environ_whitelist = []
1012 # Whitelisted variables are always allowed to enter the ebuild
1013 # environment. Generally, this only includes special portage
1014 # variables. Ebuilds can unset variables that are not whitelisted
1015 # and rely on them remaining unset for future phases, without them
1016 # leaking back in from various locations (bug #189417). It's very
1017 # important to set our special BASH_ENV variable in the ebuild
1018 # environment in order to prevent sandbox from sourcing /etc/profile
1019 # in it's bashrc (causing major leakage).
1020 _environ_whitelist += [
1021 "BASH_ENV", "BUILD_PREFIX", "D",
1022 "DISTDIR", "DOC_SYMLINKS_DIR", "EBUILD",
1023 "EBUILD_EXIT_STATUS_FILE", "EBUILD_FORCE_TEST",
1024 "EBUILD_PHASE", "ECLASSDIR", "ECLASS_DEPTH", "EMERGE_FROM",
1025 "FEATURES", "FILESDIR", "HOME", "PATH",
1027 "PKGUSE", "PKG_LOGDIR", "PKG_TMPDIR",
1028 "PORTAGE_ACTUAL_DISTDIR", "PORTAGE_ARCHLIST",
1030 "PORTAGE_BINPKG_FILE", "PORTAGE_BINPKG_TAR_OPTS",
1031 "PORTAGE_BINPKG_TMPFILE",
1033 "PORTAGE_BUILDDIR", "PORTAGE_COLORMAP",
1034 "PORTAGE_CONFIGROOT", "PORTAGE_DEBUG", "PORTAGE_DEPCACHEDIR",
1035 "PORTAGE_GID", "PORTAGE_INST_GID", "PORTAGE_INST_UID",
1037 "PORTAGE_LOG_FILE", "PORTAGE_MASTER_PID",
1038 "PORTAGE_PYM_PATH", "PORTAGE_QUIET",
1039 "PORTAGE_REPO_NAME", "PORTAGE_RESTRICT",
1040 "PORTAGE_TMPDIR", "PORTAGE_UPDATE_ENV",
1041 "PORTAGE_VERBOSE", "PORTAGE_WORKDIR_MODE",
1042 "PORTDIR", "PORTDIR_OVERLAY", "PREROOTPATH", "PROFILE_PATHS",
1043 "ROOT", "ROOTPATH", "STARTDIR", "T", "TMP", "TMPDIR",
1044 "USE_EXPAND", "USE_ORDER", "WORKDIR",
1048 # user config variables
1049 _environ_whitelist += [
1050 "DOC_SYMLINKS_DIR", "INSTALL_MASK", "PKG_INSTALL_MASK"
1053 _environ_whitelist += [
1054 "A", "AA", "CATEGORY", "P", "PF", "PN", "PR", "PV", "PVR"
1057 # misc variables inherited from the calling environment
1058 _environ_whitelist += [
1059 "COLORTERM", "DISPLAY", "EDITOR", "LESS",
1060 "LESSOPEN", "LOGNAME", "LS_COLORS", "PAGER",
1061 "TERM", "TERMCAP", "USER",
1064 # other variables inherited from the calling environment
1065 _environ_whitelist += [
1066 "CVS_RSH", "ECHANGELOG_USER",
1068 "SSH_AGENT_PID", "SSH_AUTH_SOCK",
1069 "STY", "WINDOW", "XAUTHORITY",
1072 _environ_whitelist = frozenset(_environ_whitelist)
1074 _environ_whitelist_re = re.compile(r'^(CCACHE_|DISTCC_).*')
1076 # Filter selected variables in the config.environ() method so that
1077 # they don't needlessly propagate down into the ebuild environment.
1078 _environ_filter = []
1080 # misc variables inherited from the calling environment
1081 _environ_filter += [
1082 "INFOPATH", "MANPATH",
1085 # variables that break bash
1086 _environ_filter += [
1090 # portage config variables and variables set directly by portage
1091 _environ_filter += [
1092 "ACCEPT_KEYWORDS", "AUTOCLEAN",
1093 "CLEAN_DELAY", "COLLISION_IGNORE", "CONFIG_PROTECT",
1094 "CONFIG_PROTECT_MASK", "EMERGE_DEFAULT_OPTS",
1095 "EMERGE_WARNING_DELAY", "FETCHCOMMAND", "FETCHCOMMAND_FTP",
1096 "FETCHCOMMAND_HTTP", "FETCHCOMMAND_SFTP",
1097 "GENTOO_MIRRORS", "NOCONFMEM", "O",
1098 "PORTAGE_BACKGROUND",
1099 "PORTAGE_BINHOST_CHUNKSIZE", "PORTAGE_CALLER",
1100 "PORTAGE_COUNTER_HASH",
1101 "PORTAGE_ECLASS_WARNING_ENABLE", "PORTAGE_ELOG_CLASSES",
1102 "PORTAGE_ELOG_MAILFROM", "PORTAGE_ELOG_MAILSUBJECT",
1103 "PORTAGE_ELOG_MAILURI", "PORTAGE_ELOG_SYSTEM",
1104 "PORTAGE_FETCH_CHECKSUM_TRY_MIRRORS", "PORTAGE_FETCH_RESUME_MIN_SIZE",
1106 "PORTAGE_GPG_KEY", "PORTAGE_IONICE_COMMAND",
1107 "PORTAGE_PACKAGE_EMPTY_ABORT",
1108 "PORTAGE_RO_DISTDIRS",
1109 "PORTAGE_RSYNC_EXTRA_OPTS", "PORTAGE_RSYNC_OPTS",
1110 "PORTAGE_RSYNC_RETRIES", "PORTAGE_USE", "PORT_LOGDIR",
1111 "QUICKPKG_DEFAULT_OPTS",
1112 "RESUMECOMMAND", "RESUMECOMMAND_HTTP", "RESUMECOMMAND_HTTP",
1113 "RESUMECOMMAND_SFTP", "SYNC", "USE_EXPAND_HIDDEN", "USE_ORDER",
1116 _environ_filter = frozenset(_environ_filter)
1118 def __init__(self, clone=None, mycpv=None, config_profile_path=None,
1119 config_incrementals=None, config_root=None, target_root=None,
1120 local_config=True, env=None):
1122 @param clone: If provided, init will use deepcopy to copy by value the instance.
1123 @type clone: Instance of config class.
1124 @param mycpv: CPV to load up (see setcpv), this is the same as calling init with mycpv=None
1125 and then calling instance.setcpv(mycpv).
1127 @param config_profile_path: Configurable path to the profile (usually PROFILE_PATH from portage.const)
1128 @type config_profile_path: String
1129 @param config_incrementals: List of incremental variables
1130 (defaults to portage.const.INCREMENTALS)
1131 @type config_incrementals: List
1132 @param config_root: path to read local config from (defaults to "/", see PORTAGE_CONFIGROOT)
1133 @type config_root: String
1134 @param target_root: __init__ override of $ROOT env variable.
1135 @type target_root: String
1136 @param local_config: Enables loading of local config (/etc/portage); used most by repoman to
1137 ignore local config (keywording and unmasking)
1138 @type local_config: Boolean
1139 @param env: The calling environment which is used to override settings.
1140 Defaults to os.environ if unspecified.
1144 # When initializing the global portage.settings instance, avoid
1145 # raising exceptions whenever possible since exceptions thrown
1146 # from 'import portage' or 'import portage.exceptions' statements
1147 # can practically render the api unusable for api consumers.
1148 tolerant = "_initializing_globals" in globals()
1150 self.already_in_regenerate = 0
1155 self.modifiedkeys = []
1157 self._accept_chost_re = None
1161 self.dirVirtuals = None
1164 # Virtuals obtained from the vartree
1165 self.treeVirtuals = {}
1166 # Virtuals by user specification. Includes negatives.
1167 self.userVirtuals = {}
1168 # Virtual negatives from user specifications.
1169 self.negVirtuals = {}
1170 # Virtuals added by the depgraph via self.setinst().
1171 self._depgraphVirtuals = {}
1173 self.user_profile_dir = None
1174 self.local_config = local_config
1177 self.incrementals = copy.deepcopy(clone.incrementals)
1178 self.profile_path = copy.deepcopy(clone.profile_path)
1179 self.user_profile_dir = copy.deepcopy(clone.user_profile_dir)
1180 self.local_config = copy.deepcopy(clone.local_config)
1182 self.module_priority = copy.deepcopy(clone.module_priority)
1183 self.modules = copy.deepcopy(clone.modules)
1185 self.depcachedir = copy.deepcopy(clone.depcachedir)
1187 self.packages = copy.deepcopy(clone.packages)
1188 self.virtuals = copy.deepcopy(clone.virtuals)
1190 self.dirVirtuals = copy.deepcopy(clone.dirVirtuals)
1191 self.treeVirtuals = copy.deepcopy(clone.treeVirtuals)
1192 self.userVirtuals = copy.deepcopy(clone.userVirtuals)
1193 self.negVirtuals = copy.deepcopy(clone.negVirtuals)
1194 self._depgraphVirtuals = copy.deepcopy(clone._depgraphVirtuals)
1196 self.use_defs = copy.deepcopy(clone.use_defs)
1197 self.usemask = copy.deepcopy(clone.usemask)
1198 self.usemask_list = copy.deepcopy(clone.usemask_list)
1199 self.pusemask_list = copy.deepcopy(clone.pusemask_list)
1200 self.useforce = copy.deepcopy(clone.useforce)
1201 self.useforce_list = copy.deepcopy(clone.useforce_list)
1202 self.puseforce_list = copy.deepcopy(clone.puseforce_list)
1203 self.puse = copy.deepcopy(clone.puse)
1204 self.make_defaults_use = copy.deepcopy(clone.make_defaults_use)
1205 self.pkgprofileuse = copy.deepcopy(clone.pkgprofileuse)
1206 self.mycpv = copy.deepcopy(clone.mycpv)
1208 self.configlist = copy.deepcopy(clone.configlist)
1209 self.lookuplist = self.configlist[:]
1210 self.lookuplist.reverse()
1212 "env.d": self.configlist[0],
1213 "pkginternal": self.configlist[1],
1214 "globals": self.configlist[2],
1215 "defaults": self.configlist[3],
1216 "conf": self.configlist[4],
1217 "pkg": self.configlist[5],
1218 "auto": self.configlist[6],
1219 "backupenv": self.configlist[7],
1220 "env": self.configlist[8] }
1221 self._use_expand_dict = copy.deepcopy(clone._use_expand_dict)
1222 self.profiles = copy.deepcopy(clone.profiles)
1223 self.backupenv = self.configdict["backupenv"]
1224 self.pusedict = copy.deepcopy(clone.pusedict)
1225 self.categories = copy.deepcopy(clone.categories)
1226 self.pkeywordsdict = copy.deepcopy(clone.pkeywordsdict)
1227 self._pkeywords_list = copy.deepcopy(clone._pkeywords_list)
1228 self.pmaskdict = copy.deepcopy(clone.pmaskdict)
1229 self.punmaskdict = copy.deepcopy(clone.punmaskdict)
1230 self.prevmaskdict = copy.deepcopy(clone.prevmaskdict)
1231 self.pprovideddict = copy.deepcopy(clone.pprovideddict)
1232 self.features = copy.deepcopy(clone.features)
1234 self._accept_license = copy.deepcopy(clone._accept_license)
1235 self._plicensedict = copy.deepcopy(clone._plicensedict)
1238 def check_var_directory(varname, var):
1239 if not os.path.isdir(var):
1240 writemsg(("!!! Error: %s='%s' is not a directory. " + \
1241 "Please correct this.\n") % (varname, var),
1243 raise portage.exception.DirectoryNotFound(var)
1245 if config_root is None:
1248 config_root = normalize_path(os.path.abspath(
1249 config_root)).rstrip(os.path.sep) + os.path.sep
1251 check_var_directory("PORTAGE_CONFIGROOT", config_root)
1253 self.depcachedir = DEPCACHE_PATH
1255 if not config_profile_path:
1256 config_profile_path = \
1257 os.path.join(config_root, PROFILE_PATH.lstrip(os.path.sep))
1258 if os.path.isdir(config_profile_path):
1259 self.profile_path = config_profile_path
1261 self.profile_path = None
1263 self.profile_path = config_profile_path[:]
1265 if config_incrementals is None:
1266 self.incrementals = copy.deepcopy(portage.const.INCREMENTALS)
1268 self.incrementals = copy.deepcopy(config_incrementals)
1270 self.module_priority = ["user","default"]
1272 self.modules["user"] = getconfig(
1273 os.path.join(config_root, MODULES_FILE_PATH.lstrip(os.path.sep)))
1274 if self.modules["user"] is None:
1275 self.modules["user"] = {}
1276 self.modules["default"] = {
1277 "portdbapi.metadbmodule": "portage.cache.metadata.database",
1278 "portdbapi.auxdbmodule": "portage.cache.flat_hash.database",
1284 # back up our incremental variables:
1286 self._use_expand_dict = {}
1287 # configlist will contain: [ env.d, globals, defaults, conf, pkg, auto, backupenv, env ]
1288 self.configlist.append({})
1289 self.configdict["env.d"] = self.configlist[-1]
1291 self.configlist.append({})
1292 self.configdict["pkginternal"] = self.configlist[-1]
1294 # The symlink might not exist or might not be a symlink.
1295 if self.profile_path is None:
1299 def addProfile(currentPath):
1300 parentsFile = os.path.join(currentPath, "parent")
1301 eapi_file = os.path.join(currentPath, "eapi")
1303 eapi = open(eapi_file).readline().strip()
1307 if not eapi_is_supported(eapi):
1308 raise portage.exception.ParseError(
1309 "Profile contains unsupported " + \
1310 "EAPI '%s': '%s'" % \
1311 (eapi, os.path.realpath(eapi_file),))
1312 if os.path.exists(parentsFile):
1313 parents = grabfile(parentsFile)
1315 raise portage.exception.ParseError(
1316 "Empty parent file: '%s'" % parentsFile)
1317 for parentPath in parents:
1318 parentPath = normalize_path(os.path.join(
1319 currentPath, parentPath))
1320 if os.path.exists(parentPath):
1321 addProfile(parentPath)
1323 raise portage.exception.ParseError(
1324 "Parent '%s' not found: '%s'" % \
1325 (parentPath, parentsFile))
1326 self.profiles.append(currentPath)
1328 addProfile(os.path.realpath(self.profile_path))
1329 except portage.exception.ParseError, e:
1330 writemsg("!!! Unable to parse profile: '%s'\n" % \
1331 self.profile_path, noiselevel=-1)
1332 writemsg("!!! ParseError: %s\n" % str(e), noiselevel=-1)
1335 if local_config and self.profiles:
1336 custom_prof = os.path.join(
1337 config_root, CUSTOM_PROFILE_PATH.lstrip(os.path.sep))
1338 if os.path.exists(custom_prof):
1339 self.user_profile_dir = custom_prof
1340 self.profiles.append(custom_prof)
1343 self.packages_list = [grabfile_package(os.path.join(x, "packages")) for x in self.profiles]
1344 self.packages = stack_lists(self.packages_list, incremental=1)
1345 del self.packages_list
1346 #self.packages = grab_stacked("packages", self.profiles, grabfile, incremental_lines=1)
1349 self.prevmaskdict={}
1350 for x in self.packages:
1351 mycatpkg=dep_getkey(x)
1352 if mycatpkg not in self.prevmaskdict:
1353 self.prevmaskdict[mycatpkg]=[x]
1355 self.prevmaskdict[mycatpkg].append(x)
1357 self._pkeywords_list = []
1358 rawpkeywords = [grabdict_package(
1359 os.path.join(x, "package.keywords"), recursive=1) \
1360 for x in self.profiles]
1361 for i in xrange(len(self.profiles)):
1363 for k, v in rawpkeywords[i].iteritems():
1364 cpdict.setdefault(dep_getkey(k), {})[k] = v
1365 self._pkeywords_list.append(cpdict)
1367 # get profile-masked use flags -- INCREMENTAL Child over parent
1368 self.usemask_list = [grabfile(os.path.join(x, "use.mask"),
1369 recursive=1) for x in self.profiles]
1370 self.usemask = set(stack_lists(
1371 self.usemask_list, incremental=True))
1372 use_defs_lists = [grabdict(os.path.join(x, "use.defaults")) for x in self.profiles]
1373 self.use_defs = stack_dictlist(use_defs_lists, incremental=True)
1376 self.pusemask_list = []
1377 rawpusemask = [grabdict_package(os.path.join(x, "package.use.mask"),
1378 recursive=1) for x in self.profiles]
1379 for i in xrange(len(self.profiles)):
1381 for k, v in rawpusemask[i].iteritems():
1382 cpdict.setdefault(dep_getkey(k), {})[k] = v
1383 self.pusemask_list.append(cpdict)
1386 self.pkgprofileuse = []
1387 rawprofileuse = [grabdict_package(os.path.join(x, "package.use"),
1388 juststrings=True, recursive=1) for x in self.profiles]
1389 for i in xrange(len(self.profiles)):
1391 for k, v in rawprofileuse[i].iteritems():
1392 cpdict.setdefault(dep_getkey(k), {})[k] = v
1393 self.pkgprofileuse.append(cpdict)
1396 self.useforce_list = [grabfile(os.path.join(x, "use.force"),
1397 recursive=1) for x in self.profiles]
1398 self.useforce = set(stack_lists(
1399 self.useforce_list, incremental=True))
1401 self.puseforce_list = []
1402 rawpuseforce = [grabdict_package(
1403 os.path.join(x, "package.use.force"), recursive=1) \
1404 for x in self.profiles]
1405 for i in xrange(len(self.profiles)):
1407 for k, v in rawpuseforce[i].iteritems():
1408 cpdict.setdefault(dep_getkey(k), {})[k] = v
1409 self.puseforce_list.append(cpdict)
1412 make_conf = getconfig(
1413 os.path.join(config_root, MAKE_CONF_FILE.lstrip(os.path.sep)),
1414 tolerant=tolerant, allow_sourcing=True)
1415 if make_conf is None:
1418 # Allow ROOT setting to come from make.conf if it's not overridden
1419 # by the constructor argument (from the calling environment).
1420 if target_root is None and "ROOT" in make_conf:
1421 target_root = make_conf["ROOT"]
1422 if not target_root.strip():
1424 if target_root is None:
1427 target_root = normalize_path(os.path.abspath(
1428 target_root)).rstrip(os.path.sep) + os.path.sep
1430 portage.util.ensure_dirs(target_root)
1431 check_var_directory("ROOT", target_root)
1433 # The expand_map is used for variable substitution
1434 # in getconfig() calls, and the getconfig() calls
1435 # update expand_map with the value of each variable
1436 # assignment that occurs. Variable substitution occurs
1437 # in the following order, which corresponds to the
1438 # order of appearance in self.lookuplist:
1445 # Notably absent is "env", since we want to avoid any
1446 # interaction with the calling environment that might
1447 # lead to unexpected results.
1450 env_d = getconfig(os.path.join(target_root, "etc", "profile.env"),
1452 # env_d will be None if profile.env doesn't exist.
1454 self.configdict["env.d"].update(env_d)
1455 expand_map.update(env_d)
1457 # backupenv is used for calculating incremental variables.
1460 self.backupenv = env.copy()
1463 # Remove duplicate values so they don't override updated
1464 # profile.env values later (profile.env is reloaded in each
1465 # call to self.regenerate).
1466 for k, v in env_d.iteritems():
1468 if self.backupenv[k] == v:
1469 del self.backupenv[k]
1474 self.configdict["env"] = util.LazyItemsDict(self.backupenv)
1476 # make.globals should not be relative to config_root
1477 # because it only contains constants.
1478 for x in (portage.const.GLOBAL_CONFIG_PATH, "/etc"):
1479 self.mygcfg = getconfig(os.path.join(x, "make.globals"),
1484 if self.mygcfg is None:
1487 self.configlist.append(self.mygcfg)
1488 self.configdict["globals"]=self.configlist[-1]
1490 self.make_defaults_use = []
1493 mygcfg_dlists = [getconfig(os.path.join(x, "make.defaults"),
1494 expand=expand_map) for x in self.profiles]
1496 for cfg in mygcfg_dlists:
1498 self.make_defaults_use.append(cfg.get("USE", ""))
1500 self.make_defaults_use.append("")
1501 self.mygcfg = stack_dicts(mygcfg_dlists,
1502 incrementals=portage.const.INCREMENTALS, ignore_none=1)
1503 if self.mygcfg is None:
1505 self.configlist.append(self.mygcfg)
1506 self.configdict["defaults"]=self.configlist[-1]
1508 self.mygcfg = getconfig(
1509 os.path.join(config_root, MAKE_CONF_FILE.lstrip(os.path.sep)),
1510 tolerant=tolerant, allow_sourcing=True, expand=expand_map)
1511 if self.mygcfg is None:
1514 # Don't allow the user to override certain variables in make.conf
1515 profile_only_variables = self.configdict["defaults"].get(
1516 "PROFILE_ONLY_VARIABLES", "").split()
1517 for k in profile_only_variables:
1518 self.mygcfg.pop(k, None)
1520 self.configlist.append(self.mygcfg)
1521 self.configdict["conf"]=self.configlist[-1]
1523 self.configlist.append(util.LazyItemsDict())
1524 self.configdict["pkg"]=self.configlist[-1]
1527 self.configlist.append({})
1528 self.configdict["auto"]=self.configlist[-1]
1530 self.configlist.append(self.backupenv) # XXX Why though?
1531 self.configdict["backupenv"]=self.configlist[-1]
1533 # Don't allow the user to override certain variables in the env
1534 for k in profile_only_variables:
1535 self.backupenv.pop(k, None)
1537 self.configlist.append(self.configdict["env"])
1539 # make lookuplist for loading package.*
1540 self.lookuplist=self.configlist[:]
1541 self.lookuplist.reverse()
1543 # Blacklist vars that could interfere with portage internals.
1544 for blacklisted in self._env_blacklist:
1545 for cfg in self.lookuplist:
1546 cfg.pop(blacklisted, None)
1547 del blacklisted, cfg
1549 self["PORTAGE_CONFIGROOT"] = config_root
1550 self.backup_changes("PORTAGE_CONFIGROOT")
1551 self["ROOT"] = target_root
1552 self.backup_changes("ROOT")
1555 self.pkeywordsdict = {}
1556 self._plicensedict = {}
1557 self.punmaskdict = {}
1558 abs_user_config = os.path.join(config_root,
1559 USER_CONFIG_PATH.lstrip(os.path.sep))
1561 # locations for "categories" and "arch.list" files
1562 locations = [os.path.join(self["PORTDIR"], "profiles")]
1563 pmask_locations = [os.path.join(self["PORTDIR"], "profiles")]
1564 pmask_locations.extend(self.profiles)
1566 """ repoman controls PORTDIR_OVERLAY via the environment, so no
1567 special cases are needed here."""
1568 overlay_profiles = []
1569 for ov in self["PORTDIR_OVERLAY"].split():
1570 ov = normalize_path(ov)
1571 profiles_dir = os.path.join(ov, "profiles")
1572 if os.path.isdir(profiles_dir):
1573 overlay_profiles.append(profiles_dir)
1574 locations += overlay_profiles
1576 pmask_locations.extend(overlay_profiles)
1579 locations.append(abs_user_config)
1580 pmask_locations.append(abs_user_config)
1581 pusedict = grabdict_package(
1582 os.path.join(abs_user_config, "package.use"), recursive=1)
1583 for key in pusedict.keys():
1584 cp = dep_getkey(key)
1585 if cp not in self.pusedict:
1586 self.pusedict[cp] = {}
1587 self.pusedict[cp][key] = pusedict[key]
1590 pkgdict = grabdict_package(
1591 os.path.join(abs_user_config, "package.keywords"),
1593 for key in pkgdict.keys():
1594 # default to ~arch if no specific keyword is given
1595 if not pkgdict[key]:
1597 if self.configdict["defaults"] and \
1598 "ACCEPT_KEYWORDS" in self.configdict["defaults"]:
1599 groups = self.configdict["defaults"]["ACCEPT_KEYWORDS"].split()
1602 for keyword in groups:
1603 if not keyword[0] in "~-":
1604 mykeywordlist.append("~"+keyword)
1605 pkgdict[key] = mykeywordlist
1606 cp = dep_getkey(key)
1607 if cp not in self.pkeywordsdict:
1608 self.pkeywordsdict[cp] = {}
1609 self.pkeywordsdict[cp][key] = pkgdict[key]
1612 licdict = grabdict_package(os.path.join(
1613 abs_user_config, "package.license"), recursive=1)
1614 for k, v in licdict.iteritems():
1616 cp_dict = self._plicensedict.get(cp)
1619 self._plicensedict[cp] = cp_dict
1620 cp_dict[k] = self.expandLicenseTokens(v)
1622 #getting categories from an external file now
1623 categories = [grabfile(os.path.join(x, "categories")) for x in locations]
1624 self.categories = tuple(sorted(
1625 stack_lists(categories, incremental=1)))
1628 archlist = [grabfile(os.path.join(x, "arch.list")) for x in locations]
1629 archlist = stack_lists(archlist, incremental=1)
1630 self.configdict["conf"]["PORTAGE_ARCHLIST"] = " ".join(archlist)
1632 # package.mask and package.unmask
1635 for x in pmask_locations:
1636 pkgmasklines.append(grabfile_package(
1637 os.path.join(x, "package.mask"), recursive=1))
1638 pkgunmasklines.append(grabfile_package(
1639 os.path.join(x, "package.unmask"), recursive=1))
1640 pkgmasklines = stack_lists(pkgmasklines, incremental=1)
1641 pkgunmasklines = stack_lists(pkgunmasklines, incremental=1)
1644 for x in pkgmasklines:
1645 mycatpkg=dep_getkey(x)
1646 if mycatpkg in self.pmaskdict:
1647 self.pmaskdict[mycatpkg].append(x)
1649 self.pmaskdict[mycatpkg]=[x]
1651 for x in pkgunmasklines:
1652 mycatpkg=dep_getkey(x)
1653 if mycatpkg in self.punmaskdict:
1654 self.punmaskdict[mycatpkg].append(x)
1656 self.punmaskdict[mycatpkg]=[x]
1658 pkgprovidedlines = [grabfile(os.path.join(x, "package.provided")) for x in self.profiles]
1659 pkgprovidedlines = stack_lists(pkgprovidedlines, incremental=1)
1660 has_invalid_data = False
1661 for x in range(len(pkgprovidedlines)-1, -1, -1):
1662 myline = pkgprovidedlines[x]
1663 if not isvalidatom("=" + myline):
1664 writemsg("Invalid package name in package.provided:" + \
1665 " %s\n" % myline, noiselevel=-1)
1666 has_invalid_data = True
1667 del pkgprovidedlines[x]
1669 cpvr = catpkgsplit(pkgprovidedlines[x])
1670 if not cpvr or cpvr[0] == "null":
1671 writemsg("Invalid package name in package.provided: "+pkgprovidedlines[x]+"\n",
1673 has_invalid_data = True
1674 del pkgprovidedlines[x]
1676 if cpvr[0] == "virtual":
1677 writemsg("Virtual package in package.provided: %s\n" % \
1678 myline, noiselevel=-1)
1679 has_invalid_data = True
1680 del pkgprovidedlines[x]
1682 if has_invalid_data:
1683 writemsg("See portage(5) for correct package.provided usage.\n",
1685 self.pprovideddict = {}
1686 for x in pkgprovidedlines:
1690 mycatpkg=dep_getkey(x)
1691 if mycatpkg in self.pprovideddict:
1692 self.pprovideddict[mycatpkg].append(x)
1694 self.pprovideddict[mycatpkg]=[x]
1696 # parse licensegroups
1697 self._license_groups = {}
1699 self._license_groups.update(
1700 grabdict(os.path.join(x, "license_groups")))
1702 # reasonable defaults; this is important as without USE_ORDER,
1703 # USE will always be "" (nothing set)!
1704 if "USE_ORDER" not in self:
1705 self.backupenv["USE_ORDER"] = "env:pkg:conf:defaults:pkginternal:env.d"
1707 self["PORTAGE_GID"] = str(portage_gid)
1708 self.backup_changes("PORTAGE_GID")
1710 if self.get("PORTAGE_DEPCACHEDIR", None):
1711 self.depcachedir = self["PORTAGE_DEPCACHEDIR"]
1712 self["PORTAGE_DEPCACHEDIR"] = self.depcachedir
1713 self.backup_changes("PORTAGE_DEPCACHEDIR")
1715 overlays = self.get("PORTDIR_OVERLAY","").split()
1719 ov = normalize_path(ov)
1720 if os.path.isdir(ov):
1723 writemsg("!!! Invalid PORTDIR_OVERLAY" + \
1724 " (not a dir): '%s'\n" % ov, noiselevel=-1)
1725 self["PORTDIR_OVERLAY"] = " ".join(new_ov)
1726 self.backup_changes("PORTDIR_OVERLAY")
1728 if "CBUILD" not in self and "CHOST" in self:
1729 self["CBUILD"] = self["CHOST"]
1730 self.backup_changes("CBUILD")
1732 self["PORTAGE_BIN_PATH"] = PORTAGE_BIN_PATH
1733 self.backup_changes("PORTAGE_BIN_PATH")
1734 self["PORTAGE_PYM_PATH"] = PORTAGE_PYM_PATH
1735 self.backup_changes("PORTAGE_PYM_PATH")
1737 # Expand license groups
1738 # This has to do be done for each config layer before regenerate()
1739 # in order for incremental negation to work properly.
1741 for c in self.configdict.itervalues():
1742 v = c.get("ACCEPT_LICENSE")
1745 v = " ".join(self.expandLicenseTokens(v.split()))
1746 c["ACCEPT_LICENSE"] = v
1749 for var in ("PORTAGE_INST_UID", "PORTAGE_INST_GID"):
1751 self[var] = str(int(self.get(var, "0")))
1753 writemsg(("!!! %s='%s' is not a valid integer. " + \
1754 "Falling back to '0'.\n") % (var, self[var]),
1757 self.backup_changes(var)
1759 # initialize self.features
1763 self._accept_license = \
1764 set(self.get("ACCEPT_LICENSE", "").split())
1765 # In order to enforce explicit acceptance for restrictive
1766 # licenses that require it, "*" will not be allowed in the
1767 # user config. Don't enforce this until license groups are
1768 # fully implemented in the tree.
1769 #self._accept_license.discard("*")
1770 if not self._accept_license:
1771 self._accept_license = set(["*"])
1773 # repoman will accept any license
1774 self._accept_license = set(["*"])
1776 if not portage.process.sandbox_capable and \
1777 ("sandbox" in self.features or "usersandbox" in self.features):
1778 if self.profile_path is not None and \
1779 os.path.realpath(self.profile_path) == \
1780 os.path.realpath(PROFILE_PATH):
1781 """ Don't show this warning when running repoman and the
1782 sandbox feature came from a profile that doesn't belong to
1784 writemsg(colorize("BAD", "!!! Problem with sandbox" + \
1785 " binary. Disabling...\n\n"), noiselevel=-1)
1786 if "sandbox" in self.features:
1787 self.features.remove("sandbox")
1788 if "usersandbox" in self.features:
1789 self.features.remove("usersandbox")
1791 self["FEATURES"] = " ".join(sorted(self.features))
1792 self.backup_changes("FEATURES")
def _init_dirs(self):
    # NOTE(review): several source lines of this method are elided in
    # this view (the docstring delimiters, the dir_mode_map dict header
    # and closing brace, an early return, and a try: line), so the text
    # below is not syntactically complete; code tokens are left as-is.
    """
    Create a few directories that are critical to portage operation
    """
    # Nothing below ROOT can be created unless ROOT is writable.
    if not os.access(self["ROOT"], os.W_OK):
    # gid, mode, mask, preserve_perms
    # Values are Python 2 octal literals (01777 = sticky + world
    # writable, the conventional mode for tmp directories).
    "tmp" : ( -1, 01777, 0, True),
    "var/tmp" : ( -1, 01777, 0, True),
    PRIVATE_PATH : ( portage_gid, 02750, 02, False),
    CACHE_PATH.lstrip(os.path.sep) : (portage_gid, 0755, 02, False)
    for mypath, (gid, mode, modemask, preserve_perms) \
        in dir_mode_map.iteritems():
        # Each entry is created relative to ROOT.
        mydir = os.path.join(self["ROOT"], mypath)
        if preserve_perms and os.path.isdir(mydir):
            # Only adjust permissions on some directories if
            # they don't exist yet. This gives freedom to the
            # user to adjust permissions to suit their taste.
            portage.util.ensure_dirs(mydir, gid=gid, mode=mode, mask=modemask)
        except portage.exception.PortageException, e:
            # Failure to create a directory is reported, not fatal.
            writemsg("!!! Directory initialization failed: '%s'\n" % mydir,
            writemsg("!!! %s\n" % str(e),
def expandLicenseTokens(self, tokens):
    """ Take a token from ACCEPT_LICENSE or package.license and expand it
    if it's a group token (indicated by @) or just return it if it's not a
    group. If a group is negated then negate all group elements.

    @param tokens: iterable of license tokens (plain names, @group
        names, optionally "-" prefixed for negation)
    @return: flat list of expanded tokens
    """
    expanded_tokens = []
    # Bug fix: the previous code called self._expandLicenseToken(x, None)
    # without ever binding x (the loop header was missing), which would
    # raise NameError. Expand each incoming token in order; recursion
    # into nested groups is handled by _expandLicenseToken itself.
    for x in tokens:
        expanded_tokens.extend(self._expandLicenseToken(x, None))
    return expanded_tokens
def _expandLicenseToken(self, token, traversed_groups):
    # Recursively expand one ACCEPT_LICENSE / package.license token.
    # A leading "-" marks negation; a leading "@" names a license group
    # looked up in self._license_groups. traversed_groups guards against
    # circular group references.
    # NOTE(review): several lines are elided in this view (the rValue
    # initialization, else:/elif: headers, a noiselevel argument and the
    # final return), so the text below is not syntactically complete.
    if token.startswith("-"):
        # Strip the negation prefix; it is re-applied to every expanded
        # element at the end of the method.
        license_name = token[1:]
    license_name = token
    if not license_name.startswith("@"):
        # Plain license name: nothing to expand.
        rValue.append(token)
    group_name = license_name[1:]
    if not traversed_groups:
        traversed_groups = set()
    license_group = self._license_groups.get(group_name)
    if group_name in traversed_groups:
        # Cycle detected: warn and keep the literal group token.
        writemsg(("Circular license group reference" + \
            " detected in '%s'\n") % group_name, noiselevel=-1)
        rValue.append("@"+group_name)
    traversed_groups.add(group_name)
    for l in license_group:
        if l.startswith("-"):
            # Negated members inside a group definition are invalid.
            writemsg(("Skipping invalid element %s" + \
                " in license group '%s'\n") % (l, group_name),
        # Recurse so nested @groups expand too.
        rValue.extend(self._expandLicenseToken(l, traversed_groups))
    # Unknown group: warn and keep the literal token.
    writemsg("Undefined license group '%s'\n" % group_name,
    rValue.append("@"+group_name)
    # Re-apply negation to every expanded element.
    rValue = ["-" + token for token in rValue]
# NOTE(review): the def line of this method is elided in this view;
# judging by the docstring this is the body of config.validate() —
# confirm against the full source. Some writemsg() calls below are also
# missing their trailing argument lines.
"""Validate miscellaneous settings and display warnings if necessary.
(This code was previously in the global scope of portage.py)"""

groups = self["ACCEPT_KEYWORDS"].split()
archlist = self.archlist()
    # An empty arch list usually indicates an empty/missing tree.
    writemsg("--- 'profiles/arch.list' is empty or " + \
        "not available. Empty portage tree?\n", noiselevel=1)
for group in groups:
    # Valid entries: a known arch, "-arch" negation, or one of the
    # wildcards "*", "~*", "**".
    if group not in archlist and \
        not (group.startswith("-") and group[1:] in archlist) and \
        group not in ("*", "~*", "**"):
        writemsg("!!! INVALID ACCEPT_KEYWORDS: %s\n" % str(group),

# Warn when make.profile is not the expected symlink into
# $PORTDIR/profiles (harmless during an initial sync).
abs_profile_path = os.path.join(self["PORTAGE_CONFIGROOT"],
    PROFILE_PATH.lstrip(os.path.sep))
if not self.profile_path or (not os.path.islink(abs_profile_path) and \
    not os.path.exists(os.path.join(abs_profile_path, "parent")) and \
    os.path.exists(os.path.join(self["PORTDIR"], "profiles"))):
    writemsg("\a\n\n!!! %s is not a symlink and will probably prevent most merges.\n" % abs_profile_path,
    writemsg("!!! It should point into a profile within %s/profiles/\n" % self["PORTDIR"])
    writemsg("!!! (You can safely ignore this message when syncing. It's harmless.)\n\n\n")

# /etc/portage/virtuals was superseded by /etc/portage/profile/virtuals.
abs_user_virtuals = os.path.join(self["PORTAGE_CONFIGROOT"],
    USER_VIRTUALS_FILE.lstrip(os.path.sep))
if os.path.exists(abs_user_virtuals):
    writemsg("\n!!! /etc/portage/virtuals is deprecated in favor of\n")
    writemsg("!!! /etc/portage/profile/virtuals. Please move it to\n")
    writemsg("!!! this new location.\n\n")

# FEATURES=fakeroot is useless without the fakeroot binary installed.
if "fakeroot" in self.features and \
    not portage.process.fakeroot_capable:
    writemsg("!!! FEATURES=fakeroot is enabled, but the " + \
        "fakeroot binary is not installed.\n", noiselevel=-1)
def loadVirtuals(self,root):
    """Deprecated entry point kept only for API compatibility; portage
    itself no longer calls this. Emits a deprecation notice and then
    delegates to getvirtuals()."""
    # Warn first so callers see the deprecation even if getvirtuals
    # raises.
    writemsg("DEPRECATED: portage.config.loadVirtuals\n")
    self.getvirtuals(root)
def load_best_module(self,property_string):
    # Resolve property_string (e.g. "portdbapi.auxdbmodule") to a module
    # path via self.modules, honoring self.module_priority ("user"
    # entries win over "default"), then import and return that module.
    # NOTE(review): the try/except lines around the first load_mod()
    # call and the return are elided in this view.
    best_mod = best_from_dict(property_string,self.modules,self.module_priority)
    mod = load_mod(best_mod)
    if best_mod.startswith("cache."):
        # Backward compatibility: old configs referenced the cache
        # modules without the "portage." package prefix.
        best_mod = "portage." + best_mod
        mod = load_mod(best_mod)
def modifying(self):
    # Guard invoked by mutating methods before changing state.
    # NOTE(review): the condition line is elided in this view; in the
    # full source the raise is reached only while the config instance
    # is locked — confirm against the complete file.
    raise Exception("Configuration is locked.")
def backup_changes(self,key=None):
    # Persist the current value of configdict["env"][key] into
    # self.backupenv so the change survives a subsequent reset().
    # NOTE(review): the else: header before the raise is elided in this
    # view; the KeyError applies only when key is missing or None.
    if key and key in self.configdict["env"]:
        self.backupenv[key] = copy.deepcopy(self.configdict["env"][key])
    raise KeyError("No such key defined in environment: %s" % key)
def reset(self,keeping_pkg=0,use_cache=1):
    """
    Restore environment from self.backupenv, call self.regenerate()
    @param keeping_pkg: Should we keep the set_cpv() data or delete it.
    @type keeping_pkg: Boolean
    @param use_cache: Should self.regenerate use the cache or not
    @type use_cache: Boolean
    """
    # NOTE(review): a few lines are elided in this view, including the
    # "if not keeping_pkg:" header that guards the pkg/pkginternal
    # clearing below.
    self.configdict["env"].clear()
    self.configdict["env"].update(self.backupenv)
    self.modifiedkeys = []
    # Drop per-package state and restore profile-level USE defaults,
    # use.mask and use.force before regenerating incrementals.
    self.configdict["pkg"].clear()
    self.configdict["pkginternal"].clear()
    self.configdict["defaults"]["USE"] = \
        " ".join(self.make_defaults_use)
    self.usemask = set(stack_lists(
        self.usemask_list, incremental=True))
    self.useforce = set(stack_lists(
        self.useforce_list, incremental=True))
    self.regenerate(use_cache=use_cache)
def load_infodir(self,infodir):
    # Deprecated shim: only emits a DeprecationWarning.
    # NOTE(review): the remaining arguments of warnings.warn()
    # (warning category, stacklevel) are elided in this view.
    warnings.warn("portage.config.load_infodir() is deprecated",
class _lazy_use_expand(object):
    """
    Lazily evaluate USE_EXPAND variables since they are only needed when
    an ebuild shell is spawned. Variables values are made consistent with
    the previously calculated USE settings.
    """

    def __init__(self, use, usemask, iuse_implicit,
        use_expand_split, use_expand_dict):
        # NOTE(review): the assignment of the `use` parameter
        # (self._use) is elided in this view, although __getitem__
        # reads self._use.
        self._usemask = usemask
        self._iuse_implicit = iuse_implicit
        self._use_expand_split = use_expand_split
        self._use_expand_dict = use_expand_dict

    def __getitem__(self, key):
        # Compute the value of USE_EXPAND variable `key` (e.g. LINGUAS)
        # from the enabled USE flags carrying the "<key>_" prefix.
        # NOTE(review): several lines are elided in this view (the
        # has_iuse initialization, if-headers around the wildcard
        # handling, the remaining-order loop and the final return), so
        # the text below is not syntactically complete.
        prefix = key.lower() + '_'
        prefix_len = len(prefix)
        # Flags already enabled for this variable, with the prefix
        # stripped (slice comparison instead of startswith for speed).
        expand_flags = set( x[prefix_len:] for x in self._use \
            if x[:prefix_len] == prefix )
        var_split = self._use_expand_dict.get(key, '').split()
        # Preserve the order of var_split because it can matter for things
        var_split = [ x for x in var_split if x in expand_flags ]
        var_split.extend(expand_flags.difference(var_split))
        has_wildcard = '*' in expand_flags
            # The literal "*" is consumed here and handled below.
            var_split = [ x for x in var_split if x != "*" ]
        # Collect every IUSE member belonging to this variable.
        for x in self._iuse_implicit:
            if x[:prefix_len] == prefix:
                has_iuse.add(x[prefix_len:])
        # * means to enable everything in IUSE that's not masked
            usemask = self._usemask
            for suffix in has_iuse:
                    if x not in usemask:
                        if suffix not in expand_flags:
                            var_split.append(suffix)
        # If there is a wildcard and no matching flags in IUSE then
        # LINGUAS should be unset so that all .mo files are
        # Make the flags unique and filter them according to IUSE.
        # Also, continue to preserve order for things like LINGUAS
        # and filter any duplicates that variable may contain.
        filtered_var_split = []
        remaining = has_iuse.intersection(var_split)
            filtered_var_split.append(x)
        var_split = filtered_var_split

        value = ' '.join(var_split)
        # Don't export empty USE_EXPAND vars unless the user config
        # exports them as empty. This is required for vars such as
        # LINGUAS, where unset and empty have different meanings.
        # ebuild.sh will see this and unset the variable so
        # that things like LINGUAS work properly
        # It's not in IUSE, so just allow the variable content
        # to pass through if it is defined somewhere. This
        # allows packages that support LINGUAS but don't
        # declare it in IUSE to use the variable outside of the
        # USE_EXPAND context.
def setcpv(self, mycpv, use_cache=1, mydb=None):
    """
    Load a particular CPV into the config, this lets us see the
    Default USE flags for a particular ebuild as well as the USE
    flags from package.use.

    @param mycpv: A cpv to load
    @param use_cache: Enables caching
    @type use_cache: Boolean
    @param mydb: a dbapi instance that supports aux_get with the IUSE key.
    @type mydb: dbapi or derivative.
    """
    # NOTE(review): many lines of this method are elided in this view
    # (early returns, several if/else headers, initializations such as
    # pkginternaluse/defaults/pos/oldpuse/has_changed, and some trailing
    # call arguments); code tokens below are left exactly as-is.
    if not isinstance(mycpv, basestring):
    # Fast path: nothing to do if this cpv is already loaded.
    if self.mycpv == mycpv:
    cat, pf = catsplit(mycpv)
    cp = dep_getkey(mycpv)
    cpv_slot = self.mycpv
    env_configdict = self.configdict["env"]
    pkg_configdict = self.configdict["pkg"]
    previous_iuse = pkg_configdict.get("IUSE")
    pkg_configdict["CATEGORY"] = cat
    pkg_configdict["PF"] = pf
    # Pull metadata either from a plain mapping or via dbapi.aux_get().
    if not hasattr(mydb, "aux_get"):
        pkg_configdict.update(mydb)
    aux_keys = [k for k in auxdbkeys \
        if not k.startswith("UNUSED_")]
    aux_keys.append("repository")
    for k, v in izip(aux_keys, mydb.aux_get(self.mycpv, aux_keys)):
        pkg_configdict[k] = v
    repository = pkg_configdict.pop("repository", None)
    if repository is not None:
        pkg_configdict["PORTAGE_REPO_NAME"] = repository
    # Package metadata shadows same-named environment variables.
    for k in pkg_configdict:
        env_configdict.pop(k, None)
    slot = pkg_configdict["SLOT"]
    iuse = pkg_configdict["IUSE"]
    cpv_slot = "%s:%s" % (self.mycpv, slot)
    # IUSE defaults: "+flag" enables and "-flag" disables in the
    # pkginternal config layer.
    for x in iuse.split():
        if x.startswith("+"):
            pkginternaluse.append(x[1:])
        elif x.startswith("-"):
            pkginternaluse.append(x)
    pkginternaluse = " ".join(pkginternaluse)
    if pkginternaluse != self.configdict["pkginternal"].get("USE", ""):
        self.configdict["pkginternal"]["USE"] = pkginternaluse
    # Interleave per-profile package.use entries with each profile's
    # make.defaults USE, best-matching atoms applied in order.
    for i in xrange(len(self.profiles)):
        cpdict = self.pkgprofileuse[i].get(cp, None)
        keys = cpdict.keys()
        bestmatch = best_match_to_list(cpv_slot, keys)
        keys.remove(bestmatch)
        defaults.insert(pos, cpdict[bestmatch])
        if self.make_defaults_use[i]:
            defaults.insert(pos, self.make_defaults_use[i])
    defaults = " ".join(defaults)
    if defaults != self.configdict["defaults"].get("USE",""):
        self.configdict["defaults"]["USE"] = defaults
    # Recompute per-package use.force / use.mask for this cpv:slot.
    useforce = self._getUseForce(cpv_slot)
    if useforce != self.useforce:
        self.useforce = useforce
    usemask = self._getUseMask(cpv_slot)
    if usemask != self.usemask:
        self.usemask = usemask
    # User package.use: the best-matching atom's flags are appended
    # last so they take precedence.
    cpdict = self.pusedict.get(cp)
    keys = cpdict.keys()
    self.pusekey = best_match_to_list(cpv_slot, keys)
    keys.remove(self.pusekey)
    self.puse = (" ".join(cpdict[self.pusekey])) + " " + self.puse
    if oldpuse != self.puse:
    self.configdict["pkg"]["PKGUSE"] = self.puse[:] # For saving to PUSE file
    self.configdict["pkg"]["USE"] = self.puse[:] # this gets appended to USE
    self.reset(keeping_pkg=1,use_cache=use_cache)

    # If reset() has not been called, it's safe to return
    # early if IUSE has not changed.
    if not has_changed and previous_iuse == iuse:

    # Filter out USE flags that aren't part of IUSE. This has to
    # be done for every setcpv() call since practically every
    # package has different IUSE.
    use = set(self["USE"].split())
    iuse_implicit = self._get_implicit_iuse()
    iuse_implicit.update(x.lstrip("+-") for x in iuse.split())

    # PORTAGE_IUSE is not always needed so it's lazily evaluated.
    self.configdict["pkg"].addLazySingleton(
        "PORTAGE_IUSE", _lazy_iuse_regex, iuse_implicit)

    ebuild_force_test = self.get("EBUILD_FORCE_TEST") == "1"
    if ebuild_force_test and \
        not hasattr(self, "_ebuild_force_test_msg_shown"):
        # Emit the notice only once per config instance.
        self._ebuild_force_test_msg_shown = True
        writemsg("Forcing test.\n", noiselevel=-1)
    if "test" in self.features and "test" in iuse_implicit:
        if "test" in self.usemask and not ebuild_force_test:
            # "test" is in IUSE and USE=test is masked, so execution
            # of src_test() probably is not reliable. Therefore,
            # temporarily disable FEATURES=test just for this package.
            self["FEATURES"] = " ".join(x for x in self.features \
        if ebuild_force_test:
            self.usemask.discard("test")

    # Allow _* flags from USE_EXPAND wildcards to pass through here.
    use.difference_update([x for x in use \
        if x not in iuse_implicit and x[-2:] != '_*'])

    # Use the calculated USE flags to regenerate the USE_EXPAND flags so
    # that they are consistent. For optimal performance, use slice
    # comparison instead of startswith().
    use_expand_split = self.get("USE_EXPAND", "").split()
    lazy_use_expand = self._lazy_use_expand(use, self.usemask,
        iuse_implicit, use_expand_split, self._use_expand_dict)
    use_expand_iuse = set()
    for key in use_expand_split:
        prefix = key.lower() + '_'
        prefix_len = len(prefix)
        expand_flags = set( x[prefix_len:] for x in use \
            if x[:prefix_len] == prefix )
        use_expand_iuse.clear()
        for x in iuse_implicit:
            if x[:prefix_len] == prefix:
                use_expand_iuse.add(x)
        # * means to enable everything in IUSE that's not masked
        if use_expand_iuse and '*' in expand_flags:
            for x in use_expand_iuse:
                if x not in usemask:
        # Defer the actual USE_EXPAND value computation until the
        # variable is first read (see _lazy_use_expand.__getitem__).
        self.configdict['env'].addLazySingleton(
            key, lazy_use_expand.__getitem__, key)
        # It's not in IUSE, so just allow the variable content
        # to pass through if it is defined somewhere. This
        # allows packages that support LINGUAS but don't
        # declare it in IUSE to use the variable outside of the
        # USE_EXPAND context.

    # Filtered for the ebuild environment. Store this in a separate
    # attribute since we still want to be able to see global USE
    # settings for things like emerge --info.
    self.configdict["pkg"]["PORTAGE_USE"] = \
        " ".join(sorted(x for x in use if x[-2:] != '_*'))
    def _get_implicit_iuse(self):
        """
        Return the set of flags treated as implicit members of IUSE:
        * Flags derived from ARCH
        * Flags derived from USE_EXPAND_HIDDEN variables
        * Masked flags, such as those from {,package}use.mask
        * Forced flags, such as those from {,package}use.force
        * build and bootstrap flags used by bootstrap.sh
        """
        iuse_implicit = set()
        # Flags derived from ARCH.
        arch = self.configdict["defaults"].get("ARCH")
        iuse_implicit.add(arch)
        iuse_implicit.update(self.get("PORTAGE_ARCHLIST", "").split())
        # Flags derived from USE_EXPAND_HIDDEN variables
        # such as ELIBC, KERNEL, and USERLAND.
        use_expand_hidden = self.get("USE_EXPAND_HIDDEN", "").split()
        for x in use_expand_hidden:
            # Stored as a regex fragment so that any flag carrying this
            # USE_EXPAND prefix will match (used via PORTAGE_IUSE).
            iuse_implicit.add(x.lower() + "_.*")
        # Flags that have been masked or forced.
        iuse_implicit.update(self.usemask)
        iuse_implicit.update(self.useforce)
        # build and bootstrap flags used by bootstrap.sh
        iuse_implicit.add("build")
        iuse_implicit.add("bootstrap")
        return iuse_implicit
    def _getUseMask(self, pkg):
        """
        Return the set of masked USE flags applying to pkg, stacked
        incrementally across all profiles (profile use.mask plus any
        matching package.use.mask entries).
        @param pkg: a cpv string, or an object with a "cp" attribute
        """
        cp = getattr(pkg, "cp", None)
        # pkg is a plain cpv string, so derive the category/package key.
        cp = dep_getkey(pkg)
        for i in xrange(len(self.profiles)):
            cpdict = self.pusemask_list[i].get(cp, None)
            keys = cpdict.keys()
            # Atoms are consumed from best match to worst.
            best_match = best_match_to_list(pkg, keys)
            keys.remove(best_match)
            usemask.insert(pos, cpdict[best_match])
            if self.usemask_list[i]:
                usemask.insert(pos, self.usemask_list[i])
        # Incremental stacking lets later entries cancel earlier ones.
        return set(stack_lists(usemask, incremental=True))
    def _getUseForce(self, pkg):
        """
        Return the set of forced USE flags applying to pkg, stacked
        incrementally across all profiles (profile use.force plus any
        matching package.use.force entries).
        @param pkg: a cpv string, or an object with a "cp" attribute
        """
        cp = getattr(pkg, "cp", None)
        # pkg is a plain cpv string, so derive the category/package key.
        cp = dep_getkey(pkg)
        for i in xrange(len(self.profiles)):
            cpdict = self.puseforce_list[i].get(cp, None)
            keys = cpdict.keys()
            # Atoms are consumed from best match to worst.
            best_match = best_match_to_list(pkg, keys)
            keys.remove(best_match)
            useforce.insert(pos, cpdict[best_match])
            if self.useforce_list[i]:
                useforce.insert(pos, self.useforce_list[i])
        # Incremental stacking lets later entries cancel earlier ones.
        return set(stack_lists(useforce, incremental=True))
    def _getMaskAtom(self, cpv, metadata):
        """
        Take a package and return a matching package.mask atom, or None if no
        such atom exists or it has been cancelled by package.unmask. PROVIDE
        is not checked, so atoms will not be found for old-style virtuals.

        @param cpv: The package name
        @param metadata: A dictionary of raw package metadata
        @type metadata: dict
        @return: A matching atom string or None if one is not found.
        """
        cp = cpv_getkey(cpv)
        mask_atoms = self.pmaskdict.get(cp)
        # Match against "cpv:SLOT" so slot-qualified atoms work too.
        pkg_list = ["%s:%s" % (cpv, metadata["SLOT"])]
        unmask_atoms = self.punmaskdict.get(cp)
        for x in mask_atoms:
            if not match_from_list(x, pkg_list):
            # A matching package.unmask atom cancels the mask.
            for y in unmask_atoms:
                if match_from_list(y, pkg_list):
    def _getProfileMaskAtom(self, cpv, metadata):
        """
        Take a package and return a matching profile atom, or None if no
        such atom exists. Note that a profile atom may or may not have a "*"
        prefix. PROVIDE is not checked, so atoms will not be found for
        old-style virtuals.

        @param cpv: The package name
        @param metadata: A dictionary of raw package metadata
        @type metadata: dict
        @return: A matching profile atom string or None if one is not found.
        """
        cp = cpv_getkey(cpv)
        profile_atoms = self.prevmaskdict.get(cp)
        # Match against "cpv:SLOT" so slot-qualified atoms work too.
        pkg_list = ["%s:%s" % (cpv, metadata["SLOT"])]
        for x in profile_atoms:
            # Strip any leading "*" (profile packages syntax) before matching.
            if match_from_list(x.lstrip("*"), pkg_list):
    def _getKeywords(self, cpv, metadata):
        """
        Return the effective KEYWORDS list for cpv: the ebuild's own
        KEYWORDS with matching per-profile package.keywords entries
        stacked incrementally on top.
        """
        cp = dep_getkey(cpv)
        pkg = "%s:%s" % (cpv, metadata["SLOT"])
        # "-*" is filtered out of the ebuild's own KEYWORDS here.
        keywords = [[x for x in metadata["KEYWORDS"].split() if x != "-*"]]
        for i in xrange(len(self.profiles)):
            cpdict = self._pkeywords_list[i].get(cp, None)
            # Atoms are consumed from best match to worst.
            best_match = best_match_to_list(pkg, keys)
            keys.remove(best_match)
            keywords.insert(pos, cpdict[best_match])
        return stack_lists(keywords, incremental=True)
    def _getMissingKeywords(self, cpv, metadata):
        """
        Take a package and return a list of any KEYWORDS that the user
        may need to accept for the given package. If the KEYWORDS are empty
        and the ** keyword has not been accepted, the returned list will
        contain ** alone (in order to distinguish from the case of "none
        missing").

        @param cpv: The package name (for package.keywords support)
        @param metadata: A dictionary of raw package metadata
        @type metadata: dict
        @return: A list of KEYWORDS that have not been accepted.
        """
        # Hack: Need to check the env directly here as otherwise stacking
        # doesn't work properly as negative values are lost in the config
        # object (bug #139600)
        egroups = self.configdict["backupenv"].get(
            "ACCEPT_KEYWORDS", "").split()
        mygroups = self._getKeywords(cpv, metadata)
        # Repoman may modify this attribute as necessary.
        pgroups = self["ACCEPT_KEYWORDS"].split()
        cp = dep_getkey(cpv)
        pkgdict = self.pkeywordsdict.get(cp)
        # Match against "cpv:SLOT" so slot-qualified atoms work too.
        cpv_slot_list = ["%s:%s" % (cpv, metadata["SLOT"])]
        for atom, pkgkeywords in pkgdict.iteritems():
            if match_from_list(atom, cpv_slot_list):
                pgroups.extend(pkgkeywords)
        if matches or egroups:
            pgroups.extend(egroups)
            # Apply incremental semantics: "-kw" removes a previously
            # accepted keyword.
            if x.startswith("-"):
                inc_pgroups.discard(x[1:])
            pgroups = inc_pgroups
        if gp == "*" or (gp == "-*" and len(mygroups) == 1):
            writemsg(("--- WARNING: Package '%s' uses" + \
                " '%s' keyword.\n") % (cpv, gp), noiselevel=-1)
        elif gp.startswith("~"):
        elif not gp.startswith("-"):
        # Accepted if any keyword is satisfied, including the
        # "~*" / "*" / "**" wildcard groups.
        ((hastesting and "~*" in pgroups) or \
            (hasstable and "*" in pgroups) or "**" in pgroups):
        # If KEYWORDS is empty then we still have to return something
        # in order to distinguish from the case of "none missing".
        mygroups.append("**")
    def _getMissingLicenses(self, cpv, metadata):
        """
        Take a LICENSE string and return a list of any licenses that the
        user may need to accept for the given package. The returned list
        will not contain any licenses that have already been accepted.
        This method can throw an InvalidDependString exception.

        @param cpv: The package name (for package.license support)
        @param metadata: A dictionary of raw package metadata
        @type metadata: dict
        @return: A list of licenses that have not been accepted.
        """
        if "*" in self._accept_license:
        acceptable_licenses = self._accept_license
        cpdict = self._plicensedict.get(dep_getkey(cpv), None)
        # package.license entries for matching atoms extend the set of
        # acceptable licenses; copy first so the shared set isn't mutated.
        acceptable_licenses = self._accept_license.copy()
        cpv_slot = "%s:%s" % (cpv, metadata["SLOT"])
        for atom in match_to_list(cpv_slot, cpdict.keys()):
            acceptable_licenses.update(cpdict[atom])
        license_str = metadata["LICENSE"]
        if "?" in license_str:
            # Conditional licenses (flag? ( ... )) depend on USE.
            use = metadata["USE"].split()
        license_struct = portage.dep.use_reduce(
            portage.dep.paren_reduce(license_str), uselist=use)
        license_struct = portage.dep.dep_opconvert(license_struct)
        return self._getMaskedLicenses(license_struct, acceptable_licenses)
    def _getMaskedLicenses(self, license_struct, acceptable_licenses):
        """
        Recursively walk a parsed LICENSE structure and collect the
        licenses that are not in acceptable_licenses.
        """
        if not license_struct:
        if license_struct[0] == "||":
            # Any-of group.
            for element in license_struct[1:]:
                if isinstance(element, list):
                    ret.append(self._getMaskedLicenses(
                        element, acceptable_licenses))
                if element in acceptable_licenses:
            # Return all masked licenses, since we don't know which combination
            # (if any) the user will decide to unmask.
        for element in license_struct:
            if isinstance(element, list):
                ret.extend(self._getMaskedLicenses(element,
                    acceptable_licenses))
            if element not in acceptable_licenses:
    def _accept_chost(self, cpv, metadata):
        """
        @return True if pkg CHOST is accepted, False otherwise.
        """
        if self._accept_chost_re is None:
            # Lazily compile ACCEPT_CHOSTS into a single anchored regex,
            # falling back to CHOST when ACCEPT_CHOSTS is unset.
            accept_chost = self.get("ACCEPT_CHOSTS", "").split()
            if not accept_chost:
                chost = self.get("CHOST")
                accept_chost.append(chost)
            if not accept_chost:
                # No restriction configured; accept everything.
                self._accept_chost_re = re.compile(".*")
            elif len(accept_chost) == 1:
                self._accept_chost_re = re.compile(r'^%s$' % accept_chost[0])
                # An invalid pattern is replaced by one that matches nothing.
                writemsg("!!! Invalid ACCEPT_CHOSTS value: '%s': %s\n" % \
                    (accept_chost[0], e), noiselevel=-1)
                self._accept_chost_re = re.compile("^$")
                self._accept_chost_re = re.compile(
                    r'^(%s)$' % "|".join(accept_chost))
                writemsg("!!! Invalid ACCEPT_CHOSTS value: '%s': %s\n" % \
                    (" ".join(accept_chost), e), noiselevel=-1)
                self._accept_chost_re = re.compile("^$")
        return self._accept_chost_re.match(
            metadata.get('CHOST', '')) is not None
    def setinst(self,mycpv,mydbapi):
        """This updates the preferences for old-style virtuals,
        affecting the behavior of dep_expand() and dep_check()
        calls. It can change dbapi.match() behavior since that
        calls dep_expand(). However, dbapi instances have
        internal match caches that are not invalidated when
        preferences are updated here. This can potentially
        lead to some inconsistency (relevant to bug #1343)."""
        if len(self.virtuals) == 0:
        # Grab the virtuals this package provides and add them into the tree virtuals.
        if not hasattr(mydbapi, "aux_get"):
            # mydbapi is a plain mapping of metadata, not a dbapi object.
            provides = mydbapi["PROVIDE"]
            provides = mydbapi.aux_get(mycpv, ["PROVIDE"])[0]
        if isinstance(mydbapi, portdbapi):
            # For ebuilds, calculate USE for this cpv first.
            self.setcpv(mycpv, mydb=mydbapi)
            myuse = self["PORTAGE_USE"]
        elif not hasattr(mydbapi, "aux_get"):
            myuse = mydbapi["USE"]
            myuse = mydbapi.aux_get(mycpv, ["USE"])[0]
        # Evaluate PROVIDE with the package's USE to get concrete virtuals.
        virts = flatten(portage.dep.use_reduce(portage.dep.paren_reduce(provides), uselist=myuse.split()))
        cp = dep_getkey(mycpv)
        virt = dep_getkey(virt)
        providers = self.virtuals.get(virt)
        if providers and cp in providers:
        providers = self._depgraphVirtuals.get(virt)
        if providers is None:
            self._depgraphVirtuals[virt] = providers
        if cp not in providers:
            providers.append(cp)
            # Recompute the stacked virtuals to include the new provider.
            self.virtuals = self.__getvirtuals_compile()
        """Reload things like /etc/profile.env that can change during runtime."""
        env_d_filename = os.path.join(self["ROOT"], "etc", "profile.env")
        self.configdict["env.d"].clear()
        env_d = getconfig(env_d_filename, expand=False)
        # env_d will be None if profile.env doesn't exist.
        self.configdict["env.d"].update(env_d)
    def regenerate(self,useonly=0,use_cache=1):
        """
        Regenerate settings.
        This involves regenerating valid USE flags, re-expanding USE_EXPAND
        flags, re-stacking USE flags (-flag and -*), as well as any other
        INCREMENTAL variables. This also updates the env.d configdict;
        useful in case an ebuild changes the environment.

        If FEATURES has already stacked, it is not stacked twice.

        @param useonly: Only regenerate USE flags (not any other incrementals)
        @type useonly: Boolean
        @param use_cache: Enable Caching (only for autouse)
        @type use_cache: Boolean
        """
        if self.already_in_regenerate:
            # XXX: THIS REALLY NEEDS TO GET FIXED. autouse() loops.
            writemsg("!!! Looping in regenerate.\n",1)
        self.already_in_regenerate = 1
        myincrementals=["USE"]
        myincrementals = self.incrementals
        myincrementals = set(myincrementals)
        # If self.features exists, it has already been stacked and may have
        # been mutated, so don't stack it again or else any mutations will be
        # lost.
        if "FEATURES" in myincrementals and hasattr(self, "features"):
            myincrementals.remove("FEATURES")
        if "USE" in myincrementals:
            # Process USE last because it depends on USE_EXPAND which is also
            # an incremental variable.
            myincrementals.remove("USE")
        for mykey in myincrementals:
            # Stack every config layer except the last (original environment).
            mydbs=self.configlist[:-1]
            if mykey not in curdb:
            #variables are already expanded
            mysplit = curdb[mykey].split()
            # "-*" is a special "minus" var that means "unset all settings".
            # so USE="-* gnome" will have *just* gnome enabled.
            # Not legal. People assume too much. Complain.
            writemsg(colorize("BAD",
                "USE flags should not start with a '+': %s" % x) \
                + "\n", noiselevel=-1)
            # A "-flag" token removes a previously accumulated "flag".
            if (x[1:] in myflags):
                del myflags[myflags.index(x[1:])]
            # We got here, so add it now.
            if x not in myflags:
            #store setting in last element of configlist, the original environment:
            if myflags or mykey in self:
                self.configlist[-1][mykey] = " ".join(myflags)
        # Do the USE calculation last because it depends on USE_EXPAND.
        if "auto" in self["USE_ORDER"].split(":"):
            self.configdict["auto"]["USE"] = autouse(
                vartree(root=self["ROOT"], categories=self.categories,
                use_cache=use_cache, mysettings=self)
            self.configdict["auto"]["USE"] = ""
        use_expand = self.get("USE_EXPAND", "").split()
        use_expand_dict = self._use_expand_dict
        use_expand_dict.clear()
        for k in use_expand:
            use_expand_dict[k] = v
        # Stack the config dicts in USE_ORDER, lowest priority first.
        for x in self["USE_ORDER"].split(":"):
            if x in self.configdict:
                self.uvlist.append(self.configdict[x])
        self.uvlist.reverse()
        # For optimal performance, use slice
        # comparison instead of startswith().
        for curdb in self.uvlist:
            cur_use_expand = [x for x in use_expand if x in curdb]
            mysplit = curdb.get("USE", "").split()
            if not mysplit and not cur_use_expand:
            writemsg(colorize("BAD", "USE flags should not start " + \
                "with a '+': %s\n" % x), noiselevel=-1)
            myflags.discard(x[1:])
            for var in cur_use_expand:
                var_lower = var.lower()
                is_not_incremental = var not in myincrementals
                if is_not_incremental:
                    # Non-incremental USE_EXPAND variables replace any
                    # previously accumulated flags with this prefix.
                    prefix = var_lower + "_"
                    prefix_len = len(prefix)
                    for x in list(myflags):
                        if x[:prefix_len] == prefix:
                for x in curdb[var].split():
                    if is_not_incremental:
                        writemsg(colorize("BAD", "Invalid '+' " + \
                            "operator in non-incremental variable " + \
                            "'%s': '%s'\n" % (var, x)), noiselevel=-1)
                        writemsg(colorize("BAD", "Invalid '+' " + \
                            "operator in incremental variable " + \
                            "'%s': '%s'\n" % (var, x)), noiselevel=-1)
                    if is_not_incremental:
                        writemsg(colorize("BAD", "Invalid '-' " + \
                            "operator in non-incremental variable " + \
                            "'%s': '%s'\n" % (var, x)), noiselevel=-1)
                    myflags.discard(var_lower + "_" + x[1:])
                    myflags.add(var_lower + "_" + x)
        if not hasattr(self, "features"):
            self.features = set(
                self.configlist[-1].get("FEATURES","").split())
        self["FEATURES"] = " ".join(self.features)
        # Forced flags are always on; masked flags are always off.
        myflags.update(self.useforce)
        arch = self.configdict["defaults"].get("ARCH")
        myflags.difference_update(self.usemask)
        self.configlist[-1]["USE"]= " ".join(sorted(myflags))
        self.already_in_regenerate = 0
    def get_virts_p(self, myroot=None):
        """Return a mapping keyed by the virtual's package name (PN,
        without category) to its providers, derived from getvirtuals()."""
        virts = self.getvirtuals()
        # Split "virtual/foo" and key the result by "foo".
        vkeysplit = x.split("/")
        if vkeysplit[1] not in self.virts_p:
            self.virts_p[vkeysplit[1]] = virts[x]
    def getvirtuals(self, myroot=None):
        """myroot is now ignored because, due to caching, it has always been
        broken for all but the first call."""
        myroot = self["ROOT"]
            # Return the previously computed (cached) result.
            return self.virtuals
        for x in self.profiles:
            virtuals_file = os.path.join(x, "virtuals")
            virtuals_dict = grabdict(virtuals_file)
            for k in virtuals_dict.keys():
                # Keys must be valid, unversioned cat/pkg atoms.
                if not isvalidatom(k) or dep_getkey(k) != k:
                    writemsg("--- Invalid virtuals atom in %s: %s\n" % \
                        (virtuals_file, k), noiselevel=-1)
                    del virtuals_dict[k]
                myvalues = virtuals_dict[k]
                if x.startswith("-"):
                    # allow incrementals
                if not isvalidatom(myatom):
                    writemsg("--- Invalid atom in %s: %s\n" % \
                        (virtuals_file, x), noiselevel=-1)
                    del virtuals_dict[k]
            virtuals_list.append(virtuals_dict)
        self.dirVirtuals = stack_dictlist(virtuals_list, incremental=True)
        for virt in self.dirVirtuals:
            # Preference for virtuals decreases from left to right.
            self.dirVirtuals[virt].reverse()
        # Repoman does not use user or tree virtuals.
        if self.local_config and not self.treeVirtuals:
            temp_vartree = vartree(myroot, None,
                categories=self.categories, settings=self)
            # Reduce the provides into a list by CP.
            self.treeVirtuals = map_dictlist_vals(getCPFromCPV,temp_vartree.get_all_provides())
        self.virtuals = self.__getvirtuals_compile()
        return self.virtuals
    def __getvirtuals_compile(self):
        """Stack installed and profile virtuals. Preference for virtuals
        decreases from left to right.
        Order of preference:
        1. installed and in profile
        """
        # Virtuals by profile+tree preferences.
        for virt, installed_list in self.treeVirtuals.iteritems():
            profile_list = self.dirVirtuals.get(virt, None)
            if not profile_list:
            # Providers that are both installed and listed in a profile
            # get top preference.
            for cp in installed_list:
                if cp in profile_list:
                    ptVirtuals.setdefault(virt, [])
                    ptVirtuals[virt].append(cp)
        virtuals = stack_dictlist([ptVirtuals, self.treeVirtuals,
            self.dirVirtuals, self._depgraphVirtuals])
    def __delitem__(self,mykey):
        """Delete mykey from the layered configuration dicts."""
        for x in self.lookuplist:
    def __getitem__(self,mykey):
        """Dictionary-style lookup across the layered config dicts."""
        for d in self.lookuplist:
        return '' # for backward compat, don't raise KeyError
    def get(self, k, x=None):
        """Like dict.get(): return the value for k, or x if not found."""
        for d in self.lookuplist:
    def pop(self, key, *args):
        """Like dict.pop(); at most one default argument is accepted."""
            "pop expected at most 2 arguments, got " + \
            repr(1 + len(args)))
        # Remove from the writable layers, highest priority last.
        for d in reversed(self.lookuplist):
    def has_key(self,mykey):
        """Deprecated; use the ``in`` operator instead."""
        warnings.warn("portage.config.has_key() is deprecated, "
            "use the in operator instead",
        return mykey in self
    def __contains__(self, mykey):
        """Called to implement membership test operators (in and not in)."""
        for d in self.lookuplist:
    def setdefault(self, k, x=None):
        """Like dict.setdefault()."""
        for d in self.lookuplist:
2970 def iteritems(self):
2975 return list(self.iteritems())
    def __setitem__(self,mykey,myvalue):
        "set a value; will be thrown away at reset() time"
        if not isinstance(myvalue, basestring):
            raise ValueError("Invalid type being used as a value: '%s': '%s'" % (str(mykey),str(myvalue)))
        # Record the key so reset() can discard the modification later.
        self.modifiedkeys += [mykey]
        self.configdict["env"][mykey]=myvalue
        "return our locally-maintained environment"
        environ_filter = self._environ_filter
        filter_calling_env = False
        temp_dir = self.get("T")
        if temp_dir is not None and \
            os.path.exists(os.path.join(temp_dir, "environment")):
            # A saved ebuild environment exists, so filter the calling
            # environment to avoid leaking variables into it.
            filter_calling_env = True
        environ_whitelist = self._environ_whitelist
        env_d = self.configdict["env.d"]
        if x in environ_filter:
        if not isinstance(myvalue, basestring):
            writemsg("!!! Non-string value in config: %s=%s\n" % \
                (x, myvalue), noiselevel=-1)
        if filter_calling_env and \
            x not in environ_whitelist and \
            not self._environ_whitelist_re.match(x):
            # Do not allow anything to leak into the ebuild
            # environment unless it is explicitly whitelisted.
            # This ensures that variables unset by the ebuild
            # stay unset.
        if "HOME" not in mydict and "BUILD_PREFIX" in mydict:
            writemsg("*** HOME not set. Setting to "+mydict["BUILD_PREFIX"]+"\n")
            mydict["HOME"]=mydict["BUILD_PREFIX"][:]
        if filter_calling_env:
            phase = self.get("EBUILD_PHASE")
                whitelist.append("RPMDIR")
        # Filtered by IUSE and implicit IUSE.
        mydict["USE"] = self.get("PORTAGE_USE", "")
        # sandbox's bashrc sources /etc/profile which unsets ROOTPATH,
        # so we have to back it up and restore it.
        rootpath = mydict.get("ROOTPATH")
            mydict["PORTAGE_ROOTPATH"] = rootpath
3041 def thirdpartymirrors(self):
3042 if getattr(self, "_thirdpartymirrors", None) is None:
3043 profileroots = [os.path.join(self["PORTDIR"], "profiles")]
3044 for x in self["PORTDIR_OVERLAY"].split():
3045 profileroots.insert(0, os.path.join(x, "profiles"))
3046 thirdparty_lists = [grabdict(os.path.join(x, "thirdpartymirrors")) for x in profileroots]
3047 self._thirdpartymirrors = stack_dictlist(thirdparty_lists, incremental=True)
3048 return self._thirdpartymirrors
3051 return flatten([[myarch, "~" + myarch] \
3052 for myarch in self["PORTAGE_ARCHLIST"].split()])
    def selinux_enabled(self):
        """Return 1 if the "selinux" USE flag is set and the selinux
        module reports SELinux as enabled, 0 otherwise. Cached in
        self._selinux_enabled after the first call."""
        if getattr(self, "_selinux_enabled", None) is None:
            self._selinux_enabled = 0
            if "selinux" in self["USE"].split():
                if "selinux" in globals():
                    if selinux.is_selinux_enabled() == 1:
                        self._selinux_enabled = 1
                        self._selinux_enabled = 0
                    writemsg("!!! SELinux module not found. Please verify that it was installed.\n",
                    self._selinux_enabled = 0
            if self._selinux_enabled == 0:
                # NOTE(review): presumably drops the cached module so a
                # disabled selinux module isn't reused later — confirm.
                del sys.modules["selinux"]
        return self._selinux_enabled
3074 if sys.hexversion >= 0x3000000:
def _shell_quote(s):
    """
    Quote a string in double-quotes and use backslashes to
    escape any backslashes, double-quotes, dollar signs, or
    backquotes in the string.
    """
    for letter in "\\\"$`":
        s = s.replace(letter, "\\" + letter)
# In some cases, openpty can be slow when it fails. Therefore,
# stop trying to use it after the first failure.
# Module-level latch read/written by _create_pty_or_pipe().
_disable_openpty = False
def _create_pty_or_pipe(copy_term_size=None):
    """
    Try to create a pty and if that fails then create a normal
    pipe instead.

    @param copy_term_size: If a tty file descriptor is given
        then the term size will be copied to the pty.
    @type copy_term_size: int
    @returns: A tuple of (is_pty, master_fd, slave_fd) where
        is_pty is True if a pty was successfully allocated, and
        False if a normal pipe was allocated.
    """
    global _disable_openpty
    if _disable_openpty:
        master_fd, slave_fd = os.pipe()
        from pty import openpty
        master_fd, slave_fd = openpty()
    except EnvironmentError, e:
        # Remember the failure so we don't retry openpty again.
        _disable_openpty = True
        writemsg("openpty failed: '%s'\n" % str(e),
        master_fd, slave_fd = os.pipe()
    # Disable post-processing of output since otherwise weird
    # things like \n -> \r\n transformations may occur.
    mode = termios.tcgetattr(slave_fd)
    mode[1] &= ~termios.OPOST
    termios.tcsetattr(slave_fd, termios.TCSANOW, mode)
    copy_term_size is not None and \
        os.isatty(copy_term_size):
        from portage.output import get_term_size, set_term_size
        rows, columns = get_term_size()
        set_term_size(rows, columns, slave_fd)
    return (got_pty, master_fd, slave_fd)
# XXX This would be to replace getstatusoutput completely.
# XXX Issue: cannot block execution. Deadlock condition.
def spawn(mystring, mysettings, debug=0, free=0, droppriv=0, sesandbox=0, fakeroot=0, **keywords):
    """
    Spawn a subprocess with extra portage-specific options.

    Sandbox: Sandbox means the spawned process will be limited in its ability to
    read and write files (normally this means it is restricted to ${IMAGE}/)
    SElinux Sandbox: Enables sandboxing on SElinux
    Reduced Privileges: Drops privileges such that the process runs as portage:portage

    Notes: os.system cannot be used because it messes with signal handling. Instead we
    use the portage.process spawn* family of functions.

    This function waits for the process to terminate.

    @param mystring: Command to run
    @type mystring: String
    @param mysettings: Either a Dict of Key,Value pairs or an instance of portage.config
    @type mysettings: Dictionary or config instance
    @param debug: Ignored
    @type debug: Boolean
    @param free: Enable sandboxing for this process
    @param droppriv: Drop to portage:portage when running this command
    @type droppriv: Boolean
    @param sesandbox: Enable SELinux Sandboxing (toggles a context switch)
    @type sesandbox: Boolean
    @param fakeroot: Run this command with faked root privileges
    @type fakeroot: Boolean
    @param keywords: Extra options encoded as a dict, to be passed to spawn
    @type keywords: Dictionary

    1. The return code of the spawned process.
    """
    if isinstance(mysettings, dict):
        keywords["opt_name"]="[ %s ]" % "portage"
        check_config_instance(mysettings)
        env=mysettings.environ()
        if mysettings.mycpv is not None:
            keywords["opt_name"] = "[%s]" % mysettings.mycpv
            keywords["opt_name"] = "[%s/%s]" % \
                (mysettings.get("CATEGORY",""), mysettings.get("PF",""))
    fd_pipes = keywords.get("fd_pipes")
    if fd_pipes is None:
        0:sys.stdin.fileno(),
        1:sys.stdout.fileno(),
        2:sys.stderr.fileno(),
    # In some cases the above print statements don't flush stdout, so
    # it needs to be flushed before allowing a child process to use it
    # so that output always shows in the correct order.
    stdout_filenos = (sys.stdout.fileno(), sys.stderr.fileno())
    for fd in fd_pipes.itervalues():
        if fd in stdout_filenos:
    # The default policy for the sesandbox domain only allows entry (via exec)
    # from shells and from binaries that belong to portage (the number of entry
    # points is minimized). The "tee" binary is not among the allowed entry
    # points, so it is spawned outside of the sesandbox domain and reads from a
    # pseudo-terminal that connects two domains.
    logfile = keywords.get("logfile")
    fd_pipes_orig = None
        del keywords["logfile"]
        if 1 not in fd_pipes or 2 not in fd_pipes:
            raise ValueError(fd_pipes)
        fd_pipes.setdefault(0, sys.stdin.fileno())
        fd_pipes_orig = fd_pipes.copy()
        got_pty, master_fd, slave_fd = \
            _create_pty_or_pipe(copy_term_size=fd_pipes_orig[1])
        # We must set non-blocking mode before we close the slave_fd
        # since otherwise the fcntl call can fail on FreeBSD (the child
        # process might have already exited and closed slave_fd so we
        # have to keep it open in order to avoid FreeBSD potentially
        # generating an EAGAIN exception).
        fcntl.fcntl(master_fd, fcntl.F_SETFL,
            fcntl.fcntl(master_fd, fcntl.F_GETFL) | os.O_NONBLOCK)
        # Route the child's stdout/stderr through the pty/pipe so the
        # parent can tee it to the log file below.
        fd_pipes[0] = fd_pipes_orig[0]
        fd_pipes[1] = slave_fd
        fd_pipes[2] = slave_fd
        keywords["fd_pipes"] = fd_pipes
    features = mysettings.features
    # TODO: Enable fakeroot to be used together with droppriv. The
    # fake ownership/permissions will have to be converted to real
    # permissions in the merge phase.
    fakeroot = fakeroot and uid != 0 and portage.process.fakeroot_capable
    if droppriv and not uid and portage_gid and portage_uid:
        keywords.update({"uid":portage_uid,"gid":portage_gid,
            "groups":userpriv_groups,"umask":002})
        free=((droppriv and "usersandbox" not in features) or \
            (not droppriv and "sandbox" not in features and \
            "usersandbox" not in features and not fakeroot))
    # Select the spawn function based on the sandboxing mode.
    if free or "SANDBOX_ACTIVE" in os.environ:
        keywords["opt_name"] += " bash"
        spawn_func = portage.process.spawn_bash
        keywords["opt_name"] += " fakeroot"
        keywords["fakeroot_state"] = os.path.join(mysettings["T"], "fakeroot.state")
        spawn_func = portage.process.spawn_fakeroot
        keywords["opt_name"] += " sandbox"
        spawn_func = portage.process.spawn_sandbox
        # Switch into the sandbox SELinux domain for the child.
        con = selinux.getcontext()
        con = con.replace(mysettings["PORTAGE_T"],
            mysettings["PORTAGE_SANDBOX_T"])
        selinux.setexec(con)
    returnpid = keywords.get("returnpid")
    keywords["returnpid"] = True
        mypids.extend(spawn_func(mystring, env=env, **keywords))
        # Restore the previous SELinux exec context.
        selinux.setexec(None)
    # Tee the child's output to both the original stdout and the log.
    log_file = open(logfile, mode='ab')
    stdout_file = os.fdopen(os.dup(fd_pipes_orig[1]), 'wb')
    master_file = os.fdopen(master_fd, 'rb')
    iwtd = [master_file]
    import array, select
        events = select.select(iwtd, owtd, ewtd)
        # Use non-blocking mode to prevent read
        # calls from blocking indefinitely.
        buf = array.array('B')
            buf.fromfile(f, buffsize)
            if f is master_file:
                buf.tofile(stdout_file)
            buf.tofile(log_file)
        retval = os.waitpid(pid, 0)[1]
        portage.process.spawned_pids.remove(pid)
        if retval != os.EX_OK:
            # Normalize a signal-death status into an exit-code style value.
            return (retval & 0xff) << 8
# Keyword arguments used when dropping privileges to portage:portage
# (consumed by _spawn_fetch below).
_userpriv_spawn_kwargs = (
    ("uid", portage_uid),
    ("gid", portage_gid),
    ("groups", userpriv_groups),
def _spawn_fetch(settings, args, **kwargs):
    """
    Spawn a process with appropriate settings for fetching, including
    userfetch and selinux support.
    """
    global _userpriv_spawn_kwargs
    # Redirect all output to stdout since some fetchers like
    # wget pollute stderr (if portage detects a problem then it
    # can send its own message to stderr).
    if "fd_pipes" not in kwargs:
        kwargs["fd_pipes"] = {
            0 : sys.stdin.fileno(),
            1 : sys.stdout.fileno(),
            2 : sys.stdout.fileno(),
    # Drop privileges for the fetch when running as root with
    # FEATURES=userfetch enabled.
    if "userfetch" in settings.features and \
        os.getuid() == 0 and portage_gid and portage_uid:
        kwargs.update(_userpriv_spawn_kwargs)
    if settings.selinux_enabled():
        con = selinux.getcontext()
        con = con.replace(settings["PORTAGE_T"], settings["PORTAGE_FETCH_T"])
        selinux.setexec(con)
        # bash is an allowed entrypoint, while most binaries are not
        if args[0] != BASH_BINARY:
            args = [BASH_BINARY, "-c", "exec \"$@\"", args[0]] + args
    rval = portage.process.spawn(args,
        env=dict(settings.iteritems()), **kwargs)
    if settings.selinux_enabled():
        # Restore the previous SELinux exec context.
        selinux.setexec(None)
# Maps file_path -> bool result of the last write test (see below).
_userpriv_test_write_file_cache = {}
# Shell snippet: try to create the file, record the result, remove it,
# and exit with the recorded status.
_userpriv_test_write_cmd_script = "touch %(file_path)s 2>/dev/null ; rval=$? ; " + \
    "rm -f %(file_path)s ; exit $rval"
def _userpriv_test_write_file(settings, file_path):
    """
    Drop privileges and try to open a file for writing. The file may or
    may not exist, and the parent directory is assumed to exist. The file
    is removed before returning.

    @param settings: A config instance which is passed to _spawn_fetch()
    @param file_path: A file path to open and write.
    @return: True if write succeeds, False otherwise.
    """
    global _userpriv_test_write_file_cache, _userpriv_test_write_cmd_script
    # Return a cached answer for this path when available.
    rval = _userpriv_test_write_file_cache.get(file_path)
    if rval is not None:
    args = [BASH_BINARY, "-c", _userpriv_test_write_cmd_script % \
        {"file_path" : _shell_quote(file_path)}]
    returncode = _spawn_fetch(settings, args)
    rval = returncode == os.EX_OK
    _userpriv_test_write_file_cache[file_path] = rval
def _checksum_failure_temp_file(distdir, basename):
    """
    First try to find a duplicate temp file with the same checksum and return
    that filename if available. Otherwise, use mkstemp to create a new unique
    filename._checksum_failure_.$RANDOM, rename the given file, and return the
    new filename. In any case, filename will be renamed or removed before this
    function returns a temp filename.
    """
    filename = os.path.join(distdir, basename)
    size = os.stat(filename).st_size
    tempfile_re = re.compile(re.escape(basename) + r'\._checksum_failure_\..*')
    for temp_filename in os.listdir(distdir):
        if not tempfile_re.match(temp_filename):
        temp_filename = os.path.join(distdir, temp_filename)
        # Cheap size comparison first; only hash when sizes agree.
        if size != os.stat(temp_filename).st_size:
        temp_checksum = portage.checksum.perform_md5(temp_filename)
        except portage.exception.FileNotFound:
            # Apparently the temp file disappeared. Let it go.
        if checksum is None:
            # Hash the original lazily, only when a candidate exists.
            checksum = portage.checksum.perform_md5(filename)
        if checksum == temp_checksum:
            return temp_filename
    from tempfile import mkstemp
    fd, temp_filename = mkstemp("", basename + "._checksum_failure_.", distdir)
    os.rename(filename, temp_filename)
    return temp_filename
3445 def _check_digests(filename, digests, show_errors=1):
3447 Check digests and display a message if an error occurs.
3448 @return True if all digests match, False otherwise.
# reason is a (description, got, expected) tuple from verify_all(),
# used to build the error output below.
3450 verified_ok, reason = portage.checksum.verify_all(filename, digests)
3453 writemsg("!!! Previously fetched" + \
3454 " file: '%s'\n" % filename, noiselevel=-1)
3455 writemsg("!!! Reason: %s\n" % reason[0],
3457 writemsg(("!!! Got: %s\n" + \
3458 "!!! Expected: %s\n") % \
3459 (reason[1], reason[2]), noiselevel=-1)
3463 def _check_distfile(filename, digests, eout, show_errors=1):
3465 @return a tuple of (match, stat_obj) where match is True if filename
3466 matches all given digests (if any) and stat_obj is a stat result, or
3467 None if the file does not exist.
3471 size = digests.get("size")
# "size" alone is not a real checksum; when it is the only entry the
# digest check below degenerates to a size comparison.
3472 if size is not None and len(digests) == 1:
3476 st = os.stat(filename)
# File does not exist: no match, no stat result.
3478 return (False, None)
3479 if size is not None and size != st.st_size:
3482 if size is not None:
3483 eout.ebegin("%s %s ;-)" % (os.path.basename(filename), "size"))
3485 elif st.st_size == 0:
3486 # Zero-byte distfiles are always invalid.
# Full digest verification; errors are reported via _check_digests().
3489 if _check_digests(filename, digests, show_errors=show_errors):
3490 eout.ebegin("%s %s ;-)" % (os.path.basename(filename),
3491 " ".join(sorted(digests))))
# Parses PORTAGE_FETCH_RESUME_MIN_SIZE values such as "350K": an integer
# followed by an optional single size-suffix letter.
3497 _fetch_resume_size_re = re.compile('(^[\d]+)([KMGTPEZY]?$)')
# Maps each size suffix to its power-of-two exponent (used in fetch() as
# int(number) * 2 ** _size_suffix_map[suffix]).
3499 _size_suffix_map = {
3511 def fetch(myuris, mysettings, listonly=0, fetchonly=0, locks_in_subdir=".locks",use_locks=1, try_mirrors=1):
3512 "fetch files. Will use digest file if available."
# @param myuris: either a dict mapping distfile name -> iterable of URIs,
#   or a flat sequence of URIs (names then come from os.path.basename).
# @param mysettings: portage config instance; supplies DISTDIR, FEATURES,
#   GENTOO_MIRRORS, FETCHCOMMAND/RESUMECOMMAND, etc.
# @param locks_in_subdir/use_locks: control per-distfile locking inside
#   DISTDIR (locks live in DISTDIR/<locks_in_subdir> when set).
# NOTE(review): listonly/fetchonly/try_mirrors semantics and the return
# value are established by lines elided from this listing — confirm
# against upstream before relying on them.
3517 features = mysettings.features
3518 restrict = mysettings.get("PORTAGE_RESTRICT","").split()
3520 from portage.data import secpass
# userfetch/userpriv only take effect when running with enough privilege
# (secpass >= 2) to actually drop privileges.
3521 userfetch = secpass >= 2 and "userfetch" in features
3522 userpriv = secpass >= 2 and "userpriv" in features
3524 # 'nomirror' is bad/negative logic. You Restrict mirroring, not no-mirroring.
3525 if "mirror" in restrict or \
3526 "nomirror" in restrict:
3527 if ("mirror" in features) and ("lmirror" not in features):
3528 # lmirror should allow you to bypass mirror restrictions.
3529 # XXX: This is not a good thing, and is temporary at best.
3530 print ">>> \"mirror\" mode desired and \"mirror\" restriction found; skipping fetch."
3533 # Generally, downloading the same file repeatedly from
3534 # every single available mirror is a waste of bandwidth
3535 # and time, so there needs to be a cap.
3536 checksum_failure_max_tries = 5
3537 v = checksum_failure_max_tries
3539 v = int(mysettings.get("PORTAGE_FETCH_CHECKSUM_TRY_MIRRORS",
3540 checksum_failure_max_tries))
3541 except (ValueError, OverflowError):
3542 writemsg("!!! Variable PORTAGE_FETCH_CHECKSUM_TRY_MIRRORS" + \
3543 " contains non-integer value: '%s'\n" % \
3544 mysettings["PORTAGE_FETCH_CHECKSUM_TRY_MIRRORS"], noiselevel=-1)
3545 writemsg("!!! Using PORTAGE_FETCH_CHECKSUM_TRY_MIRRORS " + \
3546 "default value: %s\n" % checksum_failure_max_tries,
3548 v = checksum_failure_max_tries
3550 writemsg("!!! Variable PORTAGE_FETCH_CHECKSUM_TRY_MIRRORS" + \
3551 " contains value less than 1: '%s'\n" % v, noiselevel=-1)
3552 writemsg("!!! Using PORTAGE_FETCH_CHECKSUM_TRY_MIRRORS " + \
3553 "default value: %s\n" % checksum_failure_max_tries,
3555 v = checksum_failure_max_tries
3556 checksum_failure_max_tries = v
# Parse PORTAGE_FETCH_RESUME_MIN_SIZE (e.g. "350K") into a byte count;
# any malformed value silently falls back to the default.
3559 fetch_resume_size_default = "350K"
3560 fetch_resume_size = mysettings.get("PORTAGE_FETCH_RESUME_MIN_SIZE")
3561 if fetch_resume_size is not None:
3562 fetch_resume_size = "".join(fetch_resume_size.split())
3563 if not fetch_resume_size:
3564 # If it's undefined or empty, silently use the default.
3565 fetch_resume_size = fetch_resume_size_default
3566 match = _fetch_resume_size_re.match(fetch_resume_size)
3567 if match is None or \
3568 (match.group(2).upper() not in _size_suffix_map):
3569 writemsg("!!! Variable PORTAGE_FETCH_RESUME_MIN_SIZE" + \
3570 " contains an unrecognized format: '%s'\n" % \
3571 mysettings["PORTAGE_FETCH_RESUME_MIN_SIZE"], noiselevel=-1)
3572 writemsg("!!! Using PORTAGE_FETCH_RESUME_MIN_SIZE " + \
3573 "default value: %s\n" % fetch_resume_size_default,
3575 fetch_resume_size = None
3576 if fetch_resume_size is None:
3577 fetch_resume_size = fetch_resume_size_default
3578 match = _fetch_resume_size_re.match(fetch_resume_size)
3579 fetch_resume_size = int(match.group(1)) * \
3580 2 ** _size_suffix_map[match.group(2).upper()]
3582 # Behave like the package has RESTRICT="primaryuri" after a
3583 # couple of checksum failures, to increase the probability
3584 # of success before checksum_failure_max_tries is reached.
3585 checksum_failure_primaryuri = 2
3586 thirdpartymirrors = mysettings.thirdpartymirrors()
3588 # In the background parallel-fetch process, it's safe to skip checksum
3589 # verification of pre-existing files in $DISTDIR that have the correct
3590 # file size. The parent process will verify their checksums prior to
3593 parallel_fetchonly = "PORTAGE_PARALLEL_FETCHONLY" in mysettings
3594 if parallel_fetchonly:
3597 check_config_instance(mysettings)
3599 custommirrors = grabdict(os.path.join(mysettings["PORTAGE_CONFIGROOT"],
3600 CUSTOM_MIRRORS_FILE.lstrip(os.path.sep)), recursive=1)
3604 if listonly or ("distlocks" not in features):
3608 if "skiprocheck" in features:
3611 if not os.access(mysettings["DISTDIR"],os.W_OK) and fetch_to_ro:
3613 writemsg(colorize("BAD",
3614 "!!! For fetching to a read-only filesystem, " + \
3615 "locking should be turned off.\n"), noiselevel=-1)
3616 writemsg("!!! This can be done by adding -distlocks to " + \
3617 "FEATURES in /etc/make.conf\n", noiselevel=-1)
3620 # local mirrors are always added
3621 if "local" in custommirrors:
3622 mymirrors += custommirrors["local"]
3624 if "nomirror" in restrict or \
3625 "mirror" in restrict:
3626 # We don't add any mirrors.
3630 mymirrors += [x.rstrip("/") for x in mysettings["GENTOO_MIRRORS"].split() if x]
3632 skip_manifest = mysettings.get("EBUILD_SKIP_MANIFEST") == "1"
3633 pkgdir = mysettings.get("O")
3634 if not (pkgdir is None or skip_manifest):
3635 mydigests = Manifest(
3636 pkgdir, mysettings["DISTDIR"]).getTypeDigests("DIST")
3638 # no digests because fetch was not called for a specific package
3642 ro_distdirs = [x for x in \
3643 shlex.split(mysettings.get("PORTAGE_RO_DISTDIRS", "")) \
3644 if os.path.isdir(x)]
# Mirror entries that are absolute paths are local filesystem mirrors;
# collect them separately so they can be copied from directly.
3647 for x in range(len(mymirrors)-1,-1,-1):
3648 if mymirrors[x] and mymirrors[x][0]=='/':
3649 fsmirrors += [mymirrors[x]]
3652 restrict_fetch = "fetch" in restrict
3653 custom_local_mirrors = custommirrors.get("local", [])
3655 # With fetch restriction, a normal uri may only be fetched from
3656 # custom local mirrors (if available). A mirror:// uri may also
3657 # be fetched from specific mirrors (effectively overriding fetch
3658 # restriction, but only for specific mirrors).
3659 locations = custom_local_mirrors
3661 locations = mymirrors
# Normalize myuris into (filename, uri) pairs regardless of input shape.
3663 file_uri_tuples = []
3664 if isinstance(myuris, dict):
3665 for myfile, uri_set in myuris.iteritems():
3666 for myuri in uri_set:
3667 file_uri_tuples.append((myfile, myuri))
3669 for myuri in myuris:
3670 file_uri_tuples.append((os.path.basename(myuri), myuri))
# Build filedict: for each distfile, an ordered list of candidate URIs.
# Mirror locations are appended first, then mirror:// expansions and
# upstream URIs (which "primaryuri" restriction moves to the front).
3673 primaryuri_indexes={}
3674 primaryuri_dict = {}
3675 thirdpartymirror_uris = {}
3676 for myfile, myuri in file_uri_tuples:
3677 if myfile not in filedict:
3679 for y in range(0,len(locations)):
3680 filedict[myfile].append(locations[y]+"/distfiles/"+myfile)
3681 if myuri[:9]=="mirror://":
3682 eidx = myuri.find("/", 9)
3684 mirrorname = myuri[9:eidx]
3685 path = myuri[eidx+1:]
3687 # Try user-defined mirrors first
3688 if mirrorname in custommirrors:
3689 for cmirr in custommirrors[mirrorname]:
3690 filedict[myfile].append(
3691 cmirr.rstrip("/") + "/" + path)
3693 # now try the official mirrors
3694 if mirrorname in thirdpartymirrors:
3695 shuffle(thirdpartymirrors[mirrorname])
3697 uris = [locmirr.rstrip("/") + "/" + path \
3698 for locmirr in thirdpartymirrors[mirrorname]]
3699 filedict[myfile].extend(uris)
3700 thirdpartymirror_uris.setdefault(myfile, []).extend(uris)
3702 if not filedict[myfile]:
3703 writemsg("No known mirror by the name: %s\n" % (mirrorname))
3705 writemsg("Invalid mirror definition in SRC_URI:\n", noiselevel=-1)
3706 writemsg(" %s\n" % (myuri), noiselevel=-1)
3709 # Only fetch from specific mirrors is allowed.
3711 if "primaryuri" in restrict:
3712 # Use the source site first.
3713 if myfile in primaryuri_indexes:
3714 primaryuri_indexes[myfile] += 1
3716 primaryuri_indexes[myfile] = 0
3717 filedict[myfile].insert(primaryuri_indexes[myfile], myuri)
3719 filedict[myfile].append(myuri)
3720 primaryuris = primaryuri_dict.get(myfile)
3721 if primaryuris is None:
3723 primaryuri_dict[myfile] = primaryuris
3724 primaryuris.append(myuri)
3726 # Prefer thirdpartymirrors over normal mirrors in cases when
3727 # the file does not yet exist on the normal mirrors.
3728 for myfile, uris in thirdpartymirror_uris.iteritems():
3729 primaryuri_dict.setdefault(myfile, []).extend(uris)
# Ensure DISTDIR (and the .locks subdir when distlocks is enabled)
# exists with group ownership/permissions that allow fetching to work.
3736 if can_fetch and not fetch_to_ro:
3737 global _userpriv_test_write_file_cache
3741 dir_gid = portage_gid
3742 if "FAKED_MODE" in mysettings:
3743 # When inside fakeroot, directories with portage's gid appear
3744 # to have root's gid. Therefore, use root's gid instead of
3745 # portage's gid to avoid spurious permissions adjustments
3746 # when inside fakeroot.
3749 if "distlocks" in features:
3750 distdir_dirs.append(".locks")
3753 for x in distdir_dirs:
3754 mydir = os.path.join(mysettings["DISTDIR"], x)
3755 write_test_file = os.path.join(
3756 mydir, ".__portage_test_write__")
3763 if st is not None and stat.S_ISDIR(st.st_mode):
3764 if not (userfetch or userpriv):
3766 if _userpriv_test_write_file(mysettings, write_test_file):
# Invalidate the memoized probe result before (re)adjusting perms.
3769 _userpriv_test_write_file_cache.pop(write_test_file, None)
3770 if portage.util.ensure_dirs(mydir, gid=dir_gid, mode=dirmode, mask=modemask):
3772 # The directory has just been created
3773 # and therefore it must be empty.
3775 writemsg("Adjusting permissions recursively: '%s'\n" % mydir,
3778 raise # bail out on the first error that occurs during recursion
3779 if not apply_recursive_permissions(mydir,
3780 gid=dir_gid, dirmode=dirmode, dirmask=modemask,
3781 filemode=filemode, filemask=modemask, onerror=onerror):
3782 raise portage.exception.OperationNotPermitted(
3783 "Failed to apply recursive permissions for the portage group.")
3784 except portage.exception.PortageException, e:
3785 if not os.path.isdir(mysettings["DISTDIR"]):
3786 writemsg("!!! %s\n" % str(e), noiselevel=-1)
3787 writemsg("!!! Directory Not Found: DISTDIR='%s'\n" % mysettings["DISTDIR"], noiselevel=-1)
3788 writemsg("!!! Fetching will fail!\n", noiselevel=-1)
3791 not fetch_to_ro and \
3792 not os.access(mysettings["DISTDIR"], os.W_OK):
3793 writemsg("!!! No write access to '%s'\n" % mysettings["DISTDIR"],
3797 if can_fetch and use_locks and locks_in_subdir:
3798 distlocks_subdir = os.path.join(mysettings["DISTDIR"], locks_in_subdir)
3799 if not os.access(distlocks_subdir, os.W_OK):
3800 writemsg("!!! No write access to write to %s. Aborting.\n" % distlocks_subdir,
3803 del distlocks_subdir
3805 distdir_writable = can_fetch and not fetch_to_ro
3806 failed_files = set()
3807 restrict_fetch_msg = False
# Main per-file loop. The fetched state used below is:
# 0 not fetched, 1 partially downloaded, 2 completely downloaded.
3809 for myfile in filedict:
3813 1 partially downloaded
3814 2 completely downloaded
3818 orig_digests = mydigests.get(myfile, {})
3819 size = orig_digests.get("size")
3821 # Zero-byte distfiles are always invalid, so discard their digests.
3822 del mydigests[myfile]
3823 orig_digests.clear()
3825 pruned_digests = orig_digests
3826 if parallel_fetchonly:
3828 if size is not None:
3829 pruned_digests["size"] = size
3831 myfile_path = os.path.join(mysettings["DISTDIR"], myfile)
3835 writemsg_stdout("\n", noiselevel=-1)
3837 # check if there is enough space in DISTDIR to completely store myfile
3838 # overestimate the filesize so we aren't bitten by FS overhead
3839 if size is not None and hasattr(os, "statvfs"):
3840 vfs_stat = os.statvfs(mysettings["DISTDIR"])
3842 mysize = os.stat(myfile_path).st_size
3844 if e.errno != errno.ENOENT:
3848 if (size - mysize + vfs_stat.f_bsize) >= \
3849 (vfs_stat.f_bsize * vfs_stat.f_bavail):
3850 writemsg("!!! Insufficient space to store %s in %s\n" % (myfile, mysettings["DISTDIR"]), noiselevel=-1)
# Take a per-distfile lock so concurrent fetchers don't collide on the
# same file; TryAgain means another fetcher already holds it.
3853 if distdir_writable and use_locks:
3856 lock_file = os.path.join(mysettings["DISTDIR"],
3857 locks_in_subdir, myfile)
3859 lock_file = myfile_path
3863 lock_kwargs["flags"] = os.O_NONBLOCK
3866 file_lock = portage.locks.lockfile(myfile_path,
3867 wantnewlockfile=1, **lock_kwargs)
3868 except portage.exception.TryAgain:
3869 writemsg((">>> File '%s' is already locked by " + \
3870 "another fetcher. Continuing...\n") % myfile,
3876 eout = portage.output.EOutput()
3877 eout.quiet = mysettings.get("PORTAGE_QUIET") == "1"
3878 match, mystat = _check_distfile(
3879 myfile_path, pruned_digests, eout)
3881 if distdir_writable:
3883 apply_secpass_permissions(myfile_path,
3884 gid=portage_gid, mode=0664, mask=02,
3886 except portage.exception.PortageException, e:
# Only complain when the file is actually unreadable; a failed
# chmod/chgrp on a readable file is harmless here.
3887 if not os.access(myfile_path, os.R_OK):
3888 writemsg("!!! Failed to adjust permissions:" + \
3889 " %s\n" % str(e), noiselevel=-1)
3893 if distdir_writable and mystat is None:
3894 # Remove broken symlinks if necessary.
3896 os.unlink(myfile_path)
3900 if mystat is not None:
3901 if stat.S_ISDIR(mystat.st_mode):
3902 portage.util.writemsg_level(
3903 ("!!! Unable to fetch file since " + \
3904 "a directory is in the way: \n" + \
3905 "!!! %s\n") % myfile_path,
3906 level=logging.ERROR, noiselevel=-1)
3909 if mystat.st_size == 0:
3910 if distdir_writable:
3912 os.unlink(myfile_path)
3915 elif distdir_writable:
3916 if mystat.st_size < fetch_resume_size and \
3917 mystat.st_size < size:
3918 # If the file already exists and the size does not
3919 # match the existing digests, it may be that the
3920 # user is attempting to update the digest. In this
3921 # case, the digestgen() function will advise the
3922 # user to use `ebuild --force foo.ebuild manifest`
3923 # in order to force the old digests to be replaced.
3924 # Since the user may want to keep this file, rename
3925 # it instead of deleting it.
3926 writemsg((">>> Renaming distfile with size " + \
3927 "%d (smaller than " "PORTAGE_FETCH_RESU" + \
3928 "ME_MIN_SIZE)\n") % mystat.st_size)
3930 _checksum_failure_temp_file(
3931 mysettings["DISTDIR"], myfile)
3932 writemsg_stdout("Refetching... " + \
3933 "File renamed to '%s'\n\n" % \
3934 temp_filename, noiselevel=-1)
3935 elif mystat.st_size >= size:
3937 _checksum_failure_temp_file(
3938 mysettings["DISTDIR"], myfile)
3939 writemsg_stdout("Refetching... " + \
3940 "File renamed to '%s'\n\n" % \
3941 temp_filename, noiselevel=-1)
# Prefer a verified copy from a read-only distdir via symlink, before
# trying filesystem mirrors or network downloads.
3943 if distdir_writable and ro_distdirs:
3944 readonly_file = None
3945 for x in ro_distdirs:
3946 filename = os.path.join(x, myfile)
3947 match, mystat = _check_distfile(
3948 filename, pruned_digests, eout)
3950 readonly_file = filename
3952 if readonly_file is not None:
3954 os.unlink(myfile_path)
3956 if e.errno != errno.ENOENT:
3959 os.symlink(readonly_file, myfile_path)
3962 if fsmirrors and not os.path.exists(myfile_path) and has_space:
3963 for mydir in fsmirrors:
3964 mirror_file = os.path.join(mydir, myfile)
3966 shutil.copyfile(mirror_file, myfile_path)
3967 writemsg(_("Local mirror has file:" + \
3968 " %(file)s\n" % {"file":myfile}))
3970 except (IOError, OSError), e:
3971 if e.errno != errno.ENOENT:
3976 mystat = os.stat(myfile_path)
3978 if e.errno != errno.ENOENT:
3983 apply_secpass_permissions(
3984 myfile_path, gid=portage_gid, mode=0664, mask=02,
3986 except portage.exception.PortageException, e:
3987 if not os.access(myfile_path, os.R_OK):
3988 writemsg("!!! Failed to adjust permissions:" + \
3989 " %s\n" % str(e), noiselevel=-1)
3991 # If the file is empty then it's obviously invalid. Remove
3992 # the empty file and try to download if possible.
3993 if mystat.st_size == 0:
3994 if distdir_writable:
3996 os.unlink(myfile_path)
3997 except EnvironmentError:
3999 elif myfile not in mydigests:
4000 # We don't have a digest, but the file exists. We must
4001 # assume that it is fully downloaded.
4004 if mystat.st_size < mydigests[myfile]["size"] and \
4006 fetched = 1 # Try to resume this download.
4007 elif parallel_fetchonly and \
4008 mystat.st_size == mydigests[myfile]["size"]:
4009 eout = portage.output.EOutput()
4011 mysettings.get("PORTAGE_QUIET") == "1"
4013 "%s size ;-)" % (myfile, ))
4017 verified_ok, reason = portage.checksum.verify_all(
4018 myfile_path, mydigests[myfile])
4020 writemsg("!!! Previously fetched" + \
4021 " file: '%s'\n" % myfile, noiselevel=-1)
4022 writemsg("!!! Reason: %s\n" % reason[0],
4024 writemsg(("!!! Got: %s\n" + \
4025 "!!! Expected: %s\n") % \
4026 (reason[1], reason[2]), noiselevel=-1)
4027 if reason[0] == "Insufficient data for checksum verification":
4029 if distdir_writable:
4031 _checksum_failure_temp_file(
4032 mysettings["DISTDIR"], myfile)
4033 writemsg_stdout("Refetching... " + \
4034 "File renamed to '%s'\n\n" % \
4035 temp_filename, noiselevel=-1)
4037 eout = portage.output.EOutput()
4039 mysettings.get("PORTAGE_QUIET", None) == "1"
4040 digests = mydigests.get(myfile)
4042 digests = digests.keys()
4045 "%s %s ;-)" % (myfile, " ".join(digests)))
4047 continue # fetch any remaining files
4049 # Create a reversed list since that is optimal for list.pop().
4050 uri_list = filedict[myfile][:]
4052 checksum_failure_count = 0
4053 tried_locations = set()
4055 loc = uri_list.pop()
4056 # Eliminate duplicates here in case we've switched to
4057 # "primaryuri" mode on the fly due to a checksum failure.
4058 if loc in tried_locations:
4060 tried_locations.add(loc)
4062 writemsg_stdout(loc+" ", noiselevel=-1)
4064 # allow different fetchcommands per protocol
4065 protocol = loc[0:loc.find("://")]
4067 missing_file_param = False
# Look up FETCHCOMMAND_<PROTOCOL> first, falling back to the
# generic FETCHCOMMAND; both must reference ${FILE}.
4068 fetchcommand_var = "FETCHCOMMAND_" + protocol.upper()
4069 fetchcommand = mysettings.get(fetchcommand_var)
4070 if fetchcommand is None:
4071 fetchcommand_var = "FETCHCOMMAND"
4072 fetchcommand = mysettings.get(fetchcommand_var)
4073 if fetchcommand is None:
4074 portage.util.writemsg_level(
4075 ("!!! %s is unset. It should " + \
4076 "have been defined in\n!!! %s/make.globals.\n") \
4077 % (fetchcommand_var,
4078 portage.const.GLOBAL_CONFIG_PATH),
4079 level=logging.ERROR, noiselevel=-1)
4081 if "${FILE}" not in fetchcommand:
4082 portage.util.writemsg_level(
4083 ("!!! %s does not contain the required ${FILE}" + \
4084 " parameter.\n") % fetchcommand_var,
4085 level=logging.ERROR, noiselevel=-1)
4086 missing_file_param = True
4088 resumecommand_var = "RESUMECOMMAND_" + protocol.upper()
4089 resumecommand = mysettings.get(resumecommand_var)
4090 if resumecommand is None:
4091 resumecommand_var = "RESUMECOMMAND"
4092 resumecommand = mysettings.get(resumecommand_var)
4093 if resumecommand is None:
4094 portage.util.writemsg_level(
4095 ("!!! %s is unset. It should " + \
4096 "have been defined in\n!!! %s/make.globals.\n") \
4097 % (resumecommand_var,
4098 portage.const.GLOBAL_CONFIG_PATH),
4099 level=logging.ERROR, noiselevel=-1)
4101 if "${FILE}" not in resumecommand:
4102 portage.util.writemsg_level(
4103 ("!!! %s does not contain the required ${FILE}" + \
4104 " parameter.\n") % resumecommand_var,
4105 level=logging.ERROR, noiselevel=-1)
4106 missing_file_param = True
4108 if missing_file_param:
4109 portage.util.writemsg_level(
4110 "!!! Refer to the make.conf(5) man page for " + \
4111 "information about how to\n!!! correctly specify " + \
4112 "FETCHCOMMAND and RESUMECOMMAND.\n",
4113 level=logging.ERROR, noiselevel=-1)
4114 if myfile != os.path.basename(loc):
4120 mysize = os.stat(myfile_path).st_size
4122 if e.errno != errno.ENOENT:
4128 writemsg("!!! File %s isn't fetched but unable to get it.\n" % myfile,
4130 elif size is None or size > mysize:
4131 writemsg("!!! File %s isn't fully fetched, but unable to complete it\n" % myfile,
4134 writemsg(("!!! File %s is incorrect size, " + \
4135 "but unable to retry.\n") % myfile, noiselevel=-1)
4140 if fetched != 2 and has_space:
4141 #we either need to resume or start the download
4144 mystat = os.stat(myfile_path)
4146 if e.errno != errno.ENOENT:
# Partial files below the resume threshold are deleted and
# refetched from scratch rather than resumed.
4151 if mystat.st_size < fetch_resume_size:
4152 writemsg((">>> Deleting distfile with size " + \
4153 "%d (smaller than " "PORTAGE_FETCH_RESU" + \
4154 "ME_MIN_SIZE)\n") % mystat.st_size)
4156 os.unlink(myfile_path)
4158 if e.errno != errno.ENOENT:
4164 writemsg(">>> Resuming download...\n")
4165 locfetch=resumecommand
4166 command_var = resumecommand_var
4169 locfetch=fetchcommand
4170 command_var = fetchcommand_var
# Mask any password embedded in the URI before printing it.
4171 writemsg_stdout(">>> Downloading '%s'\n" % \
4172 re.sub(r'//(.+):.+@(.+)/',r'//\1:*password*@\2/', loc))
4174 "DISTDIR": mysettings["DISTDIR"],
4179 myfetch = shlex.split(locfetch)
4180 myfetch = [varexpand(x, mydict=variables) for x in myfetch]
4184 myret = _spawn_fetch(mysettings, myfetch)
4188 apply_secpass_permissions(myfile_path,
4189 gid=portage_gid, mode=0664, mask=02)
4190 except portage.exception.FileNotFound, e:
4192 except portage.exception.PortageException, e:
4193 if not os.access(myfile_path, os.R_OK):
4194 writemsg("!!! Failed to adjust permissions:" + \
4195 " %s\n" % str(e), noiselevel=-1)
4197 # If the file is empty then it's obviously invalid. Don't
4198 # trust the return value from the fetcher. Remove the
4199 # empty file and try to download again.
4201 if os.stat(myfile_path).st_size == 0:
4202 os.unlink(myfile_path)
4205 except EnvironmentError:
4208 if mydigests is not None and myfile in mydigests:
4210 mystat = os.stat(myfile_path)
4212 if e.errno != errno.ENOENT:
4218 if stat.S_ISDIR(mystat.st_mode):
4219 # This can happen if FETCHCOMMAND erroneously
4220 # contains wget's -P option where it should
4222 portage.util.writemsg_level(
4223 ("!!! The command specified in the " + \
4224 "%s variable appears to have\n!!! " + \
4225 "created a directory instead of a " + \
4226 "normal file.\n") % command_var,
4227 level=logging.ERROR, noiselevel=-1)
4228 portage.util.writemsg_level(
4229 "!!! Refer to the make.conf(5) " + \
4230 "man page for information about how " + \
4231 "to\n!!! correctly specify " + \
4232 "FETCHCOMMAND and RESUMECOMMAND.\n",
4233 level=logging.ERROR, noiselevel=-1)
4236 # no exception? file exists. let digestcheck() report
4237 # an appropriate error for size or checksum problems
4239 # If the fetcher reported success and the file is
4240 # too small, it's probably because the digest is
4241 # bad (upstream changed the distfile). In this
4242 # case we don't want to attempt to resume. Show a
4243 # digest verification failure so that the user gets
4244 # a clue about what just happened.
4245 if myret != os.EX_OK and \
4246 mystat.st_size < mydigests[myfile]["size"]:
4247 # Fetch failed... Try the next one... Kill 404 files though.
4248 if (mystat[stat.ST_SIZE]<100000) and (len(myfile)>4) and not ((myfile[-5:]==".html") or (myfile[-4:]==".htm")):
4249 html404=re.compile("<title>.*(not found|404).*</title>",re.I|re.M)
4250 if html404.search(open(mysettings["DISTDIR"]+"/"+myfile).read()):
4252 os.unlink(mysettings["DISTDIR"]+"/"+myfile)
4253 writemsg(">>> Deleting invalid distfile. (Improper 404 redirect from server.)\n")
4256 except (IOError, OSError):
4261 # File is the correct size--check the checksums for the fetched
4262 # file NOW, for those users who don't have a stable/continuous
4263 # net connection. This way we have a chance to try to download
4264 # from another mirror...
4265 verified_ok,reason = portage.checksum.verify_all(mysettings["DISTDIR"]+"/"+myfile, mydigests[myfile])
4268 writemsg("!!! Fetched file: "+str(myfile)+" VERIFY FAILED!\n",
4270 writemsg("!!! Reason: "+reason[0]+"\n",
4272 writemsg("!!! Got: %s\n!!! Expected: %s\n" % \
4273 (reason[1], reason[2]), noiselevel=-1)
4274 if reason[0] == "Insufficient data for checksum verification":
4277 _checksum_failure_temp_file(
4278 mysettings["DISTDIR"], myfile)
4279 writemsg_stdout("Refetching... " + \
4280 "File renamed to '%s'\n\n" % \
4281 temp_filename, noiselevel=-1)
4283 checksum_failure_count += 1
4284 if checksum_failure_count == \
4285 checksum_failure_primaryuri:
4286 # Switch to "primaryuri" mode in order
4287 # to increase the probability of
4290 primaryuri_dict.get(myfile)
4293 reversed(primaryuris))
4294 if checksum_failure_count >= \
4295 checksum_failure_max_tries:
4298 eout = portage.output.EOutput()
4299 eout.quiet = mysettings.get("PORTAGE_QUIET", None) == "1"
4300 digests = mydigests.get(myfile)
4302 eout.ebegin("%s %s ;-)" % \
4303 (myfile, " ".join(sorted(digests))))
4311 elif mydigests!=None:
4312 writemsg("No digest file available and download failed.\n\n",
4315 if use_locks and file_lock:
4316 portage.locks.unlockfile(file_lock)
4319 writemsg_stdout("\n", noiselevel=-1)
# On failure of a fetch-restricted file, show the restriction notice
# once and run the ebuild's pkg_nofetch phase for user instructions.
4321 if restrict_fetch and not restrict_fetch_msg:
4322 restrict_fetch_msg = True
4323 msg = ("\n!!! %s/%s" + \
4324 " has fetch restriction turned on.\n" + \
4325 "!!! This probably means that this " + \
4326 "ebuild's files must be downloaded\n" + \
4327 "!!! manually. See the comments in" + \
4328 " the ebuild for more information.\n\n") % \
4329 (mysettings["CATEGORY"], mysettings["PF"])
4330 portage.util.writemsg_level(msg,
4331 level=logging.ERROR, noiselevel=-1)
4332 have_builddir = "PORTAGE_BUILDDIR" in mysettings and \
4333 os.path.isdir(mysettings["PORTAGE_BUILDDIR"])
4335 global_tmpdir = mysettings["PORTAGE_TMPDIR"]
4336 private_tmpdir = None
4337 if not parallel_fetchonly and not have_builddir:
4338 # When called by digestgen(), it's normal that
4339 # PORTAGE_BUILDDIR doesn't exist. It's helpful
4340 # to show the pkg_nofetch output though, so go
4341 # ahead and create a temporary PORTAGE_BUILDDIR.
4342 # Use a temporary config instance to avoid altering
4343 # the state of the one that's been passed in.
4344 mysettings = config(clone=mysettings)
4345 from tempfile import mkdtemp
4347 private_tmpdir = mkdtemp("", "._portage_fetch_.",
4350 if e.errno != portage.exception.PermissionDenied.errno:
4352 raise portage.exception.PermissionDenied(global_tmpdir)
4353 mysettings["PORTAGE_TMPDIR"] = private_tmpdir
4354 mysettings.backup_changes("PORTAGE_TMPDIR")
4355 debug = mysettings.get("PORTAGE_DEBUG") == "1"
4356 portage.doebuild_environment(mysettings["EBUILD"], "fetch",
4357 mysettings["ROOT"], mysettings, debug, 1, None)
4358 prepare_build_dirs(mysettings["ROOT"], mysettings, 0)
4359 have_builddir = True
4361 if not parallel_fetchonly and have_builddir:
4362 # To spawn pkg_nofetch requires PORTAGE_BUILDDIR for
4363 # ensuring sane $PWD (bug #239560) and storing elog
4364 # messages. Therefore, calling code needs to ensure that
4365 # PORTAGE_BUILDDIR is already clean and locked here.
4367 # All the pkg_nofetch goes to stderr since it's considered
4368 # to be an error message.
4370 0 : sys.stdin.fileno(),
4371 1 : sys.stderr.fileno(),
4372 2 : sys.stderr.fileno(),
# Temporarily force EBUILD_PHASE=nofetch for the spawned ebuild.sh,
# restoring (or removing) the previous value afterwards.
4375 ebuild_phase = mysettings.get("EBUILD_PHASE")
4377 mysettings["EBUILD_PHASE"] = "nofetch"
4378 spawn(_shell_quote(EBUILD_SH_BINARY) + \
4379 " nofetch", mysettings, fd_pipes=fd_pipes)
4381 if ebuild_phase is None:
4382 mysettings.pop("EBUILD_PHASE", None)
4384 mysettings["EBUILD_PHASE"] = ebuild_phase
4385 if private_tmpdir is not None:
4386 shutil.rmtree(private_tmpdir)
4388 elif restrict_fetch:
4392 elif not filedict[myfile]:
4393 writemsg("Warning: No mirrors available for file" + \
4394 " '%s'\n" % (myfile), noiselevel=-1)
4396 writemsg("!!! Couldn't download '%s'. Aborting.\n" % myfile,
4402 failed_files.add(myfile)
4409 def digestgen(myarchives, mysettings, overwrite=1, manifestonly=0, myportdb=None):
4411 Generates a digest file if missing. Assumes all files are available.
4412 DEPRECATED: this is now only a compatibility wrapper for
4413 portage.manifest.Manifest()
4414 NOTE: manifestonly and overwrite are useless with manifest2 and
4415 are therefore ignored."""
4416 if myportdb is None:
4417 writemsg("Warning: myportdb not specified to digestgen\n")
4420 global _doebuild_manifest_exempt_depend
# Temporarily exempt manifest checking while we regenerate it;
# balanced by the decrement at the end of this function.
4422 _doebuild_manifest_exempt_depend += 1
# Build distfiles_map: distfile name -> list of cpvs that use it.
4424 fetchlist_dict = FetchlistDict(mysettings["O"], mysettings, myportdb)
4425 for cpv in fetchlist_dict:
4427 for myfile in fetchlist_dict[cpv]:
4428 distfiles_map.setdefault(myfile, []).append(cpv)
4429 except portage.exception.InvalidDependString, e:
4430 writemsg("!!! %s\n" % str(e), noiselevel=-1)
4433 mytree = os.path.dirname(os.path.dirname(mysettings["O"]))
4434 manifest1_compat = False
4435 mf = Manifest(mysettings["O"], mysettings["DISTDIR"],
4436 fetchlist_dict=fetchlist_dict, manifest1_compat=manifest1_compat)
4437 # Don't require all hashes since that can trigger excessive
4438 # fetches when sufficient digests already exist. To ease transition
4439 # while Manifest 1 is being removed, only require hashes that will
4440 # exist before and after the transition.
4441 required_hash_types = set()
4442 required_hash_types.add("size")
4443 required_hash_types.add(portage.const.MANIFEST2_REQUIRED_HASH)
4444 dist_hashes = mf.fhashdict.get("DIST", {})
4446 # To avoid accidental regeneration of digests with the incorrect
4447 # files (such as partially downloaded files), trigger the fetch
4448 # code if the file exists and its size doesn't match the current
4449 # manifest entry. If there really is a legitimate reason for the
4450 # digest to change, `ebuild --force digest` can be used to avoid
4451 # triggering this code (or else the old digests can be manually
4452 # removed from the Manifest).
# Collect distfiles that are absent, empty, missing required hash
# types, or whose on-disk size disagrees with the manifest entry.
4454 for myfile in distfiles_map:
4455 myhashes = dist_hashes.get(myfile)
4458 st = os.stat(os.path.join(mysettings["DISTDIR"], myfile))
4461 if st is None or st.st_size == 0:
4462 missing_files.append(myfile)
4464 size = myhashes.get("size")
4467 st = os.stat(os.path.join(mysettings["DISTDIR"], myfile))
4469 if e.errno != errno.ENOENT:
4473 missing_files.append(myfile)
4475 if required_hash_types.difference(myhashes):
4476 missing_files.append(myfile)
4479 if st.st_size == 0 or size is not None and size != st.st_size:
4480 missing_files.append(myfile)
# Fetch each missing distfile, gathering its URIs from every cpv
# that references it; a cloned config avoids mutating the caller's.
4484 mytree = os.path.realpath(os.path.dirname(
4485 os.path.dirname(mysettings["O"])))
4486 fetch_settings = config(clone=mysettings)
4487 debug = mysettings.get("PORTAGE_DEBUG") == "1"
4488 for myfile in missing_files:
4490 for cpv in distfiles_map[myfile]:
4491 myebuild = os.path.join(mysettings["O"],
4492 catsplit(cpv)[1] + ".ebuild")
4493 # for RESTRICT=fetch, mirror, etc...
4494 doebuild_environment(myebuild, "fetch",
4495 mysettings["ROOT"], fetch_settings,
4497 uris.update(myportdb.getFetchMap(
4498 cpv, mytree=mytree)[myfile])
4500 fetch_settings["A"] = myfile # for use by pkg_nofetch()
4503 st = os.stat(os.path.join(
4504 mysettings["DISTDIR"],myfile))
4508 if not fetch({myfile : uris}, fetch_settings):
4509 writemsg(("!!! Fetch failed for %s, can't update " + \
4510 "Manifest\n") % myfile, noiselevel=-1)
4511 if myfile in dist_hashes and \
4512 st is not None and st.st_size > 0:
4513 # stat result is obtained before calling fetch(),
4514 # since fetch may rename the existing file if the
4515 # digest does not match.
4516 writemsg("!!! If you would like to " + \
4517 "forcefully replace the existing " + \
4518 "Manifest entry\n!!! for %s, use the " % \
4519 myfile + "following command:\n" + \
4520 "!!! " + colorize("INFORM",
4521 "ebuild --force %s manifest" % \
4522 os.path.basename(myebuild)) + "\n",
4525 writemsg_stdout(">>> Creating Manifest for %s\n" % mysettings["O"])
4527 mf.create(requiredDistfiles=myarchives,
4528 assumeDistHashesSometimes=True,
4529 assumeDistHashesAlways=(
4530 "assume-digests" in mysettings.features))
4531 except portage.exception.FileNotFound, e:
4532 writemsg(("!!! File %s doesn't exist, can't update " + \
4533 "Manifest\n") % e, noiselevel=-1)
4535 except portage.exception.PortagePackageException, e:
4536 writemsg(("!!! %s\n") % (e,), noiselevel=-1)
4539 mf.write(sign=False)
4540 except portage.exception.PermissionDenied, e:
4541 writemsg("!!! Permission Denied: %s\n" % (e,), noiselevel=-1)
# Without assume-digests, report which manifest entries were assumed
# (distfile not present locally) so the user can review them.
4543 if "assume-digests" not in mysettings.features:
4544 distlist = mf.fhashdict.get("DIST", {}).keys()
4547 for filename in distlist:
4548 if not os.path.exists(
4549 os.path.join(mysettings["DISTDIR"], filename)):
4550 auto_assumed.append(filename)
4552 mytree = os.path.realpath(
4553 os.path.dirname(os.path.dirname(mysettings["O"])))
4554 cp = os.path.sep.join(mysettings["O"].split(os.path.sep)[-2:])
4555 pkgs = myportdb.cp_list(cp, mytree=mytree)
4557 writemsg_stdout(" digest.assumed" + portage.output.colorize("WARN",
4558 str(len(auto_assumed)).rjust(18)) + "\n")
4559 for pkg_key in pkgs:
4560 fetchlist = myportdb.getFetchMap(pkg_key, mytree=mytree)
4561 pv = pkg_key.split("/")[1]
4562 for filename in auto_assumed:
4563 if filename in fetchlist:
4565 " %s::%s\n" % (pv, filename))
4568 _doebuild_manifest_exempt_depend -= 1
def digestParseFile(myfilename, mysettings=None):
	"""Return the digest entries for the package that owns *myfilename*.

	DEPRECATED: this function is now only a compability wrapper for
	portage.manifest.Manifest().  The result is a dict keyed by
	filename, each value mapping {checksumkey: checksum}.
	"""
	parts = myfilename.split(os.sep)
	if parts[-2] == "files" and parts[-1].startswith("digest-"):
		# old-style files/digest-<pv> path: package dir is two levels up
		pkgdir = os.sep + os.sep.join(parts[:-2]).strip(os.sep)
	elif parts[-1] == "Manifest":
		# Manifest sits directly inside the package directory
		pkgdir = os.sep + os.sep.join(parts[:-1]).strip(os.sep)

	if mysettings is None:
		# fall back to a private clone of the global settings
		mysettings = config(clone=settings)

	return Manifest(pkgdir, mysettings["DISTDIR"]).getDigests()
4591 def digestcheck(myfiles, mysettings, strict=0, justmanifest=0):
4592 """Verifies checksums. Assumes all files have been downloaded.
4593 DEPRECATED: this is now only a compability wrapper for
4594 portage.manifest.Manifest()."""
# NOTE(review): this listing is elided -- several numbered lines are
# missing (e.g. the early-return bodies and the try: header), so the
# control flow below is only partially visible.
4595 if mysettings.get("EBUILD_SKIP_MANIFEST") == "1":
4597 pkgdir = mysettings["O"]
4598 manifest_path = os.path.join(pkgdir, "Manifest")
# A missing Manifest is reported; the (elided) branch presumably decides
# pass/fail based on `strict` -- TODO confirm against full source.
4599 if not os.path.exists(manifest_path):
4600 writemsg("!!! Manifest file not found: '%s'\n" % manifest_path,
4606 mf = Manifest(pkgdir, mysettings["DISTDIR"])
# Scan the per-type hash dicts; any non-empty entry marks the Manifest
# as non-empty.
4607 manifest_empty = True
4608 for d in mf.fhashdict.itervalues():
4610 manifest_empty = False
4613 writemsg("!!! Manifest is empty: '%s'\n" % manifest_path,
# Progress output; silenced when PORTAGE_QUIET=1.
4619 eout = portage.output.EOutput()
4620 eout.quiet = mysettings.get("PORTAGE_QUIET", None) == "1"
# In strict mode (and not parallel-fetch) verify ebuild, auxfile and
# miscfile hashes before the requested files themselves.
4622 if strict and "PORTAGE_PARALLEL_FETCHONLY" not in mysettings:
4623 eout.ebegin("checking ebuild checksums ;-)")
4624 mf.checkTypeHashes("EBUILD")
4626 eout.ebegin("checking auxfile checksums ;-)")
4627 mf.checkTypeHashes("AUX")
4629 eout.ebegin("checking miscfile checksums ;-)")
4630 mf.checkTypeHashes("MISC", ignoreMissingFiles=True)
4633 eout.ebegin("checking %s ;-)" % f)
4634 mf.checkFileHashes(mf.findFile(f), f)
# Failure reporting: missing digest, missing file, or digest mismatch.
4638 writemsg("\n!!! Missing digest for %s\n" % str(e), noiselevel=-1)
4640 except portage.exception.FileNotFound, e:
4642 writemsg("\n!!! A file listed in the Manifest could not be found: %s\n" % str(e),
4645 except portage.exception.DigestException, e:
4647 writemsg("\n!!! Digest verification failed:\n", noiselevel=-1)
4648 writemsg("!!! %s\n" % e.value[0], noiselevel=-1)
4649 writemsg("!!! Reason: %s\n" % e.value[1], noiselevel=-1)
4650 writemsg("!!! Got: %s\n" % e.value[2], noiselevel=-1)
4651 writemsg("!!! Expected: %s\n" % e.value[3], noiselevel=-1)
4653 # Make sure that all of the ebuilds are actually listed in the Manifest.
4654 for f in os.listdir(pkgdir):
4655 if f.endswith(".ebuild") and not mf.hasFile("EBUILD", f):
4656 writemsg("!!! A file is not listed in the Manifest: '%s'\n" % \
4657 os.path.join(pkgdir, f), noiselevel=-1)
4660 """ epatch will just grab all the patches out of a directory, so we have to
4661 make sure there aren't any foreign files that it might grab."""
# Walk files/ and flag anything that is neither an AUX Manifest entry
# nor an old-style digest file; hidden entries and CVS dirs are skipped.
4662 filesdir = os.path.join(pkgdir, "files")
4663 for parent, dirs, files in os.walk(filesdir):
4665 if d.startswith(".") or d == "CVS":
4668 if f.startswith("."):
4670 f = os.path.join(parent, f)[len(filesdir) + 1:]
4671 file_type = mf.findFile(f)
4672 if file_type != "AUX" and not f.startswith("digest-"):
4673 writemsg("!!! A file is not listed in the Manifest: '%s'\n" % \
4674 os.path.join(filesdir, f), noiselevel=-1)
4679 # parse actionmap to spawn ebuild with the appropriate args
4680 def spawnebuild(mydo, actionmap, mysettings, debug, alwaysdep=0,
4681 logfile=None, fd_pipes=None, returnpid=False):
# Run one ebuild phase via its actionmap entry, recursing into the
# phase's declared "dep" phase first (unless noauto/returnpid disables
# that).  NOTE(review): elided listing -- the early returns and the
# EAPI-skip branch bodies are among the missing lines.
4682 if not returnpid and \
4683 (alwaysdep or "noauto" not in mysettings.features):
4684 # process dependency first
4685 if "dep" in actionmap[mydo]:
4686 retval = spawnebuild(actionmap[mydo]["dep"], actionmap,
4687 mysettings, debug, alwaysdep=alwaysdep, logfile=logfile,
4688 fd_pipes=fd_pipes, returnpid=returnpid)
# configure/prepare did not exist before EAPI 2; presumably skipped for
# EAPI 0/1 -- the skip bodies are elided.
4692 eapi = mysettings["EAPI"]
4694 if mydo == "configure" and eapi in ("0", "1"):
4697 if mydo == "prepare" and eapi in ("0", "1"):
# Clear the exit-status marker before spawning so a stale file cannot
# mask an abnormal shell exit.
4700 kwargs = actionmap[mydo]["args"]
4701 mysettings["EBUILD_PHASE"] = mydo
4702 _doebuild_exit_status_unlink(
4703 mysettings.get("EBUILD_EXIT_STATUS_FILE"))
4706 phase_retval = spawn(actionmap[mydo]["cmd"] % mydo,
4707 mysettings, debug=debug, logfile=logfile,
4708 fd_pipes=fd_pipes, returnpid=returnpid, **kwargs)
4710 mysettings["EBUILD_PHASE"] = ""
# If the shell exited without writing its status file, log the long
# diagnostic message via elog.
4714 msg = _doebuild_exit_status_check(mydo, mysettings)
4717 from textwrap import wrap
4718 from portage.elog.messages import eerror
4719 for l in wrap(msg, 72):
4720 eerror(l, phase=mydo, key=mysettings.mycpv)
# Post-phase fixups: restore userpriv perms, and after install run the
# build-log scan plus QA checks.
4722 _post_phase_userpriv_perms(mysettings)
4723 if mydo == "install":
4724 _check_build_log(mysettings)
4725 if phase_retval == os.EX_OK:
4726 _post_src_install_chost_fix(mysettings)
4727 phase_retval = _post_src_install_checks(mysettings)
# FEATURES=test-fail-continue converts a failed test phase to success.
4729 if mydo == "test" and phase_retval != os.EX_OK and \
4730 "test-fail-continue" in mysettings.features:
4731 phase_retval = os.EX_OK
# Table of misc-functions.sh helper commands run after certain phases
# (consumed by _spawn_misc_sh and the _post_pkg_*_cmd helpers below).
# NOTE(review): elided listing -- the dict keys (presumably "install",
# "preinst", "postinst") and the closing brace are among the missing lines.
4735 _post_phase_cmds = {
4739 "install_symlink_html_docs"],
4744 "preinst_selinux_labels",
4745 "preinst_suid_scan",
4749 "postinst_bsdflags"]
def _post_phase_userpriv_perms(mysettings):
	"""Make ${T} writable by the portage user after a privileged phase.

	Privileged phases may have left files that need to be made
	writable to a less privileged user.
	"""
	# Only applies with FEATURES=userpriv and full root privileges.
	if "userpriv" not in mysettings.features or secpass < 2:
		return
	apply_recursive_permissions(mysettings["T"],
		uid=portage_uid, gid=portage_gid,
		dirmode=0o70, dirmask=0, filemode=0o60, filemask=0)
4760 def _post_src_install_checks(mysettings):
# Run post-src_install QA: fix file ownership first, then execute the
# "install" command list through misc-functions.sh.
# NOTE(review): elided listing -- the writemsg continuation and the
# final return are among the missing lines.
4761 _post_src_install_uid_fix(mysettings)
4762 global _post_phase_cmds
4763 retval = _spawn_misc_sh(mysettings, _post_phase_cmds["install"])
4764 if retval != os.EX_OK:
4765 writemsg("!!! install_qa_check failed; exiting.\n",
4769 def _check_build_log(mysettings, out=None):
4771 Search the content of $PORTAGE_LOG_FILE if it exists
4772 and generate the following QA Notices when appropriate:
4774 * Automake "maintainer mode"
4776 * Unrecognized configure options
# NOTE(review): elided listing -- the docstring delimiters, the open()
# try/except body, the log-line loop header and the eqawarn emission
# lines are among the missing lines.
4778 logfile = mysettings.get("PORTAGE_LOG_FILE")
4783 except EnvironmentError:
# Accumulators plus the compiled patterns that classify each log line.
4786 am_maintainer_mode = []
4787 bash_command_not_found = []
4788 bash_command_not_found_re = re.compile(
4789 r'(.*): line (\d*): (.*): command not found$')
4790 command_not_found_exclude_re = re.compile(r'/configure: line ')
4791 helper_missing_file = []
4792 helper_missing_file_re = re.compile(
4793 r'^!!! (do|new).*: .* does not exist$')
4795 configure_opts_warn = []
4796 configure_opts_warn_re = re.compile(
4797 r'^configure: WARNING: [Uu]nrecognized options: ')
# "/missing --run" indicates automake maintainer mode, except for the
# harmless autoheader/makeinfo invocations which are excluded.
4798 am_maintainer_mode_re = re.compile(r'/missing --run ')
4799 am_maintainer_mode_exclude_re = \
4800 re.compile(r'/missing --run (autoheader|makeinfo)')
4802 make_jobserver_re = \
4803 re.compile(r'g?make\[\d+\]: warning: jobserver unavailable:')
# Per-line classification (loop header elided above).
4808 if am_maintainer_mode_re.search(line) is not None and \
4809 am_maintainer_mode_exclude_re.search(line) is None:
4810 am_maintainer_mode.append(line.rstrip("\n"))
4812 if bash_command_not_found_re.match(line) is not None and \
4813 command_not_found_exclude_re.search(line) is None:
4814 bash_command_not_found.append(line.rstrip("\n"))
4816 if helper_missing_file_re.match(line) is not None:
4817 helper_missing_file.append(line.rstrip("\n"))
4819 if configure_opts_warn_re.match(line) is not None:
4820 configure_opts_warn.append(line.rstrip("\n"))
4822 if make_jobserver_re.match(line) is not None:
4823 make_jobserver.append(line.rstrip("\n"))
# Local helper that routes each message line through eqawarn with the
# "install" phase key.
4828 from portage.elog.messages import eqawarn
4829 def _eqawarn(lines):
4831 eqawarn(line, phase="install", key=mysettings.mycpv, out=out)
4832 from textwrap import wrap
# One QA Notice block per category that matched anything.
4835 if am_maintainer_mode:
4836 msg = ["QA Notice: Automake \"maintainer mode\" detected:"]
4838 msg.extend("\t" + line for line in am_maintainer_mode)
4841 "If you patch Makefile.am, " + \
4842 "configure.in, or configure.ac then you " + \
4843 "should use autotools.eclass and " + \
4844 "eautomake or eautoreconf. Exceptions " + \
4845 "are limited to system packages " + \
4846 "for which it is impossible to run " + \
4847 "autotools during stage building. " + \
4848 "See http://www.gentoo.org/p" + \
4849 "roj/en/qa/autofailure.xml for more information.",
4853 if bash_command_not_found:
4854 msg = ["QA Notice: command not found:"]
4856 msg.extend("\t" + line for line in bash_command_not_found)
4859 if helper_missing_file:
4860 msg = ["QA Notice: file does not exist:"]
4862 msg.extend("\t" + line[4:] for line in helper_missing_file)
4865 if configure_opts_warn:
4866 msg = ["QA Notice: Unrecognized configure options:"]
4868 msg.extend("\t" + line for line in configure_opts_warn)
4872 msg = ["QA Notice: make jobserver unavailable:"]
4874 msg.extend("\t" + line for line in make_jobserver)
4877 def _post_src_install_chost_fix(settings):
4879 It's possible that the ebuild has changed the
4880 CHOST variable, so revert it to the initial
# NOTE(review): elided listing -- the docstring delimiters and,
# presumably, a None-guard before write_atomic are among the missing
# lines.  Record the pre-ebuild CHOST into build-info/CHOST.
4883 chost = settings.get('CHOST')
4885 write_atomic(os.path.join(settings['PORTAGE_BUILDDIR'],
4886 'build-info', 'CHOST'), chost + '\n')
4888 def _post_src_install_uid_fix(mysettings):
4890 Files in $D with user and group bits that match the "portage"
4891 user or group are automatically mapped to PORTAGE_INST_UID and
4892 PORTAGE_INST_GID if necessary. The chown system call may clear
4893 S_ISUID and S_ISGID bits, so those bits are restored if
# NOTE(review): elided listing -- the docstring delimiters, the
# bsd_chflags platform guard and the continue/else branches are among
# the missing lines.
4896 inst_uid = int(mysettings["PORTAGE_INST_UID"])
4897 inst_gid = int(mysettings["PORTAGE_INST_GID"])
# On FreeBSD, snapshot file flags with mtree and strip the immutable /
# append-only flags so the chown/chmod pass below cannot hit EPERM.
4900 # Temporarily remove all of the flags in order to avoid EPERM errors.
4901 os.system("mtree -c -p %s -k flags > %s" % \
4902 (_shell_quote(mysettings["D"]),
4903 _shell_quote(os.path.join(mysettings["T"], "bsdflags.mtree"))))
4904 os.system("chflags -R noschg,nouchg,nosappnd,nouappnd %s" % \
4905 (_shell_quote(mysettings["D"]),))
4906 os.system("chflags -R nosunlnk,nouunlnk %s 2>/dev/null" % \
4907 (_shell_quote(mysettings["D"]),))
# Walk the whole image and remap ownership of anything owned by the
# portage user/group; preserves the original mode bits via stat_cached.
4909 for parent, dirs, files in os.walk(mysettings["D"]):
4910 for fname in chain(dirs, files):
4911 fpath = os.path.join(parent, fname)
4912 mystat = os.lstat(fpath)
4913 if mystat.st_uid != portage_uid and \
4914 mystat.st_gid != portage_gid:
4915 if mystat.st_uid == portage_uid:
4920 if mystat.st_gid == portage_gid:
4922 apply_secpass_permissions(fpath, uid=myuid, gid=mygid,
4923 mode=mystat.st_mode, stat_cached=mystat,
4927 # Restore all of the flags saved above.
4928 os.system("mtree -e -p %s -U -k flags < %s > /dev/null" % \
4929 (_shell_quote(mysettings["D"]),
4930 _shell_quote(os.path.join(mysettings["T"], "bsdflags.mtree"))))
4932 def _post_pkg_preinst_cmd(mysettings):
4934 Post phase logic and tasks that have been factored out of
4935 ebuild.sh. Call preinst_mask last so that INSTALL_MASK can
4936 can be used to wipe out any gmon.out files created during
4937 previous functions (in case any tools were built with -pg
# NOTE(review): elided listing -- the docstring delimiters and the
# return of the assembled args are among the missing lines.
# Build the misc-functions.sh argument vector for the preinst commands.
4941 portage_bin_path = mysettings["PORTAGE_BIN_PATH"]
4942 misc_sh_binary = os.path.join(portage_bin_path,
4943 os.path.basename(MISC_SH_BINARY))
4945 mysettings["EBUILD_PHASE"] = ""
4946 global _post_phase_cmds
4947 myargs = [_shell_quote(misc_sh_binary)] + _post_phase_cmds["preinst"]
4951 def _post_pkg_postinst_cmd(mysettings):
4953 Post phase logic and tasks that have been factored out of
# NOTE(review): elided listing -- docstring delimiters and the return
# of the assembled args are among the missing lines.  Mirrors
# _post_pkg_preinst_cmd but uses the "postinst" command list.
4957 portage_bin_path = mysettings["PORTAGE_BIN_PATH"]
4958 misc_sh_binary = os.path.join(portage_bin_path,
4959 os.path.basename(MISC_SH_BINARY))
4961 mysettings["EBUILD_PHASE"] = ""
4962 global _post_phase_cmds
4963 myargs = [_shell_quote(misc_sh_binary)] + _post_phase_cmds["postinst"]
4967 def _spawn_misc_sh(mysettings, commands, **kwargs):
4969 @param mysettings: the ebuild config
4970 @type mysettings: config
4971 @param commands: a list of function names to call in misc-functions.sh
4972 @type commands: list
4974 @returns: the return value from the spawn() call
# NOTE(review): elided listing -- docstring delimiters, the EBUILD_PHASE
# save/restore around spawn() and the final return are among the
# missing lines.
4977 # Note: PORTAGE_BIN_PATH may differ from the global
4978 # constant when portage is reinstalling itself.
4979 portage_bin_path = mysettings["PORTAGE_BIN_PATH"]
4980 misc_sh_binary = os.path.join(portage_bin_path,
4981 os.path.basename(MISC_SH_BINARY))
4982 mycommand = " ".join([_shell_quote(misc_sh_binary)] + commands)
# Clear the exit-status marker so an abnormal shell exit is detectable
# after spawn() returns.
4983 _doebuild_exit_status_unlink(
4984 mysettings.get("EBUILD_EXIT_STATUS_FILE"))
4985 debug = mysettings.get("PORTAGE_DEBUG") == "1"
4986 logfile = mysettings.get("PORTAGE_LOG_FILE")
4987 mydo = mysettings["EBUILD_PHASE"]
4989 rval = spawn(mycommand, mysettings, debug=debug,
4990 logfile=logfile, **kwargs)
# If the shell exited without writing its status file, log the
# diagnostic message through elog.
4993 msg = _doebuild_exit_status_check(mydo, mysettings)
4996 from textwrap import wrap
4997 from portage.elog.messages import eerror
4998 for l in wrap(msg, 72):
4999 eerror(l, phase=mydo, key=mysettings.mycpv)
5002 _testing_eapis = frozenset(["3_pre1"])
5003 _deprecated_eapis = frozenset(["2_pre3", "2_pre2", "2_pre1"])
5005 def _eapi_is_deprecated(eapi):
5006 return eapi in _deprecated_eapis
5008 def eapi_is_supported(eapi):
# Decide whether this portage can handle the given EAPI value.
# NOTE(review): elided listing -- the True-return bodies and (presumably)
# an int() conversion with ValueError handling are among the missing
# lines, so only the skeleton is visible here.
5009 eapi = str(eapi).strip()
5011 if _eapi_is_deprecated(eapi):
5014 if eapi in _testing_eapis:
# Final check compares against the highest EAPI this portage supports.
5023 return eapi <= portage.const.EAPI
5025 def doebuild_environment(myebuild, mydo, myroot, mysettings, debug, use_cache, mydbapi):
# Populate mysettings with every variable an ebuild phase needs
# (category/package name parts, directory layout, sandbox paths, KV,
# color map).  NOTE(review): elided listing -- several lines (else
# branches, try headers, a pkgsplit validity check guard) are missing.
5027 ebuild_path = os.path.abspath(myebuild)
5028 pkg_dir = os.path.dirname(ebuild_path)
# Prefer the CATEGORY already present in the pkg config dict; otherwise
# derive it from the directory layout (category/pn/pn-pv.ebuild).
5030 if "CATEGORY" in mysettings.configdict["pkg"]:
5031 cat = mysettings.configdict["pkg"]["CATEGORY"]
5033 cat = os.path.basename(normalize_path(os.path.join(pkg_dir, "..")))
5034 mypv = os.path.basename(ebuild_path)[:-7]
5035 mycpv = cat+"/"+mypv
5036 mysplit=pkgsplit(mypv,silent=0)
5038 raise portage.exception.IncorrectParameter(
5039 "Invalid ebuild path: '%s'" % myebuild)
5041 # Make a backup of PORTAGE_TMPDIR prior to calling config.reset()
5042 # so that the caller can override it.
5043 tmpdir = mysettings["PORTAGE_TMPDIR"]
5045 if mydo != "depend" and mycpv != mysettings.mycpv:
5046 """For performance reasons, setcpv only triggers reset when it
5047 detects a package-specific change in config. For the ebuild
5048 environment, a reset call is forced in order to ensure that the
5049 latest env.d variables are used."""
5051 mysettings.reset(use_cache=use_cache)
5052 mysettings.setcpv(mycpv, use_cache=use_cache, mydb=mydbapi)
5054 # config.reset() might have reverted a change made by the caller,
5055 # so restore it to it's original value.
5056 mysettings["PORTAGE_TMPDIR"] = tmpdir
5058 mysettings.pop("EBUILD_PHASE", None) # remove from backupenv
5059 mysettings["EBUILD_PHASE"] = mydo
5061 mysettings["PORTAGE_MASTER_PID"] = str(os.getpid())
5063 # We are disabling user-specific bashrc files.
5064 mysettings["BASH_ENV"] = INVALID_ENV_FILE
5066 if debug: # Otherwise it overrides emerge's settings.
5067 # We have no other way to set debug... debug can't be passed in
5068 # due to how it's coded... Don't overwrite this so we can use it.
5069 mysettings["PORTAGE_DEBUG"] = "1"
# Core location variables used by ebuild.sh.
5071 mysettings["ROOT"] = myroot
5072 mysettings["STARTDIR"] = getcwd()
5073 mysettings["EBUILD"] = ebuild_path
5074 mysettings["O"] = pkg_dir
5075 mysettings.configdict["pkg"]["CATEGORY"] = cat
5076 mysettings["FILESDIR"] = pkg_dir+"/files"
5077 mysettings["PF"] = mypv
# Resolve symlinks so the sandbox sees canonical tree paths.
5079 mysettings["PORTDIR"] = os.path.realpath(mysettings["PORTDIR"])
5080 mysettings["DISTDIR"] = os.path.realpath(mysettings["DISTDIR"])
5081 mysettings["RPMDIR"] = os.path.realpath(mysettings["RPMDIR"])
5083 mysettings["ECLASSDIR"] = mysettings["PORTDIR"]+"/eclass"
5084 mysettings["SANDBOX_LOG"] = mycpv.replace("/", "_-_")
# Standard package name components from pkgsplit: P/PN/PV/PR.
5086 mysettings["PROFILE_PATHS"] = "\n".join(mysettings.profiles)
5087 mysettings["P"] = mysplit[0]+"-"+mysplit[1]
5088 mysettings["PN"] = mysplit[0]
5089 mysettings["PV"] = mysplit[1]
5090 mysettings["PR"] = mysplit[2]
5092 if portage.util.noiselimit < 0:
5093 mysettings["PORTAGE_QUIET"] = "1"
5095 if mydo != "depend":
5096 # Metadata vars such as EAPI and RESTRICT are
5097 # set by the above config.setcpv() call.
5098 eapi = mysettings["EAPI"]
5099 if not eapi_is_supported(eapi):
5100 # can't do anything with this.
5101 raise portage.exception.UnsupportedAPIException(mycpv, eapi)
# Pre-compute the USE-reduced RESTRICT value; invalid dep strings are
# tolerated here because doebuild validates RESTRICT again.
5103 mysettings["PORTAGE_RESTRICT"] = " ".join(flatten(
5104 portage.dep.use_reduce(portage.dep.paren_reduce(
5105 mysettings["RESTRICT"]),
5106 uselist=mysettings["PORTAGE_USE"].split())))
5107 except portage.exception.InvalidDependString:
5108 # RESTRICT is validated again inside doebuild, so let this go
5109 mysettings["PORTAGE_RESTRICT"] = ""
# PVR omits the revision suffix for -r0.
5111 if mysplit[2] == "r0":
5112 mysettings["PVR"]=mysplit[1]
5114 mysettings["PVR"]=mysplit[1]+"-"+mysplit[2]
# Make sure PORTAGE_BIN_PATH is on PATH for the spawned shell.
5116 if "PATH" in mysettings:
5117 mysplit=mysettings["PATH"].split(":")
5120 # Note: PORTAGE_BIN_PATH may differ from the global constant
5121 # when portage is reinstalling itself.
5122 portage_bin_path = mysettings["PORTAGE_BIN_PATH"]
5123 if portage_bin_path not in mysplit:
5124 mysettings["PATH"] = portage_bin_path + ":" + mysettings["PATH"]
5126 # Sandbox needs cannonical paths.
5127 mysettings["PORTAGE_TMPDIR"] = os.path.realpath(
5128 mysettings["PORTAGE_TMPDIR"])
5129 mysettings["BUILD_PREFIX"] = mysettings["PORTAGE_TMPDIR"]+"/portage"
5130 mysettings["PKG_TMPDIR"] = mysettings["PORTAGE_TMPDIR"]+"/binpkgs"
5132 # Package {pre,post}inst and {pre,post}rm may overlap, so they must have separate
5133 # locations in order to prevent interference.
5134 if mydo in ("unmerge", "prerm", "postrm", "cleanrm"):
5135 mysettings["PORTAGE_BUILDDIR"] = os.path.join(
5136 mysettings["PKG_TMPDIR"],
5137 mysettings["CATEGORY"], mysettings["PF"])
5139 mysettings["PORTAGE_BUILDDIR"] = os.path.join(
5140 mysettings["BUILD_PREFIX"],
5141 mysettings["CATEGORY"], mysettings["PF"])
# Per-build directory layout under PORTAGE_BUILDDIR.
5143 mysettings["HOME"] = os.path.join(mysettings["PORTAGE_BUILDDIR"], "homedir")
5144 mysettings["WORKDIR"] = os.path.join(mysettings["PORTAGE_BUILDDIR"], "work")
5145 mysettings["D"] = os.path.join(mysettings["PORTAGE_BUILDDIR"], "image") + os.sep
5146 mysettings["T"] = os.path.join(mysettings["PORTAGE_BUILDDIR"], "temp")
5148 mysettings["PORTAGE_BASHRC"] = os.path.join(
5149 mysettings["PORTAGE_CONFIGROOT"], EBUILD_SH_ENV_FILE.lstrip(os.path.sep))
5150 mysettings["EBUILD_EXIT_STATUS_FILE"] = os.path.join(
5151 mysettings["PORTAGE_BUILDDIR"], ".exit_status")
5153 #set up KV variable -- DEP SPEEDUP :: Don't waste time. Keep var persistent.
5154 if mydo != "depend" and "KV" not in mysettings:
5155 mykv,err1=ExtractKernelVersion(os.path.join(myroot, "usr/src/linux"))
5157 # Regular source tree
5158 mysettings["KV"]=mykv
5161 mysettings.backup_changes("KV")
5163 # Allow color.map to control colors associated with einfo, ewarn, etc...
5165 for c in ("GOOD", "WARN", "BAD", "HILITE", "BRACKET"):
5166 mycolors.append("%s=$'%s'" % (c, portage.output.codes[c]))
5167 mysettings["PORTAGE_COLORMAP"] = "\n".join(mycolors)
5169 def prepare_build_dirs(myroot, mysettings, cleanup):
# Create (and optionally wipe) the per-build directory tree with the
# portage group ownership.  NOTE(review): elided listing -- the try /
# except OSError headers and several raise/return lines are missing.
5171 clean_dirs = [mysettings["HOME"]]
5173 # We enable cleanup when we want to make sure old cruft (such as the old
5174 # environment) doesn't interfere with the current phase.
5176 clean_dirs.append(mysettings["T"])
# Remove stale dirs; ENOENT is fine, EPERM is reported.
5178 for clean_dir in clean_dirs:
5180 shutil.rmtree(clean_dir)
5182 if errno.ENOENT == oe.errno:
5184 elif errno.EPERM == oe.errno:
5185 writemsg("%s\n" % oe, noiselevel=-1)
5186 writemsg("Operation Not Permitted: rmtree('%s')\n" % \
5187 clean_dir, noiselevel=-1)
# Local helper: mkdir -p semantics where EEXIST is tolerated and EPERM
# is reported.
5192 def makedirs(dir_path):
5194 os.makedirs(dir_path)
5196 if errno.EEXIST == oe.errno:
5198 elif errno.EPERM == oe.errno:
5199 writemsg("%s\n" % oe, noiselevel=-1)
5200 writemsg("Operation Not Permitted: makedirs('%s')\n" % \
5201 dir_path, noiselevel=-1)
5207 mysettings["PKG_LOGDIR"] = os.path.join(mysettings["T"], "logging")
# Ensure the two ancestor dirs of PORTAGE_BUILDDIR exist with group
# portage and mode 070, then the build dirs themselves with 0775.
5209 mydirs = [os.path.dirname(mysettings["PORTAGE_BUILDDIR"])]
5210 mydirs.append(os.path.dirname(mydirs[-1]))
5213 for mydir in mydirs:
5214 portage.util.ensure_dirs(mydir)
5215 portage.util.apply_secpass_permissions(mydir,
5216 gid=portage_gid, uid=portage_uid, mode=070, mask=0)
5217 for dir_key in ("PORTAGE_BUILDDIR", "HOME", "PKG_LOGDIR", "T"):
5218 """These directories don't necessarily need to be group writable.
5219 However, the setup phase is commonly run as a privileged user prior
5220 to the other phases being run by an unprivileged user. Currently,
5221 we use the portage group to ensure that the unprivleged user still
5222 has write access to these directories in any case."""
5223 portage.util.ensure_dirs(mysettings[dir_key], mode=0775)
5224 portage.util.apply_secpass_permissions(mysettings[dir_key],
5225 uid=portage_uid, gid=portage_gid)
5226 except portage.exception.PermissionDenied, e:
5227 writemsg("Permission Denied: %s\n" % str(e), noiselevel=-1)
5229 except portage.exception.OperationNotPermitted, e:
5230 writemsg("Operation Not Permitted: %s\n" % str(e), noiselevel=-1)
5232 except portage.exception.FileNotFound, e:
5233 writemsg("File Not Found: '%s'\n" % str(e), noiselevel=-1)
# Feature dirs are skipped during fetch to avoid spurious permission
# churn with a temporary PORTAGE_TMPDIR.
5236 _prepare_workdir(mysettings)
5237 if mysettings.get('EBUILD_PHASE') != 'fetch':
5238 # Avoid spurious permissions adjustments when fetching with
5239 # a temporary PORTAGE_TMPDIR setting (for fetchonly).
5240 _prepare_features_dirs(mysettings)
5242 def _adjust_perms_msg(settings, msg):
# Emit a permissions-adjustment warning, appending it to the build log
# when running in the background.  NOTE(review): heavily elided listing
# -- the open/write/close try handling is mostly missing.
5245 writemsg(msg, noiselevel=-1)
5247 background = settings.get("PORTAGE_BACKGROUND") == "1"
5248 log_path = settings.get("PORTAGE_LOG_FILE")
5251 if background and log_path is not None:
5253 log_file = open(log_path, 'a')
5265 if log_file is not None:
5268 def _prepare_features_dirs(mysettings):
# Create and permission the ccache/distcc working directories for any
# enabled FEATURES, disabling the feature if permissions cannot be
# applied.  NOTE(review): elided listing -- the features_dirs dict keys,
# try headers, continue statements and the onerror helper are among the
# missing lines.
5272 "basedir_var":"CCACHE_DIR",
5273 "default_dir":os.path.join(mysettings["PORTAGE_TMPDIR"], "ccache"),
5274 "always_recurse":False},
5276 "basedir_var":"DISTCC_DIR",
5277 "default_dir":os.path.join(mysettings["BUILD_PREFIX"], ".distcc"),
5278 "subdirs":("lock", "state"),
5279 "always_recurse":True}
# userpriv fixups only apply when running as root with userpriv enabled
# and not restricted by the package.
5284 restrict = mysettings.get("PORTAGE_RESTRICT","").split()
5285 from portage.data import secpass
5286 droppriv = secpass >= 2 and \
5287 "userpriv" in mysettings.features and \
5288 "userpriv" not in restrict
5289 for myfeature, kwargs in features_dirs.iteritems():
5290 if myfeature in mysettings.features:
5291 basedir = mysettings[kwargs["basedir_var"]]
5293 basedir = kwargs["default_dir"]
5294 mysettings[kwargs["basedir_var"]] = basedir
5296 mydirs = [mysettings[kwargs["basedir_var"]]]
5297 if "subdirs" in kwargs:
5298 for subdir in kwargs["subdirs"]:
5299 mydirs.append(os.path.join(basedir, subdir))
5300 for mydir in mydirs:
5301 modified = portage.util.ensure_dirs(mydir)
5302 # Generally, we only want to apply permissions for
5303 # initial creation. Otherwise, we don't know exactly what
5304 # permissions the user wants, so should leave them as-is.
5305 droppriv_fix = False
# Detect whether the directory (or its contents) needs a group or
# mode fix for the portage group.
5308 if st.st_gid != portage_gid or \
5309 not dirmode == (stat.S_IMODE(st.st_mode) & dirmode):
5311 if not droppriv_fix:
5312 # Check permissions of files in the directory.
5313 for filename in os.listdir(mydir):
5315 subdir_st = os.lstat(
5316 os.path.join(mydir, filename))
5319 if subdir_st.st_gid != portage_gid or \
5320 ((stat.S_ISDIR(subdir_st.st_mode) and \
5321 not dirmode == (stat.S_IMODE(subdir_st.st_mode) & dirmode))):
# Warn before touching anything; messages distinguish the userpriv
# fix from the general feature-dir fix.
5326 _adjust_perms_msg(mysettings,
5327 colorize("WARN", " * ") + \
5328 "Adjusting permissions " + \
5329 "for FEATURES=userpriv: '%s'\n" % mydir)
5331 _adjust_perms_msg(mysettings,
5332 colorize("WARN", " * ") + \
5333 "Adjusting permissions " + \
5334 "for FEATURES=%s: '%s'\n" % (myfeature, mydir))
5336 if modified or kwargs["always_recurse"] or droppriv_fix:
5338 raise # The feature is disabled if a single error
5339 # occurs during permissions adjustment.
5340 if not apply_recursive_permissions(mydir,
5341 gid=portage_gid, dirmode=dirmode, dirmask=modemask,
5342 filemode=filemode, filemask=modemask, onerror=onerror):
5343 raise portage.exception.OperationNotPermitted(
5344 "Failed to apply recursive permissions for the portage group.")
# Any PortageException disables the feature for this run and reports
# the failure.
5345 except portage.exception.PortageException, e:
5346 mysettings.features.remove(myfeature)
5347 mysettings["FEATURES"] = " ".join(mysettings.features)
5348 writemsg("!!! %s\n" % str(e), noiselevel=-1)
5349 writemsg("!!! Failed resetting perms on %s='%s'\n" % \
5350 (kwargs["basedir_var"], basedir), noiselevel=-1)
5351 writemsg("!!! Disabled FEATURES='%s'\n" % myfeature,
5355 def _prepare_workdir(mysettings):
# Apply PORTAGE_WORKDIR_MODE to WORKDIR and decide where the build log
# (PORTAGE_LOG_FILE) should live.  NOTE(review): elided listing -- try
# headers, KeyError handling and the f.close() after the .logid create
# are among the missing lines.
5358 mode = mysettings["PORTAGE_WORKDIR_MODE"]
5360 parsed_mode = int(mode, 8)
5365 if parsed_mode & 07777 != parsed_mode:
5366 raise ValueError("Invalid file mode: %s" % mode)
5368 workdir_mode = parsed_mode
5370 writemsg("!!! PORTAGE_WORKDIR_MODE is unset, using %s.\n" % oct(workdir_mode))
5371 except ValueError, e:
5373 writemsg("%s\n" % e)
5374 writemsg("!!! Unable to parse PORTAGE_WORKDIR_MODE='%s', using %s.\n" % \
5375 (mysettings["PORTAGE_WORKDIR_MODE"], oct(workdir_mode)))
5376 mysettings["PORTAGE_WORKDIR_MODE"] = oct(workdir_mode)
5378 apply_secpass_permissions(mysettings["WORKDIR"],
5379 uid=portage_uid, gid=portage_gid, mode=workdir_mode)
5380 except portage.exception.FileNotFound:
5381 pass # ebuild.sh will create it
# An empty PORT_LOGDIR is treated as unset.
5383 if mysettings.get("PORT_LOGDIR", "") == "":
5384 while "PORT_LOGDIR" in mysettings:
5385 del mysettings["PORT_LOGDIR"]
5386 if "PORT_LOGDIR" in mysettings:
5388 modified = portage.util.ensure_dirs(mysettings["PORT_LOGDIR"])
5390 apply_secpass_permissions(mysettings["PORT_LOGDIR"],
5391 uid=portage_uid, gid=portage_gid, mode=02770)
# If the log dir cannot be permissioned, logging is disabled entirely.
5392 except portage.exception.PortageException, e:
5393 writemsg("!!! %s\n" % str(e), noiselevel=-1)
5394 writemsg("!!! Permission issues with PORT_LOGDIR='%s'\n" % \
5395 mysettings["PORT_LOGDIR"], noiselevel=-1)
5396 writemsg("!!! Disabling logging.\n", noiselevel=-1)
5397 while "PORT_LOGDIR" in mysettings:
5398 del mysettings["PORT_LOGDIR"]
# A writable PORT_LOGDIR gets a timestamped per-package log file; the
# .logid marker pins the timestamp for the whole build.
5399 if "PORT_LOGDIR" in mysettings and \
5400 os.access(mysettings["PORT_LOGDIR"], os.W_OK):
5401 logid_path = os.path.join(mysettings["PORTAGE_BUILDDIR"], ".logid")
5402 if not os.path.exists(logid_path):
5403 f = open(logid_path, "w")
5406 logid_time = time.strftime("%Y%m%d-%H%M%S",
5407 time.gmtime(os.stat(logid_path).st_mtime))
5408 mysettings["PORTAGE_LOG_FILE"] = os.path.join(
5409 mysettings["PORT_LOGDIR"], "%s:%s:%s.log" % \
5410 (mysettings["CATEGORY"], mysettings["PF"], logid_time))
5411 del logid_path, logid_time
5413 # When sesandbox is enabled, only log if PORT_LOGDIR is explicitly
5414 # enabled since it is possible that local SELinux security policies
5415 # do not allow ouput to be piped out of the sesandbox domain.
5416 if not (mysettings.selinux_enabled() and \
5417 "sesandbox" in mysettings.features):
5418 mysettings["PORTAGE_LOG_FILE"] = os.path.join(
5419 mysettings["T"], "build.log")
5421 def _doebuild_exit_status_check(mydo, settings):
5423 Returns an error string if the shell appeared
5424 to exit unsuccessfully, None otherwise.
# NOTE(review): elided listing -- the docstring delimiters and the
# return None / return msg lines are missing.  The exit-status file is
# written by the spawned shell on clean exit, so its presence (or an
# unset variable) means success.
5426 exit_status_file = settings.get("EBUILD_EXIT_STATUS_FILE")
5427 if not exit_status_file or \
5428 os.path.exists(exit_status_file):
5430 msg = ("The ebuild phase '%s' has exited " % mydo) + \
5431 "unexpectedly. This type of behavior " + \
5432 "is known to be triggered " + \
5433 "by things such as failed variable " + \
5434 "assignments (bug #190128) or bad substitution " + \
5435 "errors (bug #200313). Normally, before exiting, bash should " + \
5436 "have displayed an error message above. If bash did not " + \
5437 "produce an error message above, it's possible " + \
5438 "that the ebuild has called `exit` when it " + \
5439 "should have called `die` instead. This behavior may also " + \
5440 "be triggered by a corrupt bash binary or a hardware " + \
5441 "problem such as memory or cpu malfunction. If the problem is not " + \
5442 "reproducible or it appears to occur randomly, then it is likely " + \
5443 "to be triggered by a hardware problem. " + \
5444 "If you suspect a hardware problem then you should " + \
5445 "try some basic hardware diagnostics such as memtest. " + \
5446 "Please do not report this as a bug unless it is consistently " + \
5447 "reproducible and you are sure that your bash binary and hardware " + \
5448 "are functioning properly."
5451 def _doebuild_exit_status_check_and_log(settings, mydo, retval):
# On phase failure, run the exit-status check and log its diagnostic
# message through elog.  NOTE(review): elided listing -- the msg guard
# and the final return of retval are among the missing lines.
5452 if retval != os.EX_OK:
5454 msg = _doebuild_exit_status_check(mydo, settings)
5457 from textwrap import wrap
5458 from portage.elog.messages import eerror
5459 for l in wrap(msg, 72):
5460 eerror(l, phase=mydo, key=settings.mycpv)
5463 def _doebuild_exit_status_unlink(exit_status_file):
5465 Double check to make sure it really doesn't exist
5466 and raise an OSError if it still does (it shouldn't).
5467 OSError if necessary.
# NOTE(review): elided listing -- the docstring delimiters, an early
# return and the try/except around the first unlink are missing.  The
# second unlink deliberately lets OSError propagate.
5469 if not exit_status_file:
5472 os.unlink(exit_status_file)
5475 if os.path.exists(exit_status_file):
5476 os.unlink(exit_status_file)
# Module-level state shared by doebuild() calls:
# - nesting counter that exempts "depend" phases from manifest checks
5478 _doebuild_manifest_exempt_depend = 0
# - cache of the most recently verified Manifest (avoids re-parsing)
5479 _doebuild_manifest_cache = None
# - ebuilds/manifests already found broken, so errors aren't repeated
5480 _doebuild_broken_ebuilds = set()
5481 _doebuild_broken_manifests = set()
# Main entry point for running a single ebuild phase.
# NOTE(review): many interior lines of doebuild() are elided in this view
# (the embedded original line numbers are non-contiguous), so the comments
# below annotate only what is visible; control-flow claims are hedged
# where the connecting lines are missing.
5483 def doebuild(myebuild, mydo, myroot, mysettings, debug=0, listonly=0,
5484 fetchonly=0, cleanup=0, dbkey=None, use_cache=1, fetchall=0, tree=None,
5485 mydbapi=None, vartree=None, prev_mtimes=None,
5486 fd_pipes=None, returnpid=False):
5489 Wrapper function that invokes specific ebuild phases through the spawning
5492 @param myebuild: name of the ebuild to invoke the phase on (CPV)
5493 @type myebuild: String
5494 @param mydo: Phase to run
5496 @param myroot: $ROOT (usually '/', see man make.conf)
5497 @type myroot: String
5498 @param mysettings: Portage Configuration
5499 @type mysettings: instance of portage.config
5500 @param debug: Turns on various debug information (eg, debug for spawn)
5501 @type debug: Boolean
5502 @param listonly: Used to wrap fetch(); passed such that fetch only lists files required.
5503 @type listonly: Boolean
5504 @param fetchonly: Used to wrap fetch(); passed such that files are only fetched (no other actions)
5505 @type fetchonly: Boolean
5506 @param cleanup: Passed to prepare_build_dirs (TODO: what does it do?)
5507 @type cleanup: Boolean
5508 @param dbkey: A dict (usually keys and values from the depend phase, such as KEYWORDS, USE, etc..)
5509 @type dbkey: Dict or String
5510 @param use_cache: Enables the cache
5511 @type use_cache: Boolean
5512 @param fetchall: Used to wrap fetch(), fetches all URI's (even ones invalid due to USE conditionals)
5513 @type fetchall: Boolean
5514 @param tree: Which tree to use ('vartree','porttree','bintree', etc..), defaults to 'porttree'
5516 @param mydbapi: a dbapi instance to pass to various functions; this should be a portdbapi instance.
5517 @type mydbapi: portdbapi instance
5518 @param vartree: A instance of vartree; used for aux_get calls, defaults to db[myroot]['vartree']
5519 @type vartree: vartree instance
5520 @param prev_mtimes: A dict of { filename:mtime } keys used by merge() to do config_protection
5521 @type prev_mtimes: dictionary
5527 Most errors have an accompanying error message.
5529 listonly and fetchonly are only really necessary for operations involving 'fetch'
5530 prev_mtimes are only necessary for merge operations.
5531 Other variables may not be strictly required, many have defaults that are set inside of doebuild.
5536 writemsg("Warning: tree not specified to doebuild\n")
# Static map of which phase each phase depends on; used further below to
# fill the "dep" entries of actionmap so ebuild.sh can chain phases.
5540 # chunked out deps for each phase, so that ebuild binary can use it
5541 # to collapse targets down.
5544 "unpack": ["setup"],
5545 "prepare": ["unpack"],
5546 "configure": ["prepare"],
5547 "compile":["configure"],
5548 "test": ["compile"],
5551 "package":["install"],
# Fall back to the global db for the dbapi/vartree when the caller did
# not supply them.
5555 mydbapi = db[myroot][tree].dbapi
5557 if vartree is None and mydo in ("merge", "qmerge", "unmerge"):
5558 vartree = db[myroot]["vartree"]
5560 features = mysettings.features
5561 noauto = "noauto" in features
5562 from portage.data import secpass
# Validate the requested phase name before doing any work.
5564 clean_phases = ("clean", "cleanrm")
5565 validcommands = ["help","clean","prerm","postrm","cleanrm","preinst","postinst",
5566 "config", "info", "setup", "depend",
5567 "fetch", "fetchall", "digest",
5568 "unpack", "prepare", "configure", "compile", "test",
5569 "install", "rpm", "qmerge", "merge",
5570 "package","unmerge", "manifest"]
5572 if mydo not in validcommands:
5573 validcommands.sort()
5574 writemsg("!!! doebuild: '%s' is not one of the following valid commands:" % mydo,
5576 for vcount in range(len(validcommands)):
5578 writemsg("\n!!! ", noiselevel=-1)
5579 writemsg(validcommands[vcount].ljust(11), noiselevel=-1)
5580 writemsg("\n", noiselevel=-1)
5583 if mydo == "fetchall":
5587 parallel_fetchonly = mydo in ("fetch", "fetchall") and \
5588 "PORTAGE_PARALLEL_FETCHONLY" in mysettings
# The ebuild file itself must exist for anything but the clean phases.
5590 if mydo not in clean_phases and not os.path.exists(myebuild):
5591 writemsg("!!! doebuild: %s not found for %s\n" % (myebuild, mydo),
5595 global _doebuild_manifest_exempt_depend
# With FEATURES=strict (and without FEATURES=digest), verify the ebuild
# against its Manifest before executing it, using the module-level
# _doebuild_* caches to avoid repeating work across calls.
5597 if "strict" in features and \
5598 "digest" not in features and \
5599 tree == "porttree" and \
5600 mydo not in ("digest", "manifest", "help") and \
5601 not _doebuild_manifest_exempt_depend:
5602 # Always verify the ebuild checksums before executing it.
5603 global _doebuild_manifest_cache, _doebuild_broken_ebuilds, \
5604 _doebuild_broken_ebuilds
5606 if myebuild in _doebuild_broken_ebuilds:
5609 pkgdir = os.path.dirname(myebuild)
5610 manifest_path = os.path.join(pkgdir, "Manifest")
5612 # Avoid checking the same Manifest several times in a row during a
5613 # regen with an empty cache.
5614 if _doebuild_manifest_cache is None or \
5615 _doebuild_manifest_cache.getFullname() != manifest_path:
5616 _doebuild_manifest_cache = None
5617 if not os.path.exists(manifest_path):
5618 out = portage.output.EOutput()
5619 out.eerror("Manifest not found for '%s'" % (myebuild,))
5620 _doebuild_broken_ebuilds.add(myebuild)
5622 mf = Manifest(pkgdir, mysettings["DISTDIR"])
5625 mf = _doebuild_manifest_cache
# Verify this ebuild's own checksum; every failure mode is reported via
# EOutput and remembered in _doebuild_broken_ebuilds.
5628 mf.checkFileHashes("EBUILD", os.path.basename(myebuild))
5630 out = portage.output.EOutput()
5631 out.eerror("Missing digest for '%s'" % (myebuild,))
5632 _doebuild_broken_ebuilds.add(myebuild)
5634 except portage.exception.FileNotFound:
5635 out = portage.output.EOutput()
5636 out.eerror("A file listed in the Manifest " + \
5637 "could not be found: '%s'" % (myebuild,))
5638 _doebuild_broken_ebuilds.add(myebuild)
5640 except portage.exception.DigestException, e:
5641 out = portage.output.EOutput()
5642 out.eerror("Digest verification failed:")
5643 out.eerror("%s" % e.value[0])
5644 out.eerror("Reason: %s" % e.value[1])
5645 out.eerror("Got: %s" % e.value[2])
5646 out.eerror("Expected: %s" % e.value[3])
5647 _doebuild_broken_ebuilds.add(myebuild)
5650 if mf.getFullname() in _doebuild_broken_manifests:
5653 if mf is not _doebuild_manifest_cache:
5655 # Make sure that all of the ebuilds are
5656 # actually listed in the Manifest.
5657 for f in os.listdir(pkgdir):
5658 if f.endswith(".ebuild") and not mf.hasFile("EBUILD", f):
5659 f = os.path.join(pkgdir, f)
5660 if f not in _doebuild_broken_ebuilds:
5661 out = portage.output.EOutput()
5662 out.eerror("A file is not listed in the " + \
5663 "Manifest: '%s'" % (f,))
5664 _doebuild_broken_manifests.add(manifest_path)
5667 # Only cache it if the above stray files test succeeds.
5668 _doebuild_manifest_cache = mf
# Nested helper: on a failing phase, consult the exit status file and
# route any resulting message through elog as errors for this package.
5670 def exit_status_check(retval):
5671 if retval != os.EX_OK:
5673 msg = _doebuild_exit_status_check(mydo, mysettings)
5676 from textwrap import wrap
5677 from portage.elog.messages import eerror
5678 for l in wrap(msg, 72):
5679 eerror(l, phase=mydo, key=mysettings.mycpv)
5682 # Note: PORTAGE_BIN_PATH may differ from the global
5683 # constant when portage is reinstalling itself.
5684 portage_bin_path = mysettings["PORTAGE_BIN_PATH"]
5685 ebuild_sh_binary = os.path.join(portage_bin_path,
5686 os.path.basename(EBUILD_SH_BINARY))
5687 misc_sh_binary = os.path.join(portage_bin_path,
5688 os.path.basename(MISC_SH_BINARY))
5691 builddir_lock = None
5696 if mydo in ("digest", "manifest", "help"):
5697 # Temporarily exempt the depend phase from manifest checks, in case
5698 # aux_get calls trigger cache generation.
5699 _doebuild_manifest_exempt_depend += 1
5701 # If we don't need much space and we don't need a constant location,
5702 # we can temporarily override PORTAGE_TMPDIR with a random temp dir
5703 # so that there's no need for locking and it can be used even if the
5704 # user isn't in the portage group.
5705 if mydo in ("info",):
5706 from tempfile import mkdtemp
5708 tmpdir_orig = mysettings["PORTAGE_TMPDIR"]
5709 mysettings["PORTAGE_TMPDIR"] = tmpdir
5711 doebuild_environment(myebuild, mydo, myroot, mysettings, debug,
# clean/cleanrm are dispatched immediately (free=1: no sandbox).
5714 if mydo in clean_phases:
5715 retval = spawn(_shell_quote(ebuild_sh_binary) + " clean",
5716 mysettings, debug=debug, fd_pipes=fd_pipes, free=1,
5717 logfile=None, returnpid=returnpid)
5720 # get possible slot information from the deps file
# depend phase: spawn ebuild.sh and collect metadata either via the
# caller's fd_pipes, a pipe into a dict dbkey, or a temp cache file.
5721 if mydo == "depend":
5722 writemsg("!!! DEBUG: dbkey: %s\n" % str(dbkey), 2)
5723 droppriv = "userpriv" in mysettings.features
5725 mypids = spawn(_shell_quote(ebuild_sh_binary) + " depend",
5726 mysettings, fd_pipes=fd_pipes, returnpid=True,
5729 elif isinstance(dbkey, dict):
5730 mysettings["dbkey"] = ""
5733 0:sys.stdin.fileno(),
5734 1:sys.stdout.fileno(),
5735 2:sys.stderr.fileno(),
5737 mypids = spawn(_shell_quote(ebuild_sh_binary) + " depend",
5739 fd_pipes=fd_pipes, returnpid=True, droppriv=droppriv)
5740 os.close(pw) # belongs exclusively to the child process now
# Drain the read end of the pipe, then split one metadata value per
# line into the caller's dbkey dict, keyed by auxdbkeys order.
5744 mybytes.append(os.read(pr, maxbytes))
5748 mybytes = "".join(mybytes)
5750 for k, v in izip(auxdbkeys, mybytes.splitlines()):
5752 retval = os.waitpid(mypids[0], 0)[1]
5753 portage.process.spawned_pids.remove(mypids[0])
5754 # If it got a signal, return the signal that was sent, but
5755 # shift in order to distinguish it from a return value. (just
5756 # like portage.process.spawn() would do).
5758 retval = (retval & 0xff) << 8
5760 # Otherwise, return its exit code.
5761 retval = retval >> 8
5762 if retval == os.EX_OK and len(dbkey) != len(auxdbkeys):
5763 # Don't trust bash's returncode if the
5764 # number of lines is incorrect.
5768 mysettings["dbkey"] = dbkey
5770 mysettings["dbkey"] = \
5771 os.path.join(mysettings.depcachedir, "aux_db_key_temp")
5773 return spawn(_shell_quote(ebuild_sh_binary) + " depend",
5777 # Validate dependency metadata here to ensure that ebuilds with invalid
5778 # data are never installed via the ebuild command. Don't bother when
5779 # returnpid == True since there's no need to do this every time emerge
5782 rval = _validate_deps(mysettings, myroot, mydo, mydbapi)
5783 if rval != os.EX_OK:
# Sanity-check PORTAGE_TMPDIR: it must exist, be writable, and allow
# executing files (i.e. not a noexec mount).
5786 if "PORTAGE_TMPDIR" not in mysettings or \
5787 not os.path.isdir(mysettings["PORTAGE_TMPDIR"]):
5788 writemsg("The directory specified in your " + \
5789 "PORTAGE_TMPDIR variable, '%s',\n" % \
5790 mysettings.get("PORTAGE_TMPDIR", ""), noiselevel=-1)
5791 writemsg("does not exist. Please create this directory or " + \
5792 "correct your PORTAGE_TMPDIR setting.\n", noiselevel=-1)
5795 # as some people use a separate PORTAGE_TMPDIR mount
5796 # we prefer that as the checks below would otherwise be pointless
5798 if os.path.exists(os.path.join(mysettings["PORTAGE_TMPDIR"], "portage")):
5799 checkdir = os.path.join(mysettings["PORTAGE_TMPDIR"], "portage")
5801 checkdir = mysettings["PORTAGE_TMPDIR"]
5803 if not os.access(checkdir, os.W_OK):
5804 writemsg("%s is not writable.\n" % checkdir + \
5805 "Likely cause is that you've mounted it as readonly.\n" \
5809 from tempfile import NamedTemporaryFile
5810 fd = NamedTemporaryFile(prefix="exectest-", dir=checkdir)
5811 os.chmod(fd.name, 0755)
5812 if not os.access(fd.name, os.X_OK):
5813 writemsg("Can not execute files in %s\n" % checkdir + \
5814 "Likely cause is that you've mounted it with one of the\n" + \
5815 "following mount options: 'noexec', 'user', 'users'\n\n" + \
5816 "Please make sure that portage can execute files in this directory.\n" \
5823 if mydo == "unmerge":
5824 return unmerge(mysettings["CATEGORY"],
5825 mysettings["PF"], myroot, mysettings, vartree=vartree)
5827 # Build directory creation isn't required for any of these.
5828 have_build_dirs = False
5829 if not parallel_fetchonly and mydo not in ("digest", "help", "manifest"):
5830 mystatus = prepare_build_dirs(myroot, mysettings, cleanup)
5833 have_build_dirs = True
5835 # emerge handles logging externally
5837 # PORTAGE_LOG_FILE is set by the
5838 # above prepare_build_dirs() call.
5839 logfile = mysettings.get("PORTAGE_LOG_FILE")
# If no saved environment exists in ${T}, try restoring one from the
# environment.bz2 next to the ebuild (as shipped in binpkgs/vardb).
5842 env_file = os.path.join(mysettings["T"], "environment")
5846 env_stat = os.stat(env_file)
5848 if e.errno != errno.ENOENT:
5852 saved_env = os.path.join(
5853 os.path.dirname(myebuild), "environment.bz2")
5854 if not os.path.isfile(saved_env):
5858 "bzip2 -dc %s > %s" % \
5859 (_shell_quote(saved_env),
5860 _shell_quote(env_file)))
5862 env_stat = os.stat(env_file)
5864 if e.errno != errno.ENOENT:
5867 if os.WIFEXITED(retval) and \
5868 os.WEXITSTATUS(retval) == os.EX_OK and \
5869 env_stat and env_stat.st_size > 0:
5870 # This is a signal to ebuild.sh, so that it knows to filter
5871 # out things like SANDBOX_{DENY,PREDICT,READ,WRITE} that
5872 # would be preserved between normal phases.
5873 open(env_file + ".raw", "w")
5875 writemsg(("!!! Error extracting saved " + \
5876 "environment: '%s'\n") % \
5877 saved_env, noiselevel=-1)
5881 if e.errno != errno.ENOENT:
# Warn loudly when basic profile variables like ARCH are unset, which
# usually indicates a broken make.profile symlink.
5888 for var in ("ARCH", ):
5889 value = mysettings.get(var)
5890 if value and value.strip():
5892 msg = ("%s is not set... " % var) + \
5893 ("Are you missing the '%setc/make.profile' symlink? " % \
5894 mysettings["PORTAGE_CONFIGROOT"]) + \
5895 "Is the symlink correct? " + \
5896 "Is your portage tree complete?"
5897 from portage.elog.messages import eerror
5898 from textwrap import wrap
5899 for line in wrap(msg, 70):
5900 eerror(line, phase="setup", key=mysettings.mycpv)
5901 from portage.elog import elog_process
5902 elog_process(mysettings.mycpv, mysettings)
5904 del env_file, env_stat, saved_env
5905 _doebuild_exit_status_unlink(
5906 mysettings.get("EBUILD_EXIT_STATUS_FILE"))
5908 mysettings.pop("EBUILD_EXIT_STATUS_FILE", None)
5910 # if any of these are being called, handle them -- running them out of
5911 # the sandbox -- and stop now.
5913 return spawn(_shell_quote(ebuild_sh_binary) + " " + mydo,
5914 mysettings, debug=debug, free=1, logfile=logfile)
5915 elif mydo == "setup":
5917 _shell_quote(ebuild_sh_binary) + " " + mydo, mysettings,
5918 debug=debug, free=1, logfile=logfile, fd_pipes=fd_pipes,
5919 returnpid=returnpid)
5922 retval = exit_status_check(retval)
5924 """ Privileged phases may have left files that need to be made
5925 writable to a less privileged user."""
5926 apply_recursive_permissions(mysettings["T"],
5927 uid=portage_uid, gid=portage_gid, dirmode=070, dirmask=0,
5928 filemode=060, filemask=0)
5930 elif mydo == "preinst":
5931 phase_retval = spawn(
5932 _shell_quote(ebuild_sh_binary) + " " + mydo,
5933 mysettings, debug=debug, free=1, logfile=logfile,
5934 fd_pipes=fd_pipes, returnpid=returnpid)
5939 phase_retval = exit_status_check(phase_retval)
5940 if phase_retval == os.EX_OK:
# After a successful preinst, run the post-preinst hook commands.
5941 _doebuild_exit_status_unlink(
5942 mysettings.get("EBUILD_EXIT_STATUS_FILE"))
5943 mysettings.pop("EBUILD_PHASE", None)
5944 phase_retval = spawn(
5945 " ".join(_post_pkg_preinst_cmd(mysettings)),
5946 mysettings, debug=debug, free=1, logfile=logfile)
5947 phase_retval = exit_status_check(phase_retval)
5948 if phase_retval != os.EX_OK:
5949 writemsg("!!! post preinst failed; exiting.\n",
5952 elif mydo == "postinst":
5953 phase_retval = spawn(
5954 _shell_quote(ebuild_sh_binary) + " " + mydo,
5955 mysettings, debug=debug, free=1, logfile=logfile,
5956 fd_pipes=fd_pipes, returnpid=returnpid)
5961 phase_retval = exit_status_check(phase_retval)
5962 if phase_retval == os.EX_OK:
# After a successful postinst, run the post-postinst hook commands.
5963 _doebuild_exit_status_unlink(
5964 mysettings.get("EBUILD_EXIT_STATUS_FILE"))
5965 mysettings.pop("EBUILD_PHASE", None)
5966 phase_retval = spawn(" ".join(_post_pkg_postinst_cmd(mysettings)),
5967 mysettings, debug=debug, free=1, logfile=logfile)
5968 phase_retval = exit_status_check(phase_retval)
5969 if phase_retval != os.EX_OK:
5970 writemsg("!!! post postinst failed; exiting.\n",
5973 elif mydo in ("prerm", "postrm", "config", "info"):
5975 _shell_quote(ebuild_sh_binary) + " " + mydo,
5976 mysettings, debug=debug, free=1, logfile=logfile,
5977 fd_pipes=fd_pipes, returnpid=returnpid)
5982 retval = exit_status_check(retval)
5985 mycpv = "/".join((mysettings["CATEGORY"], mysettings["PF"]))
# When called by emerge (returnpid), fetching and digest generation are
# handled externally, so they are skipped here.
5987 emerge_skip_distfiles = returnpid
5988 emerge_skip_digest = returnpid
5989 # Only try and fetch the files if we are going to need them ...
5990 # otherwise, if user has FEATURES=noauto and they run `ebuild clean
5991 # unpack compile install`, we will try and fetch 4 times :/
5992 need_distfiles = not emerge_skip_distfiles and \
5993 (mydo in ("fetch", "unpack") or \
5994 mydo not in ("digest", "manifest") and "noauto" not in features)
5995 alist = mysettings.configdict["pkg"].get("A")
5996 aalist = mysettings.configdict["pkg"].get("AA")
5997 if need_distfiles or alist is None or aalist is None:
5998 # Make sure we get the correct tree in case there are overlays.
5999 mytree = os.path.realpath(
6000 os.path.dirname(os.path.dirname(mysettings["O"])))
6001 useflags = mysettings["PORTAGE_USE"].split()
# A = distfiles needed for the current USE; AA = all SRC_URI files.
6003 alist = mydbapi.getFetchMap(mycpv, useflags=useflags,
6005 aalist = mydbapi.getFetchMap(mycpv, mytree=mytree)
6006 except portage.exception.InvalidDependString, e:
6007 writemsg("!!! %s\n" % str(e), noiselevel=-1)
6008 writemsg("!!! Invalid SRC_URI for '%s'.\n" % mycpv,
6012 mysettings.configdict["pkg"]["A"] = " ".join(alist)
6013 mysettings.configdict["pkg"]["AA"] = " ".join(aalist)
6015 alist = set(alist.split())
6016 aalist = set(aalist.split())
6017 if ("mirror" in features) or fetchall:
6025 # Files are already checked inside fetch(),
6026 # so do not check them again.
6030 if not emerge_skip_distfiles and \
6031 need_distfiles and not fetch(
6032 fetchme, mysettings, listonly=listonly, fetchonly=fetchonly):
6035 if mydo == "fetch" and listonly:
# digest/manifest phases generate Manifest entries and return; note the
# "not" -- doebuild returns 0 on success, digestgen returns truthy.
6039 if mydo == "manifest":
6040 return not digestgen(aalist, mysettings, overwrite=1,
6041 manifestonly=1, myportdb=mydbapi)
6042 elif mydo == "digest":
6043 return not digestgen(aalist, mysettings, overwrite=1,
6045 elif mydo != 'fetch' and not emerge_skip_digest and \
6046 "digest" in mysettings.features:
6047 # Don't do this when called by emerge or when called just
6048 # for fetch (especially parallel-fetch) since it's not needed
6049 # and it can interfere with parallel tasks.
6050 digestgen(aalist, mysettings, overwrite=0, myportdb=mydbapi)
6051 except portage.exception.PermissionDenied, e:
6052 writemsg("!!! Permission Denied: %s\n" % (e,), noiselevel=-1)
6053 if mydo in ("digest", "manifest"):
6056 # See above comment about fetching only when needed
6057 if not emerge_skip_distfiles and \
6058 not digestcheck(checkme, mysettings, "strict" in features):
6064 # remove PORTAGE_ACTUAL_DISTDIR once cvs/svn is supported via SRC_URI
# Shadow DISTDIR with a per-build "distdir" of symlinks into the real
# DISTDIR, so the build only sees this package's own distfiles.
6065 if (mydo != "setup" and "noauto" not in features) or mydo == "unpack":
6066 orig_distdir = mysettings["DISTDIR"]
6067 mysettings["PORTAGE_ACTUAL_DISTDIR"] = orig_distdir
6068 edpath = mysettings["DISTDIR"] = \
6069 os.path.join(mysettings["PORTAGE_BUILDDIR"], "distdir")
6070 portage.util.ensure_dirs(edpath, gid=portage_gid, mode=0755)
6072 # Remove any unexpected files or directories.
6073 for x in os.listdir(edpath):
6074 symlink_path = os.path.join(edpath, x)
6075 st = os.lstat(symlink_path)
6076 if x in alist and stat.S_ISLNK(st.st_mode):
6078 if stat.S_ISDIR(st.st_mode):
6079 shutil.rmtree(symlink_path)
6081 os.unlink(symlink_path)
6083 # Check for existing symlinks and recreate if necessary.
6085 symlink_path = os.path.join(edpath, x)
6086 target = os.path.join(orig_distdir, x)
6088 link_target = os.readlink(symlink_path)
6090 os.symlink(target, symlink_path)
6092 if link_target != target:
6093 os.unlink(symlink_path)
6094 os.symlink(target, symlink_path)
6096 #initial dep checks complete; time to process main commands
# Decide sandbox/privilege behavior for the build phases from FEATURES
# and the package's PORTAGE_RESTRICT values.
6098 restrict = mysettings["PORTAGE_RESTRICT"].split()
6099 nosandbox = (("userpriv" in features) and \
6100 ("usersandbox" not in features) and \
6101 "userpriv" not in restrict and \
6102 "nouserpriv" not in restrict)
6103 if nosandbox and ("userpriv" not in features or \
6104 "userpriv" in restrict or \
6105 "nouserpriv" in restrict):
6106 nosandbox = ("sandbox" not in features and \
6107 "usersandbox" not in features)
6109 sesandbox = mysettings.selinux_enabled() and \
6110 "sesandbox" in mysettings.features
6112 droppriv = "userpriv" in mysettings.features and \
6113 "userpriv" not in restrict and \
6116 fakeroot = "fakeroot" in mysettings.features
6118 ebuild_sh = _shell_quote(ebuild_sh_binary) + " %s"
6119 misc_sh = _shell_quote(misc_sh_binary) + " dyn_%s"
# Per-phase spawn command and privilege/sandbox arguments.
6121 # args are for the to spawn function
6123 "setup": {"cmd":ebuild_sh, "args":{"droppriv":0, "free":1, "sesandbox":0, "fakeroot":0}},
6124 "unpack": {"cmd":ebuild_sh, "args":{"droppriv":droppriv, "free":0, "sesandbox":sesandbox, "fakeroot":0}},
6125 "prepare": {"cmd":ebuild_sh, "args":{"droppriv":droppriv, "free":0, "sesandbox":sesandbox, "fakeroot":0}},
6126 "configure":{"cmd":ebuild_sh, "args":{"droppriv":droppriv, "free":nosandbox, "sesandbox":sesandbox, "fakeroot":0}},
6127 "compile": {"cmd":ebuild_sh, "args":{"droppriv":droppriv, "free":nosandbox, "sesandbox":sesandbox, "fakeroot":0}},
6128 "test": {"cmd":ebuild_sh, "args":{"droppriv":droppriv, "free":nosandbox, "sesandbox":sesandbox, "fakeroot":0}},
6129 "install": {"cmd":ebuild_sh, "args":{"droppriv":0, "free":0, "sesandbox":sesandbox, "fakeroot":fakeroot}},
6130 "rpm": {"cmd":misc_sh, "args":{"droppriv":0, "free":0, "sesandbox":0, "fakeroot":fakeroot}},
6131 "package": {"cmd":misc_sh, "args":{"droppriv":0, "free":0, "sesandbox":0, "fakeroot":fakeroot}},
6134 # merge the deps in so we have again a 'full' actionmap
6135 # be glad when this can die.
6137 if len(actionmap_deps.get(x, [])):
6138 actionmap[x]["dep"] = ' '.join(actionmap_deps[x])
6140 if mydo in actionmap:
6141 if mydo == "package":
6142 # Make sure the package directory exists before executing
6143 # this phase. This can raise PermissionDenied if
6144 # the current user doesn't have write access to $PKGDIR.
6145 parent_dir = os.path.join(mysettings["PKGDIR"],
6146 mysettings["CATEGORY"])
6147 portage.util.ensure_dirs(parent_dir)
6148 if not os.access(parent_dir, os.W_OK):
6149 raise portage.exception.PermissionDenied(
6150 "access('%s', os.W_OK)" % parent_dir)
6151 retval = spawnebuild(mydo,
6152 actionmap, mysettings, debug, logfile=logfile,
6153 fd_pipes=fd_pipes, returnpid=returnpid)
6154 elif mydo=="qmerge":
6155 # check to ensure install was run. this *only* pops up when users
6156 # forget it and are using ebuild
6157 if not os.path.exists(
6158 os.path.join(mysettings["PORTAGE_BUILDDIR"], ".installed")):
6159 writemsg("!!! mydo=qmerge, but the install phase has not been run\n",
6162 # qmerge is a special phase that implies noclean.
6163 if "noclean" not in mysettings.features:
6164 mysettings.features.add("noclean")
6165 #qmerge is specifically not supposed to do a runtime dep check
6167 mysettings["CATEGORY"], mysettings["PF"], mysettings["D"],
6168 os.path.join(mysettings["PORTAGE_BUILDDIR"], "build-info"),
6169 myroot, mysettings, myebuild=mysettings["EBUILD"], mytree=tree,
6170 mydbapi=mydbapi, vartree=vartree, prev_mtimes=prev_mtimes)
# "merge": run the install phase first, then merge D to the live fs.
6172 retval = spawnebuild("install", actionmap, mysettings, debug,
6173 alwaysdep=1, logfile=logfile, fd_pipes=fd_pipes,
6174 returnpid=returnpid)
6175 retval = exit_status_check(retval)
6176 if retval != os.EX_OK:
6177 # The merge phase handles this already. Callers don't know how
6178 # far this function got, so we have to call elog_process() here
6179 # so that it's only called once.
6180 from portage.elog import elog_process
6181 elog_process(mysettings.mycpv, mysettings)
6182 if retval == os.EX_OK:
6183 retval = merge(mysettings["CATEGORY"], mysettings["PF"],
6184 mysettings["D"], os.path.join(mysettings["PORTAGE_BUILDDIR"],
6185 "build-info"), myroot, mysettings,
6186 myebuild=mysettings["EBUILD"], mytree=tree, mydbapi=mydbapi,
6187 vartree=vartree, prev_mtimes=prev_mtimes)
6189 print "!!! Unknown mydo:",mydo
# Cleanup section (presumably inside a finally: -- the surrounding
# lines are elided here): restore PORTAGE_TMPDIR, release the build
# dir lock, restore DISTDIR, drop empty logs, clear the exemption.
6197 mysettings["PORTAGE_TMPDIR"] = tmpdir_orig
6198 shutil.rmtree(tmpdir)
6200 portage.locks.unlockdir(builddir_lock)
6202 # Make sure that DISTDIR is restored to it's normal value before we return!
6203 if "PORTAGE_ACTUAL_DISTDIR" in mysettings:
6204 mysettings["DISTDIR"] = mysettings["PORTAGE_ACTUAL_DISTDIR"]
6205 del mysettings["PORTAGE_ACTUAL_DISTDIR"]
6209 if os.stat(logfile).st_size == 0:
6214 if mydo in ("digest", "manifest", "help"):
6215 # If necessary, depend phase has been triggered by aux_get calls
6216 # and the exemption is no longer needed.
6217 _doebuild_manifest_exempt_depend -= 1
# Validate a package's dependency/metadata strings (DEPEND et al.,
# LICENSE-style keys, SLOT) and log any errors found; phases listed in
# invalid_dep_exempt_phases tolerate invalid metadata.
# NOTE(review): interior lines (return statements, parts of the class
# body) are elided in this view.
6219 def _validate_deps(mysettings, myroot, mydo, mydbapi):
6221 invalid_dep_exempt_phases = \
6222 set(["clean", "cleanrm", "help", "prerm", "postrm"])
6223 dep_keys = ["DEPEND", "RDEPEND", "PDEPEND"]
6224 misc_keys = ["LICENSE", "PROPERTIES", "PROVIDE", "RESTRICT", "SRC_URI"]
6225 other_keys = ["SLOT"]
6226 all_keys = dep_keys + misc_keys + other_keys
6227 metadata = dict(izip(all_keys,
6228 mydbapi.aux_get(mysettings.mycpv, all_keys)))
6230 class FakeTree(object):
6231 def __init__(self, mydb):
# Minimal tree mapping so dep_check() can run against an empty fake
# porttree -- this validates syntax, not satisfiability.
6233 dep_check_trees = {myroot:{}}
6234 dep_check_trees[myroot]["porttree"] = \
6235 FakeTree(fakedbapi(settings=mysettings))
6238 for dep_type in dep_keys:
6239 mycheck = dep_check(metadata[dep_type], None, mysettings,
6240 myuse="all", myroot=myroot, trees=dep_check_trees)
6242 msgs.append("  %s: %s\n    %s\n" % (
6243 dep_type, metadata[dep_type], mycheck[1]))
# The misc keys only need to parse (paren_reduce + use_reduce).
6247 portage.dep.use_reduce(
6248 portage.dep.paren_reduce(metadata[k]), matchall=True)
6249 except portage.exception.InvalidDependString, e:
6250 msgs.append("  %s: %s\n    %s\n" % (
6251 k, metadata[k], str(e)))
6253 if not metadata["SLOT"]:
6254 msgs.append("  SLOT is undefined\n")
6257 portage.util.writemsg_level("Error(s) in metadata for '%s':\n" % \
6258 (mysettings.mycpv,), level=logging.ERROR, noiselevel=-1)
6260 portage.util.writemsg_level(x,
6261 level=logging.ERROR, noiselevel=-1)
6262 if mydo not in invalid_dep_exempt_phases:
def _movefile(src, dest, **kwargs):
	"""Wrapper around movefile() that raises on failure.

	movefile() signals failure by returning None; translate that into a
	PortageException so callers need not check the return value.
	"""
	result = movefile(src, dest, **kwargs)
	if result is None:
		raise portage.exception.PortageException(
			"mv '%s' '%s'" % (src, dest))
# Move a single file (regular, symlink, or special) preserving ownership,
# mode, mtime and (on FreeBSD) file flags; falls back to copy+rename
# across devices.  Returns the resulting mtime on success, None on
# failure.  NOTE(review): many interior lines (try/except framing,
# early returns, the hardlinked/renamefailed bookkeeping) are elided in
# this view, so comments annotate only the visible statements.
6275 def movefile(src, dest, newmtime=None, sstat=None, mysettings=None,
6276 hardlink_candidates=None):
6277 """moves a file from src to dest, preserving all permissions and attributes; mtime will
6278 be preserved even when moving across filesystems. Returns true on success and false on
6279 failure. Move is atomic."""
6280 #print "movefile("+str(src)+","+str(dest)+","+str(newmtime)+","+str(sstat)+")"
6282 if mysettings is None:
6284 mysettings = settings
6285 selinux_enabled = mysettings.selinux_enabled()
6290 except SystemExit, e:
6292 except Exception, e:
6293 print "!!! Stating source file failed... movefile()"
# lstat the destination; fall back to its parent dir if dest is absent.
6299 dstat=os.lstat(dest)
6300 except (OSError, IOError):
6301 dstat=os.lstat(os.path.dirname(dest))
# FreeBSD: clear file flags on dest and its parent so the move can
# proceed; pflags is restored at the bottom of this function.
6305 if destexists and dstat.st_flags != 0:
6306 bsd_chflags.lchflags(dest, 0)
6307 # Use normal stat/chflags for the parent since we want to
6308 # follow any symlinks to the real parent directory.
6309 pflags = os.stat(os.path.dirname(dest)).st_flags
6311 bsd_chflags.chflags(os.path.dirname(dest), 0)
6314 if stat.S_ISLNK(dstat[stat.ST_MODE]):
6318 except SystemExit, e:
6320 except Exception, e:
# Symlink source: recreate the link at dest (stripping the ${D} image
# prefix from the target) instead of copying data.
6323 if stat.S_ISLNK(sstat[stat.ST_MODE]):
6325 target=os.readlink(src)
6326 if mysettings and mysettings["D"]:
6327 if target.find(mysettings["D"])==0:
6328 target=target[len(mysettings["D"]):]
6329 if destexists and not stat.S_ISDIR(dstat[stat.ST_MODE]):
6332 sid = selinux.get_lsid(src)
6333 selinux.secure_symlink(target,dest,sid)
6335 os.symlink(target,dest)
6336 lchown(dest,sstat[stat.ST_UID],sstat[stat.ST_GID])
6337 # utime() only works on the target of a symlink, so it's not
6338 # possible to perserve mtime on symlinks.
6339 return os.lstat(dest)[stat.ST_MTIME]
6340 except SystemExit, e:
6342 except Exception, e:
6343 print "!!! failed to properly create symlink:"
6344 print "!!!",dest,"->",target
6349 # Since identical files might be merged to multiple filesystems,
6350 # so os.link() calls might fail for some paths, so try them all.
6351 # For atomic replacement, first create the link as a temp file
6352 # and them use os.rename() to replace the destination.
6353 if hardlink_candidates:
6354 head, tail = os.path.split(dest)
6355 hardlink_tmp = os.path.join(head, ".%s._portage_merge_.%s" % \
6356 (tail, os.getpid()))
6358 os.unlink(hardlink_tmp)
6360 if e.errno != errno.ENOENT:
6361 writemsg("!!! Failed to remove hardlink temp file: %s\n" % \
6362 (hardlink_tmp,), noiselevel=-1)
6363 writemsg("!!! %s\n" % (e,), noiselevel=-1)
6366 for hardlink_src in hardlink_candidates:
6368 os.link(hardlink_src, hardlink_tmp)
6373 os.rename(hardlink_tmp, dest)
6375 writemsg("!!! Failed to rename %s to %s\n" % \
6376 (hardlink_tmp, dest), noiselevel=-1)
6377 writemsg("!!! %s\n" % (e,), noiselevel=-1)
# Fast path: same device (or SELinux) -- plain atomic rename.
6384 renamefailed = False
6385 if not hardlinked and (selinux_enabled or sstat.st_dev == dstat.st_dev):
6388 ret=selinux.secure_rename(src,dest)
6390 ret=os.rename(src,dest)
6392 except SystemExit, e:
6394 except Exception, e:
6395 if e[0]!=errno.EXDEV:
6396 # Some random error.
6397 print "!!! Failed to move",src,"to",dest
6400 # Invalid cross-device-link 'bind' mounted or actually Cross-Device
# Cross-device fallback: copy to dest+"#new" then rename over dest so
# readers never observe a partially written file.
6403 if stat.S_ISREG(sstat[stat.ST_MODE]):
6404 try: # For safety copy then move it over.
6406 selinux.secure_copy(src,dest+"#new")
6407 selinux.secure_rename(dest+"#new",dest)
6409 shutil.copyfile(src,dest+"#new")
6410 os.rename(dest+"#new",dest)
6412 except SystemExit, e:
6414 except Exception, e:
6415 print '!!! copy',src,'->',dest,'failed.'
6419 #we don't yet handle special, so we need to fall back to /bin/mv
6421 a=commands.getstatusoutput(MOVE_BINARY+" -c -f "+"'"+src+"' '"+dest+"'")
6423 a=commands.getstatusoutput(MOVE_BINARY+" -f "+"'"+src+"' '"+dest+"'")
6425 print "!!! Failed to move special file:"
6426 print "!!! '"+src+"' to '"+dest+"'"
6428 return None # failure
# Re-apply ownership and mode (lchown for symlinks, chown/chmod
# otherwise; chmod after chown since chown clears sticky bits).
6431 if stat.S_ISLNK(sstat[stat.ST_MODE]):
6432 lchown(dest,sstat[stat.ST_UID],sstat[stat.ST_GID])
6434 os.chown(dest,sstat[stat.ST_UID],sstat[stat.ST_GID])
6435 os.chmod(dest, stat.S_IMODE(sstat[stat.ST_MODE])) # Sticky is reset on chown
6437 except SystemExit, e:
6439 except Exception, e:
6440 print "!!! Failed to chown/chmod/unlink in movefile()"
6447 newmtime = long(os.stat(dest).st_mtime)
# Preserve (or apply the requested) mtime on the destination.
6449 if newmtime is not None:
6450 os.utime(dest, (newmtime, newmtime))
6452 os.utime(dest, (sstat.st_atime, sstat.st_mtime))
6453 newmtime = long(sstat.st_mtime)
6455 # The utime can fail here with EPERM even though the move succeeded.
6456 # Instead of failing, use stat to return the mtime if possible.
6458 newmtime = long(os.stat(dest).st_mtime)
6460 writemsg("!!! Failed to stat in movefile()\n", noiselevel=-1)
6461 writemsg("!!! %s\n" % dest, noiselevel=-1)
6462 writemsg("!!! %s\n" % str(e), noiselevel=-1)
6466 # Restore the flags we saved before moving
6468 bsd_chflags.chflags(os.path.dirname(dest), pflags)
# Merge an image directory (pkgloc, i.e. ${D}) into the live filesystem
# at myroot via dblink.merge(), after confirming myroot is writable.
# NOTE(review): some interior lines (e.g. the early-return after the
# permission message) are elided in this view.
6472 def merge(mycat, mypkg, pkgloc, infloc, myroot, mysettings, myebuild=None,
6473 mytree=None, mydbapi=None, vartree=None, prev_mtimes=None, blockers=None,
6475 if not os.access(myroot, os.W_OK):
6476 writemsg("Permission denied: access('%s', W_OK)\n" % myroot,
# Delegate the actual merge to a dblink instance for this cat/pkg.
6479 mylink = dblink(mycat, mypkg, myroot, mysettings, treetype=mytree,
6480 vartree=vartree, blockers=blockers, scheduler=scheduler)
6481 return mylink.merge(pkgloc, infloc, myroot, myebuild,
6482 mydbapi=mydbapi, prev_mtimes=prev_mtimes)
# Unmerge an installed package from the vartree via dblink.unmerge().
# NOTE(review): interior lines (apparent try/lock framing and the
# return) are elided in this view; comments annotate visible lines only.
6484 def unmerge(cat, pkg, myroot, mysettings, mytrimworld=1, vartree=None,
6485 ldpath_mtimes=None, scheduler=None):
6486 mylink = dblink(cat, pkg, myroot, mysettings, treetype="vartree",
6487 vartree=vartree, scheduler=scheduler)
6488 vartree = mylink.vartree
# Refresh the preserved-libs registry before unmerging so stale
# entries for files that no longer exist are pruned.
6492 vartree.dbapi.plib_registry.load()
6493 vartree.dbapi.plib_registry.pruneNonExisting()
6494 retval = mylink.unmerge(trimworld=mytrimworld, cleanup=1,
6495 ldpath_mtimes=ldpath_mtimes)
6496 if retval == os.EX_OK:
# Invalidate the linkmap cache after a successful unmerge.
6501 vartree.dbapi.linkmap._clear_cache()
def getCPFromCPV(mycpv):
	"""Calls pkgsplit on a cpv and returns only the cp."""
	split_result = pkgsplit(mycpv)
	return split_result[0]
# Recursively rewrite old-style virtual atoms in a parsed dependency
# structure into their provider atoms from the profile's virtuals map.
# NOTE(review): interior lines (the main loop header, appends, return)
# are elided in this view; comments annotate visible lines only.
6508 def dep_virtual(mysplit, mysettings):
6509 "Does virtual dependency conversion"
6511 myvirtuals = mysettings.getvirtuals()
# Nested lists (|| groups etc.) are converted recursively.
6513 if isinstance(x, list):
6514 newsplit.append(dep_virtual(x, mysettings))
6517 mychoices = myvirtuals.get(mykey, None)
# Exactly one provider: substitute it directly into the atom.
6519 if len(mychoices) == 1:
6520 a = x.replace(mykey, mychoices[0])
6523 # blocker needs "and" not "or(||)".
# Multiple providers: expand to one atom per provider choice.
6528 a.append(x.replace(mykey, y))
6534 def _expand_new_virtuals(mysplit, edebug, mydbapi, mysettings, myroot="/",
6535 trees=None, use_mask=None, use_force=None, **kwargs):
6536 """Recursively expand new-style virtuals so as to collapse one or more
6537 levels of indirection. In dep_zapdeps, new-style virtuals will be assigned
6538 zero cost regardless of whether or not they are currently installed. Virtual
6539 blockers are supported but only when the virtual expands to a single
6540 atom because it wouldn't necessarily make sense to block all the components
6541 of a compound virtual. When more than one new-style virtual is matched,
6542 the matches are sorted from highest to lowest versions and the atom is
6543 expanded to || ( highest match ... lowest match )."""
6545 # According to GLEP 37, RDEPEND is the only dependency type that is valid
6546 # for new-style virtuals. Repoman should enforce this.
6547 dep_keys = ["RDEPEND", "DEPEND", "PDEPEND"]
6548 portdb = trees[myroot]["porttree"].dbapi
# repoman runs without local config; this flag switches QA-style handling on.
6549 repoman = not mysettings.local_config
6550 if kwargs["use_binaries"]:
# When resolving for binary packages, match virtuals against the bintree.
6551 portdb = trees[myroot]["bintree"].dbapi
6552 myvirtuals = mysettings.getvirtuals()
6553 myuse = kwargs["myuse"]
6558 elif isinstance(x, list):
# Nested dependency group: expand it recursively with the same context.
6559 newsplit.append(_expand_new_virtuals(x, edebug, mydbapi,
6560 mysettings, myroot=myroot, trees=trees, use_mask=use_mask,
6561 use_force=use_force, **kwargs))
6564 if not isinstance(x, portage.dep.Atom):
6566 x = portage.dep.Atom(x)
6567 except portage.exception.InvalidAtom:
6568 if portage.dep._dep_check_strict:
6569 raise portage.exception.ParseError(
6570 "invalid atom: '%s'" % x)
# Repoman QA path: evaluate USE conditionals against the mask/force sets
# rather than a real USE configuration.
6572 if repoman and x.use and x.use.conditional:
6573 evaluated_atom = portage.dep.remove_slot(x)
6575 evaluated_atom += ":%s" % x.slot
6576 evaluated_atom += str(x.use._eval_qa_conditionals(
6577 use_mask, use_force))
6578 x = portage.dep.Atom(evaluated_atom)
# Normal path: evaluate USE conditionals against the actual USE flags.
6580 if not repoman and \
6581 myuse is not None and isinstance(x, portage.dep.Atom) and x.use:
6582 if x.use.conditional:
6583 evaluated_atom = portage.dep.remove_slot(x)
6585 evaluated_atom += ":%s" % x.slot
6586 evaluated_atom += str(x.use.evaluate_conditionals(myuse))
6587 x = portage.dep.Atom(evaluated_atom)
6589 mykey = dep_getkey(x)
6590 if not mykey.startswith("virtual/"):
6593 mychoices = myvirtuals.get(mykey, [])
6594 isblocker = x.startswith("!")
6596 # Virtual blockers are no longer expanded here since
6597 # the un-expanded virtual atom is more useful for
6598 # maintaining a cache of blocker atoms.
6605 matches = portdb.match(match_atom)
6606 # Use descending order to prefer higher versions.
6609 # only use new-style matches
6610 if cpv.startswith("virtual/"):
6611 pkgs.append((cpv, catpkgsplit(cpv)[1:], portdb))
6612 if not (pkgs or mychoices):
6613 # This one couldn't be expanded as a new-style virtual. Old-style
6614 # virtuals have already been expanded by dep_virtual, so this one
6615 # is unavailable and dep_zapdeps will identify it as such. The
6616 # atom is not eliminated here since it may still represent a
6617 # dependency that needs to be satisfied.
6620 if not pkgs and len(mychoices) == 1:
6621 newsplit.append(portage.dep.Atom(x.replace(mykey, mychoices[0])))
6628 cpv, pv_split, db = y
# Collect this virtual provider's own dependencies (GLEP 37 indirection).
6629 depstring = " ".join(db.aux_get(cpv, dep_keys))
6630 pkg_kwargs = kwargs.copy()
6635 use_split = db.aux_get(cpv, ["USE"])[0].split()
6636 pkg_kwargs["myuse"] = use_split
6638 print "Virtual Parent: ", y[0]
6639 print "Virtual Depstring:", depstring
# Recurse through dep_check so the provider's deps are fully expanded too.
6640 mycheck = dep_check(depstring, mydbapi, mysettings, myroot=myroot,
6641 trees=trees, **pkg_kwargs)
6643 raise portage.exception.ParseError(
6644 "%s: %s '%s'" % (y[0], mycheck[1], depstring))
6646 virtual_atoms = [atom for atom in mycheck[1] \
6647 if not atom.startswith("!")]
6648 if len(virtual_atoms) == 1:
6649 # It wouldn't make sense to block all the components of a
6650 # compound virtual, so only a single atom block is allowed.
6651 a.append(portage.dep.Atom("!" + virtual_atoms[0]))
6653 # pull in the new-style virtual
6654 mycheck[1].append(portage.dep.Atom("="+y[0]))
6655 a.append(mycheck[1])
6656 # Plain old-style virtuals. New-style virtuals are preferred.
6658 a.append(portage.dep.Atom(x.replace(mykey, y, 1)))
6659 if isblocker and not a:
6660 # Probably a compound virtual. Pass the atom through unprocessed.
# Evaluate an already-reduced dependency list (booleans plus "||" markers
# and nested lists) down to a single truth value.
# NOTE(review): this listing elides some original lines.
6666 def dep_eval(deplist):
6669 if deplist[0]=="||":
6670 #or list; we just need one "1"
6671 for x in deplist[1:]:
6672 if isinstance(x, list):
6677 #XXX: unless there's no available atoms in the list
6678 #in which case we need to assume that everything is
6679 #okay as some ebuilds are relying on an old bug.
6680 if len(deplist) == 1:
# AND semantics for a plain list: every element must evaluate true.
6685 if isinstance(x, list):
6692 def dep_zapdeps(unreduced, reduced, myroot, use_binaries=0, trees=None):
6693 """Takes an unreduced and reduced deplist and removes satisfied dependencies.
6694 Returned deplist contains steps that must be taken to satisfy dependencies."""
6698 writemsg("ZapDeps -- %s\n" % (use_binaries), 2)
# Nothing to do if the (sub)tree is already satisfied or empty.
6699 if not reduced or unreduced == ["||"] or dep_eval(reduced):
6702 if unreduced[0] != "||":
# Plain AND group: recurse per element and accumulate unresolved atoms.
6704 for dep, satisfied in izip(unreduced, reduced):
6705 if isinstance(dep, list):
6706 unresolved += dep_zapdeps(dep, satisfied, myroot,
6707 use_binaries=use_binaries, trees=trees)
6709 unresolved.append(dep)
6712 # We're at a ( || atom ... ) type level and need to make a choice
6713 deps = unreduced[1:]
6714 satisfieds = reduced[1:]
6716 # Our preference order is for the first item that:
6717 # a) contains all unmasked packages with the same key as installed packages
6718 # b) contains all unmasked packages
6719 # c) contains masked installed packages
6720 # d) is the first item
6723 preferred_not_installed = []
6724 preferred_any_slot = []
6725 possible_upgrades = []
6728 # Alias the trees we'll be checking availability against
6729 parent = trees[myroot].get("parent")
6730 graph_db = trees[myroot].get("graph_db")
6732 if "vartree" in trees[myroot]:
6733 vardb = trees[myroot]["vartree"].dbapi
6735 mydbapi = trees[myroot]["bintree"].dbapi
6737 mydbapi = trees[myroot]["porttree"].dbapi
6739 # Sort the deps into preferred (installed) and other
6740 # with values of [[required_atom], availability]
6741 for dep, satisfied in izip(deps, satisfieds):
6742 if isinstance(dep, list):
6743 atoms = dep_zapdeps(dep, satisfied, myroot,
6744 use_binaries=use_binaries, trees=trees)
6750 other.append((atoms, None, False))
6753 all_available = True
6758 avail_pkg = mydbapi.match(atom)
6760 avail_pkg = avail_pkg[-1] # highest (ascending order)
6761 avail_slot = "%s:%s" % (dep_getkey(atom),
6762 mydbapi.aux_get(avail_pkg, ["SLOT"])[0])
6764 all_available = False
6767 versions[avail_slot] = avail_pkg
6769 this_choice = (atoms, versions, all_available)
6771 # The "all installed" criterion is not version or slot specific.
6772 # If any version of a package is installed then we assume that it
6773 # is preferred over other possible packages choices.
6774 all_installed = True
6775 for atom in set([dep_getkey(atom) for atom in atoms \
6776 if atom[:1] != "!"]):
6777 # New-style virtuals have zero cost to install.
6778 if not vardb.match(atom) and not atom.startswith("virtual/"):
6779 all_installed = False
6781 all_installed_slots = False
6783 all_installed_slots = True
6784 for slot_atom in versions:
6785 # New-style virtuals have zero cost to install.
6786 if not vardb.match(slot_atom) and \
6787 not slot_atom.startswith("virtual/"):
6788 all_installed_slots = False
6791 if all_installed_slots:
6792 preferred.append(this_choice)
6794 preferred_any_slot.append(this_choice)
6795 elif graph_db is None:
6796 possible_upgrades.append(this_choice)
# With a dep graph available, prefer choices already in the graph.
6799 for slot_atom in versions:
6800 # New-style virtuals have zero cost to install.
6801 if not graph_db.match(slot_atom) and \
6802 not slot_atom.startswith("virtual/"):
6803 all_in_graph = False
6807 preferred_not_installed.append(this_choice)
6809 # Check if the atom would result in a direct circular
6810 # dependency and try to avoid that if it seems likely
6811 # to be unresolvable.
6812 cpv_slot_list = [parent]
6813 circular_atom = None
6817 if vardb.match(atom):
6818 # If the atom is satisfied by an installed
6819 # version then it's not a circular dep.
6821 if dep_getkey(atom) != parent.cp:
6823 if match_from_list(atom, cpv_slot_list):
6824 circular_atom = atom
6826 if circular_atom is None:
6827 preferred_not_installed.append(this_choice)
6829 other.append(this_choice)
6831 possible_upgrades.append(this_choice)
6833 other.append(this_choice)
6835 # Compare the "all_installed" choices against the "all_available" choices
6836 # for possible missed upgrades. The main purpose of this code is to find
6837 # upgrades of new-style virtuals since _expand_new_virtuals() expands them
6838 # into || ( highest version ... lowest version ). We want to prefer the
6839 # highest all_available version of the new-style virtual when there is a
6840 # lower all_installed version.
6841 preferred.extend(preferred_not_installed)
6842 preferred.extend(preferred_any_slot)
6843 preferred.extend(possible_upgrades)
6844 possible_upgrades = preferred[1:]
6845 for possible_upgrade in possible_upgrades:
6846 atoms, versions, all_available = possible_upgrade
6847 myslots = set(versions)
6848 for other_choice in preferred:
6849 if possible_upgrade is other_choice:
6850 # possible_upgrade will not be promoted, so move on
6852 o_atoms, o_versions, o_all_available = other_choice
6853 intersecting_slots = myslots.intersection(o_versions)
6854 if not intersecting_slots:
6857 has_downgrade = False
6858 for myslot in intersecting_slots:
6859 myversion = versions[myslot]
6860 o_version = o_versions[myslot]
# pkgcmp compares (pkg, ver, rev) tuples; sign tells upgrade vs downgrade.
6861 difference = pkgcmp(catpkgsplit(myversion)[1:],
6862 catpkgsplit(o_version)[1:])
6867 has_downgrade = True
6869 if has_upgrade and not has_downgrade:
# Promote the pure upgrade ahead of the choice it upgrades.
6870 preferred.remove(possible_upgrade)
6871 o_index = preferred.index(other_choice)
6872 preferred.insert(o_index, possible_upgrade)
6875 # preferred now contains a) and c) from the order above with
6876 # the masked flag differentiating the two. other contains b)
6877 # and d) so adding other to preferred will give us a suitable
6878 # list to iterate over.
6879 preferred.extend(other)
# First pass takes only fully-available choices; second pass allows masked.
6881 for allow_masked in (False, True):
6882 for atoms, versions, all_available in preferred:
6883 if all_available or allow_masked:
6886 assert(False) # This point should not be reachable
# Expand a possibly category-less dependency atom into a full Atom by
# running its cpv part through cpv_expand, preserving any operator prefix
# and slot/USE postfix around it.
6889 def dep_expand(mydep, mydb=None, use_cache=1, settings=None):
6895 mydep = dep_getcpv(orig_dep)
# Split the original atom into prefix (operators) / cpv / postfix.
6896 myindex = orig_dep.index(mydep)
6897 prefix = orig_dep[:myindex]
6898 postfix = orig_dep[myindex+len(mydep):]
6899 expanded = cpv_expand(mydep, mydb=mydb,
6900 use_cache=use_cache, settings=settings)
6902 return portage.dep.Atom(prefix + expanded + postfix)
6903 except portage.exception.InvalidAtom:
6904 # Missing '=' prefix is allowed for backward compatibility.
6905 if not isvalidatom("=" + prefix + expanded + postfix):
6907 return portage.dep.Atom("=" + prefix + expanded + postfix)
6909 def dep_check(depstring, mydbapi, mysettings, use="yes", mode=None, myuse=None,
6910 use_cache=1, use_binaries=0, myroot="/", trees=None):
6911 """Takes a depend string and parses the condition."""
# Returns a [status, payload] pair: [1, atom_list] on success,
# [0, error_message] on failure (based on the visible return statements).
6912 edebug = mysettings.get("PORTAGE_DEBUG", None) == "1"
6913 #check_config_instance(mysettings)
6915 trees = globals()["db"]
6919 myusesplit = mysettings["PORTAGE_USE"].split()
6922 # We've been given useflags to use.
6923 #print "USE FLAGS PASSED IN."
6925 #if "bindist" in myusesplit:
6926 # print "BINDIST is set!"
6928 # print "BINDIST NOT set."
6930 #we are being run by autouse(), don't consult USE vars yet.
6931 # WE ALSO CANNOT USE SETTINGS
6934 #convert parenthesis to sublists
6936 mysplit = portage.dep.paren_reduce(depstring)
6937 except portage.exception.InvalidDependString, e:
6942 useforce.add(mysettings["ARCH"])
6944 # This masking/forcing is only for repoman. In other cases, relevant
6945 # masking/forcing should have already been applied via
6946 # config.regenerate(). Also, binary or installed packages may have
6947 # been built with flags that are now masked, and it would be
6948 # inconsistent to mask them now. Additionally, myuse may consist of
6949 # flags from a parent package that is being merged to a $ROOT that is
6950 # different from the one that mysettings represents.
6951 mymasks.update(mysettings.usemask)
6952 mymasks.update(mysettings.archlist())
6953 mymasks.discard(mysettings["ARCH"])
6954 useforce.update(mysettings.useforce)
6955 useforce.difference_update(mymasks)
# Resolve USE-conditional groups against the active flag sets.
6957 mysplit = portage.dep.use_reduce(mysplit, uselist=myusesplit,
6958 masklist=mymasks, matchall=(use=="all"), excludeall=useforce)
6959 except portage.exception.InvalidDependString, e:
6962 # Do the || conversions
6963 mysplit=portage.dep.dep_opconvert(mysplit)
6966 #dependencies were reduced to nothing
6969 # Recursively expand new-style virtuals so as to
6970 # collapse one or more levels of indirection.
6972 mysplit = _expand_new_virtuals(mysplit, edebug, mydbapi, mysettings,
6973 use=use, mode=mode, myuse=myuse,
6974 use_force=useforce, use_mask=mymasks, use_cache=use_cache,
6975 use_binaries=use_binaries, myroot=myroot, trees=trees)
6976 except portage.exception.ParseError, e:
6980 mysplit2=dep_wordreduce(mysplit2,mysettings,mydbapi,mode,use_cache=use_cache)
6981 if mysplit2 is None:
6982 return [0,"Invalid token"]
6984 writemsg("\n\n\n", 1)
6985 writemsg("mysplit: %s\n" % (mysplit), 1)
6986 writemsg("mysplit2: %s\n" % (mysplit2), 1)
# Pick the concrete atoms that must be merged to satisfy the dep string.
6989 myzaps = dep_zapdeps(mysplit, mysplit2, myroot,
6990 use_binaries=use_binaries, trees=trees)
6991 except portage.exception.InvalidAtom, e:
6992 if portage.dep._dep_check_strict:
6993 raise # This shouldn't happen.
6994 # dbapi.match() failed due to an invalid atom in
6995 # the dependencies of an installed package.
6996 return [0, "Invalid atom: '%s'" % (e,)]
6998 mylist = flatten(myzaps)
6999 writemsg("myzaps: %s\n" % (myzaps), 1)
7000 writemsg("mylist: %s\n" % (mylist), 1)
7005 writemsg("mydict: %s\n" % (mydict), 1)
7006 return [1,mydict.keys()]
# Replace each atom in a dependency list with True/False according to
# whether it is satisfied (package.provided, or a dbapi match), recursing
# into nested lists. Returns None on an invalid string.
7008 def dep_wordreduce(mydeplist,mysettings,mydbapi,mode,use_cache=1):
7009 "Reduces the deplist to ones and zeros"
7010 deplist=mydeplist[:]
7011 for mypos, token in enumerate(deplist):
7012 if isinstance(deplist[mypos], list):
7014 deplist[mypos]=dep_wordreduce(deplist[mypos],mysettings,mydbapi,mode,use_cache=use_cache)
7015 elif deplist[mypos]=="||":
7017 elif token[:1] == "!":
7018 deplist[mypos] = False
7020 mykey = dep_getkey(deplist[mypos])
# package.provided entries satisfy the atom without any db lookup.
7021 if mysettings and mykey in mysettings.pprovideddict and \
7022 match_from_list(deplist[mypos], mysettings.pprovideddict[mykey]):
7024 elif mydbapi is None:
7025 # Assume nothing is satisfied. This forces dep_zapdeps to
7026 # return all of the deps that have been selected
7027 # (excluding those satisfied by package.provided).
7028 deplist[mypos] = False
7031 x = mydbapi.xmatch(mode, deplist[mypos])
7032 if mode.startswith("minimum-"):
7039 mydep=mydbapi.match(deplist[mypos],use_cache=use_cache)
7042 if deplist[mypos][0]=="!":
7046 #encountered invalid string
# Return the category/package key for a cpv string
# (e.g. "sys-apps/portage-2.1" -> "sys-apps/portage").
7050 def cpv_getkey(mycpv):
7051 myslash=mycpv.split("/")
7052 mysplit=pkgsplit(myslash[-1])
7055 return myslash[0]+"/"+mysplit[0]
# Expand a package key that may lack a category into "cat/pkg", consulting
# the db's categories and the profile's virtuals; falls back to "null/<pkg>"
# when no category can be determined.
7061 def key_expand(mykey, mydb=None, use_cache=1, settings=None):
7062 mysplit=mykey.split("/")
7063 if settings is None:
7064 settings = globals()["settings"]
7065 virts = settings.getvirtuals("/")
7066 virts_p = settings.get_virts_p("/")
# Category-less key: probe each known category for a matching package.
7068 if hasattr(mydb, "cp_list"):
7069 for x in mydb.categories:
7070 if mydb.cp_list(x+"/"+mykey,use_cache=use_cache):
7072 if mykey in virts_p:
7073 return(virts_p[mykey][0])
7074 return "null/"+mykey
7076 if hasattr(mydb, "cp_list"):
# Full key with no real package: fall back to the virtual provider.
7077 if not mydb.cp_list(mykey, use_cache=use_cache) and \
7078 virts and mykey in virts:
7079 return virts[mykey][0]
7082 def cpv_expand(mycpv, mydb=None, use_cache=1, settings=None):
7083 """Given a string (packagename or virtual) expand it into a valid
7084 cat/package string. Virtuals use the mydb to determine which provided
7085 virtual is a valid choice and defaults to the first element when there
7086 are no installed/available candidates."""
# NOTE(review): this listing elides some original lines; comments describe
# only the code visible here.
7087 myslash=mycpv.split("/")
7088 mysplit=pkgsplit(myslash[-1])
7089 if settings is None:
7090 settings = globals()["settings"]
7091 virts = settings.getvirtuals("/")
7092 virts_p = settings.get_virts_p("/")
7094 # this is illegal case.
7097 elif len(myslash)==2:
7099 mykey=myslash[0]+"/"+mysplit[0]
7102 if mydb and virts and mykey in virts:
7103 writemsg("mydb.__class__: %s\n" % (mydb.__class__), 1)
7104 if hasattr(mydb, "cp_list"):
7105 if not mydb.cp_list(mykey, use_cache=use_cache):
7106 writemsg("virts[%s]: %s\n" % (str(mykey),virts[mykey]), 1)
7107 mykey_orig = mykey[:]
7108 for vkey in virts[mykey]:
7109 # The virtuals file can contain a versioned atom, so
7110 # it may be necessary to remove the operator and
7111 # version from the atom before it is passed into
7113 if mydb.cp_list(dep_getkey(vkey), use_cache=use_cache):
7115 writemsg("virts chosen: %s\n" % (mykey), 1)
7117 if mykey == mykey_orig:
# No provider had an available package: default to the first choice.
7118 mykey=virts[mykey][0]
7119 writemsg("virts defaulted: %s\n" % (mykey), 1)
7120 #we only perform virtual expansion if we are passed a dbapi
7122 #specific cpv, no category, ie. "foo-1.0"
# Category-less name: search every category for candidates.
7130 if mydb and hasattr(mydb, "categories"):
7131 for x in mydb.categories:
7132 if mydb.cp_list(x+"/"+myp,use_cache=use_cache):
7133 matches.append(x+"/"+myp)
7134 if len(matches) > 1:
7135 virtual_name_collision = False
7136 if len(matches) == 2:
7138 if not x.startswith("virtual/"):
7139 # Assume that the non-virtual is desired. This helps
7140 # avoid the ValueError for invalid deps that come from
7141 # installed packages (during reverse blocker detection,
7145 virtual_name_collision = True
7146 if not virtual_name_collision:
7147 # AmbiguousPackageName inherits from ValueError,
7148 # for backward compatibility with calling code
7149 # that already handles ValueError.
7150 raise portage.exception.AmbiguousPackageName(matches)
7154 if not mykey and not isinstance(mydb, list):
7156 mykey=virts_p[myp][0]
7157 #again, we only perform virtual expansion if we have a dbapi (not a list)
# Reattach version (dropping a trivial "-r0" revision) when one was given.
7161 if mysplit[2]=="r0":
7162 return mykey+"-"+mysplit[1]
7164 return mykey+"-"+mysplit[1]+"-"+mysplit[2]
# Find the package.mask comment that applies to mycpv by scanning the
# package.mask files from the profile/overlay/user locations; with
# return_location=True the matching file path is returned alongside.
7168 def getmaskingreason(mycpv, metadata=None, settings=None, portdb=None, return_location=False):
7169 from portage.util import grablines
7170 if settings is None:
7171 settings = globals()["settings"]
7173 portdb = globals()["portdb"]
7174 mysplit = catpkgsplit(mycpv)
7176 raise ValueError("invalid CPV: %s" % mycpv)
7177 if metadata is None:
7178 db_keys = list(portdb._aux_cache_keys)
7180 metadata = dict(izip(db_keys, portdb.aux_get(mycpv, db_keys)))
7182 if not portdb.cpv_exists(mycpv):
7184 if metadata is None:
7185 # Can't access SLOT due to corruption.
7186 cpv_slot_list = [mycpv]
7188 cpv_slot_list = ["%s:%s" % (mycpv, metadata["SLOT"])]
7189 mycp=mysplit[0]+"/"+mysplit[1]
7191 # XXX- This is a temporary duplicate of code from the config constructor.
7192 locations = [os.path.join(settings["PORTDIR"], "profiles")]
7193 locations.extend(settings.profiles)
7194 for ov in settings["PORTDIR_OVERLAY"].split():
7195 profdir = os.path.join(normalize_path(ov), "profiles")
7196 if os.path.isdir(profdir):
7197 locations.append(profdir)
7198 locations.append(os.path.join(settings["PORTAGE_CONFIGROOT"],
7199 USER_CONFIG_PATH.lstrip(os.path.sep)))
7201 pmasklists = [(x, grablines(os.path.join(x, "package.mask"), recursive=1)) for x in locations]
7203 if mycp in settings.pmaskdict:
7204 for x in settings.pmaskdict[mycp]:
7205 if match_from_list(x, cpv_slot_list):
7209 for pmask in pmasklists:
7210 pmask_filename = os.path.join(pmask[0], "package.mask")
7211 for i in xrange(len(pmask[1])):
7212 l = pmask[1][i].strip()
# Track the comment block immediately preceding the matching atom.
7218 comment_valid = i + 1
7220 if comment_valid != i:
7223 return (comment, pmask_filename)
7226 elif comment_valid != -1:
7227 # Apparently this comment applies to multiple masks, so
7228 # it remains valid until a blank line is encountered.
# Return a list of human-readable reasons why mycpv is masked (profile,
# package.mask, EAPI, keywords, licenses); an empty list means unmasked.
# NOTE(review): this listing elides some original lines.
7235 def getmaskingstatus(mycpv, settings=None, portdb=None):
7236 if settings is None:
7237 settings = config(clone=globals()["settings"])
7239 portdb = globals()["portdb"]
7243 if not isinstance(mycpv, basestring):
7244 # emerge passed in a Package instance
7247 metadata = pkg.metadata
7248 installed = pkg.installed
7250 mysplit = catpkgsplit(mycpv)
7252 raise ValueError("invalid CPV: %s" % mycpv)
7253 if metadata is None:
7254 db_keys = list(portdb._aux_cache_keys)
7256 metadata = dict(izip(db_keys, portdb.aux_get(mycpv, db_keys)))
7258 if not portdb.cpv_exists(mycpv):
7260 return ["corruption"]
# LICENSE with USE conditionals needs USE computed for this cpv.
7261 if "?" in metadata["LICENSE"]:
7262 settings.setcpv(mycpv, mydb=metadata)
7263 metadata["USE"] = settings["PORTAGE_USE"]
7265 metadata["USE"] = ""
7266 mycp=mysplit[0]+"/"+mysplit[1]
7271 if settings._getProfileMaskAtom(mycpv, metadata):
7272 rValue.append("profile")
7274 # package.mask checking
7275 if settings._getMaskAtom(mycpv, metadata):
7276 rValue.append("package.mask")
7279 eapi = metadata["EAPI"]
7280 mygroups = settings._getKeywords(mycpv, metadata)
7281 licenses = metadata["LICENSE"]
7282 slot = metadata["SLOT"]
7283 if eapi.startswith("-"):
7285 if not eapi_is_supported(eapi):
7286 return ["EAPI %s" % eapi]
7287 elif _eapi_is_deprecated(eapi) and not installed:
7288 return ["EAPI %s" % eapi]
7289 egroups = settings.configdict["backupenv"].get(
7290 "ACCEPT_KEYWORDS", "").split()
7291 pgroups = settings["ACCEPT_KEYWORDS"].split()
7292 myarch = settings["ARCH"]
7293 if pgroups and myarch not in pgroups:
7294 """For operating systems other than Linux, ARCH is not necessarily a
7296 myarch = pgroups[0].lstrip("~")
7298 cp = dep_getkey(mycpv)
# Merge per-package keyword overrides (package.keywords) into pgroups.
7299 pkgdict = settings.pkeywordsdict.get(cp)
7302 cpv_slot_list = ["%s:%s" % (mycpv, metadata["SLOT"])]
7303 for atom, pkgkeywords in pkgdict.iteritems():
7304 if match_from_list(atom, cpv_slot_list):
7306 pgroups.extend(pkgkeywords)
7307 if matches or egroups:
7308 pgroups.extend(egroups)
# Apply incremental "-kw" removals to the accumulated keyword set.
7311 if x.startswith("-"):
7315 inc_pgroups.discard(x[1:])
7318 pgroups = inc_pgroups
7326 for keyword in pgroups:
7327 if keyword in mygroups:
7337 elif gp=="-"+myarch and myarch in pgroups:
7340 elif gp=="~"+myarch and myarch in pgroups:
7345 missing_licenses = settings._getMissingLicenses(mycpv, metadata)
7346 if missing_licenses:
# Report only the missing license tokens, preserving ||/() structure.
7347 allowed_tokens = set(["||", "(", ")"])
7348 allowed_tokens.update(missing_licenses)
7349 license_split = licenses.split()
7350 license_split = [x for x in license_split \
7351 if x in allowed_tokens]
7352 msg = license_split[:]
7353 msg.append("license(s)")
7354 rValue.append(" ".join(msg))
7355 except portage.exception.InvalidDependString, e:
7356 rValue.append("LICENSE: "+str(e))
7358 # Only show KEYWORDS masks for installed packages
7359 # if they're not masked for any other reason.
7360 if kmask and (not installed or not rValue):
7361 rValue.append(kmask+" keyword")
7367 'DEPEND', 'RDEPEND', 'SLOT', 'SRC_URI',
7368 'RESTRICT', 'HOMEPAGE', 'LICENSE', 'DESCRIPTION',
7369 'KEYWORDS', 'INHERITED', 'IUSE', 'CDEPEND',
7370 'PDEPEND', 'PROVIDE', 'EAPI',
7371 'PROPERTIES', 'DEFINED_PHASES', 'UNUSED_05', 'UNUSED_04',
7372 'UNUSED_03', 'UNUSED_02', 'UNUSED_01',
7374 auxdbkeylen=len(auxdbkeys)
7376 from portage.dbapi import dbapi
7377 from portage.dbapi.virtual import fakedbapi
7378 from portage.dbapi.bintree import bindbapi, binarytree
7379 from portage.dbapi.vartree import vardbapi, vartree, dblink
7380 from portage.dbapi.porttree import close_portdbapi_caches, portdbapi, portagetree
7382 class FetchlistDict(portage.cache.mappings.Mapping):
7383 """This provides a mapping interface to retrieve fetch lists. It's used
7384 to allow portage.manifest.Manifest to access fetch lists via a standard
7385 mapping interface rather than use the dbapi directly."""
7386 def __init__(self, pkgdir, settings, mydbapi):
7387 """pkgdir is a directory containing ebuilds and settings is passed into
7388 portdbapi.getfetchlist for __getitem__ calls."""
7389 self.pkgdir = pkgdir
# cp is the "category/package" derived from the last two path components.
7390 self.cp = os.sep.join(pkgdir.split(os.sep)[-2:])
7391 self.settings = settings
# mytree is the repository root (two directories up from pkgdir).
7392 self.mytree = os.path.realpath(os.path.dirname(os.path.dirname(pkgdir)))
7393 self.portdb = mydbapi
7394 def __getitem__(self, pkg_key):
7395 """Returns the complete fetch list for a given package."""
7396 return self.portdb.getFetchMap(pkg_key, mytree=self.mytree).keys()
7397 def __contains__(self, cpv):
7398 return cpv in self.keys()
7399 def has_key(self, pkg_key):
7400 """Returns true if the given package exists within pkgdir."""
7401 return pkg_key in self
7404 return iter(self.portdb.cp_list(self.cp, mytree=self.mytree))
7407 """Returns keys for all packages within pkgdir"""
7408 return self.portdb.cp_list(self.cp, mytree=self.mytree)
7410 if sys.hexversion >= 0x3000000:
7413 def pkgmerge(mytbz2, myroot, mysettings, mydbapi=None,
7414 vartree=None, prev_mtimes=None, blockers=None):
7415 """will merge a .tbz2 file, returning a list of runtime dependencies
7416 that must be satisfied, or None if there was a merge error. This
7417 code assumes the package exists."""
7420 mydbapi = db[myroot]["bintree"].dbapi
7422 vartree = db[myroot]["vartree"]
7423 if mytbz2[-5:]!=".tbz2":
7424 print "!!! Not a .tbz2 file"
7430 did_merge_phase = False
7433 """ Don't lock the tbz2 file because the filesytem could be readonly or
7434 shared by a cluster."""
7435 #tbz2_lock = portage.locks.lockfile(mytbz2, wantnewlockfile=1)
7437 mypkg = os.path.basename(mytbz2)[:-5]
7438 xptbz2 = portage.xpak.tbz2(mytbz2)
# CATEGORY comes from the xpak metadata embedded in the binary package.
7439 mycat = xptbz2.getfile("CATEGORY")
7441 writemsg("!!! CATEGORY info missing from info chunk, aborting...\n",
7444 mycat = mycat.strip()
7446 # These are the same directories that would be used at build time.
7447 builddir = os.path.join(
7448 mysettings["PORTAGE_TMPDIR"], "portage", mycat, mypkg)
7449 catdir = os.path.dirname(builddir)
7450 pkgloc = os.path.join(builddir, "image")
7451 infloc = os.path.join(builddir, "build-info")
7452 myebuild = os.path.join(
7453 infloc, os.path.basename(mytbz2)[:-4] + "ebuild")
7454 portage.util.ensure_dirs(os.path.dirname(catdir),
7455 uid=portage_uid, gid=portage_gid, mode=070, mask=0)
7456 catdir_lock = portage.locks.lockdir(catdir)
7457 portage.util.ensure_dirs(catdir,
7458 uid=portage_uid, gid=portage_gid, mode=070, mask=0)
# Remove any stale build dir left over from a previous run.
7460 shutil.rmtree(builddir)
7461 except (IOError, OSError), e:
7462 if e.errno != errno.ENOENT:
7465 for mydir in (builddir, pkgloc, infloc):
7466 portage.util.ensure_dirs(mydir, uid=portage_uid,
7467 gid=portage_gid, mode=0755)
7468 writemsg_stdout(">>> Extracting info\n")
7469 xptbz2.unpackinfo(infloc)
7470 mysettings.setcpv(mycat + "/" + mypkg, mydb=mydbapi)
7471 # Store the md5sum in the vdb.
7472 fp = open(os.path.join(infloc, "BINPKGMD5"), "w")
7473 fp.write(str(portage.checksum.perform_md5(mytbz2))+"\n")
7476 # This gives bashrc users an opportunity to do various things
7477 # such as remove binary packages after they're installed.
7478 mysettings["PORTAGE_BINPKG_FILE"] = mytbz2
7479 mysettings.backup_changes("PORTAGE_BINPKG_FILE")
7480 debug = mysettings.get("PORTAGE_DEBUG", "") == "1"
7482 # Eventually we'd like to pass in the saved ebuild env here.
7483 retval = doebuild(myebuild, "setup", myroot, mysettings, debug=debug,
7484 tree="bintree", mydbapi=mydbapi, vartree=vartree)
7485 if retval != os.EX_OK:
7486 writemsg("!!! Setup failed: %s\n" % retval, noiselevel=-1)
7489 writemsg_stdout(">>> Extracting %s\n" % mypkg)
# Unpack the package image via an external bzip2|tar pipeline.
7490 retval = portage.process.spawn_bash(
7491 "bzip2 -dqc -- '%s' | tar -xp -C '%s' -f -" % (mytbz2, pkgloc),
7492 env=mysettings.environ())
7493 if retval != os.EX_OK:
7494 writemsg("!!! Error Extracting '%s'\n" % mytbz2, noiselevel=-1)
7496 #portage.locks.unlockfile(tbz2_lock)
7499 mylink = dblink(mycat, mypkg, myroot, mysettings, vartree=vartree,
7500 treetype="bintree", blockers=blockers)
7501 retval = mylink.merge(pkgloc, infloc, myroot, myebuild, cleanup=0,
7502 mydbapi=mydbapi, prev_mtimes=prev_mtimes)
7503 did_merge_phase = True
7504 success = retval == os.EX_OK
7507 mysettings.pop("PORTAGE_BINPKG_FILE", None)
7509 portage.locks.unlockfile(tbz2_lock)
7511 if not did_merge_phase:
7512 # The merge phase handles this already. Callers don't know how
7513 # far this function got, so we have to call elog_process() here
7514 # so that it's only called once.
7515 from portage.elog import elog_process
7516 elog_process(mycat + "/" + mypkg, mysettings)
# Clean up the temporary build directory; ENOENT is fine.
7519 shutil.rmtree(builddir)
7520 except (IOError, OSError), e:
7521 if e.errno != errno.ENOENT:
# Warn the user when the active profile is marked deprecated: read the
# deprecation marker file and print the suggested replacement profile
# plus any upgrade instructions it contains.
7525 def deprecated_profile_check(settings=None):
7527 if settings is not None:
7528 config_root = settings["PORTAGE_CONFIGROOT"]
7529 deprecated_profile_file = os.path.join(config_root,
7530 DEPRECATED_PROFILE_FILE.lstrip(os.sep))
# No marker file (or unreadable) means the profile is not deprecated.
7531 if not os.access(deprecated_profile_file, os.R_OK):
7533 deprecatedfile = open(deprecated_profile_file, "r")
7534 dcontent = deprecatedfile.readlines()
7535 deprecatedfile.close()
7536 writemsg(colorize("BAD", "\n!!! Your current profile is " + \
7537 "deprecated and not supported anymore.") + "\n", noiselevel=-1)
7539 writemsg(colorize("BAD","!!! Please refer to the " + \
7540 "Gentoo Upgrading Guide.") + "\n", noiselevel=-1)
# First line of the marker file names the replacement profile.
7542 newprofile = dcontent[0]
7543 writemsg(colorize("BAD", "!!! Please upgrade to the " + \
7544 "following profile if possible:") + "\n", noiselevel=-1)
7545 writemsg(8*" " + colorize("GOOD", newprofile) + "\n", noiselevel=-1)
7546 if len(dcontent) > 1:
7547 writemsg("To upgrade do the following steps:\n", noiselevel=-1)
7548 for myline in dcontent[1:]:
7549 writemsg(myline, noiselevel=-1)
7550 writemsg("\n\n", noiselevel=-1)
7553 # gets virtual package settings
# Deprecated module-level wrapper; delegates to the global settings object.
7554 def getvirtuals(myroot):
7556 writemsg("--- DEPRECATED call to getvirtual\n")
7557 return settings.getvirtuals(myroot)
# Persist the global mtimedb to disk as a pickled plain dict, writing
# atomically and fixing up ownership/permissions afterwards.
7559 def commit_mtimedb(mydict=None, filename=None):
7562 if "mtimedb" not in globals() or mtimedb is None:
7566 if filename is None:
7568 filename = mtimedbfile
7569 mydict["version"] = VERSION
7570 d = {} # for full backward compat, pickle it as a plain dict object.
# atomic_ofstream writes to a temp file and renames, so readers never
# see a partially-written database.
7573 f = atomic_ofstream(filename, mode='wb')
7574 pickle.dump(d, f, protocol=2)
7576 portage.util.apply_secpass_permissions(filename,
7577 uid=uid, gid=portage_gid, mode=0644)
7578 except (IOError, OSError), e:
7582 global uid,portage_gid,portdb,db
7583 if secpass and os.environ.get("SANDBOX_ON") != "1":
7584 close_portdbapi_caches()
7587 atexit_register(portageexit)
7589 def _gen_missing_encodings(missing_encodings):
7593 if 'ascii' in missing_encodings:
7595 class AsciiIncrementalEncoder(codecs.IncrementalEncoder):
7596 def encode(self, input, final=False):
7597 return codecs.ascii_encode(input, self.errors)[0]
7599 class AsciiIncrementalDecoder(codecs.IncrementalDecoder):
7600 def decode(self, input, final=False):
7601 return codecs.ascii_decode(input, self.errors)[0]
7603 class AsciiStreamWriter(codecs.StreamWriter):
7604 encode = codecs.ascii_encode
7606 class AsciiStreamReader(codecs.StreamReader):
7607 decode = codecs.ascii_decode
7609 codec_info = codecs.CodecInfo(
7611 encode=codecs.ascii_encode,
7612 decode=codecs.ascii_decode,
7613 incrementalencoder=AsciiIncrementalEncoder,
7614 incrementaldecoder=AsciiIncrementalDecoder,
7615 streamwriter=AsciiStreamWriter,
7616 streamreader=AsciiStreamReader,
7619 for alias in ('ascii', '646', 'ansi_x3.4_1968', 'ansi_x3_4_1968',
7620 'ansi_x3.4_1986', 'cp367', 'csascii', 'ibm367', 'iso646_us',
7621 'iso_646.irv_1991', 'iso_ir_6', 'us', 'us_ascii'):
7622 encodings[alias] = codec_info
7624 if 'utf_8' in missing_encodings:
7626 def utf8decode(input, errors='strict'):
7627 return codecs.utf_8_decode(input, errors, True)
7629 class Utf8IncrementalEncoder(codecs.IncrementalEncoder):
7630 def encode(self, input, final=False):
7631 return codecs.utf_8_encode(input, self.errors)[0]
7633 class Utf8IncrementalDecoder(codecs.BufferedIncrementalDecoder):
7634 _buffer_decode = codecs.utf_8_decode
7636 class Utf8StreamWriter(codecs.StreamWriter):
7637 encode = codecs.utf_8_encode
7639 class Utf8StreamReader(codecs.StreamReader):
7640 decode = codecs.utf_8_decode
7642 codec_info = codecs.CodecInfo(
7644 encode=codecs.utf_8_encode,
7646 incrementalencoder=Utf8IncrementalEncoder,
7647 incrementaldecoder=Utf8IncrementalDecoder,
7648 streamreader=Utf8StreamWriter,
7649 streamwriter=Utf8StreamReader,
7652 for alias in ('utf_8', 'u8', 'utf', 'utf8', 'utf8_ucs2', 'utf8_ucs4'):
7653 encodings[alias] = codec_info
7657 def _ensure_default_encoding():
7659 The python that's inside stage 1 or 2 is built with a minimal
7660 configuration which does not include the /usr/lib/pythonX.Y/encodings
7661 directory. This results in error like the following:
7663 LookupError: no codec search functions registered: can't find encoding
7665 In order to solve this problem, detect it early and manually register
7666 a search function for the ascii and utf_8 codecs. Starting with python-3.0
7667 this problem is more noticeable because of stricter handling of encoding
7668 and decoding between strings of characters and bytes.
7671 default_fallback = 'utf_8'
7672 default_encoding = sys.getdefaultencoding().lower().replace('-', '_')
7673 required_encodings = set(['ascii', 'utf_8'])
7674 required_encodings.add(default_encoding)
7675 missing_encodings = set()
# Probe which required codecs the interpreter can actually resolve.
7676 for codec_name in required_encodings:
7678 codecs.lookup(codec_name)
7680 missing_encodings.add(codec_name)
7682 if not missing_encodings:
7685 encodings = _gen_missing_encodings(missing_encodings)
7687 if default_encoding in missing_encodings and \
7688 default_encoding not in encodings:
7689 # Make the fallback codec correspond to whatever name happens
7690 # to be returned by sys.getdefaultencoding().
7693 encodings[default_encoding] = codecs.lookup(default_fallback)
7695 encodings[default_encoding] = encodings[default_fallback]
# Search function closes over the fallback table; codecs.register keeps it
# consulted for any name the normal machinery cannot resolve.
7697 def search_function(name):
7699 name = name.replace('-', '_')
7700 codec_info = encodings.get(name)
7701 if codec_info is not None:
7702 return codecs.CodecInfo(
7703 name=codec_info.name,
7704 encode=codec_info.encode,
7705 decode=codec_info.decode,
7706 incrementalencoder=codec_info.incrementalencoder,
7707 incrementaldecoder=codec_info.incrementaldecoder,
7708 streamreader=codec_info.streamreader,
7709 streamwriter=codec_info.streamwriter,
7713 codecs.register(search_function)
# Avoid leaking loop/helper names at module scope (this runs at import time).
7715 del codec_name, default_encoding, default_fallback, missing_encodings, \
7716 required_encodings, search_function
# _global_updates: apply $PORTDIR/profiles/updates/* (package moves and
# slot moves) to the installed-package db, the binary packages and the
# world file.
# NOTE(review): this listing has lines elided (the embedded original line
# numbers jump) -- e.g. the try: pairing with the DirectoryNotFound
# except, the initialization of myupd/timestamps, and several else:
# branches.  Code is left byte-identical; comments only.
7718 def _global_updates(trees, prev_mtimes):
7720 Perform new global updates if they exist in $PORTDIR/profiles/updates/.
7722 @param trees: A dictionary containing portage trees.
7724 @param prev_mtimes: A dictionary containing mtimes of files located in
7725 $PORTDIR/profiles/updates/.
7726 @type prev_mtimes: dict
7727 @rtype: None or List
7728 @return: None if there were no updates, otherwise a list of update commands
7729 that have been performed.
7731 # only do this if we're root and not running repoman/ebuild digest
# secpass is a portage-wide privilege level; < 2 presumably means "not
# root" -- TODO confirm against the definition of secpass.
7733 if secpass < 2 or "SANDBOX_ACTIVE" in os.environ:
7736 mysettings = trees["/"]["vartree"].settings
7737 updpath = os.path.join(mysettings["PORTDIR"], "profiles", "updates")
# When invoked from the fixpackages tool, prev_mtimes is not passed to
# grab_updates(), so every update file is (re)processed.
7740 if mysettings["PORTAGE_CALLER"] == "fixpackages":
7741 update_data = grab_updates(updpath)
7743 update_data = grab_updates(updpath, prev_mtimes)
7744 except portage.exception.DirectoryNotFound:
# NOTE(review): other writemsg calls here use noiselevel=-1; verify
# that noiselevel=1 is intended for this message.
7745 writemsg("--- 'profiles/updates' is empty or " + \
7746 "not available. Empty portage tree?\n", noiselevel=1)
7749 if len(update_data) > 0:
7750 do_upgrade_packagesmessage = 0
# Parse each update file; a legend is printed for the progress
# characters emitted while the updates are applied below.
7753 for mykey, mystat, mycontent in update_data:
7754 writemsg_stdout("\n\n")
7755 writemsg_stdout(colorize("GOOD",
7756 "Performing Global Updates: ")+bold(mykey)+"\n")
7757 writemsg_stdout("(Could take a couple of minutes if you have a lot of binary packages.)\n")
7758 writemsg_stdout(" " + bold(".") + "='update pass' " + \
7759 bold("*") + "='binary update' " + bold("#") + \
7760 "='/var/db update' " + bold("@") + "='/var/db move'\n" + \
7761 " " + bold("s") + "='/var/db SLOT move' " + \
7762 bold("%") + "='binary move' " + bold("S") + \
7763 "='binary SLOT move'\n " + \
7764 bold("p") + "='update /etc/portage/package.*'\n")
7765 valid_updates, errors = parse_updates(mycontent)
7766 myupd.extend(valid_updates)
# One "." per parsed update command.
7767 writemsg_stdout(len(valid_updates) * "." + "\n")
7768 if len(errors) == 0:
7769 # Update our internal mtime since we
7770 # processed all of our directives.
7771 timestamps[mykey] = long(mystat.st_mtime)
7774 writemsg("%s\n" % msg, noiselevel=-1)
# Rewrite the world file in place with any moved atoms.
7776 world_file = os.path.join(root, WORLD_FILE)
7777 world_list = grabfile(world_file)
7778 world_modified = False
7779 for update_cmd in myupd:
7780 for pos, atom in enumerate(world_list):
7781 new_atom = update_dbentry(update_cmd, atom)
7782 if atom != new_atom:
7783 world_list[pos] = new_atom
7784 world_modified = True
7787 write_atomic(world_file,
7788 "".join("%s\n" % (x,) for x in world_list))
# Apply the updates to CONFIG_PROTECT'ed files (/etc/portage/package.*).
7790 update_config_files("/",
7791 mysettings.get("CONFIG_PROTECT","").split(),
7792 mysettings.get("CONFIG_PROTECT_MASK","").split(),
# Re-instantiate the binary tree, then apply each move/slotmove command
# to both the installed-package db (vardb) and the binary db (bindb).
7795 trees["/"]["bintree"] = binarytree("/", mysettings["PKGDIR"],
7796 settings=mysettings)
7797 vardb = trees["/"]["vartree"].dbapi
7798 bindb = trees["/"]["bintree"].dbapi
# If PKGDIR is not writable the binary-package updates are skipped;
# the "Skipping packages" message near the end covers that case.
7799 if not os.access(bindb.bintree.pkgdir, os.W_OK):
7801 for update_cmd in myupd:
7802 if update_cmd[0] == "move":
7803 moves = vardb.move_ent(update_cmd)
7805 writemsg_stdout(moves * "@")
7807 moves = bindb.move_ent(update_cmd)
7809 writemsg_stdout(moves * "%")
7810 elif update_cmd[0] == "slotmove":
7811 moves = vardb.move_slot_ent(update_cmd)
7813 writemsg_stdout(moves * "s")
7815 moves = bindb.move_slot_ent(update_cmd)
7817 writemsg_stdout(moves * "S")
7819 # The above global updates proceed quickly, so they
7820 # are considered a single mtimedb transaction.
7821 if len(timestamps) > 0:
7822 # We do not update the mtime in the mtimedb
7823 # until after _all_ of the above updates have
7824 # been processed because the mtimedb will
7825 # automatically commit when killed by ctrl C.
7826 for mykey, mtime in timestamps.iteritems():
7827 prev_mtimes[mykey] = mtime
7829 # We gotta do the brute force updates for these now.
7830 if mysettings["PORTAGE_CALLER"] == "fixpackages" or \
7831 "fixpackages" in mysettings.features:
7832 def onUpdate(maxval, curval):
7834 writemsg_stdout("#")
7835 vardb.update_ents(myupd, onUpdate=onUpdate)
7837 def onUpdate(maxval, curval):
7839 writemsg_stdout("*")
7840 bindb.update_ents(myupd, onUpdate=onUpdate)
7842 do_upgrade_packagesmessage = 1
7844 # Update progress above is indicated by characters written to stdout so
7845 # we print a couple new lines here to separate the progress output from
7850 if do_upgrade_packagesmessage and bindb and \
7852 writemsg_stdout(" ** Skipping packages. Run 'fixpackages' or set it in FEATURES to fix the")
7853 writemsg_stdout("\n tbz2's in the packages directory. "+bold("Note: This can take a very long time."))
7854 writemsg_stdout("\n")
7858 #continue setting up other trees
# MtimeDB: a dict subclass persisted via pickle (see the cPickle import),
# tracking mtimes and resume state; written back by commit().
# NOTE(review): lines are elided from this listing (the try: statements,
# the d = mypickle.load() call, the else/del branches, and the
# "def commit(self):" line).  Code is left byte-identical; comments only.
7860 class MtimeDB(dict):
7861 def __init__(self, filename):
# Remember the backing file so commit() can write changes back.
7863 self.filename = filename
7864 self._load(filename)
7866 def _load(self, filename):
7868 f = open(filename, 'rb')
7869 mypickle = pickle.Unpickler(f)
# Disabling find_global prevents the unpickler from instantiating
# arbitrary classes from the on-disk file (pickle hardening).
7871 mypickle.find_global = None
7872 except AttributeError:
7873 # TODO: If py3k, override Unpickler.find_class().
# A missing/corrupt mtimedb is tolerated; only a true unpickling
# error is reported before falling back to an empty dict.
7878 except (IOError, OSError, EOFError, ValueError, pickle.UnpicklingError), e:
7879 if isinstance(e, pickle.UnpicklingError):
7880 writemsg("!!! Error loading '%s': %s\n" % \
7881 (filename, str(e)), noiselevel=-1)
# Migrate the legacy "old" key to "updates", then ensure the
# required keys exist with sane defaults.
7886 d["updates"] = d["old"]
7891 d.setdefault("starttime", 0)
7892 d.setdefault("version", "")
7893 for k in ("info", "ldpath", "updates"):
# Anything outside this known schema is dropped with a warning.
7896 mtimedbkeys = set(("info", "ldpath", "resume", "resume_backup",
7897 "starttime", "updates", "version"))
7900 if k not in mtimedbkeys:
7901 writemsg("Deleting invalid mtimedb key: %s\n" % str(k))
# Deep copy the loaded state so commit() can cheaply detect change.
7904 self._clean_data = copy.deepcopy(d)
7907 if not self.filename:
7911 # Only commit if the internal state has changed.
7912 if d != self._clean_data:
7913 commit_mtimedb(mydict=d, filename=self.filename)
7914 self._clean_data = copy.deepcopy(d)
# create_trees: build the {root: tree-name -> tree} mapping (vartree,
# porttree, bintree, virtuals) used throughout portage.
# NOTE(review): this listing is missing lines (embedded line numbers
# jump): the "if trees is None:" branch, settings.lock() calls and the
# final "return trees" are elided.  Code is left byte-identical.
7916 def create_trees(config_root=None, target_root=None, trees=None):
7920 # clean up any existing portdbapi instances
7921 for myroot in trees:
7922 portdb = trees[myroot]["porttree"].dbapi
7923 portdb.close_caches()
7924 portdbapi.portdbapi_instances.remove(portdb)
7925 del trees[myroot]["porttree"], myroot, portdb
7927 settings = config(config_root=config_root, target_root=target_root,
7928 config_incrementals=portage.const.INCREMENTALS)
# First entry is the target ROOT; a second config for "/" is appended
# below when ROOT != "/".
7931 myroots = [(settings["ROOT"], settings)]
7932 if settings["ROOT"] != "/":
7934 # When ROOT != "/" we only want overrides from the calling
7935 # environment to apply to the config that's associated
7936 # with ROOT != "/", so pass an empty dict for the env parameter.
7937 settings = config(config_root=None, target_root="/", env={})
7939 myroots.append((settings["ROOT"], settings))
# Each tree is instantiated lazily on first access via LazyItemsDict.
7941 for myroot, mysettings in myroots:
7942 trees[myroot] = portage.util.LazyItemsDict(trees.get(myroot, {}))
7943 trees[myroot].addLazySingleton("virtuals", mysettings.getvirtuals, myroot)
7944 trees[myroot].addLazySingleton(
7945 "vartree", vartree, myroot, categories=mysettings.categories,
7946 settings=mysettings)
7947 trees[myroot].addLazySingleton("porttree",
7948 portagetree, myroot, settings=mysettings)
7949 trees[myroot].addLazySingleton("bintree",
7950 binarytree, myroot, mysettings["PKGDIR"], settings=mysettings)
class _LegacyGlobalProxy(proxy.objectproxy.ObjectProxy):
	"""
	Instances of these serve as proxies to global variables
	that are initialized on demand.
	"""

	__slots__ = ('_name',)

	def __init__(self, name):
		proxy.objectproxy.ObjectProxy.__init__(self)
		object.__setattr__(self, '_name', name)

	def _get_target(self):
		# Trigger initialization of the legacy globals, then resolve
		# the stored name in this module's namespace.
		init_legacy_globals()
		name = object.__getattribute__(self, '_name')
		return globals()[name]
class _PortdbProxy(proxy.objectproxy.ObjectProxy):
	"""
	The portdb is initialized separately from the rest
	of the variables, since sometimes the other variables
	are needed while the portdb is not.
	"""

	__slots__ = ()

	def _get_target(self):
		init_legacy_globals()
		global db, portdb, root, _portdb_initialized
		if not _portdb_initialized:
			# First access: replace the proxy target with the real
			# porttree dbapi for the active root.
			portdb = db[root]["porttree"].dbapi
			_portdb_initialized = True
		return portdb
class _MtimedbProxy(proxy.objectproxy.ObjectProxy):
	"""
	The mtimedb is independent from the portdb and other globals.
	"""

	__slots__ = ('_name',)

	def __init__(self, name):
		proxy.objectproxy.ObjectProxy.__init__(self)
		object.__setattr__(self, '_name', name)

	def _get_target(self):
		global mtimedb, mtimedbfile, _mtimedb_initialized
		if not _mtimedb_initialized:
			# Load the mtimedb from CACHE_PATH on first access only.
			mtimedbfile = os.path.join("/",
				CACHE_PATH.lstrip(os.path.sep), "mtimedb")
			mtimedb = MtimeDB(mtimedbfile)
			_mtimedb_initialized = True
		name = object.__getattribute__(self, '_name')
		return globals()[name]
8008 _legacy_global_var_names = ("archlist", "db", "features",
8009 "groups", "mtimedb", "mtimedbfile", "pkglines",
8010 "portdb", "profiledir", "root", "selinux_enabled",
8011 "settings", "thirdpartymirrors", "usedefaults")
def _disable_legacy_globals():
	"""
	This deletes the ObjectProxy instances that are used
	for lazy initialization of legacy global variables.
	The purpose of deleting them is to prevent new code
	from referencing these deprecated variables.
	"""
	global _legacy_global_var_names
	mod_globals = globals()
	for var_name in _legacy_global_var_names:
		# pop() with a default tolerates names already removed.
		mod_globals.pop(var_name, None)
8024 # Initialization of legacy globals. No functions/classes below this point
8025 # please! When the above functions and classes become independent of the
8026 # below global variables, it will be possible to make the below code
8027 # conditional on a backward compatibility flag (backward compatibility could
8028 # be disabled via an environment variable, for example). This will enable new
8029 # code that is aware of this flag to import portage without the unnecessary
8030 # overhead (and other issues!) of initializing the legacy globals.
# init_legacy_globals: populate the deprecated module-level globals
# (settings, db, root, portdb, ...) on first access via the proxies
# defined above.
# NOTE(review): lines are elided from this listing (embedded numbers
# jump): the early "return", the umask setup following the comment at
# 8042, the "kwargs = {}" initialization, and some branches around the
# settings selection.  Code is left byte-identical; comments only.
8032 def init_legacy_globals():
8033 global _globals_initialized
# Run only once; subsequent calls are no-ops.
8034 if _globals_initialized:
8036 _globals_initialized = True
8038 global db, settings, root, portdb, selinux_enabled, mtimedbfile, mtimedb, \
8039 archlist, features, groups, pkglines, thirdpartymirrors, usedefaults, \
8040 profiledir, flushmtimedb
8042 # Portage needs to ensure a sane umask for the files it creates.
# config_root/target_root come from the caller's environment, with "/"
# as the default for each.
8046 for k, envvar in (("config_root", "PORTAGE_CONFIGROOT"), ("target_root", "ROOT")):
8047 kwargs[k] = os.environ.get(envvar, "/")
# _initializing_globals flags that create_trees() is being run from
# here (its lifetime is only the create_trees() call below).
8049 global _initializing_globals
8050 _initializing_globals = True
8051 db = create_trees(**kwargs)
8052 del _initializing_globals
8054 settings = db["/"]["vartree"].settings
8058 settings = db[myroot]["vartree"].settings
8061 root = settings["ROOT"]
8064 # ========================================================================
8066 # These attributes should not be used
8067 # within Portage under any circumstances.
8068 # ========================================================================
8069 archlist = settings.archlist()
8070 features = settings.features
8071 groups = settings["ACCEPT_KEYWORDS"].split()
8072 pkglines = settings.packages
8073 selinux_enabled = settings.selinux_enabled()
8074 thirdpartymirrors = settings.thirdpartymirrors()
8075 usedefaults = settings.use_defs
8077 if os.path.isdir(PROFILE_PATH):
8078 profiledir = PROFILE_PATH
# flushmtimedb is kept only for backward compatibility; it just warns.
8079 def flushmtimedb(record):
8080 writemsg("portage.flushmtimedb() is DEPRECATED\n")
8081 # ========================================================================
8083 # These attributes should not be used
8084 # within Portage under any circumstances.
8085 # ========================================================================
# Module initialization: install lazy proxies for the deprecated legacy
# globals so that importing portage stays cheap until one is touched.
# NOTE(review): the continuation line of the for-loop tuple (original
# line 8101, presumably containing "flushmtimedb" and the closing paren)
# is elided from this listing.  Code is left byte-identical.
8089 _mtimedb_initialized = False
8090 mtimedb = _MtimedbProxy("mtimedb")
8091 mtimedbfile = _MtimedbProxy("mtimedbfile")
8093 _portdb_initialized = False
8094 portdb = _PortdbProxy()
8096 _globals_initialized = False
# Every remaining legacy name resolves through _LegacyGlobalProxy, which
# calls init_legacy_globals() on first attribute access.
8098 for k in ("db", "settings", "root", "selinux_enabled",
8099 "archlist", "features", "groups",
8100 "pkglines", "thirdpartymirrors", "usedefaults", "profiledir",
8102 globals()[k] = _LegacyGlobalProxy(k)
# Register fallback codecs early in case the encodings directory is
# absent (minimal stage1/stage2 python).
8104 _ensure_default_encoding()
8109 # ============================================================================
8110 # ============================================================================