1 # portage.py -- core Portage functionality
2 # Copyright 1998-2004 Gentoo Foundation
3 # Distributed under the terms of the GNU General Public License v2
7 VERSION="$Rev$"[6:-2] + "-svn"
9 # ===========================================================================
10 # START OF IMPORTS -- START OF IMPORTS -- START OF IMPORTS -- START OF IMPORT
11 # ===========================================================================
16 print "Failed to import sys! Something is _VERY_ wrong with python."
20 import copy, errno, os, re, shutil, time, types
24 import pickle as cPickle
28 from time import sleep
29 from random import shuffle
31 if getattr(__builtins__, "set", None) is None:
32 from sets import Set as set
33 from itertools import chain, izip
34 except ImportError, e:
35 sys.stderr.write("\n\n")
36 sys.stderr.write("!!! Failed to complete python imports. These are internal modules for\n")
37 sys.stderr.write("!!! python and failure here indicates that you have a problem with python\n")
38 sys.stderr.write("!!! itself and thus portage is not able to continue processing.\n\n")
40 sys.stderr.write("!!! You might consider starting python with verbose flags to see what has\n")
41 sys.stderr.write("!!! gone wrong. Here is the information we got for this exception:\n")
42 sys.stderr.write(" "+str(e)+"\n\n");
46 # XXX: This should get renamed to bsd_chflags, I think.
53 from cache.cache_errors import CacheError
58 from portage_dep import dep_getcpv, dep_getkey, get_operator, \
59 isjustname, isspecific, isvalidatom, \
60 match_from_list, match_to_list, best_match_to_list
62 # XXX: This needs to get cleaned up.
64 from output import bold, colorize, green, red, yellow
67 from portage_const import VDB_PATH, PRIVATE_PATH, CACHE_PATH, DEPCACHE_PATH, \
68 USER_CONFIG_PATH, MODULES_FILE_PATH, CUSTOM_PROFILE_PATH, PORTAGE_BASE_PATH, \
69 PORTAGE_BIN_PATH, PORTAGE_PYM_PATH, PROFILE_PATH, LOCALE_DATA_PATH, \
70 EBUILD_SH_BINARY, SANDBOX_BINARY, BASH_BINARY, \
71 MOVE_BINARY, PRELINK_BINARY, WORLD_FILE, MAKE_CONF_FILE, MAKE_DEFAULTS_FILE, \
72 DEPRECATED_PROFILE_FILE, USER_VIRTUALS_FILE, EBUILD_SH_ENV_FILE, \
73 INVALID_ENV_FILE, CUSTOM_MIRRORS_FILE, CONFIG_MEMORY_FILE,\
74 INCREMENTALS, EAPI, MISC_SH_BINARY
76 from portage_data import ostype, lchown, userland, secpass, uid, wheelgid, \
77 portage_uid, portage_gid, userpriv_groups
78 from portage_manifest import Manifest
81 from portage_util import atomic_ofstream, apply_secpass_permissions, apply_recursive_permissions, \
82 dump_traceback, getconfig, grabdict, grabdict_package, grabfile, grabfile_package, \
83 map_dictlist_vals, new_protect_filename, normalize_path, \
84 pickle_read, pickle_write, stack_dictlist, stack_dicts, stack_lists, \
85 unique_array, varexpand, writedict, writemsg, writemsg_stdout, write_atomic
86 import portage_exception
90 from portage_exec import atexit_register, run_exitfuncs
91 from portage_locks import unlockfile,unlockdir,lockfile,lockdir
92 import portage_checksum
93 from portage_checksum import perform_md5,perform_checksum,prelink_capable
95 from portage_localization import _
96 from portage_update import dep_transform, fixdbentries, grab_updates, \
97 parse_updates, update_config_files, update_dbentries
99 # Need these functions directly in portage namespace to not break every external tool in existence
100 from portage_versions import best, catpkgsplit, catsplit, pkgcmp, \
101 pkgsplit, vercmp, ververify
103 # endversion and endversion_keys are for backward compatibility only.
104 from portage_versions import endversion_keys
105 from portage_versions import suffix_value as endversion
107 except ImportError, e:
108 sys.stderr.write("\n\n")
109 sys.stderr.write("!!! Failed to complete portage imports. There are internal modules for\n")
110 sys.stderr.write("!!! portage and failure here indicates that you have a problem with your\n")
111 sys.stderr.write("!!! installation of portage. Please try a rescue portage located in the\n")
112 sys.stderr.write("!!! portage tree under '/usr/portage/sys-apps/portage/files/' (default).\n")
113 sys.stderr.write("!!! There is a README.RESCUE file that details the steps required to perform\n")
114 sys.stderr.write("!!! a recovery of portage.\n")
115 sys.stderr.write(" "+str(e)+"\n\n")
120 import portage_selinux as selinux
122 writemsg("!!! SELinux not loaded: %s\n" % str(e), noiselevel=-1)
127 # ===========================================================================
128 # END OF IMPORTS -- END OF IMPORTS -- END OF IMPORTS -- END OF IMPORTS -- END
129 # ===========================================================================
133 modname = ".".join(name.split(".")[:-1])
134 mod = __import__(modname)
135 components = name.split('.')
136 for comp in components[1:]:
137 mod = getattr(mod, comp)
def best_from_dict(key, top_dict, key_order, EmptyOnError=1, FullCopy=1, AllowEmpty=1):
	"""Look up key in the sub-dicts of top_dict, trying them in key_order.

	@param key: key to look up in each top_dict[x]
	@param top_dict: a dict of dicts
	@param key_order: iterable of top_dict keys, highest priority first
	@param EmptyOnError: when true, return "" instead of raising KeyError
		if nothing matches
	@param FullCopy: when true, return a deep copy of the matched value
		so the caller can mutate it safely
	@param AllowEmpty: retained for backward compatibility; unused
	@returns: the first matching value (deep-copied when FullCopy), or ""
	@raises KeyError: when nothing matches and EmptyOnError is false
	"""
	for x in key_order:
		# "in" membership tests instead of the Python-2-only dict.has_key().
		if x in top_dict and key in top_dict[x]:
			if FullCopy:
				return copy.deepcopy(top_dict[x][key])
			return top_dict[x][key]
	if EmptyOnError:
		return ""
	# Parenthesized raise form is valid in both Python 2 and Python 3,
	# unlike the old "raise KeyError, msg" statement syntax.
	raise KeyError("Key not found in list; '%s'" % key)
153 "this fixes situations where the current directory doesn't exist"
156 except OSError: #dir doesn't exist
def abssymlink(symlink):
	"""Read a symlink and return its absolute, normalized target.

	Relative link targets are resolved against the directory that
	contains the symlink itself.

	@param symlink: path to an existing symlink
	@returns: absolute normalized path of the link target
	"""
	mylink = os.readlink(symlink)
	if not os.path.isabs(mylink):
		# Only relative targets are interpreted relative to the symlink's
		# directory; the visible original prepended dirname unconditionally,
		# which mangles absolute targets.  os.path.join discards the first
		# component when the second is absolute, so this stays safe.
		mylink = os.path.join(os.path.dirname(symlink), mylink)
	return os.path.normpath(mylink)
def cacheddir(my_original_path, ignorecvs, ignorelist, EmptyOnError, followSymlinks=True):
	"""Cached directory listing.

	Stats mypath and, when its mtime differs from the cached value (or is
	too recent to be trustworthy), re-reads the directory and stores
	(mtime, names, types) in the module-global dircache.  Returns a pair
	(names, types) filtered against ignorelist (or ".#" lock files when
	ignorecvs is set).

	NOTE(review): several lines of this function are missing from this
	excerpt; the visible code is annotated as-is and is not syntactically
	complete here.
	"""
	global cacheHit,cacheMiss,cacheStale
	mypath = normalize_path(my_original_path)
	if dircache.has_key(mypath):
		# Cache hit: start from the previously recorded state.
		cached_mtime, list, ftype = dircache[mypath]
		# Cache miss: the -1 sentinel mtime forces the re-scan below.
		cached_mtime, list, ftype = -1, [], []
		pathstat = os.stat(mypath)
		if stat.S_ISDIR(pathstat[stat.ST_MODE]):
			mtime = pathstat[stat.ST_MTIME]
			# Non-directory paths are rejected.  NOTE(review): this raise
			# appears to sit under an else: branch not visible here.
			raise portage_exception.DirectoryNotFound(mypath)
	except (IOError,OSError,portage_exception.PortageException):
	# Python returns mtime in seconds, so if it was changed in the last few seconds, it could be invalid
	if mtime != cached_mtime or time.time() - mtime < 4:
		if dircache.has_key(mypath):
			list = os.listdir(mypath)
				# os.stat() follows symlinks; os.lstat() does not.  The
				# followSymlinks branching is not visible in this excerpt.
				pathstat = os.stat(mypath+"/"+x)
				pathstat = os.lstat(mypath+"/"+x)
				# Classify each entry; the numeric codes assigned in the
				# branch bodies are not visible in this excerpt.
				if stat.S_ISREG(pathstat[stat.ST_MODE]):
				elif stat.S_ISDIR(pathstat[stat.ST_MODE]):
				elif stat.S_ISLNK(pathstat[stat.ST_MODE]):
			except (IOError, OSError):
		# Record the freshly scanned listing for the next call.
		dircache[mypath] = mtime, list, ftype

	# Filter the listing: with ignorecvs, keep names that are not ".#"
	# lock files; otherwise keep anything not in ignorelist.
	# NOTE(review): "list" shadows the builtin -- pre-existing style.
	for x in range(0, len(list)):
		if(ignorecvs and (len(list[x]) > 2) and (list[x][:2]!=".#")):
			ret_list.append(list[x])
			ret_ftype.append(ftype[x])
		elif (list[x] not in ignorelist):
			ret_list.append(list[x])
			ret_ftype.append(ftype[x])

	writemsg("cacheddirStats: H:%d/M:%d/S:%d\n" % (cacheHit, cacheMiss, cacheStale),10)
	return ret_list, ret_ftype
def listdir(mypath, recursive=False, filesonly=False, ignorecvs=False, ignorelist=[], followSymlinks=True,
	EmptyOnError=False, dirsonly=False):
	"""
	Portage-specific implementation of os.listdir

	NOTE(review): ignorelist uses a mutable default ([]); callers must
	never mutate it.  Several lines of this function are missing from
	this excerpt.

	@param mypath: Path whose contents you wish to list
	@type mypath: String
	@param recursive: Recursively scan directories contained within mypath
	@type recursive: Boolean
	@param filesonly: Only return files, not more directories
	@type filesonly: Boolean
	@param ignorecvs: Ignore CVS directories ('CVS','.svn','SCCS')
	@type ignorecvs: Boolean
	@param ignorelist: List of filenames/directories to exclude
	@type ignorelist: List
	@param followSymlinks: Follow Symlink'd files and directories
	@type followSymlinks: Boolean
	@param EmptyOnError: Return [] if an error occurs.
	@type EmptyOnError: Boolean
	@param dirsonly: Only return directories.
	@type dirsonly: Boolean
	@rtype: List
	@returns: A list of files and directories (or just files or just directories) or an empty list.
	"""

	# All stat/readdir work (and caching) is delegated to cacheddir().
	list, ftype = cacheddir(mypath, ignorecvs, ignorelist, EmptyOnError, followSymlinks)

	# Fast path: no filtering or recursion requested.
	if not (filesonly or dirsonly or recursive):

	# Recursive scan: descend into directory entries (ftype code 1),
	# skipping version-control directories when ignorecvs is set.
			if ftype[x]==1 and not (ignorecvs and os.path.basename(list[x]) in ('CVS','.svn','SCCS')):
				l,f = cacheddir(mypath+"/"+list[x], ignorecvs, ignorelist, EmptyOnError,
				# Prefix each child with its parent directory name.
				for y in range(0,len(l)):
					l[y]=list[x]+"/"+l[y]

	# filesonly / dirsonly filtering passes over the accumulated entries;
	# the surrounding conditionals are not visible in this excerpt.
	for x in range(0,len(ftype)):
			rlist=rlist+[list[x]]

	for x in range(0, len(ftype)):
			rlist = rlist + [list[x]]
def flatten(mytokens):
	"""this function now turns a [1,[2,3]] list into
	a [1,2,3] list and returns it.

	@param mytokens: arbitrarily nested list
	@returns: a new, fully flattened list
	"""
	newlist = []
	for x in mytokens:
		# isinstance() instead of the Python-2-only types.ListType compare.
		if isinstance(x, list):
			newlist.extend(flatten(x))
		else:
			newlist.append(x)
	return newlist
304 #beautiful directed graph object
308 """Create an empty digraph"""
310 # { node : ( { child : priority } , { parent : priority } ) }
	def add(self, node, parent, priority=0):
		"""Adds the specified node with the specified parent.

		If the dep is a soft-dep and the node already has a hard
		relationship to the parent, the relationship is left as hard."""

		# self.nodes maps node -> ({child: priority}, {parent: priority});
		# self.order preserves insertion order.  Unknown endpoints are
		# created on first use.
		if node not in self.nodes:
			self.nodes[node] = ({}, {})
			self.order.append(node)
		if parent not in self.nodes:
			self.nodes[parent] = ({}, {})
			self.order.append(parent)
		# Record the parent edge, keeping the highest priority seen.
		# NOTE(review): the unconditional assignments below appear to sit
		# in else: branches whose else lines are missing from this excerpt.
		if parent in self.nodes[node][1]:
			if priority > self.nodes[node][1][parent]:
				self.nodes[node][1][parent] = priority
			self.nodes[node][1][parent] = priority
		# Mirror the edge on the parent's child map.
		if node in self.nodes[parent][0]:
			if priority > self.nodes[parent][0][node]:
				self.nodes[parent][0][node] = priority
			self.nodes[parent][0][node] = priority
	def remove(self, node):
		"""Removes the specified node from the digraph, also removing
		any ties to other nodes in the digraph. Raises KeyError if the
		node doesn't exist."""

		# (The raise for unknown nodes is not visible in this excerpt.)
		if node not in self.nodes:
		# Drop the node from every neighbour's mirror map: it is stored
		# both as a child of its parents and as a parent of its children.
		for parent in self.nodes[node][1]:
			del self.nodes[parent][0][node]
		for child in self.nodes[node][0]:
			del self.nodes[child][1][node]
		self.order.remove(node)
359 def contains(self, node):
360 """Checks if the digraph contains mynode"""
361 return node in self.nodes
364 """Return a list of all nodes in the graph"""
	def child_nodes(self, node, ignore_priority=None):
		"""Return all children of the specified node"""
		if ignore_priority is None:
			# No filtering: every child key qualifies.
			return self.nodes[node][0].keys()
		# Otherwise keep only children whose edge priority is strictly
		# above ignore_priority.  NOTE(review): the accumulator setup and
		# return statement are not visible in this excerpt.
		for child, priority in self.nodes[node][0].iteritems():
			if priority > ignore_priority:
				children.append(child)
377 def parent_nodes(self, node):
378 """Return all parents of the specified node"""
379 return self.nodes[node][1].keys()
	def leaf_nodes(self, ignore_priority=None):
		"""Return all nodes that have no children

		If ignore_soft_deps is True, soft deps are not counted as
		children in calculations."""

		# A node is a leaf when no child edge exceeds ignore_priority.
		# NOTE(review): the accumulator setup, the inner-loop break logic
		# and the return are not visible in this excerpt.
		for node in self.order:
			for child in self.nodes[node][0]:
				if self.nodes[node][0][child] > ignore_priority:
			leaf_nodes.append(node)
	def root_nodes(self, ignore_priority=None):
		"""Return all nodes that have no parents.

		If ignore_soft_deps is True, soft deps are not counted as
		parents in calculations."""

		# Mirror image of leaf_nodes(), scanning the parent maps instead.
		# NOTE(review): the accumulator setup, the inner-loop break logic
		# and the return are not visible in this excerpt.
		for node in self.order:
			for parent in self.nodes[node][1]:
				if self.nodes[node][1][parent] > ignore_priority:
			root_nodes.append(node)
416 """Checks if the digraph is empty"""
417 return len(self.nodes) == 0
421 clone.nodes = copy.deepcopy(self.nodes)
422 clone.order = self.order[:]
	# Backward compatibility: alias kept so older callers using the
	# pre-rename API name keep working.
	allzeros = leaf_nodes
433 def delnode(self, node):
440 leaf_nodes = self.leaf_nodes()
	def hasallzeros(self, ignore_priority=None):
		# True when every node in the graph is currently a leaf, i.e. no
		# effective edges remain.  NOTE(review): the right-hand side of
		# this comparison is not visible in this excerpt.
		return len(self.leaf_nodes(ignore_priority=ignore_priority)) == \
	def debug_print(self):
		# Dump adjacency information to stdout (Python 2 print statements).
		# NOTE(review): the lines printing the node itself, and the else:
		# branch around "(no children)", are not visible in this excerpt.
		for node in self.nodes:
			if self.nodes[node][0]:
				print "(no children)"
			for child in self.nodes[node][0]:
				print "(%s)" % self.nodes[node][0][child]
# Registry of logging-module "finalize" hooks already scheduled via
# atexit_register(), so each hook is registered at most once per process.
_elog_atexit_handlers = []
def elog_process(cpv, mysettings):
	"""Collect the per-phase elog message files from ${T}/logging, filter
	them by PORTAGE_ELOG_CLASSES, and hand the combined log to every
	module named in PORTAGE_ELOG_SYSTEM.

	NOTE(review): several lines of this function are missing from this
	excerpt; the visible code is annotated as-is.

	@param cpv: package the messages belong to
	@param mysettings: config instance (supplies T and PORTAGE_ELOG_* vars)
	"""
	mylogfiles = listdir(mysettings["T"]+"/logging/")
	# shortcut for packages without any messages
	if len(mylogfiles) == 0:
	# exploit listdir() file order so we process log entries in chronological order
	my_elog_classes = set(mysettings.get("PORTAGE_ELOG_CLASSES", "").split())
		# Each file is named "<phase>.<msgtype>"; skip message classes the
		# user did not request, and files for unknown ebuild phases.
		msgfunction, msgtype = f.split(".")
		if msgtype.upper() not in my_elog_classes \
				and msgtype.lower() not in my_elog_classes:
		if msgfunction not in portage_const.EBUILD_PHASES:
			writemsg("!!! can't process invalid log file: %s\n" % f,
		# Group the raw message lines by ebuild phase.
		if not msgfunction in mylogentries:
			mylogentries[msgfunction] = []
		msgcontent = open(mysettings["T"]+"/logging/"+f, "r").readlines()
		mylogentries[msgfunction].append((msgtype, msgcontent))

	# in case the filters matched all messages
	if len(mylogentries) == 0:

	# generate a single string with all log messages
	for phase in portage_const.EBUILD_PHASES:
		if not phase in mylogentries:
		for msgtype,msgcontent in mylogentries[phase]:
			fulllog += "%s: %s\n" % (msgtype, phase)
			for line in msgcontent:

	# pass the processing to the individual modules
	logsystems = mysettings["PORTAGE_ELOG_SYSTEM"].split()
		# - is nicer than _ for module names, so allow people to use it.
		s = s.replace("-", "_")
		# FIXME: ugly ad.hoc import code
		# TODO: implement a common portage module loader
		logmodule = __import__("elog_modules.mod_"+s)
		m = getattr(logmodule, "mod_"+s)
		def timeout_handler(signum, frame):
			# Abort a hung logging module via SIGALRM.
			raise portage_exception.PortageException(
				"Timeout in elog_process for system '%s'" % s)
		signal.signal(signal.SIGALRM, timeout_handler)
		# Timeout after one minute (in case something like the mail
		m.process(mysettings, cpv, mylogentries, fulllog)
		# Register the module's finalize() hook, but only once.
		if hasattr(m, "finalize") and not m.finalize in _elog_atexit_handlers:
			_elog_atexit_handlers.append(m.finalize)
			atexit_register(m.finalize, mysettings)
	except (ImportError, AttributeError), e:
		writemsg("!!! Error while importing logging modules " + \
			"while loading \"mod_%s\":\n" % str(s))
		writemsg("%s\n" % str(e), noiselevel=-1)
	except portage_exception.PortageException, e:
		writemsg("%s\n" % str(e), noiselevel=-1)

	# clean logfiles to avoid repetitions
		os.unlink(os.path.join(mysettings["T"], "logging", f))
#parse /etc/env.d and generate /etc/profile.env
def env_update(makelinks=1, target_root=None, prev_mtimes=None, contents=None):
	"""Regenerate the environment files derived from ${ROOT}etc/env.d:
	profile.env, csh.env, ld.so.conf and prelink.conf, then run ldconfig
	when the library search path may have changed.

	NOTE(review): many lines of this function are missing from this
	excerpt; the visible code is annotated as-is.

	@param makelinks: passed through to ldconfig (allow symlink updates)
	@param target_root: root to operate on (defaults when None)
	@param prev_mtimes: mtime cache used to detect changed lib dirs
	@param contents: optional package contents used to decide whether
		ldconfig can be skipped
	"""
	if target_root is None:
	if prev_mtimes is None:
		prev_mtimes = mtimedb["ldpath"]
	envd_dir = os.path.join(target_root, "etc", "env.d")
	portage_util.ensure_dirs(envd_dir, mode=0755)
	fns = listdir(envd_dir, EmptyOnError=1)
		# Only process files named like "NNname"; skip hidden files and
		# editor backups.
		if not x[0].isdigit() or not x[1].isdigit():
		if x.startswith(".") or x.endswith("~") or x.endswith(".bak"):
	# Cumulative variables merged with space vs. colon separators; env.d
	# files may extend these sets via SPACE_SEPARATED / COLON_SEPARATED.
	space_separated = set(["CONFIG_PROTECT", "CONFIG_PROTECT_MASK"])
	colon_separated = set(["ADA_INCLUDE_PATH", "ADA_OBJECTS_PATH",
		"CLASSPATH", "INFODIR", "INFOPATH", "KDEDIRS", "LDPATH", "MANPATH",
		"PATH", "PKG_CONFIG_PATH", "PRELINK_PATH", "PRELINK_PATH_MASK",
		"PYTHONPATH", "ROOTPATH"])
		file_path = os.path.join(envd_dir, x)
			myconfig = getconfig(file_path, expand=False)
		except portage_exception.ParseError, e:
			writemsg("!!! '%s'\n" % str(e), noiselevel=-1)
			# broken symlink or file removed by a concurrent process
			writemsg("!!! File Not Found: '%s'\n" % file_path, noiselevel=-1)
		config_list.append(myconfig)
		if "SPACE_SEPARATED" in myconfig:
			space_separated.update(myconfig["SPACE_SEPARATED"].split())
			del myconfig["SPACE_SEPARATED"]
		if "COLON_SEPARATED" in myconfig:
			colon_separated.update(myconfig["COLON_SEPARATED"].split())
			del myconfig["COLON_SEPARATED"]
	# Merge each cumulative variable across all env.d files, recording the
	# merged list in specials and the joined string in env.
	for var in space_separated:
		for myconfig in config_list:
				mylist.extend(filter(None, myconfig[var].split()))
				del myconfig[var] # prepare for env.update(myconfig)
			env[var] = " ".join(mylist)
			specials[var] = mylist
	for var in colon_separated:
		for myconfig in config_list:
				mylist.extend(filter(None, myconfig[var].split(":")))
				del myconfig[var] # prepare for env.update(myconfig)
			env[var] = ":".join(mylist)
			specials[var] = mylist
	for myconfig in config_list:
		"""Cumulative variables have already been deleted from myconfig so that
		they won't be overwritten by this dict.update call."""
	# Read the existing ld.so.conf so we can tell whether it changed.
	ldsoconf_path = os.path.join(target_root, "etc", "ld.so.conf")
		myld = open(ldsoconf_path)
		myldlines=myld.readlines()
		#each line has at least one char (a newline)
	except (IOError, OSError), e:
		if e.errno != errno.ENOENT:
	ld_cache_update=False
	newld = specials["LDPATH"]
		#ld.so.conf needs updating and ldconfig needs to be run
		myfd = atomic_ofstream(ldsoconf_path)
		myfd.write("# ld.so.conf autogenerated by env-update; make all changes to\n")
		myfd.write("# contents of /etc/env.d directory\n")
		for x in specials["LDPATH"]:
	# Update prelink.conf if we are prelink-enabled
		newprelink = atomic_ofstream(
			os.path.join(target_root, "etc", "prelink.conf"))
		newprelink.write("# prelink.conf autogenerated by env-update; make all changes to\n")
		newprelink.write("# contents of /etc/env.d directory\n")
		for x in ["/bin","/sbin","/usr/bin","/usr/sbin","/lib","/usr/lib"]:
			newprelink.write("-l "+x+"\n");
		for x in specials["LDPATH"]+specials["PATH"]+specials["PRELINK_PATH"]:
			for y in specials["PRELINK_PATH_MASK"]:
			newprelink.write("-h "+x+"\n")
		for x in specials["PRELINK_PATH_MASK"]:
			newprelink.write("-b "+x+"\n")
	# Portage stores mtimes with 1 second granularity but in >=python-2.5 finer
	# granularity is possible. In order to avoid the potential ambiguity of
	# mtimes that differ by less than 1 second, sleep here if any of the
	# directories have been modified during the current second.
	sleep_for_mtime_granularity = False
	current_time = long(time.time())
	mtime_changed = False
	for lib_dir in portage_util.unique_array(specials["LDPATH"]+['usr/lib','usr/lib64','usr/lib32','lib','lib64','lib32']):
		x = os.path.join(target_root, lib_dir.lstrip(os.sep))
			newldpathtime = long(os.stat(x).st_mtime)
			lib_dirs.add(normalize_path(x))
			if oe.errno == errno.ENOENT:
				# ignore this path because it doesn't exist
		if newldpathtime == current_time:
			sleep_for_mtime_granularity = True
		if prev_mtimes[x] == newldpathtime:
			prev_mtimes[x] = newldpathtime
			prev_mtimes[x] = newldpathtime
		ld_cache_update = True
	# When a package's contents include no objects/symlinks inside a lib
	# dir, ldconfig can be skipped entirely.
		not ld_cache_update and \
		contents is not None:
		libdir_contents_changed = False
		for mypath, mydata in contents.iteritems():
			if mydata[0] not in ("obj","sym"):
			head, tail = os.path.split(mypath)
				libdir_contents_changed = True
		if not libdir_contents_changed:
	# Only run ldconfig as needed
	if (ld_cache_update or makelinks):
		# ldconfig has very different behaviour between FreeBSD and Linux
		if ostype=="Linux" or ostype.lower().endswith("gnu"):
			# We can't update links if we haven't cleaned other versions first, as
			# an older package installed ON TOP of a newer version will cause ldconfig
			# to overwrite the symlinks we just made. -X means no links. After 'clean'
			# we can safely create links.
			writemsg(">>> Regenerating %setc/ld.so.cache...\n" % target_root)
				commands.getstatusoutput("cd / ; /sbin/ldconfig -r '%s'" % target_root)
				commands.getstatusoutput("cd / ; /sbin/ldconfig -X -r '%s'" % target_root)
		elif ostype in ("FreeBSD","DragonFly"):
			writemsg(">>> Regenerating %svar/run/ld-elf.so.hints...\n" % target_root)
			commands.getstatusoutput(
				"cd / ; /sbin/ldconfig -elf -i -f '%svar/run/ld-elf.so.hints' '%setc/ld.so.conf'" % \
				(target_root, target_root))
	del specials["LDPATH"]

	penvnotice = "# THIS FILE IS AUTOMATICALLY GENERATED BY env-update.\n"
	penvnotice += "# DO NOT EDIT THIS FILE. CHANGES TO STARTUP PROFILES\n"
	cenvnotice = penvnotice[:]
	penvnotice += "# GO INTO /etc/profile NOT /etc/profile.env\n\n"
	cenvnotice += "# GO INTO /etc/csh.cshrc NOT /etc/csh.env\n\n"

	#create /etc/profile.env for bash support
	outfile = atomic_ofstream(os.path.join(target_root, "etc", "profile.env"))
	outfile.write(penvnotice)
	# LDPATH is omitted from the shell profiles; ld.so.conf covers it.
	env_keys = [ x for x in env if x != "LDPATH" ]
		outfile.write("export %s='%s'\n" % (x, env[x]))

	#create /etc/csh.env for (t)csh support
	outfile = atomic_ofstream(os.path.join(target_root, "etc", "csh.env"))
	outfile.write(cenvnotice)
		outfile.write("setenv %s '%s'\n" % (x, env[x]))

	# Wait out the current second so 1-second-granularity mtimes recorded
	# above are unambiguous on the next run.
	if sleep_for_mtime_granularity:
		while current_time == long(time.time()):
def ExtractKernelVersion(base_dir):
	"""
	Try to figure out what kernel version we are running

	NOTE(review): several lines of this function are missing from this
	excerpt; the visible code is annotated as-is.

	@param base_dir: Path to sources (usually /usr/src/linux)
	@type base_dir: string
	@rtype: tuple( version[string], error[string])
	@returns:
	1. tuple( version[string], error[string])
	Either version or error is populated (but never both)
	"""
	pathname = os.path.join(base_dir, 'Makefile')
		f = open(pathname, 'r')
	except OSError, details:
		return (None, str(details))
	except IOError, details:
		return (None, str(details))

	# Read the first few Makefile lines (loop header not visible here).
		lines.append(f.readline())
	except OSError, details:
		return (None, str(details))
	except IOError, details:
		return (None, str(details))

	lines = [l.strip() for l in lines]

	#XXX: The following code relies on the ordering of vars within the Makefile
	# split on the '=' then remove annoying whitespace
	items = line.split("=")
	items = [i.strip() for i in items]
	# Assemble "VERSION.PATCHLEVEL.SUBLEVEL[EXTRAVERSION]"; the branch
	# bodies are not visible in this excerpt.
	if items[0] == 'VERSION' or \
		items[0] == 'PATCHLEVEL':
	elif items[0] == 'SUBLEVEL':
	elif items[0] == 'EXTRAVERSION' and \
		items[-1] != items[0]:

	# Grab a list of files named localversion* and sort them
	localversions = os.listdir(base_dir)
	# Iterate in reverse so entries can be dropped while scanning.
	for x in range(len(localversions)-1,-1,-1):
		if localversions[x][:12] != "localversion":

	# Append the contents of each to the version string, stripping ALL whitespace
	for lv in localversions:
		version += "".join( " ".join( grabfile( base_dir+ "/" + lv ) ).split() )

	# Check the .config for a CONFIG_LOCALVERSION and append that too, also stripping whitespace
	kernelconfig = getconfig(base_dir+"/.config")
	if kernelconfig and kernelconfig.has_key("CONFIG_LOCALVERSION"):
		version += "".join(kernelconfig["CONFIG_LOCALVERSION"].split())

	return (version,None)
def autouse(myvartree, use_cache=1, mysettings=None):
	"""
	autouse returns a list of USE variables auto-enabled to packages being installed

	NOTE(review): several lines of this function are missing from this
	excerpt; the visible code is annotated as-is.

	@param myvartree: Instance of the vartree class (from /var/db/pkg...)
	@type myvartree: vartree
	@param use_cache: read values from cache
	@type use_cache: Boolean
	@param mysettings: Instance of config
	@type mysettings: config
	@rtype: string
	@returns: A string containing a list of USE variables that are enabled via use.defaults
	"""
	if mysettings is None:
		# Fall back to the module-global settings instance.
		mysettings = settings
	if mysettings.profile_path is None:
	# use.defaults maps a USE flag to the dep atoms that trigger it; the
	# flag is enabled only when every listed dep is installed.
	usedefaults = mysettings.use_defs
	for myuse in usedefaults:
		for mydep in usedefaults[myuse]:
			if not myvartree.dep_match(mydep,use_cache=True):
			myusevars += " "+myuse
def check_config_instance(test):
	"""Validate that *test* is a portage.config instance.

	@param test: object expected to be a config instance
	@raises TypeError: if test is falsy or its class is not portage.config
	"""
	if not test or (str(test.__class__) != 'portage.config'):
		# Parenthesized raise form is valid in both Python 2 and Python 3,
		# unlike the old "raise TypeError, msg" statement syntax.
		raise TypeError("Invalid type for config object: %s" % test.__class__)
877 This class encompasses the main portage configuration. Data is pulled from
878 ROOT/PORTDIR/profiles/, from ROOT/etc/make.profile incrementally through all
879 parent profiles as well as from ROOT/PORTAGE_CONFIGROOT/* for user specified
882 Generally if you need data like USE flags, FEATURES, environment variables,
883 virtuals ...etc you look in here.
886 def __init__(self, clone=None, mycpv=None, config_profile_path=None,
887 config_incrementals=None, config_root=None, target_root=None,
890 @param clone: If provided, init will use deepcopy to copy by value the instance.
891 @type clone: Instance of config class.
892 @param mycpv: CPV to load up (see setcpv), this is the same as calling init with mycpv=None
893 and then calling instance.setcpv(mycpv).
895 @param config_profile_path: Configurable path to the profile (usually PROFILE_PATH from portage_const)
896 @type config_profile_path: String
897 @param config_incrementals: List of incremental variables (usually portage_const.INCREMENTALS)
898 @type config_incrementals: List
899 @param config_root: path to read local config from (defaults to "/", see PORTAGE_CONFIGROOT)
900 @type config_root: String
901 @param target_root: __init__ override of $ROOT env variable.
902 @type target_root: String
903 @param local_config: Enables loading of local config (/etc/portage); used most by repoman to
904 ignore local config (keywording and unmasking)
905 @type local_config: Boolean
908 debug = os.environ.get("PORTAGE_DEBUG") == "1"
910 self.already_in_regenerate = 0
915 self.modifiedkeys = []
920 self.dirVirtuals = None
923 # Virtuals obtained from the vartree
924 self.treeVirtuals = {}
925 # Virtuals by user specification. Includes negatives.
926 self.userVirtuals = {}
927 # Virtual negatives from user specifications.
928 self.negVirtuals = {}
930 self.user_profile_dir = None
931 self.local_config = local_config
934 self.incrementals = copy.deepcopy(clone.incrementals)
935 self.profile_path = copy.deepcopy(clone.profile_path)
936 self.user_profile_dir = copy.deepcopy(clone.user_profile_dir)
937 self.local_config = copy.deepcopy(clone.local_config)
939 self.module_priority = copy.deepcopy(clone.module_priority)
940 self.modules = copy.deepcopy(clone.modules)
942 self.depcachedir = copy.deepcopy(clone.depcachedir)
944 self.packages = copy.deepcopy(clone.packages)
945 self.virtuals = copy.deepcopy(clone.virtuals)
947 self.treeVirtuals = copy.deepcopy(clone.treeVirtuals)
948 self.userVirtuals = copy.deepcopy(clone.userVirtuals)
949 self.negVirtuals = copy.deepcopy(clone.negVirtuals)
951 self.use_defs = copy.deepcopy(clone.use_defs)
952 self.usemask = copy.deepcopy(clone.usemask)
953 self.usemask_list = copy.deepcopy(clone.usemask_list)
954 self.pusemask_list = copy.deepcopy(clone.pusemask_list)
955 self.useforce = copy.deepcopy(clone.useforce)
956 self.useforce_list = copy.deepcopy(clone.useforce_list)
957 self.puseforce_list = copy.deepcopy(clone.puseforce_list)
958 self.puse = copy.deepcopy(clone.puse)
959 self.make_defaults_use = copy.deepcopy(clone.make_defaults_use)
960 self.pkgprofileuse = copy.deepcopy(clone.pkgprofileuse)
961 self.mycpv = copy.deepcopy(clone.mycpv)
963 self.configlist = copy.deepcopy(clone.configlist)
964 self.lookuplist = self.configlist[:]
965 self.lookuplist.reverse()
967 "env.d": self.configlist[0],
968 "pkginternal": self.configlist[1],
969 "globals": self.configlist[2],
970 "defaults": self.configlist[3],
971 "conf": self.configlist[4],
972 "pkg": self.configlist[5],
973 "auto": self.configlist[6],
974 "backupenv": self.configlist[7],
975 "env": self.configlist[8] }
976 self.profiles = copy.deepcopy(clone.profiles)
977 self.backupenv = self.configdict["backupenv"]
978 self.pusedict = copy.deepcopy(clone.pusedict)
979 self.categories = copy.deepcopy(clone.categories)
980 self.pkeywordsdict = copy.deepcopy(clone.pkeywordsdict)
981 self.pmaskdict = copy.deepcopy(clone.pmaskdict)
982 self.punmaskdict = copy.deepcopy(clone.punmaskdict)
983 self.prevmaskdict = copy.deepcopy(clone.prevmaskdict)
984 self.pprovideddict = copy.deepcopy(clone.pprovideddict)
985 self.dirVirtuals = copy.deepcopy(clone.dirVirtuals)
986 self.treeVirtuals = copy.deepcopy(clone.treeVirtuals)
987 self.features = copy.deepcopy(clone.features)
990 # backupenv is for calculated incremental variables.
991 self.backupenv = os.environ.copy()
993 # Clean up pollution from portage_data so that it doesn't
994 # interfere with repoman.
995 self.backupenv.pop("USERLAND", None)
997 def check_var_directory(varname, var):
998 if not os.path.isdir(var):
999 writemsg(("!!! Error: %s='%s' is not a directory. " + \
1000 "Please correct this.\n") % (varname, var),
1002 raise portage_exception.DirectoryNotFound(var)
1004 if config_root is None:
1007 config_root = normalize_path(os.path.abspath(
1008 config_root)).rstrip(os.path.sep) + os.path.sep
1010 check_var_directory("PORTAGE_CONFIGROOT", config_root)
1012 self.depcachedir = DEPCACHE_PATH
1014 if not config_profile_path:
1015 config_profile_path = \
1016 os.path.join(config_root, PROFILE_PATH.lstrip(os.path.sep))
1017 if os.path.isdir(config_profile_path):
1018 self.profile_path = config_profile_path
1020 self.profile_path = None
1022 self.profile_path = config_profile_path[:]
1024 if not config_incrementals:
1025 writemsg("incrementals not specified to class config\n")
1026 self.incrementals = copy.deepcopy(portage_const.INCREMENTALS)
1028 self.incrementals = copy.deepcopy(config_incrementals)
1030 self.module_priority = ["user","default"]
1032 self.modules["user"] = getconfig(
1033 os.path.join(config_root, MODULES_FILE_PATH.lstrip(os.path.sep)))
1034 if self.modules["user"] is None:
1035 self.modules["user"] = {}
1036 self.modules["default"] = {
1037 "portdbapi.metadbmodule": "cache.metadata.database",
1038 "portdbapi.auxdbmodule": "cache.flat_hash.database",
1044 # back up our incremental variables:
1046 # configlist will contain: [ env.d, globals, defaults, conf, pkg, auto, backupenv, env ]
1047 self.configlist.append({})
1048 self.configdict["env.d"] = self.configlist[-1]
1050 self.configlist.append({})
1051 self.configdict["pkginternal"] = self.configlist[-1]
1053 # The symlink might not exist or might not be a symlink.
1054 if self.profile_path is None:
1058 def addProfile(currentPath):
1059 parentsFile = os.path.join(currentPath, "parent")
1060 if os.path.exists(parentsFile):
1061 parents = grabfile(parentsFile)
1063 raise portage_exception.ParseError(
1064 "Empty parent file: '%s'" % parents_file)
1065 for parentPath in parents:
1066 parentPath = normalize_path(os.path.join(
1067 currentPath, parentPath))
1068 if os.path.exists(parentPath):
1069 addProfile(parentPath)
1071 raise portage_exception.ParseError(
1072 "Parent '%s' not found: '%s'" % \
1073 (parentPath, parentsFile))
1074 self.profiles.append(currentPath)
1075 addProfile(os.path.realpath(self.profile_path))
1077 custom_prof = os.path.join(
1078 config_root, CUSTOM_PROFILE_PATH.lstrip(os.path.sep))
1079 if os.path.exists(custom_prof):
1080 self.user_profile_dir = custom_prof
1081 self.profiles.append(custom_prof)
1084 self.packages_list = [grabfile_package(os.path.join(x, "packages")) for x in self.profiles]
1085 self.packages = stack_lists(self.packages_list, incremental=1)
1086 del self.packages_list
1087 #self.packages = grab_stacked("packages", self.profiles, grabfile, incremental_lines=1)
1090 self.prevmaskdict={}
1091 for x in self.packages:
1092 mycatpkg=dep_getkey(x)
1093 if not self.prevmaskdict.has_key(mycatpkg):
1094 self.prevmaskdict[mycatpkg]=[x]
1096 self.prevmaskdict[mycatpkg].append(x)
1098 # get profile-masked use flags -- INCREMENTAL Child over parent
1099 self.usemask_list = [grabfile(os.path.join(x, "use.mask")) \
1100 for x in self.profiles]
1101 self.usemask = set(stack_lists(
1102 self.usemask_list, incremental=True))
1103 use_defs_lists = [grabdict(os.path.join(x, "use.defaults")) for x in self.profiles]
1104 self.use_defs = stack_dictlist(use_defs_lists, incremental=True)
1107 self.pusemask_list = []
1108 rawpusemask = [grabdict_package(
1109 os.path.join(x, "package.use.mask")) \
1110 for x in self.profiles]
1111 for i in xrange(len(self.profiles)):
1113 for k, v in rawpusemask[i].iteritems():
1114 cpdict.setdefault(dep_getkey(k), {})[k] = v
1115 self.pusemask_list.append(cpdict)
1118 self.pkgprofileuse = []
1119 rawprofileuse = [grabdict_package(
1120 os.path.join(x, "package.use"), juststrings=True) \
1121 for x in self.profiles]
1122 for i in xrange(len(self.profiles)):
1124 for k, v in rawprofileuse[i].iteritems():
1125 cpdict.setdefault(dep_getkey(k), {})[k] = v
1126 self.pkgprofileuse.append(cpdict)
1129 self.useforce_list = [grabfile(os.path.join(x, "use.force")) \
1130 for x in self.profiles]
1131 self.useforce = set(stack_lists(
1132 self.useforce_list, incremental=True))
1134 self.puseforce_list = []
1135 rawpuseforce = [grabdict_package(
1136 os.path.join(x, "package.use.force")) \
1137 for x in self.profiles]
1138 for i in xrange(len(self.profiles)):
1140 for k, v in rawpuseforce[i].iteritems():
1141 cpdict.setdefault(dep_getkey(k), {})[k] = v
1142 self.puseforce_list.append(cpdict)
1146 self.mygcfg = getconfig(os.path.join(config_root, "etc", "make.globals"))
1148 if self.mygcfg is None:
1150 except SystemExit, e:
1152 except Exception, e:
1155 writemsg("!!! %s\n" % (e), noiselevel=-1)
1156 if not isinstance(e, EnvironmentError):
1157 writemsg("!!! Incorrect multiline literals can cause " + \
1158 "this. Do not use them.\n", noiselevel=-1)
1160 self.configlist.append(self.mygcfg)
1161 self.configdict["globals"]=self.configlist[-1]
1163 self.make_defaults_use = []
1167 mygcfg_dlists = [getconfig(os.path.join(x, "make.defaults")) for x in self.profiles]
1168 for cfg in mygcfg_dlists:
1170 self.make_defaults_use.append(cfg.get("USE", ""))
1172 self.make_defaults_use.append("")
1173 self.mygcfg = stack_dicts(mygcfg_dlists, incrementals=portage_const.INCREMENTALS, ignore_none=1)
1174 #self.mygcfg = grab_stacked("make.defaults", self.profiles, getconfig)
1175 if self.mygcfg is None:
1177 except SystemExit, e:
1179 except Exception, e:
1182 writemsg("!!! %s\n" % (e), noiselevel=-1)
1183 if not isinstance(e, EnvironmentError):
1184 writemsg("!!! 'rm -Rf /usr/portage/profiles; " + \
1185 "emerge sync' may fix this. If it does\n",
1187 writemsg("!!! not then please report this to " + \
1188 "bugs.gentoo.org and, if possible, a dev\n",
1190 writemsg("!!! on #gentoo (irc.freenode.org)\n",
1193 self.configlist.append(self.mygcfg)
1194 self.configdict["defaults"]=self.configlist[-1]
1197 self.mygcfg = getconfig(
1198 os.path.join(config_root, MAKE_CONF_FILE.lstrip(os.path.sep)),
1199 allow_sourcing=True)
1200 if self.mygcfg is None:
1202 except SystemExit, e:
1204 except Exception, e:
1207 writemsg("!!! %s\n" % (e), noiselevel=-1)
1208 if not isinstance(e, EnvironmentError):
1209 writemsg("!!! Incorrect multiline literals can cause " + \
1210 "this. Do not use them.\n", noiselevel=-1)
1213 # Allow ROOT setting to come from make.conf if it's not overridden
1214 # by the constructor argument (from the calling environment). As a
1215 # special exception for a very common use case, config_root == "/"
1216 # implies that ROOT in make.conf should be ignored. That way, the
1217 # user can chroot into $ROOT and the ROOT setting in make.conf will
1218 # be automatically ignored (unless config_root is other than "/").
1219 if config_root != "/" and \
1220 target_root is None and "ROOT" in self.mygcfg:
1221 target_root = self.mygcfg["ROOT"]
1223 self.configlist.append(self.mygcfg)
1224 self.configdict["conf"]=self.configlist[-1]
1226 self.configlist.append({})
1227 self.configdict["pkg"]=self.configlist[-1]
1230 self.configlist.append({})
1231 self.configdict["auto"]=self.configlist[-1]
1233 self.configlist.append(self.backupenv) # XXX Why though?
1234 self.configdict["backupenv"]=self.configlist[-1]
1236 self.configlist.append(os.environ.copy())
1237 self.configdict["env"]=self.configlist[-1]
1238 if not local_config:
1239 # Clean up pollution from portage_data so that it doesn't
1240 # interfere with repoman.
1241 self.configdict["env"].pop("USERLAND", None)
1243 # make lookuplist for loading package.*
1244 self.lookuplist=self.configlist[:]
1245 self.lookuplist.reverse()
1247 # Blacklist vars that could interfere with portage internals.
1248 for blacklisted in "CATEGORY", "PKGUSE", "PORTAGE_CONFIGROOT", \
1250 for cfg in self.lookuplist:
1251 cfg.pop(blacklisted, None)
1252 del blacklisted, cfg
1254 if target_root is None:
1257 target_root = normalize_path(os.path.abspath(
1258 target_root)).rstrip(os.path.sep) + os.path.sep
1260 check_var_directory("ROOT", target_root)
1263 os.path.join(target_root, "etc", "profile.env"), expand=False)
1264 # env_d will be None if profile.env doesn't exist.
1266 self.configdict["env.d"].update(env_d)
1267 # Remove duplicate values so they don't override updated
1268 # profile.env values later (profile.env is reloaded in each
1269 # call to self.regenerate).
1270 for cfg in (self.configdict["backupenv"],
1271 self.configdict["env"]):
1272 for k, v in env_d.iteritems():
1280 self["PORTAGE_CONFIGROOT"] = config_root
1281 self.backup_changes("PORTAGE_CONFIGROOT")
1282 self["ROOT"] = target_root
1283 self.backup_changes("ROOT")
1286 self.pkeywordsdict = {}
1287 self.punmaskdict = {}
1288 abs_user_config = os.path.join(config_root,
1289 USER_CONFIG_PATH.lstrip(os.path.sep))
1291 # locations for "categories" and "arch.list" files
1292 locations = [os.path.join(self["PORTDIR"], "profiles")]
1293 pmask_locations = [os.path.join(self["PORTDIR"], "profiles")]
1294 pmask_locations.extend(self.profiles)
1296 """ repoman controls PORTDIR_OVERLAY via the environment, so no
1297 special cases are needed here."""
1298 overlay_profiles = []
1299 for ov in self["PORTDIR_OVERLAY"].split():
1300 ov = normalize_path(ov)
1301 profiles_dir = os.path.join(ov, "profiles")
1302 if os.path.isdir(profiles_dir):
1303 overlay_profiles.append(profiles_dir)
1304 locations += overlay_profiles
1306 pmask_locations.extend(overlay_profiles)
1309 locations.append(abs_user_config)
1310 pmask_locations.append(abs_user_config)
1311 pusedict = grabdict_package(
1312 os.path.join(abs_user_config, "package.use"), recursive=1)
1313 for key in pusedict.keys():
1314 cp = dep_getkey(key)
1315 if not self.pusedict.has_key(cp):
1316 self.pusedict[cp] = {}
1317 self.pusedict[cp][key] = pusedict[key]
1320 pkgdict = grabdict_package(
1321 os.path.join(abs_user_config, "package.keywords"),
1323 for key in pkgdict.keys():
1324 # default to ~arch if no specific keyword is given
1325 if not pkgdict[key]:
1327 if self.configdict["defaults"] and self.configdict["defaults"].has_key("ACCEPT_KEYWORDS"):
1328 groups = self.configdict["defaults"]["ACCEPT_KEYWORDS"].split()
1331 for keyword in groups:
1332 if not keyword[0] in "~-":
1333 mykeywordlist.append("~"+keyword)
1334 pkgdict[key] = mykeywordlist
1335 cp = dep_getkey(key)
1336 if not self.pkeywordsdict.has_key(cp):
1337 self.pkeywordsdict[cp] = {}
1338 self.pkeywordsdict[cp][key] = pkgdict[key]
1341 pkgunmasklines = grabfile_package(
1342 os.path.join(abs_user_config, "package.unmask"),
1344 for x in pkgunmasklines:
1345 mycatpkg=dep_getkey(x)
1346 if self.punmaskdict.has_key(mycatpkg):
1347 self.punmaskdict[mycatpkg].append(x)
1349 self.punmaskdict[mycatpkg]=[x]
1351 #getting categories from an external file now
1352 categories = [grabfile(os.path.join(x, "categories")) for x in locations]
1353 self.categories = stack_lists(categories, incremental=1)
1356 archlist = [grabfile(os.path.join(x, "arch.list")) for x in locations]
1357 archlist = stack_lists(archlist, incremental=1)
1358 self.configdict["conf"]["PORTAGE_ARCHLIST"] = " ".join(archlist)
1362 for x in pmask_locations:
1363 pkgmasklines.append(grabfile_package(
1364 os.path.join(x, "package.mask"), recursive=1))
1365 pkgmasklines = stack_lists(pkgmasklines, incremental=1)
1368 for x in pkgmasklines:
1369 mycatpkg=dep_getkey(x)
1370 if self.pmaskdict.has_key(mycatpkg):
1371 self.pmaskdict[mycatpkg].append(x)
1373 self.pmaskdict[mycatpkg]=[x]
1375 pkgprovidedlines = [grabfile(os.path.join(x, "package.provided")) for x in self.profiles]
1376 pkgprovidedlines = stack_lists(pkgprovidedlines, incremental=1)
1377 has_invalid_data = False
1378 for x in range(len(pkgprovidedlines)-1, -1, -1):
1379 myline = pkgprovidedlines[x]
1380 if not isvalidatom("=" + myline):
1381 writemsg("Invalid package name in package.provided:" + \
1382 " %s\n" % myline, noiselevel=-1)
1383 has_invalid_data = True
1384 del pkgprovidedlines[x]
1386 cpvr = catpkgsplit(pkgprovidedlines[x])
1387 if not cpvr or cpvr[0] == "null":
1388 writemsg("Invalid package name in package.provided: "+pkgprovidedlines[x]+"\n",
1390 has_invalid_data = True
1391 del pkgprovidedlines[x]
1393 if cpvr[0] == "virtual":
1394 writemsg("Virtual package in package.provided: %s\n" % \
1395 myline, noiselevel=-1)
1396 has_invalid_data = True
1397 del pkgprovidedlines[x]
1399 if has_invalid_data:
1400 writemsg("See portage(5) for correct package.provided usage.\n",
1402 self.pprovideddict = {}
1403 for x in pkgprovidedlines:
1407 mycatpkg=dep_getkey(x)
1408 if self.pprovideddict.has_key(mycatpkg):
1409 self.pprovideddict[mycatpkg].append(x)
1411 self.pprovideddict[mycatpkg]=[x]
1413 # reasonable defaults; this is important as without USE_ORDER,
1414 # USE will always be "" (nothing set)!
1415 if "USE_ORDER" not in self:
1416 self.backupenv["USE_ORDER"] = "env:pkg:conf:defaults:pkginternal"
1418 self["PORTAGE_GID"] = str(portage_gid)
1419 self.backup_changes("PORTAGE_GID")
1421 if self.get("PORTAGE_DEPCACHEDIR", None):
1422 self.depcachedir = self["PORTAGE_DEPCACHEDIR"]
1423 self["PORTAGE_DEPCACHEDIR"] = self.depcachedir
1424 self.backup_changes("PORTAGE_DEPCACHEDIR")
1426 overlays = self.get("PORTDIR_OVERLAY","").split()
1430 ov = normalize_path(ov)
1431 if os.path.isdir(ov):
1434 writemsg("!!! Invalid PORTDIR_OVERLAY" + \
1435 " (not a dir): '%s'\n" % ov, noiselevel=-1)
1436 self["PORTDIR_OVERLAY"] = " ".join(new_ov)
1437 self.backup_changes("PORTDIR_OVERLAY")
1439 if "CBUILD" not in self and "CHOST" in self:
1440 self["CBUILD"] = self["CHOST"]
1441 self.backup_changes("CBUILD")
1443 self["PORTAGE_BIN_PATH"] = PORTAGE_BIN_PATH
1444 self.backup_changes("PORTAGE_BIN_PATH")
1445 self["PORTAGE_PYM_PATH"] = PORTAGE_PYM_PATH
1446 self.backup_changes("PORTAGE_PYM_PATH")
1448 for var in ("PORTAGE_INST_UID", "PORTAGE_INST_GID"):
1450 self[var] = str(int(self.get(var, "0")))
1452 writemsg(("!!! %s='%s' is not a valid integer. " + \
1453 "Falling back to '0'.\n") % (var, self[var]),
1456 self.backup_changes(var)
1459 self.features = portage_util.unique_array(self["FEATURES"].split())
1461 if "gpg" in self.features:
1462 if not os.path.exists(self["PORTAGE_GPG_DIR"]) or \
1463 not os.path.isdir(self["PORTAGE_GPG_DIR"]):
1464 writemsg(colorize("BAD", "PORTAGE_GPG_DIR is invalid." + \
1465 " Removing gpg from FEATURES.\n"), noiselevel=-1)
1466 self.features.remove("gpg")
1468 if not portage_exec.sandbox_capable and \
1469 ("sandbox" in self.features or "usersandbox" in self.features):
1470 if self.profile_path is not None and \
1471 os.path.realpath(self.profile_path) == \
1472 os.path.realpath(PROFILE_PATH):
1473 """ Don't show this warning when running repoman and the
1474 sandbox feature came from a profile that doesn't belong to
1476 writemsg(colorize("BAD", "!!! Problem with sandbox" + \
1477 " binary. Disabling...\n\n"), noiselevel=-1)
1478 if "sandbox" in self.features:
1479 self.features.remove("sandbox")
1480 if "usersandbox" in self.features:
1481 self.features.remove("usersandbox")
1483 self.features.sort()
1484 self["FEATURES"] = " ".join(self.features)
1485 self.backup_changes("FEATURES")
    def _init_dirs(self):
        """
        Create a few directories that are critical to portage operation
        """
        # Skip directory creation entirely when ROOT is not writable.
        if not os.access(self["ROOT"], os.W_OK):
        # NOTE(review): the body of the access check and the
        # "dir_mode_map = {" opener appear to be missing from this
        # extract; the entries below map path -> (gid, mode, modemask).
        "tmp" :(-1, 01777, 0),
        "var/tmp" :(-1, 01777, 0),
        "var/lib/portage" :(portage_gid, 02750, 02),
        "var/cache/edb" :(portage_gid, 0755, 02)
        for mypath, (gid, mode, modemask) in dir_mode_map.iteritems():
            # NOTE(review): a "try:" opener appears to be missing here.
            mydir = os.path.join(self["ROOT"], mypath)
            portage_util.ensure_dirs(mydir, gid=gid, mode=mode, mask=modemask)
            except portage_exception.PortageException, e:
                # Report, but do not abort, on directory setup failure.
                writemsg("!!! Directory initialization failed: '%s'\n" % mydir,
                writemsg("!!! %s\n" % str(e),
1517 """Validate miscellaneous settings and display warnings if necessary.
1518 (This code was previously in the global scope of portage.py)"""
1520 groups = self["ACCEPT_KEYWORDS"].split()
1521 archlist = self.archlist()
1523 writemsg("--- 'profiles/arch.list' is empty or not available. Empty portage tree?\n")
1525 for group in groups:
1526 if group not in archlist and group[0] != '-':
1527 writemsg("!!! INVALID ACCEPT_KEYWORDS: %s\n" % str(group),
1530 abs_profile_path = os.path.join(self["PORTAGE_CONFIGROOT"],
1531 PROFILE_PATH.lstrip(os.path.sep))
1532 if not os.path.islink(abs_profile_path) and \
1533 not os.path.exists(os.path.join(abs_profile_path, "parent")) and \
1534 os.path.exists(os.path.join(self["PORTDIR"], "profiles")):
1535 writemsg("\a\n\n!!! %s is not a symlink and will probably prevent most merges.\n" % abs_profile_path,
1537 writemsg("!!! It should point into a profile within %s/profiles/\n" % self["PORTDIR"])
1538 writemsg("!!! (You can safely ignore this message when syncing. It's harmless.)\n\n\n")
1540 abs_user_virtuals = os.path.join(self["PORTAGE_CONFIGROOT"],
1541 USER_VIRTUALS_FILE.lstrip(os.path.sep))
1542 if os.path.exists(abs_user_virtuals):
1543 writemsg("\n!!! /etc/portage/virtuals is deprecated in favor of\n")
1544 writemsg("!!! /etc/portage/profile/virtuals. Please move it to\n")
1545 writemsg("!!! this new location.\n\n")
1547 def loadVirtuals(self,root):
1548 """Not currently used by portage."""
1549 writemsg("DEPRECATED: portage.config.loadVirtuals\n")
1550 self.getvirtuals(root)
    def load_best_module(self,property_string):
        # Resolve 'property_string' to the highest-priority configured
        # module name and import it.
        # NOTE(review): a try/except around load_mod appears to have lost
        # lines in this extract; dump_traceback is the import-failure path.
        best_mod = best_from_dict(property_string,self.modules,self.module_priority)
        mod = load_mod(best_mod)
        dump_traceback(red("Error: Failed to import module '%s'") % best_mod, noiselevel=0)
    def modifying(self):
        # Raise when mutation is attempted on a locked config.
        # NOTE(review): the guarding condition appears to be missing from
        # this extract; only the raise statement is visible.
        raise Exception, "Configuration is locked."
    def backup_changes(self,key=None):
        # Copy the current value of 'key' from the "env" layer into
        # self.backupenv so it survives reset().
        # NOTE(review): lines appear to be missing from this extract
        # (likely an "else:" that introduces the raise below).
        if key and self.configdict["env"].has_key(key):
            self.backupenv[key] = copy.deepcopy(self.configdict["env"][key])
        raise KeyError, "No such key defined in environment: %s" % key
    def reset(self,keeping_pkg=0,use_cache=1):
        """
        Restore environment from self.backupenv, call self.regenerate()
        @param keeping_pkg: Should we keep the set_cpv() data or delete it.
        @type keeping_pkg: Boolean
        @param use_cache: Should self.regenerate use the cache or not
        @type use_cache: Boolean
        """
        # Rebuild the mutable "env" layer from the pristine backup copy.
        self.configdict["env"].clear()
        self.configdict["env"].update(self.backupenv)
        self.modifiedkeys = []
        # NOTE(review): a "keeping_pkg" guard appears to be missing from
        # this extract; the clears below drop per-package state.
        self.configdict["pkg"].clear()
        self.configdict["pkginternal"].clear()
        self.configdict["defaults"]["USE"] = \
            " ".join(self.make_defaults_use)
        # Re-stack profile use.mask/use.force so per-package adjustments
        # made by setcpv() are discarded.
        self.usemask = set(stack_lists(
            self.usemask_list, incremental=True))
        self.useforce = set(stack_lists(
            self.useforce_list, incremental=True))
        self.regenerate(use_cache=use_cache)
    def load_infodir(self,infodir):
        # Load package build-info metadata files from 'infodir' into the
        # "pkg" config layer.
        # NOTE(review): several lines (else branches, continue statements,
        # a try: opener and trailing noiselevel arguments) appear to have
        # been dropped from this extract; visible code is preserved as-is.
        if self.configdict.has_key("pkg"):
            # Clear any previously loaded package metadata first.
            for x in self.configdict["pkg"].keys():
                del self.configdict["pkg"][x]
        writemsg("No pkg setup for settings instance?\n",
        if os.path.exists(infodir):
            if os.path.exists(infodir+"/environment"):
                self.configdict["pkg"]["PORT_ENV_FILE"] = infodir+"/environment"
            # Only all-uppercase filenames are treated as variables.
            myre = re.compile('^[A-Z]+$')
            for filename in listdir(infodir,filesonly=1,EmptyOnError=1):
                if filename == "FEATURES":
                    # FEATURES from the build host shouldn't be interpreted as
                    # FEATURES on the client system.
                if myre.match(filename):
                    file_path = os.path.join(infodir, filename)
                    mydata = open(file_path).read().strip()
                    # Oversized values are skipped, except USE.
                    if len(mydata) < 2048 or filename == "USE":
                        if null_byte in mydata:
                            writemsg("!!! Null byte found in metadata " + \
                                "file: '%s'\n" % file_path, noiselevel=-1)
                        if filename == "USE":
                            # "-*" resets defaults so only recorded flags apply.
                            binpkg_flags = "-* " + mydata
                            self.configdict["pkg"][filename] = binpkg_flags
                            self.configdict["env"][filename] = mydata
                            self.configdict["pkg"][filename] = mydata
                            self.configdict["env"][filename] = mydata
                        # CATEGORY is important because it's used in doebuild
                        # to infer the cpv. If it's corrupted, it leads to
                        # strange errors later on, so we'll validate it and
                        # print a warning if necessary.
                        if filename == "CATEGORY":
                            matchobj = re.match("[-a-zA-Z0-9_.+]+", mydata)
                            if not matchobj or matchobj.start() != 0 or \
                                matchobj.end() != len(mydata):
                                writemsg("!!! CATEGORY file is corrupt: %s\n" % \
                                    os.path.join(infodir, filename), noiselevel=-1)
        except (OSError, IOError):
            writemsg("!!! Unable to read file: %s\n" % infodir+"/"+filename,
    def setcpv(self, mycpv, use_cache=1, mydb=None):
        """
        Load a particular CPV into the config, this lets us see the
        Default USE flags for a particular ebuild as well as the USE
        flags from package.use.

        @param mycpv: A cpv to load
        @param use_cache: Enables caching
        @type use_cache: Boolean
        @param mydb: a dbapi instance that supports aux_get with the IUSE key.
        @type mydb: dbapi or derivative.
        """
        # NOTE(review): guard lines (early return, "if mydb:" checks, list
        # initializations and several "if cpdict:" tests) appear to have
        # been dropped from this extract; visible code is preserved as-is.
        # Fast path: nothing to do when this cpv is already loaded.
        if self.mycpv == mycpv:
        cp = dep_getkey(mycpv)
        cpv_slot = self.mycpv
        slot, iuse = mydb.aux_get(self.mycpv, ["SLOT", "IUSE"])
        cpv_slot = "%s:%s" % (self.mycpv, slot)
        # IUSE defaults ("+flag"/"-flag") feed the "pkginternal" layer.
        for x in iuse.split():
            if x.startswith("+"):
                pkginternaluse.append(x[1:])
            elif x.startswith("-"):
                pkginternaluse.append(x)
        pkginternaluse = " ".join(pkginternaluse)
        if pkginternaluse != self.configdict["pkginternal"].get("USE", ""):
            self.configdict["pkginternal"]["USE"] = pkginternaluse
        # Stack per-profile make.defaults USE plus the package.use entry
        # that best matches this cpv/slot.
        for i in xrange(len(self.profiles)):
            defaults.append(self.make_defaults_use[i])
            cpdict = self.pkgprofileuse[i].get(cp, None)
                best_match = best_match_to_list(cpv_slot, cpdict.keys())
                    defaults.append(cpdict[best_match])
        defaults = " ".join(defaults)
        if defaults != self.configdict["defaults"].get("USE",""):
            self.configdict["defaults"]["USE"] = defaults
        # Per-package use.force (package.use.force) stacking.
        for i in xrange(len(self.profiles)):
            useforce.append(self.useforce_list[i])
            cpdict = self.puseforce_list[i].get(cp, None)
                best_match = best_match_to_list(cpv_slot, cpdict.keys())
                useforce.append(cpdict[best_match])
        useforce = set(stack_lists(useforce, incremental=True))
        if useforce != self.useforce:
            self.useforce = useforce
        # Per-package use.mask (package.use.mask) stacking.
        for i in xrange(len(self.profiles)):
            usemask.append(self.usemask_list[i])
            cpdict = self.pusemask_list[i].get(cp, None)
                best_match = best_match_to_list(cpv_slot, cpdict.keys())
                usemask.append(cpdict[best_match])
        usemask = set(stack_lists(usemask, incremental=True))
        if usemask != self.usemask:
            self.usemask = usemask
        # User package.use entries for this cp.
        cpdict = self.pusedict.get(cp)
            self.pusekey = best_match_to_list(cpv_slot, cpdict.keys())
            self.puse = " ".join(cpdict[self.pusekey])
        if oldpuse != self.puse:
            self.configdict["pkg"]["PKGUSE"] = self.puse[:] # For saving to PUSE file
            self.configdict["pkg"]["USE"] = self.puse[:] # this gets appended to USE
        # CATEGORY is essential for doebuild calls
        self.configdict["pkg"]["CATEGORY"] = mycpv.split("/")[0]
        # Rebuild USE etc. while keeping the per-package layers just set.
        self.reset(keeping_pkg=1,use_cache=use_cache)
    def setinst(self,mycpv,mydbapi):
        # Register the virtuals PROVIDEd by installed package 'mycpv' into
        # self.treeVirtuals and recompile self.virtuals.
        # NOTE(review): several lines (an else branch and the
        # "for virt in virts:" loop header) appear to be missing from this
        # extract; visible code is preserved as-is.
        if len(self.virtuals) == 0:
        # Grab the virtuals this package provides and add them into the tree virtuals.
        provides = mydbapi.aux_get(mycpv, ["PROVIDE"])[0]
        if isinstance(mydbapi, portdbapi):
            myuse = mydbapi.aux_get(mycpv, ["USE"])[0]
        # Reduce PROVIDE under the package's USE flags to the active virtuals.
        virts = flatten(portage_dep.use_reduce(portage_dep.paren_reduce(provides), uselist=myuse.split()))
        cp = dep_getkey(mycpv)
            virt = dep_getkey(virt)
            if not self.treeVirtuals.has_key(virt):
                self.treeVirtuals[virt] = []
            # XXX: Is this bad? -- It's a permanent modification
            if cp not in self.treeVirtuals[virt]:
                self.treeVirtuals[virt].append(cp)
        self.virtuals = self.__getvirtuals_compile()
    def regenerate(self,useonly=0,use_cache=1):
        """
        This involves regenerating valid USE flags, re-expanding USE_EXPAND flags
        re-stacking USE flags (-flag and -*), as well as any other INCREMENTAL
        variables. This also updates the env.d configdict; useful in case an ebuild
        changes the environment.

        If FEATURES has already stacked, it is not stacked twice.

        @param useonly: Only regenerate USE flags (not any other incrementals)
        @type useonly: Boolean
        @param use_cache: Enable Caching (only for autouse)
        @type use_cache: Boolean
        """
        # NOTE(review): numerous lines are missing from this extract
        # (returns, else branches, loop headers, the getconfig() call for
        # profile.env and parts of the incremental-stacking internals);
        # visible code is preserved unchanged.
        if self.already_in_regenerate:
            # XXX: THIS REALLY NEEDS TO GET FIXED. autouse() loops.
            writemsg("!!! Looping in regenerate.\n",1)
        self.already_in_regenerate = 1

        # We grab the latest profile.env here since it changes frequently.
        self.configdict["env.d"].clear()
            os.path.join(self["ROOT"], "etc", "profile.env"), expand=False)
        # env_d will be None if profile.env doesn't exist.
        self.configdict["env.d"].update(env_d)

        myincrementals=["USE"]
        myincrementals = self.incrementals
        myincrementals = set(myincrementals)
        # If self.features exists, it has already been stacked and may have
        # been mutated, so don't stack it again or else any mutations will be
        if "FEATURES" in myincrementals and hasattr(self, "features"):
            myincrementals.remove("FEATURES")

        if "USE" in myincrementals:
            # Process USE last because it depends on USE_EXPAND which is also
            myincrementals.remove("USE")

        # Stack each remaining incremental variable across config layers.
        for mykey in myincrementals:
            mydbs=self.configlist[:-1]
                if mykey not in curdb:
                #variables are already expanded
                mysplit = curdb[mykey].split()
                    # "-*" is a special "minus" var that means "unset all settings".
                    # so USE="-* gnome" will have *just* gnome enabled.
                        # Not legal. People assume too much. Complain.
                        writemsg(red("USE flags should not start with a '+': %s\n" % x),
                        if (x[1:] in myflags):
                            del myflags[myflags.index(x[1:])]
                    # We got here, so add it now.
                    if x not in myflags:
            #store setting in last element of configlist, the original environment:
            if myflags or mykey in self:
                self.configlist[-1][mykey] = " ".join(myflags)

        # Do the USE calculation last because it depends on USE_EXPAND.
        if "auto" in self["USE_ORDER"].split(":"):
            self.configdict["auto"]["USE"] = autouse(
                vartree(root=self["ROOT"], categories=self.categories,
                use_cache=use_cache, mysettings=self)
            self.configdict["auto"]["USE"] = ""

        use_expand = self.get("USE_EXPAND", "").split()

        # Build the USE lookup order from USE_ORDER (reversed so the
        # highest-priority layer is processed last).
        for x in self["USE_ORDER"].split(":"):
            if x in self.configdict:
                self.uvlist.append(self.configdict[x])
        self.uvlist.reverse()

        for curdb in self.uvlist:
            cur_use_expand = [x for x in use_expand if x in curdb]
            mysplit = curdb.get("USE", "").split()
            if not mysplit and not cur_use_expand:
                writemsg(colorize("BAD", "USE flags should not start " + \
                    "with a '+': %s\n" % x), noiselevel=-1)
                myflags.discard(x[1:])
            # Expand VAR="a b" entries into var_a var_b flags.
            for var in cur_use_expand:
                var_lower = var.lower()
                is_not_incremental = var not in myincrementals
                if is_not_incremental:
                    # Non-incremental: a later layer fully replaces the
                    # previously expanded flags for this variable.
                    prefix = var_lower + "_"
                    for x in list(myflags):
                        if x.startswith(prefix):
                for x in curdb[var].split():
                        if is_not_incremental:
                            writemsg(colorize("BAD", "Invalid '+' " + \
                                "operator in non-incremental variable " + \
                                "'%s': '%s'\n" % (var, x)), noiselevel=-1)
                            writemsg(colorize("BAD", "Invalid '+' " + \
                                "operator in incremental variable " + \
                                "'%s': '%s'\n" % (var, x)), noiselevel=-1)
                        if is_not_incremental:
                            writemsg(colorize("BAD", "Invalid '-' " + \
                                "operator in non-incremental variable " + \
                                "'%s': '%s'\n" % (var, x)), noiselevel=-1)
                        myflags.discard(var_lower + "_" + x[1:])
                    myflags.add(var_lower + "_" + x)

        # Forced flags always end up enabled.
        myflags.update(self.useforce)

        # FEATURES=test should imply USE=test
        if "test" in self.configlist[-1].get("FEATURES","").split():
            if self.get("EBUILD_FORCE_TEST") == "1":
                self.usemask.discard("test")

        # Drop masked flags from the final USE set.
        usesplit = [ x for x in myflags if \
            x not in self.usemask]

        # Use the calculated USE flags to regenerate the USE_EXPAND flags so
        # that they are consistent.
        for var in use_expand:
            prefix = var.lower() + "_"
            prefix_len = len(prefix)
            expand_flags = set([ x[prefix_len:] for x in usesplit \
                if x.startswith(prefix) ])
            var_split = self.get(var, "").split()
            # Preserve the order of var_split because it can matter for things
            var_split = [ x for x in var_split if x in expand_flags ]
            var_split.extend(expand_flags.difference(var_split))
            if var_split or var in self:
                # Don't export empty USE_EXPAND vars unless the user config
                # exports them as empty. This is required for vars such as
                # LINGUAS, where unset and empty have different meanings.
                self[var] = " ".join(var_split)

        # Pre-Pend ARCH variable to USE settings so '-*' in env doesn't kill arch.
        if self.configdict["defaults"].has_key("ARCH"):
            if self.configdict["defaults"]["ARCH"]:
                if self.configdict["defaults"]["ARCH"] not in usesplit:
                    usesplit.insert(0,self.configdict["defaults"]["ARCH"])

        self.configlist[-1]["USE"]= " ".join(usesplit)

        self.already_in_regenerate = 0
    def get_virts_p(self, myroot):
        # Return self.virts_p: virtuals keyed by bare package name (the
        # part after "cat/") instead of full virtual atoms.
        # NOTE(review): the cache-hit return, the loop header over
        # myvkeys and the final return appear to be missing from this
        # extract; visible code is preserved as-is.
        virts = self.getvirtuals(myroot)
        myvkeys = virts.keys()
            vkeysplit = x.split("/")
            # First writer wins for a given package name.
            if not self.virts_p.has_key(vkeysplit[1]):
                self.virts_p[vkeysplit[1]] = virts[x]
    def getvirtuals(self, myroot=None):
        """myroot is now ignored because, due to caching, it has always been
        broken for all but the first call."""
        # NOTE(review): the cache-check guard, an inner loop header over
        # myvalues and list initializations appear to be missing from this
        # extract; visible code is preserved as-is.
        myroot = self["ROOT"]
        return self.virtuals
        # Stack the "virtuals" file from each profile directory.
        for x in self.profiles:
            virtuals_file = os.path.join(x, "virtuals")
            virtuals_dict = grabdict(virtuals_file)
            for k in virtuals_dict.keys():
                # Keys must be plain cat/pkg virtual atoms.
                if not isvalidatom(k) or dep_getkey(k) != k:
                    writemsg("--- Invalid virtuals atom in %s: %s\n" % \
                        (virtuals_file, k), noiselevel=-1)
                    del virtuals_dict[k]
                myvalues = virtuals_dict[k]
                    if x.startswith("-"):
                        # allow incrementals
                    if not isvalidatom(myatom):
                        writemsg("--- Invalid atom in %s: %s\n" % \
                            (virtuals_file, x), noiselevel=-1)
                    del virtuals_dict[k]
            virtuals_list.append(virtuals_dict)
        self.dirVirtuals = stack_dictlist(virtuals_list, incremental=True)

        for virt in self.dirVirtuals:
            # Preference for virtuals decreases from left to right.
            self.dirVirtuals[virt].reverse()

        # Repoman does not use user or tree virtuals.
        if self.local_config and not self.treeVirtuals:
            temp_vartree = vartree(myroot, None,
                categories=self.categories, settings=self)
            # Reduce the provides into a list by CP.
            self.treeVirtuals = map_dictlist_vals(getCPFromCPV,temp_vartree.get_all_provides())

        self.virtuals = self.__getvirtuals_compile()
        return self.virtuals
    def __getvirtuals_compile(self):
        """Stack installed and profile virtuals. Preference for virtuals
        decreases from left to right.
        Order of preference:
        1. installed and in profile
        """
        # NOTE(review): the ptVirtuals initialization, a continue
        # statement and the tail of the stack_dictlist() call appear to
        # be missing from this extract; visible code is preserved as-is.
        # Virtuals by profile+tree preferences.
        for virt, installed_list in self.treeVirtuals.iteritems():
            profile_list = self.dirVirtuals.get(virt, None)
            if not profile_list:
            # Keep only installed providers that the profile also lists.
            for cp in installed_list:
                if cp in profile_list:
                    ptVirtuals.setdefault(virt, [])
                    ptVirtuals[virt].append(cp)
        virtuals = stack_dictlist([ptVirtuals, self.treeVirtuals,
    def __delitem__(self,mykey):
        # Delete mykey from every config layer that defines it.
        # NOTE(review): the loop body appears to be missing from this
        # extract.
        for x in self.lookuplist:
    def __getitem__(self,mykey):
        # Look mykey up through the layered configs; first match wins.
        # NOTE(review): the guard introducing the writemsg branch, the
        # return statements and the fallback appear to be missing from
        # this extract.
        for x in self.lookuplist:
                writemsg("!!! lookuplist is null.\n")
            elif x.has_key(mykey):
    def has_key(self,mykey):
        # Dict-style membership check across all config layers.
        # NOTE(review): the return statements appear to be missing from
        # this extract.
        for x in self.lookuplist:
            if x.has_key(mykey):
2083 def __contains__(self, mykey):
2084 """Called to implement membership test operators (in and not in)."""
2085 return bool(self.has_key(mykey))
    def setdefault(self, k, x=None):
        # Presumably a dict.setdefault analogue over the layered config.
        # NOTE(review): the method body is missing from this extract.
    def get(self, k, x=None):
        # Presumably a dict.get analogue: self[k] if present, else x.
        # NOTE(review): the method body is missing from this extract.
2101 return unique_array(flatten([x.keys() for x in self.lookuplist]))
    def __setitem__(self,mykey,myvalue):
        "set a value; will be thrown away at reset() time"
        # Only string values are permitted in the config.
        if type(myvalue) != types.StringType:
            raise ValueError("Invalid type being used as a value: '%s': '%s'" % (str(mykey),str(myvalue)))
        # NOTE(review): one line appears to have been dropped from this
        # extract at this point (likely a lock/modification check).
        # Record the key and write into the mutable "env" layer.
        self.modifiedkeys += [mykey]
        self.configdict["env"][mykey]=myvalue
2112 "return our locally-maintained environment"
2114 for x in self.keys():
2116 if not isinstance(myvalue, basestring):
2117 writemsg("!!! Non-string value in config: %s=%s\n" % \
2118 (x, myvalue), noiselevel=-1)
2121 if not mydict.has_key("HOME") and mydict.has_key("BUILD_PREFIX"):
2122 writemsg("*** HOME not set. Setting to "+mydict["BUILD_PREFIX"]+"\n")
2123 mydict["HOME"]=mydict["BUILD_PREFIX"][:]
2127 def thirdpartymirrors(self):
2128 if getattr(self, "_thirdpartymirrors", None) is None:
2129 profileroots = [os.path.join(self["PORTDIR"], "profiles")]
2130 for x in self["PORTDIR_OVERLAY"].split():
2131 profileroots.insert(0, os.path.join(x, "profiles"))
2132 thirdparty_lists = [grabdict(os.path.join(x, "thirdpartymirrors")) for x in profileroots]
2133 self._thirdpartymirrors = stack_dictlist(thirdparty_lists, incremental=True)
2134 return self._thirdpartymirrors
2137 return flatten([[myarch, "~" + myarch] \
2138 for myarch in self["PORTAGE_ARCHLIST"].split()])
    def selinux_enabled(self):
        # Lazily determine (and cache) whether SELinux support is active:
        # requires USE=selinux and the selinux module to be importable.
        # NOTE(review): else/try/except lines and trailing arguments
        # appear to be missing from this extract; visible code is
        # preserved as-is.
        if getattr(self, "_selinux_enabled", None) is None:
            self._selinux_enabled = 0
            if "selinux" in self["USE"].split():
                if "selinux" in globals():
                    if selinux.is_selinux_enabled() == 1:
                        self._selinux_enabled = 1
                        self._selinux_enabled = 0
                    writemsg("!!! SELinux module not found. Please verify that it was installed.\n",
                    self._selinux_enabled = 0
            # Drop the module again when SELinux ends up disabled.
            if self._selinux_enabled == 0:
                    del sys.modules["selinux"]
        return self._selinux_enabled
# XXX This would be to replace getstatusoutput completely.
# XXX Issue: cannot block execution. Deadlock condition.
def spawn(mystring, mysettings, debug=0, free=0, droppriv=0, sesandbox=0, **keywords):
    """
    Spawn a subprocess with extra portage-specific options.

    Sandbox: Sandbox means the spawned process will be limited in its ability t
    read and write files (normally this means it is restricted to ${IMAGE}/)
    SElinux Sandbox: Enables sandboxing on SElinux
    Reduced Privileges: Drops privilages such that the process runs as portage:portage

    Notes: os.system cannot be used because it messes with signal handling. Instead we
    use the portage_exec spawn* family of functions.

    This function waits for the process to terminate.

    @param mystring: Command to run
    @type mystring: String
    @param mysettings: Either a Dict of Key,Value pairs or an instance of portage.config
    @type mysettings: Dictionary or config instance
    @param debug: Ignored
    @type debug: Boolean
    @param free: Enable sandboxing for this process
    @param droppriv: Drop to portage:portage when running this command
    @type droppriv: Boolean
    @param sesandbox: Enable SELinux Sandboxing (toggles a context switch)
    @type sesandbox: Boolean
    @param keywords: Extra options encoded as a dict, to be passed to spawn

    1. The return code of the spawned process.
    """
    # NOTE(review): this extract is missing a number of lines (else
    # branches, the logfile pipe setup, the sandbox-selection
    # conditionals, try/finally around the spawn call and the pid wait
    # loop header); visible code is preserved unchanged.
    if type(mysettings) == types.DictType:
        keywords["opt_name"]="[ %s ]" % "portage"
        check_config_instance(mysettings)
        env=mysettings.environ()
        keywords["opt_name"]="[%s]" % mysettings["PF"]

    # The default policy for the sesandbox domain only allows entry (via exec)
    # from shells and from binaries that belong to portage (the number of entry
    # points is minimized). The "tee" binary is not among the allowed entry
    # points, so it is spawned outside of the sesandbox domain and reads from a
    # pipe between two domains.
    logfile = keywords.get("logfile")
    del keywords["logfile"]
    fd_pipes = keywords.get("fd_pipes")
    if fd_pipes is None:
        fd_pipes = {0:0, 1:1, 2:2}
    elif 1 not in fd_pipes or 2 not in fd_pipes:
        raise ValueError(fd_pipes)
    # Feed stdout/stderr through a tee process for logging.
    mypids.extend(portage_exec.spawn(('tee', '-i', '-a', logfile),
        returnpid=True, fd_pipes={0:pr, 1:fd_pipes[1], 2:fd_pipes[2]}))
    keywords["fd_pipes"] = fd_pipes

    features = mysettings.features
    # XXX: Negative RESTRICT word
    droppriv=(droppriv and ("userpriv" in features) and not \
        (("nouserpriv" in mysettings["RESTRICT"].split()) or \
        ("userpriv" in mysettings["RESTRICT"].split())))

    if droppriv and not uid and portage_gid and portage_uid:
        keywords.update({"uid":portage_uid,"gid":portage_gid,"groups":userpriv_groups,"umask":002})

    free=((droppriv and "usersandbox" not in features) or \
        (not droppriv and "sandbox" not in features and "usersandbox" not in features))

    keywords["opt_name"] += " bash"
    spawn_func = portage_exec.spawn_bash
    keywords["opt_name"] += " sandbox"
    spawn_func = portage_exec.spawn_sandbox

    # Switch into the sandbox SELinux execution context.
    con = selinux.getcontext()
    con = con.replace(mysettings["PORTAGE_T"], mysettings["PORTAGE_SANDBOX_T"])
    selinux.setexec(con)

    returnpid = keywords.get("returnpid")
    keywords["returnpid"] = True
    mypids.extend(spawn_func(mystring, env=env, **keywords))
    selinux.setexec(None)

    # Reap the spawned pid(s) and translate the wait status.
    retval = os.waitpid(pid, 0)[1]
    portage_exec.spawned_pids.remove(pid)
    if retval != os.EX_OK:
        if os.waitpid(pid, os.WNOHANG) == (0,0):
            os.kill(pid, signal.SIGTERM)
    portage_exec.spawned_pids.remove(pid)
    return (retval & 0xff) << 8
# fetch(): download distfiles for the given upstream URIs into DISTDIR.
#
# Visible behavior (this extract is elided, so some branches are not shown):
#   * honors RESTRICT=mirror/nomirror and the "mirror"/"lmirror" FEATURES
#   * builds, per file, an ordered candidate list (local mirrors, GENTOO_MIRRORS,
#     custom mirrors, thirdpartymirrors for mirror:// URIs, then the source URI)
#   * takes per-file locks ("distlocks"), resumes partial downloads, verifies
#     checksums against the package Manifest, and retries across mirrors
#   * for RESTRICT=fetch packages, runs the ebuild's pkg_nofetch phase instead
# Parameters (as used below): myuris is an iterable of URI strings; mysettings
# is a portage config instance; listonly/fetchonly/use_locks/try_mirrors are
# int-style flags; locks_in_subdir names the lock subdirectory under DISTDIR.
2282 def fetch(myuris, mysettings, listonly=0, fetchonly=0, locks_in_subdir=".locks",use_locks=1, try_mirrors=1):
2283 	"fetch files. Will use digest file if available."
2285 	features = mysettings.features
2286 	# 'nomirror' is bad/negative logic. You Restrict mirroring, not no-mirroring.
2287 	if ("mirror" in mysettings["RESTRICT"].split()) or \
2288 	   ("nomirror" in mysettings["RESTRICT"].split()):
2289 		if ("mirror" in features) and ("lmirror" not in features):
2290 		# lmirror should allow you to bypass mirror restrictions.
2291 		# XXX: This is not a good thing, and is temporary at best.
2292 			print ">>> \"mirror\" mode desired and \"mirror\" restriction found; skipping fetch."
2295 	thirdpartymirrors = mysettings.thirdpartymirrors()
2297 	check_config_instance(mysettings)
# Custom mirrors come from the user's CUSTOM_MIRRORS_FILE under PORTAGE_CONFIGROOT.
2299 	custommirrors = grabdict(os.path.join(mysettings["PORTAGE_CONFIGROOT"],
2300 		CUSTOM_MIRRORS_FILE.lstrip(os.path.sep)), recursive=1)
2304 	if listonly or ("distlocks" not in features):
2308 	if "skiprocheck" in features:
# Fetching to a read-only DISTDIR with locking enabled cannot work; tell the
# user how to disable distlocks.
2311 	if not os.access(mysettings["DISTDIR"],os.W_OK) and fetch_to_ro:
2313 			writemsg(red("!!! For fetching to a read-only filesystem, " + \
2314 				"locking should be turned off.\n"), noiselevel=-1)
2315 			writemsg("!!! This can be done by adding -distlocks to " + \
2316 				"FEATURES in /etc/make.conf\n", noiselevel=-1)
2319 	# local mirrors are always added
2320 	if custommirrors.has_key("local"):
2321 		mymirrors += custommirrors["local"]
2323 	if ("nomirror" in mysettings["RESTRICT"].split()) or \
2324 	   ("mirror"   in mysettings["RESTRICT"].split()):
2325 		# We don't add any mirrors.
2329 			mymirrors += [x.rstrip("/") for x in mysettings["GENTOO_MIRRORS"].split() if x]
# When fetch() is called for a specific package, "O" points at the package dir
# and its Manifest supplies the expected DIST digests/sizes.
2331 	pkgdir = mysettings.get("O")
2333 		mydigests = Manifest(
2334 			pkgdir, mysettings["DISTDIR"]).getTypeDigests("DIST")
2336 		# no digests because fetch was not called for a specific package
# Mirror entries that are absolute paths are treated as filesystem mirrors
# (copied from, rather than downloaded).
2340 	for x in range(len(mymirrors)-1,-1,-1):
2341 		if mymirrors[x] and mymirrors[x][0]=='/':
2342 			fsmirrors += [mymirrors[x]]
2345 	restrict_fetch = "fetch" in mysettings["RESTRICT"].split()
2346 	custom_local_mirrors = custommirrors.get("local", [])
2348 	# With fetch restriction, a normal uri may only be fetched from
2349 	# custom local mirrors (if available). A mirror:// uri may also
2350 	# be fetched from specific mirrors (effectively overriding fetch
2351 	# restriction, but only for specific mirrors).
2352 		locations = custom_local_mirrors
2354 		locations = mymirrors
# Build filedict: for each distfile name, the ordered list of candidate URLs.
2357 	primaryuri_indexes={}
2358 	for myuri in myuris:
2359 		myfile=os.path.basename(myuri)
2360 		if not filedict.has_key(myfile):
2362 			for y in range(0,len(locations)):
2363 				filedict[myfile].append(locations[y]+"/distfiles/"+myfile)
2364 		if myuri[:9]=="mirror://":
2365 			eidx = myuri.find("/", 9)
2367 				mirrorname = myuri[9:eidx]
2369 				# Try user-defined mirrors first
2370 				if custommirrors.has_key(mirrorname):
2371 					for cmirr in custommirrors[mirrorname]:
2372 						filedict[myfile].append(cmirr+"/"+myuri[eidx+1:])
2373 						# remove the mirrors we tried from the list of official mirrors
2374 						if cmirr.strip() in thirdpartymirrors[mirrorname]:
2375 							thirdpartymirrors[mirrorname].remove(cmirr)
2376 				# now try the official mirrors
2377 				if thirdpartymirrors.has_key(mirrorname):
# Shuffle for crude load-balancing across official mirrors.
2378 					shuffle(thirdpartymirrors[mirrorname])
2380 					for locmirr in thirdpartymirrors[mirrorname]:
2381 						filedict[myfile].append(locmirr+"/"+myuri[eidx+1:])
2383 				if not filedict[myfile]:
2384 					writemsg("No known mirror by the name: %s\n" % (mirrorname))
2386 				writemsg("Invalid mirror definition in SRC_URI:\n", noiselevel=-1)
2387 				writemsg("  %s\n" % (myuri), noiselevel=-1)
2390 				# Only fetch from specific mirrors is allowed.
2392 			if "primaryuri" in mysettings["RESTRICT"].split():
2393 				# Use the source site first.
2394 				if primaryuri_indexes.has_key(myfile):
2395 					primaryuri_indexes[myfile] += 1
2397 					primaryuri_indexes[myfile] = 0
2398 				filedict[myfile].insert(primaryuri_indexes[myfile], myuri)
2400 				filedict[myfile].append(myuri)
2407 	for var_name in ("FETCHCOMMAND", "RESUMECOMMAND"):
2408 		if not mysettings.get(var_name, None):
# Make sure DISTDIR (and the .locks subdir when distlocks is enabled) exist
# with portage-group ownership and the expected modes.
2416 	if "distlocks" in features:
2417 		distdir_dirs.append(".locks")
2420 		for x in distdir_dirs:
2421 			mydir = os.path.join(mysettings["DISTDIR"], x)
2422 			if portage_util.ensure_dirs(mydir, gid=portage_gid, mode=dirmode, mask=modemask):
2423 				writemsg("Adjusting permissions recursively: '%s'\n" % mydir,
2426 					raise # bail out on the first error that occurs during recursion
2427 				if not apply_recursive_permissions(mydir,
2428 					gid=portage_gid, dirmode=dirmode, dirmask=modemask,
2429 					filemode=filemode, filemask=modemask, onerror=onerror):
2430 					raise portage_exception.OperationNotPermitted(
2431 						"Failed to apply recursive permissions for the portage group.")
2432 	except portage_exception.PortageException, e:
2433 		if not os.path.isdir(mysettings["DISTDIR"]):
2434 			writemsg("!!! %s\n" % str(e), noiselevel=-1)
2435 			writemsg("!!! Directory Not Found: DISTDIR='%s'\n" % mysettings["DISTDIR"], noiselevel=-1)
2436 			writemsg("!!! Fetching will fail!\n", noiselevel=-1)
2439 		not fetch_to_ro and \
2440 		not os.access(mysettings["DISTDIR"], os.W_OK):
2441 		writemsg("!!! No write access to '%s'\n" % mysettings["DISTDIR"],
2445 	if can_fetch and use_locks and locks_in_subdir:
2446 			distlocks_subdir = os.path.join(mysettings["DISTDIR"], locks_in_subdir)
2447 			if not os.access(distlocks_subdir, os.W_OK):
2448 				writemsg("!!! No write access to write to %s.  Aborting.\n" % distlocks_subdir,
2451 			del distlocks_subdir
# Main per-file loop: lock, look for a local/filesystem copy, verify any
# existing file against its digests, then try each remote candidate in turn.
2452 	for myfile in filedict.keys():
2456 		1  partially downloaded
2457 		2  completely downloaded
2459 		myfile_path = os.path.join(mysettings["DISTDIR"], myfile)
2463 				writemsg_stdout("\n", noiselevel=-1)
2465 		if use_locks and can_fetch:
2467 			if "parallel-fetch" in features:
2468 				waiting_msg = ("Downloading '%s'... " + \
2469 					"see /var/log/emerge-fetch.log for details.") % myfile
2471 				file_lock = portage_locks.lockfile(
2472 					os.path.join(mysettings["DISTDIR"],
2473 					locks_in_subdir, myfile), wantnewlockfile=1,
2474 					waiting_msg=waiting_msg)
2476 				file_lock = portage_locks.lockfile(
2477 					myfile_path, wantnewlockfile=1,
2478 					waiting_msg=waiting_msg)
2481 			if fsmirrors and not os.path.exists(myfile_path):
2482 				for mydir in fsmirrors:
2483 					mirror_file = os.path.join(mydir, myfile)
2485 						shutil.copyfile(mirror_file, myfile_path)
2486 						writemsg(_("Local mirror has file:" + \
2487 							" %(file)s\n" % {"file":myfile}))
2489 					except (IOError, OSError), e:
2490 						if e.errno != errno.ENOENT:
2495 				mystat = os.stat(myfile_path)
2497 				if e.errno != errno.ENOENT:
2502 					apply_secpass_permissions(
2503 						myfile_path, gid=portage_gid, mode=0664, mask=02,
2505 				except portage_exception.PortageException, e:
2506 					if not os.access(myfile_path, os.R_OK):
2507 						writemsg("!!! Failed to adjust permissions:" + \
2508 							" %s\n" % str(e), noiselevel=-1)
2509 				if myfile not in mydigests:
2510 					# We don't have a digest, but the file exists.  We must
2511 					# assume that it is fully downloaded.
2514 					if mystat.st_size < mydigests[myfile]["size"] and \
2516 						fetched = 1 # Try to resume this download.
2518 						verified_ok, reason = portage_checksum.verify_all(
2519 							myfile_path, mydigests[myfile])
2521 							writemsg("!!! Previously fetched" + \
2522 								" file: '%s'\n" % myfile, noiselevel=-1)
2523 							writemsg("!!! Reason: %s\n" % reason[0],
2525 							writemsg(("!!! Got:      %s\n" + \
2526 								"!!! Expected: %s\n") % \
2527 								(reason[1], reason[2]), noiselevel=-1)
2528 							if reason[0] == "Insufficient data for checksum verification":
2530 							if can_fetch and not restrict_fetch:
2531 								writemsg("Refetching...\n\n",
2533 								os.unlink(myfile_path)
2535 							eout = output.EOutput()
2537 								mysettings.get("PORTAGE_QUIET", None) == "1"
2538 							for digest_name in mydigests[myfile]:
2540 									"%s %s ;-)" % (myfile, digest_name))
2542 							continue # fetch any remaining files
# Try each candidate location until one yields a file that verifies.
2544 		for loc in filedict[myfile]:
2546 				writemsg_stdout(loc+" ", noiselevel=-1)
2548 			# allow different fetchcommands per protocol
2549 			protocol = loc[0:loc.find("://")]
2550 			if mysettings.has_key("FETCHCOMMAND_"+protocol.upper()):
2551 				fetchcommand=mysettings["FETCHCOMMAND_"+protocol.upper()]
2553 				fetchcommand=mysettings["FETCHCOMMAND"]
2554 			if mysettings.has_key("RESUMECOMMAND_"+protocol.upper()):
2555 				resumecommand=mysettings["RESUMECOMMAND_"+protocol.upper()]
2557 				resumecommand=mysettings["RESUMECOMMAND"]
2562 						writemsg("!!! File %s isn't fetched but unable to get it.\n" % myfile,
2565 						writemsg("!!! File %s isn't fully fetched, but unable to complete it\n" % myfile,
2567 					for var_name in ("FETCHCOMMAND", "RESUMECOMMAND"):
2568 						if not mysettings.get(var_name, None):
2569 							writemsg(("!!! %s is unset. It should " + \
2570 							"have been defined in /etc/make.globals.\n") \
2571 							 % var_name, noiselevel=-1)
2577 					#we either need to resume or start the download
2578 					#you can't use "continue" when you're inside a "try" block
2581 						writemsg(">>> Resuming download...\n")
2582 						locfetch=resumecommand
2585 						locfetch=fetchcommand
# Redact embedded passwords (user:pass@host) before echoing the URL.
2586 					writemsg_stdout(">>> Downloading '%s'\n" % \
2587 						re.sub(r'//(.+):.+@(.+)/',r'//\1:*password*@\2/', loc))
2589 						"DISTDIR": mysettings["DISTDIR"],
# Split the fetch command shell-style, then expand ${DISTDIR}/${URI}/etc.
2593 					import shlex, StringIO
2594 					lexer = shlex.shlex(StringIO.StringIO(locfetch), posix=True)
2595 					lexer.whitespace_split = True
2596 					myfetch = [varexpand(x, mydict=variables) for x in lexer]
# When running as root with "userfetch", drop to portage:portage for the fetch.
2599 					if "userfetch" in mysettings.features and \
2600 						os.getuid() == 0 and portage_gid and portage_uid:
2601 						spawn_keywords.update({
2602 							"uid"    : portage_uid,
2603 							"gid"    : portage_gid,
2604 							"groups" : userpriv_groups,
2609 						if mysettings.selinux_enabled():
2610 							con = selinux.getcontext()
2611 							con = con.replace(mysettings["PORTAGE_T"], mysettings["PORTAGE_FETCH_T"])
2612 							selinux.setexec(con)
2613 							# bash is an allowed entrypoint, while most binaries are not
2614 							myfetch = ["bash", "-c", "exec \"$@\"", myfetch[0]] + myfetch
2616 						myret = portage_exec.spawn(myfetch,
2617 							env=mysettings.environ(), **spawn_keywords)
2619 						if mysettings.selinux_enabled():
2620 							selinux.setexec(None)
2624 						apply_secpass_permissions(myfile_path,
2625 							gid=portage_gid, mode=0664, mask=02)
2626 					except portage_exception.FileNotFound, e:
2628 					except portage_exception.PortageException, e:
2629 						if not os.access(myfile_path, os.R_OK):
2630 							writemsg("!!! Failed to adjust permissions:" + \
2631 								" %s\n" % str(e), noiselevel=-1)
2633 					if mydigests!=None and mydigests.has_key(myfile):
2635 							mystat = os.stat(myfile_path)
2637 							if e.errno != errno.ENOENT:
2642 							# no exception?  file exists. let digestcheck() report
2643 							# an appropriately for size or checksum errors
2644 							if (mystat[stat.ST_SIZE]<mydigests[myfile]["size"]):
2645 								# Fetch failed... Try the next one... Kill 404 files though.
2646 								if (mystat[stat.ST_SIZE]<100000) and (len(myfile)>4) and not ((myfile[-5:]==".html") or (myfile[-4:]==".htm")):
2647 									html404=re.compile("<title>.*(not found|404).*</title>",re.I|re.M)
2648 										if html404.search(open(mysettings["DISTDIR"]+"/"+myfile).read()):
2650 												os.unlink(mysettings["DISTDIR"]+"/"+myfile)
2651 												writemsg(">>> Deleting invalid distfile. (Improper 404 redirect from server.)\n")
2654 										except (IOError, OSError):
2662 								# File is the correct size--check the checksums for the fetched
2663 								# file NOW, for those users who don't have a stable/continuous
2664 								# net connection. This way we have a chance to try to download
2665 								# from another mirror...
2666 								verified_ok,reason = portage_checksum.verify_all(mysettings["DISTDIR"]+"/"+myfile, mydigests[myfile])
2669 									writemsg("!!! Fetched file: "+str(myfile)+" VERIFY FAILED!\n",
2671 									writemsg("!!! Reason: "+reason[0]+"\n",
2673 									writemsg("!!! Got:      %s\n!!! Expected: %s\n" % \
2674 										(reason[1], reason[2]), noiselevel=-1)
2675 									if reason[0] == "Insufficient data for checksum verification":
2677 									writemsg("Removing corrupt distfile...\n", noiselevel=-1)
2678 									os.unlink(mysettings["DISTDIR"]+"/"+myfile)
2681 									eout = output.EOutput()
2682 									eout.quiet = mysettings.get("PORTAGE_QUIET", None) == "1"
2683 									for x_key in mydigests[myfile].keys():
2684 										eout.ebegin("%s %s ;-)" % (myfile, x_key))
2692 					elif mydigests!=None:
2693 						writemsg("No digest file available and download failed.\n\n",
2696 		if use_locks and file_lock:
2697 			portage_locks.unlockfile(file_lock)
2700 				writemsg_stdout("\n", noiselevel=-1)
# RESTRICT=fetch and no usable file: explain and run pkg_nofetch for the user.
2703 				print "\n!!!", mysettings["CATEGORY"] + "/" + \
2704 					mysettings["PF"], "has fetch restriction turned on."
2705 				print "!!! This probably means that this " + \
2706 					"ebuild's files must be downloaded"
2707 				print "!!! manually.  See the comments in" + \
2708 					" the ebuild for more information.\n"
2709 				spawn(EBUILD_SH_BINARY + " nofetch", mysettings)
2712 			elif not filedict[myfile]:
2713 				writemsg("Warning: No mirrors available for file" + \
2714 					" '%s'\n" % (myfile), noiselevel=-1)
2716 				writemsg("!!! Couldn't download '%s'. Aborting.\n" % myfile,
# digestgen(): (re)generate the package Manifest, fetching any distfiles whose
# required hashes are missing. Compatibility wrapper around Manifest (see
# docstring). Increments _doebuild_manifest_exempt_depend while running so
# that nested doebuild "depend" calls skip manifest checks.
2721 def digestgen(myarchives, mysettings, overwrite=1, manifestonly=0, myportdb=None):
2723 	Generates a digest file if missing.  Assumes all files are available.
2724 	DEPRECATED: this now only is a compability wrapper for 
2725 	            portage_manifest.Manifest()
2726 	NOTE: manifestonly and overwrite are useless with manifest2 and
2727 	      are therefore ignored."""
2728 	if myportdb is None:
2729 		writemsg("Warning: myportdb not specified to digestgen\n")
2732 	global _doebuild_manifest_exempt_depend
2734 		_doebuild_manifest_exempt_depend += 1
# Map each distfile name back to the cpv(s) whose SRC_URI references it.
2736 		fetchlist_dict = FetchlistDict(mysettings["O"], mysettings, myportdb)
2737 		for cpv in fetchlist_dict:
2739 				for myfile in fetchlist_dict[cpv]:
2740 					distfiles_map.setdefault(myfile, []).append(cpv)
2741 			except portage_exception.InvalidDependString, e:
2742 				writemsg("!!! %s\n" % str(e), noiselevel=-1)
2743 				writemsg("!!! Invalid SRC_URI for '%s'.\n" % cpv, noiselevel=-1)
2746 		mytree = os.path.dirname(os.path.dirname(mysettings["O"]))
# manifest1 compatibility stays on unless the tree carries a
# "manifest1_obsolete" marker file.
2747 		manifest1_compat = not os.path.exists(
2748 			os.path.join(mytree, "manifest1_obsolete"))
2749 		mf = Manifest(mysettings["O"], mysettings["DISTDIR"],
2750 			fetchlist_dict=fetchlist_dict, manifest1_compat=manifest1_compat)
2751 		# Don't require all hashes since that can trigger excessive
2752 		# fetches when sufficient digests already exist.  To ease transition
2753 		# while Manifest 1 is being removed, only require hashes that will
2754 		# exist before and after the transition.
2755 		required_hash_types = set()
2756 		required_hash_types.add("size")
2757 		required_hash_types.add(portage_const.MANIFEST2_REQUIRED_HASH)
2758 		dist_hashes = mf.fhashdict.get("DIST", {})
# Collect distfiles whose recorded hashes are absent or incomplete.
2759 		missing_hashes = set()
2760 		for myfile in distfiles_map:
2761 			myhashes = dist_hashes.get(myfile)
2763 				missing_hashes.add(myfile)
2765 			if required_hash_types.difference(myhashes):
2766 				missing_hashes.add(myfile)
# Of those, find the ones not present in DISTDIR — they must be fetched.
2769 		for myfile in missing_hashes:
2771 				os.stat(os.path.join(mysettings["DISTDIR"], myfile))
2773 				if e.errno != errno.ENOENT:
2776 				missing_files.append(myfile)
2778 			mytree = os.path.realpath(os.path.dirname(
2779 				os.path.dirname(mysettings["O"])))
2780 			fetch_settings = config(clone=mysettings)
2781 			debug = mysettings.get("PORTAGE_DEBUG") == "1"
# Fetch each missing distfile using the SRC_URI of a cpv that provides it.
2782 			for myfile in missing_files:
2784 				for cpv in distfiles_map[myfile]:
2785 						myebuild = os.path.join(mysettings["O"],
2786 							catsplit(cpv)[1] + ".ebuild")
2787 						# for RESTRICT=fetch, mirror, etc...
2788 						doebuild_environment(myebuild, "fetch",
2789 							mysettings["ROOT"], fetch_settings,
2791 						alluris, aalist = myportdb.getfetchlist(
2792 							cpv, mytree=mytree, all=True,
2793 							mysettings=fetch_settings)
2794 						myuris = [uri for uri in alluris \
2795 							if os.path.basename(uri) == myfile]
2796 					fetch_settings["A"] = myfile # for use by pkg_nofetch()
2797 					if fetch(myuris, fetch_settings):
2801 					writemsg(("!!! File %s doesn't exist, can't update " + \
2802 						"Manifest\n") % myfile, noiselevel=-1)
2804 		writemsg_stdout(">>> Creating Manifest for %s\n" % mysettings["O"])
2806 			mf.create(requiredDistfiles=myarchives,
2807 				assumeDistHashesSometimes=True,
2808 				assumeDistHashesAlways=(
2809 				"assume-digests" in mysettings.features))
2810 		except portage_exception.FileNotFound, e:
2811 			writemsg(("!!! File %s doesn't exist, can't update " + \
2812 				"Manifest\n") % e, noiselevel=-1)
2814 		mf.write(sign=False)
# Without "assume-digests", warn about entries whose distfiles were not
# present locally and whose digests were therefore carried over ("assumed").
2815 		if "assume-digests" not in mysettings.features:
2816 			distlist = mf.fhashdict.get("DIST", {}).keys()
2819 			for filename in distlist:
2820 				if not os.path.exists(
2821 					os.path.join(mysettings["DISTDIR"], filename)):
2822 					auto_assumed.append(filename)
2824 				mytree = os.path.realpath(
2825 					os.path.dirname(os.path.dirname(mysettings["O"])))
2826 				cp = os.path.sep.join(mysettings["O"].split(os.path.sep)[-2:])
2827 				pkgs = myportdb.cp_list(cp, mytree=mytree)
2829 				writemsg_stdout("  digest.assumed" + output.colorize("WARN",
2830 					str(len(auto_assumed)).rjust(18)) + "\n")
2831 				for pkg_key in pkgs:
2832 					fetchlist = myportdb.getfetchlist(pkg_key,
2833 						mysettings=mysettings, all=True, mytree=mytree)[1]
2834 					pv = pkg_key.split("/")[1]
2835 					for filename in auto_assumed:
2836 						if filename in fetchlist:
2838 								"   digest-%s::%s\n" % (pv, filename))
# Always restore the manifest-exempt counter (paired with the += 1 above).
2841 		_doebuild_manifest_exempt_depend -= 1
# digestParseFile(): derive the package directory from a digest/Manifest path
# and return the parsed digest dictionary via Manifest.getDigests().
2843 def digestParseFile(myfilename, mysettings=None):
2844 	"""(filename) -- Parses a given file for entries matching:
2845 	<checksumkey> <checksum_hex_string> <filename> <filesize>
2846 	Ignores lines that don't start with a valid checksum identifier
2847 	and returns a dict with the filenames as keys and {checksumkey:checksum}
2849 	DEPRECATED: this function is now only a compability wrapper for
2850 	            portage_manifest.Manifest()."""
# A path like .../files/digest-<pv> maps to the package dir two levels up;
# a path ending in Manifest maps to its own directory.
2852 	mysplit = myfilename.split(os.sep)
2853 	if mysplit[-2] == "files" and mysplit[-1].startswith("digest-"):
2854 		pkgdir = os.sep + os.sep.join(mysplit[:-2]).strip(os.sep)
2855 	elif mysplit[-1] == "Manifest":
2856 		pkgdir = os.sep + os.sep.join(mysplit[:-1]).strip(os.sep)
# Fall back to a clone of the global settings when none are supplied.
2858 	if mysettings is None:
2860 		mysettings = config(clone=settings)
2862 	return Manifest(pkgdir, mysettings["DISTDIR"]).getDigests()
# digestcheck(): verify ebuild/aux/misc/dist checksums for the package in
# mysettings["O"] against its Manifest, then sanity-check that every ebuild
# and every file under files/ is actually listed in the Manifest.
2864 def digestcheck(myfiles, mysettings, strict=0, justmanifest=0):
2865 	"""Verifies checksums.  Assumes all files have been downloaded.
2866 	DEPRECATED: this is now only a compability wrapper for 
2867 	            portage_manifest.Manifest()."""
2870 	pkgdir = mysettings["O"]
2871 	manifest_path = os.path.join(pkgdir, "Manifest")
2872 	if not os.path.exists(manifest_path):
2873 		writemsg("!!! Manifest file not found: '%s'\n" % manifest_path,
2877 	mf = Manifest(pkgdir, mysettings["DISTDIR"])
2878 	eout = output.EOutput()
2879 	eout.quiet = mysettings.get("PORTAGE_QUIET", None) == "1"
2881 			eout.ebegin("checking ebuild checksums ;-)")
2882 			mf.checkTypeHashes("EBUILD")
2884 			eout.ebegin("checking auxfile checksums ;-)")
2885 			mf.checkTypeHashes("AUX")
2887 			eout.ebegin("checking miscfile checksums ;-)")
# MISC files (metadata.xml, ChangeLog, ...) may be absent; tolerate that.
2888 			mf.checkTypeHashes("MISC", ignoreMissingFiles=True)
2891 			eout.ebegin("checking %s ;-)" % f)
2892 			mf.checkFileHashes(mf.findFile(f), f)
2896 		writemsg("\n!!! Missing digest for %s\n" % str(e), noiselevel=-1)
2898 	except portage_exception.FileNotFound, e:
2900 		writemsg("\n!!! A file listed in the Manifest could not be found: %s\n" % str(e),
2903 	except portage_exception.DigestException, e:
# e.value carries (path, reason, got, expected) — reported field by field.
2905 		writemsg("\n!!! Digest verification failed:\n", noiselevel=-1)
2906 		writemsg("!!! %s\n" % e.value[0], noiselevel=-1)
2907 		writemsg("!!! Reason: %s\n" % e.value[1], noiselevel=-1)
2908 		writemsg("!!! Got: %s\n" % e.value[2], noiselevel=-1)
2909 		writemsg("!!! Expected: %s\n" % e.value[3], noiselevel=-1)
2911 	# Make sure that all of the ebuilds are actually listed in the Manifest.
2912 	for f in os.listdir(pkgdir):
2913 		if f.endswith(".ebuild") and not mf.hasFile("EBUILD", f):
2914 			writemsg("!!! A file is not listed in the Manifest: '%s'\n" % \
2915 				os.path.join(pkgdir, f), noiselevel=-1)
2917 	""" epatch will just grab all the patches out of a directory, so we have to
2918 	make sure there aren't any foreign files that it might grab."""
2919 	filesdir = os.path.join(pkgdir, "files")
2920 	for parent, dirs, files in os.walk(filesdir):
# Skip hidden entries and CVS bookkeeping directories.
2922 			if d.startswith(".") or d == "CVS":
2925 			if f.startswith("."):
2927 			f = os.path.join(parent, f)[len(filesdir) + 1:]
2928 			file_type = mf.findFile(f)
2929 			if file_type != "AUX" and not f.startswith("digest-"):
2930 				writemsg("!!! A file is not listed in the Manifest: '%s'\n" % \
2931 					os.path.join(filesdir, f), noiselevel=-1)
2935 # parse actionmap to spawn ebuild with the appropriate args
# spawnebuild(): run the ebuild phase `mydo`, first recursing into its "dep"
# prerequisite phase from actionmap (unless FEATURES=noauto suppresses that),
# then spawning the phase command. After a successful "install" phase it
# remaps portage-owned file ownership under D and runs the QA checks.
2936 def spawnebuild(mydo,actionmap,mysettings,debug,alwaysdep=0,logfile=None):
2937 	if alwaysdep or "noauto" not in mysettings.features:
2938 		# process dependency first
2939 		if "dep" in actionmap[mydo].keys():
2940 			retval=spawnebuild(actionmap[mydo]["dep"],actionmap,mysettings,debug,alwaysdep=alwaysdep,logfile=logfile)
2943 	kwargs = actionmap[mydo]["args"]
# EBUILD_PHASE is exported for the duration of the spawned phase only.
2944 	mysettings["EBUILD_PHASE"] = mydo
2945 	phase_retval = spawn(actionmap[mydo]["cmd"] % mydo, mysettings, debug=debug, logfile=logfile, **kwargs)
2946 	mysettings["EBUILD_PHASE"] = ""
2948 	if not kwargs["droppriv"] and secpass >= 2:
2949 		""" Privileged phases may have left files that need to be made
2950 		writable to a less privileged user."""
2951 		apply_recursive_permissions(mysettings["T"],
2952 			uid=portage_uid, gid=portage_gid, dirmode=070, dirmask=0,
2953 			filemode=060, filemask=0)
2955 	if phase_retval == os.EX_OK:
2956 		if mydo == "install":
2957 			# User and group bits that match the "portage" user or group are
2958 			# automatically mapped to PORTAGE_INST_UID and PORTAGE_INST_GID if
2959 			# necessary.  The chown system call may clear S_ISUID and S_ISGID
2960 			# bits, so those bits are restored if necessary.
2961 			inst_uid = int(mysettings["PORTAGE_INST_UID"])
2962 			inst_gid = int(mysettings["PORTAGE_INST_GID"])
2963 			for parent, dirs, files in os.walk(mysettings["D"]):
2964 				for fname in chain(dirs, files):
2965 					fpath = os.path.join(parent, fname)
2966 					mystat = os.lstat(fpath)
# Fast path: nothing to remap when neither uid nor gid is portage's.
2967 					if mystat.st_uid != portage_uid and \
2968 						mystat.st_gid != portage_gid:
2972 					if mystat.st_uid == portage_uid:
2974 					if mystat.st_gid == portage_gid:
2976 					apply_secpass_permissions(fpath, uid=myuid, gid=mygid,
2977 						mode=mystat.st_mode, stat_cached=mystat,
# Post-install QA pass (misc-functions.sh): install_qa_check + HTML symlinks.
2979 			mycommand = " ".join([MISC_SH_BINARY, "install_qa_check", "install_symlink_html_docs"])
2980 			qa_retval = spawn(mycommand, mysettings, debug=debug, logfile=logfile, **kwargs)
2982 				writemsg("!!! install_qa_check failed; exiting.\n",
def eapi_is_supported(eapi):
	"""Return True when the given EAPI value, after stripping surrounding
	whitespace, matches the single EAPI this portage version implements
	(portage_const.EAPI). Both sides are coerced to str before comparing."""
	candidate = str(eapi).strip()
	supported = str(portage_const.EAPI).strip()
	return candidate == supported
# doebuild_environment(): populate mysettings with all per-package variables
# (P, PN, PV, PR, PVR, PF, CATEGORY, paths like WORKDIR/D/T, PATH, KV, ...)
# needed before running ebuild phase `mydo` for `myebuild` under `myroot`.
# Mutates mysettings in place; raises IncorrectParameter for an invalid
# ebuild path and UnsupportedAPIException for an unsupported EAPI.
2991 def doebuild_environment(myebuild, mydo, myroot, mysettings, debug, use_cache, mydbapi):
2993 	ebuild_path = os.path.abspath(myebuild)
2994 	pkg_dir     = os.path.dirname(ebuild_path)
# Prefer the CATEGORY recorded in the pkg config dict; otherwise infer it
# from the directory layout (category/pkg/pkg-ver.ebuild).
2996 	if mysettings.configdict["pkg"].has_key("CATEGORY"):
2997 		cat = mysettings.configdict["pkg"]["CATEGORY"]
2999 		cat = os.path.basename(normalize_path(os.path.join(pkg_dir, "..")))
3000 	mypv = os.path.basename(ebuild_path)[:-7]
3001 	mycpv = cat+"/"+mypv
3002 	mysplit=pkgsplit(mypv,silent=0)
3004 		raise portage_exception.IncorrectParameter(
3005 			"Invalid ebuild path: '%s'" % myebuild)
3007 	if mydo != "depend":
3008 		"""For performance reasons, setcpv only triggers reset when it
3009 		detects a package-specific change in config.  For the ebuild
3010 		environment, a reset call is forced in order to ensure that the
3011 		latest env.d variables are used."""
3012 		mysettings.reset(use_cache=use_cache)
3013 		mysettings.setcpv(mycpv, use_cache=use_cache, mydb=mydbapi)
3015 	mysettings["EBUILD_PHASE"] = mydo
3017 	mysettings["PORTAGE_MASTER_PID"] = str(os.getpid())
3019 	# We are disabling user-specific bashrc files.
3020 	mysettings["BASH_ENV"] = INVALID_ENV_FILE
3022 	if debug: # Otherwise it overrides emerge's settings.
3023 		# We have no other way to set debug... debug can't be passed in
3024 		# due to how it's coded... Don't overwrite this so we can use it.
3025 		mysettings["PORTAGE_DEBUG"] = "1"
3027 	mysettings["ROOT"]     = myroot
3028 	mysettings["STARTDIR"] = getcwd()
3030 	mysettings["EBUILD"]   = ebuild_path
3031 	mysettings["O"]        = pkg_dir
3032 	mysettings.configdict["pkg"]["CATEGORY"] = cat
3033 	mysettings["FILESDIR"] = pkg_dir+"/files"
3034 	mysettings["PF"]       = mypv
3036 	mysettings["ECLASSDIR"]   = mysettings["PORTDIR"]+"/eclass"
# SANDBOX_LOG uses a flattened cpv ("cat_-_pkg") as the log name.
3037 	mysettings["SANDBOX_LOG"] = mycpv.replace("/", "_-_")
3039 	mysettings["PROFILE_PATHS"] = "\n".join(mysettings.profiles)+"\n"+CUSTOM_PROFILE_PATH
3040 	mysettings["P"]  = mysplit[0]+"-"+mysplit[1]
3041 	mysettings["PN"] = mysplit[0]
3042 	mysettings["PV"] = mysplit[1]
3043 	mysettings["PR"] = mysplit[2]
3045 	if portage_util.noiselimit < 0:
3046 		mysettings["PORTAGE_QUIET"] = "1"
3048 	if mydo != "depend":
3049 		eapi, mysettings["INHERITED"], mysettings["SLOT"], mysettings["RESTRICT"] = \
3050 			mydbapi.aux_get(mycpv, ["EAPI", "INHERITED", "SLOT", "RESTRICT"])
3051 		if not eapi_is_supported(eapi):
3052 			# can't do anything with this.
3053 			raise portage_exception.UnsupportedAPIException(mycpv, eapi)
# PORTAGE_RESTRICT is RESTRICT with USE-conditionals resolved and flattened.
3054 		mysettings["PORTAGE_RESTRICT"] = " ".join(flatten(
3055 			portage_dep.use_reduce(portage_dep.paren_reduce(
3056 			mysettings["RESTRICT"]), uselist=mysettings["USE"].split())))
# PVR omits the revision suffix for -r0 packages.
3058 	if mysplit[2] == "r0":
3059 		mysettings["PVR"]=mysplit[1]
3061 		mysettings["PVR"]=mysplit[1]+"-"+mysplit[2]
3063 	if mysettings.has_key("PATH"):
3064 		mysplit=mysettings["PATH"].split(":")
3067 	if PORTAGE_BIN_PATH not in mysplit:
3068 		mysettings["PATH"]=PORTAGE_BIN_PATH+":"+mysettings["PATH"]
3070 	# Sandbox needs cannonical paths.
3071 	mysettings["PORTAGE_TMPDIR"] = os.path.realpath(
3072 		mysettings["PORTAGE_TMPDIR"])
3073 	mysettings["BUILD_PREFIX"] = mysettings["PORTAGE_TMPDIR"]+"/portage"
3074 	mysettings["PKG_TMPDIR"]   = mysettings["PORTAGE_TMPDIR"]+"/binpkgs"
3076 	# Package {pre,post}inst and {pre,post}rm may overlap, so they must have separate
3077 	# locations in order to prevent interference.
3078 	if mydo in ("unmerge", "prerm", "postrm", "cleanrm"):
3079 		mysettings["PORTAGE_BUILDDIR"] = os.path.join(
3080 			mysettings["PKG_TMPDIR"],
3081 			mysettings["CATEGORY"], mysettings["PF"])
3083 		mysettings["PORTAGE_BUILDDIR"] = os.path.join(
3084 			mysettings["BUILD_PREFIX"],
3085 			mysettings["CATEGORY"], mysettings["PF"])
3087 	mysettings["HOME"] = os.path.join(mysettings["PORTAGE_BUILDDIR"], "homedir")
3088 	mysettings["WORKDIR"] = os.path.join(mysettings["PORTAGE_BUILDDIR"], "work")
3089 	mysettings["D"] = os.path.join(mysettings["PORTAGE_BUILDDIR"], "image") + os.sep
3090 	mysettings["T"] = os.path.join(mysettings["PORTAGE_BUILDDIR"], "temp")
3092 	mysettings["PORTAGE_BASHRC"] = os.path.join(
3093 		mysettings["PORTAGE_CONFIGROOT"], EBUILD_SH_ENV_FILE.lstrip(os.path.sep))
3095 	#set up KV variable -- DEP SPEEDUP :: Don't waste time. Keep var persistent.
3096 	if (mydo!="depend") or not mysettings.has_key("KV"):
3097 		mykv,err1=ExtractKernelVersion(os.path.join(myroot, "usr/src/linux"))
3099 			# Regular source tree
3100 			mysettings["KV"]=mykv
3104 	# Allow color.map to control colors associated with einfo, ewarn, etc...
3106 	for c in ("GOOD", "WARN", "BAD", "HILITE", "BRACKET"):
3107 		mycolors.append("%s=$'%s'" % (c, output.codes[c]))
3108 	mysettings["PORTAGE_COLORMAP"] = "\n".join(mycolors)
3110 def prepare_build_dirs(myroot, mysettings, cleanup):
3112 clean_dirs = [mysettings["HOME"]]
3114 # We enable cleanup when we want to make sure old cruft (such as the old
3115 # environment) doesn't interfere with the current phase.
3117 clean_dirs.append(mysettings["T"])
3119 for clean_dir in clean_dirs:
3121 shutil.rmtree(clean_dir)
3123 if errno.ENOENT == oe.errno:
3125 elif errno.EPERM == oe.errno:
3126 writemsg("%s\n" % oe, noiselevel=-1)
3127 writemsg("Operation Not Permitted: rmtree('%s')\n" % \
3128 clean_dir, noiselevel=-1)
3133 def makedirs(dir_path):
3135 os.makedirs(dir_path)
3137 if errno.EEXIST == oe.errno:
3139 elif errno.EPERM == oe.errno:
3140 writemsg("%s\n" % oe, noiselevel=-1)
3141 writemsg("Operation Not Permitted: makedirs('%s')\n" % \
3142 dir_path, noiselevel=-1)
3148 mysettings["PKG_LOGDIR"] = os.path.join(mysettings["T"], "logging")
3150 mydirs = [os.path.dirname(mysettings["PORTAGE_BUILDDIR"])]
3151 mydirs.append(os.path.dirname(mydirs[-1]))
3154 for mydir in mydirs:
3155 portage_util.ensure_dirs(mydir)
3156 portage_util.apply_secpass_permissions(mydir,
3157 gid=portage_gid, uid=portage_uid, mode=070, mask=0)
3158 for dir_key in ("PORTAGE_BUILDDIR", "HOME", "PKG_LOGDIR", "T"):
3159 """These directories don't necessarily need to be group writable.
3160 However, the setup phase is commonly run as a privileged user prior
3161 to the other phases being run by an unprivileged user. Currently,
3162 we use the portage group to ensure that the unprivleged user still
3163 has write access to these directories in any case."""
3164 portage_util.ensure_dirs(mysettings[dir_key], mode=0775)
3165 portage_util.apply_secpass_permissions(mysettings[dir_key],
3166 uid=portage_uid, gid=portage_gid)
3167 except portage_exception.PermissionDenied, e:
3168 writemsg("Permission Denied: %s\n" % str(e), noiselevel=-1)
3170 except portage_exception.OperationNotPermitted, e:
3171 writemsg("Operation Not Permitted: %s\n" % str(e), noiselevel=-1)
3173 except portage_exception.FileNotFound, e:
3174 writemsg("File Not Found: '%s'\n" % str(e), noiselevel=-1)
3179 "basedir_var":"CCACHE_DIR",
3180 "default_dir":os.path.join(mysettings["PORTAGE_TMPDIR"], "ccache"),
3181 "always_recurse":False},
3183 "basedir_var":"CONFCACHE_DIR",
3184 "default_dir":os.path.join(mysettings["PORTAGE_TMPDIR"], "confcache"),
3185 "always_recurse":False},
3187 "basedir_var":"DISTCC_DIR",
3188 "default_dir":os.path.join(mysettings["BUILD_PREFIX"], ".distcc"),
3189 "subdirs":("lock", "state"),
3190 "always_recurse":True}
3195 for myfeature, kwargs in features_dirs.iteritems():
3196 if myfeature in mysettings.features:
3197 basedir = mysettings[kwargs["basedir_var"]]
3199 basedir = kwargs["default_dir"]
3200 mysettings[kwargs["basedir_var"]] = basedir
3202 mydirs = [mysettings[kwargs["basedir_var"]]]
3203 if "subdirs" in kwargs:
3204 for subdir in kwargs["subdirs"]:
3205 mydirs.append(os.path.join(basedir, subdir))
3206 for mydir in mydirs:
3207 modified = portage_util.ensure_dirs(mydir)
3208 # Generally, we only want to apply permissions for
3209 # initial creation. Otherwise, we don't know exactly what
3210 # permissions the user wants, so should leave them as-is.
3211 if modified or kwargs["always_recurse"]:
3213 writemsg("Adjusting permissions recursively: '%s'\n" % mydir,
3216 raise # The feature is disabled if a single error
3217 # occurs during permissions adjustment.
3218 if not apply_recursive_permissions(mydir,
3219 gid=portage_gid, dirmode=dirmode, dirmask=modemask,
3220 filemode=filemode, filemask=modemask, onerror=onerror):
3221 raise portage_exception.OperationNotPermitted(
3222 "Failed to apply recursive permissions for the portage group.")
3223 except portage_exception.PortageException, e:
3224 mysettings.features.remove(myfeature)
3225 mysettings["FEATURES"] = " ".join(mysettings.features)
3226 writemsg("!!! %s\n" % str(e), noiselevel=-1)
3227 writemsg("!!! Failed resetting perms on %s='%s'\n" % \
3228 (kwargs["basedir_var"], basedir), noiselevel=-1)
3229 writemsg("!!! Disabled FEATURES='%s'\n" % myfeature,
3235 mode = mysettings["PORTAGE_WORKDIR_MODE"]
3237 parsed_mode = int(mode, 8)
3242 if parsed_mode & 07777 != parsed_mode:
3243 raise ValueError("Invalid file mode: %s" % mode)
3245 workdir_mode = parsed_mode
3247 writemsg("!!! PORTAGE_WORKDIR_MODE is unset, using %s.\n" % oct(workdir_mode))
3248 except ValueError, e:
3250 writemsg("%s\n" % e)
3251 writemsg("!!! Unable to parse PORTAGE_WORKDIR_MODE='%s', using %s.\n" % \
3252 (mysettings["PORTAGE_WORKDIR_MODE"], oct(workdir_mode)))
3253 mysettings["PORTAGE_WORKDIR_MODE"] = oct(workdir_mode)
3255 apply_secpass_permissions(mysettings["WORKDIR"],
3256 uid=portage_uid, gid=portage_gid, mode=workdir_mode)
3257 except portage_exception.FileNotFound:
3258 pass # ebuild.sh will create it
3260 if mysettings.get("PORT_LOGDIR", "") == "":
3261 while "PORT_LOGDIR" in mysettings:
3262 del mysettings["PORT_LOGDIR"]
3263 if "PORT_LOGDIR" in mysettings:
3265 portage_util.ensure_dirs(mysettings["PORT_LOGDIR"],
3266 uid=portage_uid, gid=portage_gid, mode=02770)
3267 except portage_exception.PortageException, e:
3268 writemsg("!!! %s\n" % str(e), noiselevel=-1)
3269 writemsg("!!! Permission issues with PORT_LOGDIR='%s'\n" % \
3270 mysettings["PORT_LOGDIR"], noiselevel=-1)
3271 writemsg("!!! Disabling logging.\n", noiselevel=-1)
3272 while "PORT_LOGDIR" in mysettings:
3273 del mysettings["PORT_LOGDIR"]
3274 if "PORT_LOGDIR" in mysettings:
3275 logid_path = os.path.join(mysettings["PORTAGE_BUILDDIR"], ".logid")
3276 if not os.path.exists(logid_path):
3277 f = open(logid_path, "w")
3280 logid_time = time.strftime("%Y%m%d-%H%M%S",
3281 time.gmtime(os.stat(logid_path).st_mtime))
3282 mysettings["PORTAGE_LOG_FILE"] = os.path.join(
3283 mysettings["PORT_LOGDIR"], "%s:%s:%s.log" % \
3284 (mysettings["CATEGORY"], mysettings["PF"], logid_time))
3285 del logid_path, logid_time
3287 # When sesandbox is enabled, only log if PORT_LOGDIR is explicitly
3288 # enabled since it is possible that local SELinux security policies
3289 	# do not allow output to be piped out of the sesandbox domain.
3290 if not (mysettings.selinux_enabled() and \
3291 "sesandbox" in mysettings.features):
3292 mysettings["PORTAGE_LOG_FILE"] = os.path.join(
3293 mysettings["T"], "build.log")
# Non-zero while the depend phase is temporarily exempted from Manifest
# verification (doebuild() increments this for the digest/manifest/help
# phases, since aux_get calls may trigger cache generation).
_doebuild_manifest_exempt_depend = 0
# Path of the Manifest most recently verified by doebuild(); used to avoid
# re-checking the same Manifest several times in a row during a regen.
_doebuild_manifest_checked = None
def doebuild(myebuild, mydo, myroot, mysettings, debug=0, listonly=0,
	fetchonly=0, cleanup=0, dbkey=None, use_cache=1, fetchall=0, tree=None,
	mydbapi=None, vartree=None, prev_mtimes=None):
	"""
	Wrapper function that invokes specific ebuild phases through the spawning
	of ebuild.sh.

	@param myebuild: name of the ebuild to invoke the phase on (CPV)
	@type myebuild: String
	@param mydo: Phase to run
	@param myroot: $ROOT (usually '/', see man make.conf)
	@type myroot: String
	@param mysettings: Portage Configuration
	@type mysettings: instance of portage.config
	@param debug: Turns on various debug information (eg, debug for spawn)
	@type debug: Boolean
	@param listonly: Used to wrap fetch(); passed such that fetch only lists files required.
	@type listonly: Boolean
	@param fetchonly: Used to wrap fetch(); passed such that files are only fetched (no other actions)
	@type fetchonly: Boolean
	@param cleanup: Passed to prepare_build_dirs (TODO: what does it do?)
	@type cleanup: Boolean
	@param dbkey: A dict (usually keys and values from the depend phase, such as KEYWORDS, USE, etc..)
	@type dbkey: Dict or String
	@param use_cache: Enables the cache
	@type use_cache: Boolean
	@param fetchall: Used to wrap fetch(), fetches all URI's (even ones invalid due to USE conditionals)
	@type fetchall: Boolean
	@param tree: Which tree to use ('vartree','porttree','bintree', etc..), defaults to 'porttree'
	@param mydbapi: a dbapi instance to pass to various functions; this should be a portdbapi instance.
	@type mydbapi: portdbapi instance
	@param vartree: An instance of vartree; used for aux_get calls, defaults to db[myroot]['vartree']
	@type vartree: vartree instance
	@param prev_mtimes: A dict of { filename:mtime } keys used by merge() to do config_protection
	@type prev_mtimes: dictionary

	Most errors have an accompanying error message.

	listonly and fetchonly are only really necessary for operations involving 'fetch'
	prev_mtimes are only necessary for merge operations.
	Other variables may not be strictly required, many have defaults that are set inside of doebuild.
	"""
	# Warn callers that rely on the implicit default tree.
	writemsg("Warning: tree not specified to doebuild\n")

	# chunked out deps for each phase, so that ebuild binary can use it
	# to collapse targets down.
	"unpack": ["setup"],
	"compile":["unpack"],
	"test":   ["compile"],
	"package":["install"],

	mydbapi = db[myroot][tree].dbapi

	if vartree is None and mydo in ("merge", "qmerge", "unmerge"):
		vartree = db[myroot]["vartree"]

	features = mysettings.features

	validcommands = ["help","clean","prerm","postrm","cleanrm","preinst","postinst",
	                "config","setup","depend","fetch","digest",
	                "unpack","compile","test","install","rpm","qmerge","merge",
	                "package","unmerge", "manifest"]

	if mydo not in validcommands:
		validcommands.sort()
		writemsg("!!! doebuild: '%s' is not one of the following valid commands:" % mydo,
		for vcount in range(len(validcommands)):
			writemsg("\n!!! ", noiselevel=-1)
			writemsg(validcommands[vcount].ljust(11), noiselevel=-1)
		writemsg("\n", noiselevel=-1)

	if not os.path.exists(myebuild):
		writemsg("!!! doebuild: %s not found for %s\n" % (myebuild, mydo),

	global _doebuild_manifest_exempt_depend

	if "strict" in features and \
		"digest" not in features and \
		tree == "porttree" and \
		mydo not in ("digest", "manifest", "help") and \
		not _doebuild_manifest_exempt_depend:
		# Always verify the ebuild checksums before executing it.
		pkgdir = os.path.dirname(myebuild)
		manifest_path = os.path.join(pkgdir, "Manifest")
		global _doebuild_manifest_checked
		# Avoid checking the same Manifest several times in a row during a
		# regen with an empty cache.
		if _doebuild_manifest_checked != manifest_path:
			if not os.path.exists(manifest_path):
				writemsg("!!! Manifest file not found: '%s'\n" % manifest_path,
			mf = Manifest(pkgdir, mysettings["DISTDIR"])
				mf.checkTypeHashes("EBUILD")
			except portage_exception.FileNotFound, e:
				writemsg("!!! A file listed in the Manifest " + \
					"could not be found: %s\n" % str(e), noiselevel=-1)
			except portage_exception.DigestException, e:
				writemsg("!!! Digest verification failed:\n", noiselevel=-1)
				writemsg("!!! %s\n" % e.value[0], noiselevel=-1)
				writemsg("!!! Reason: %s\n" % e.value[1], noiselevel=-1)
				writemsg("!!! Got: %s\n" % e.value[2], noiselevel=-1)
				writemsg("!!! Expected: %s\n" % e.value[3], noiselevel=-1)
			# Make sure that all of the ebuilds are actually listed in the
			# Manifest (not just the one being executed).
			for f in os.listdir(pkgdir):
				if f.endswith(".ebuild") and not mf.hasFile("EBUILD", f):
					writemsg("!!! A file is not listed in the " + \
						"Manifest: '%s'\n" % os.path.join(pkgdir, f),
			# Remember this Manifest so consecutive calls can skip the check.
			_doebuild_manifest_checked = manifest_path

	builddir_lock = None
	if mydo in ("digest", "manifest", "help"):
		# Temporarily exempt the depend phase from manifest checks, in case
		# aux_get calls trigger cache generation.
		_doebuild_manifest_exempt_depend += 1

	doebuild_environment(myebuild, mydo, myroot, mysettings, debug,

	# get possible slot information from the deps file
	if mydo == "depend":
		writemsg("!!! DEBUG: dbkey: %s\n" % str(dbkey), 2)
		if isinstance(dbkey, dict):
			mysettings["dbkey"] = ""
			# Run ebuild.sh with fd 9 wired to the write end of a pipe so
			# the metadata can be read back directly from the child.
			fd_pipes = {0:0, 1:1, 2:2, 9:pw}
			mypids = spawn(EBUILD_SH_BINARY + " depend", mysettings,
				fd_pipes=fd_pipes, returnpid=True)
			os.close(pw) # belongs exclusively to the child process now
				mybytes.append(os.read(pr, maxbytes))
			mybytes = "".join(mybytes)
			# One metadata value per line, in auxdbkeys order.
			for k, v in izip(auxdbkeys, mybytes.splitlines()):
			retval = os.waitpid(mypids[0], 0)[1]
			portage_exec.spawned_pids.remove(mypids[0])
			# If it got a signal, return the signal that was sent, but
			# shift in order to distinguish it from a return value. (just
			# like portage_exec.spawn() would do).
				return (retval & 0xff) << 8
			# Otherwise, return its exit code.
			mysettings["dbkey"] = dbkey
			mysettings["dbkey"] = \
				os.path.join(mysettings.depcachedir, "aux_db_key_temp")
		return spawn(EBUILD_SH_BINARY + " depend", mysettings)

	# Validate dependency metadata here to ensure that ebuilds with invalid
	# data are never installed (even via the ebuild command).
	invalid_dep_exempt_phases = \
		set(["clean", "cleanrm", "help", "prerm", "postrm"])
	mycpv = mysettings["CATEGORY"] + "/" + mysettings["PF"]
	dep_keys = ["DEPEND", "RDEPEND", "PDEPEND"]
	misc_keys = ["LICENSE", "PROVIDE", "RESTRICT", "SRC_URI"]
	all_keys = dep_keys + misc_keys
	metadata = dict(izip(all_keys, mydbapi.aux_get(mycpv, all_keys)))
	# Minimal stand-in for a tree object: dep_check only needs .dbapi here.
	class FakeTree(object):
		def __init__(self, mydb):
	dep_check_trees = {myroot:{}}
	dep_check_trees[myroot]["porttree"] = \
		FakeTree(fakedbapi(settings=mysettings))
	for dep_type in dep_keys:
		mycheck = dep_check(metadata[dep_type], None, mysettings,
			myuse="all", myroot=myroot, trees=dep_check_trees)
			writemsg("%s: %s\n%s\n" % (
				dep_type, metadata[dep_type], mycheck[1]), noiselevel=-1)
			if mydo not in invalid_dep_exempt_phases:
	del dep_type, mycheck
		portage_dep.use_reduce(
			portage_dep.paren_reduce(metadata[k]), matchall=True)
		except portage_exception.InvalidDependString, e:
			writemsg("%s: %s\n%s\n" % (
				k, metadata[k], str(e)), noiselevel=-1)
			if mydo not in invalid_dep_exempt_phases:
	del mycpv, dep_keys, metadata, misc_keys, FakeTree, dep_check_trees

	if "PORTAGE_TMPDIR" not in mysettings or \
		not os.path.isdir(mysettings["PORTAGE_TMPDIR"]):
		writemsg("The directory specified in your " + \
			"PORTAGE_TMPDIR variable, '%s',\n" % \
			mysettings.get("PORTAGE_TMPDIR", ""), noiselevel=-1)
		writemsg("does not exist.  Please create this directory or " + \
			"correct your PORTAGE_TMPDIR setting.\n", noiselevel=-1)

	# Build directory creation isn't required for any of these.
	if mydo not in ("digest", "fetch", "help", "manifest"):
		mystatus = prepare_build_dirs(myroot, mysettings, cleanup)

	# PORTAGE_LOG_FILE is set above by the prepare_build_dirs() call.
	logfile = mysettings.get("PORTAGE_LOG_FILE", None)
	if mydo == "unmerge":
		return unmerge(mysettings["CATEGORY"],
			mysettings["PF"], myroot, mysettings, vartree=vartree)

	# if any of these are being called, handle them -- running them out of
	# the sandbox -- and stop now.
	if mydo in ["clean","cleanrm"]:
		return spawn(EBUILD_SH_BINARY + " clean", mysettings,
			debug=debug, free=1, logfile=None)
	elif mydo == "help":
		return spawn(EBUILD_SH_BINARY + " " + mydo, mysettings,
			debug=debug, free=1, logfile=logfile)
	elif mydo == "setup":
		infodir = os.path.join(
			mysettings["PORTAGE_BUILDDIR"], "build-info")
		if os.path.isdir(infodir):
			"""Load USE flags for setup phase of a binary package.
			Ideally, the environment.bz2 would be used instead."""
			mysettings.load_infodir(infodir)
		retval = spawn(EBUILD_SH_BINARY + " " + mydo, mysettings,
			debug=debug, free=1, logfile=logfile)
			""" Privileged phases may have left files that need to be made
			writable to a less privileged user."""
			apply_recursive_permissions(mysettings["T"],
				uid=portage_uid, gid=portage_gid, dirmode=070, dirmask=0,
				filemode=060, filemask=0)
	elif mydo == "preinst":
		mysettings["IMAGE"] = mysettings["D"]
		phase_retval = spawn(" ".join((EBUILD_SH_BINARY, mydo)),
			mysettings, debug=debug, free=1, logfile=logfile)
		if phase_retval == os.EX_OK:
			# Post phase logic and tasks that have been factored out of
			# ebuild.sh into misc-functions.sh.
			myargs = [MISC_SH_BINARY, "preinst_bsdflags", "preinst_mask",
				"preinst_sfperms", "preinst_selinux_labels",
				"preinst_suid_scan"]
			mysettings["EBUILD_PHASE"] = ""
			phase_retval = spawn(" ".join(myargs),
				mysettings, debug=debug, free=1, logfile=logfile)
			if phase_retval != os.EX_OK:
				writemsg("!!! post preinst failed; exiting.\n",
		del mysettings["IMAGE"]
	elif mydo == "postinst":
		mysettings.load_infodir(mysettings["O"])
		phase_retval = spawn(" ".join((EBUILD_SH_BINARY, mydo)),
			mysettings, debug=debug, free=1, logfile=logfile)
		if phase_retval == os.EX_OK:
			# Post phase logic and tasks that have been factored out of
			# ebuild.sh into misc-functions.sh.
			myargs = [MISC_SH_BINARY, "postinst_bsdflags"]
			mysettings["EBUILD_PHASE"] = ""
			phase_retval = spawn(" ".join(myargs),
				mysettings, debug=debug, free=1, logfile=logfile)
			if phase_retval != os.EX_OK:
				writemsg("!!! post postinst failed; exiting.\n",
	elif mydo in ["prerm","postrm","config"]:
		mysettings.load_infodir(mysettings["O"])
		return spawn(EBUILD_SH_BINARY + " " + mydo,
			mysettings, debug=debug, free=1, logfile=logfile)

	mycpv = "/".join((mysettings["CATEGORY"], mysettings["PF"]))

	# Make sure we get the correct tree in case there are overlays.
	mytree = os.path.realpath(
		os.path.dirname(os.path.dirname(mysettings["O"])))
	newuris, alist = mydbapi.getfetchlist(
		mycpv, mytree=mytree, mysettings=mysettings)
	alluris, aalist = mydbapi.getfetchlist(
		mycpv, mytree=mytree, all=True, mysettings=mysettings)
	except portage_exception.InvalidDependString, e:
		writemsg("!!! %s\n" % str(e), noiselevel=-1)
		writemsg("!!! Invalid SRC_URI for '%s'.\n" % mycpv, noiselevel=-1)
	mysettings["A"] = " ".join(alist)
	mysettings["AA"] = " ".join(aalist)
	if ("mirror" in features) or fetchall:
		fetchme = alluris[:]
	elif mydo == "digest":
		fetchme = alluris[:]
		# Skip files that we already have digests for.
		mf = Manifest(mysettings["O"], mysettings["DISTDIR"])
		mydigests = mf.getTypeDigests("DIST")
		required_hash_types = set()
		required_hash_types.add("size")
		required_hash_types.add(portage_const.MANIFEST2_REQUIRED_HASH)
		for filename, hashes in mydigests.iteritems():
			# Only skip a file when every required hash type is present.
			if not required_hash_types.difference(hashes):
				checkme = [i for i in checkme if i != filename]
				fetchme = [i for i in fetchme \
					if os.path.basename(i) != filename]
			del filename, hashes
		fetchme = newuris[:]

	# Only try and fetch the files if we are going to need them ...
	# otherwise, if user has FEATURES=noauto and they run `ebuild clean
	# unpack compile install`, we will try and fetch 4 times :/
	need_distfiles = (mydo in ("fetch", "unpack") or \
		mydo not in ("digest", "manifest") and "noauto" not in features)
	if need_distfiles and not fetch(
		fetchme, mysettings, listonly=listonly, fetchonly=fetchonly):

	if mydo == "fetch" and listonly:

	if mydo == "manifest":
		return not digestgen(aalist, mysettings, overwrite=1,
			manifestonly=1, myportdb=mydbapi)
	elif mydo == "digest":
		return not digestgen(aalist, mysettings, overwrite=1,
	elif "digest" in mysettings.features:
		digestgen(aalist, mysettings, overwrite=0, myportdb=mydbapi)
	except portage_exception.PermissionDenied, e:
		writemsg("!!! %s\n" % str(e), noiselevel=-1)
		if mydo in ("digest", "manifest"):

	# See above comment about fetching only when needed
	if not digestcheck(checkme, mysettings, ("strict" in features),
		(mydo not in ["digest","fetch","unpack"] and \
		mysettings.get("PORTAGE_CALLER", None) == "ebuild" and \
		"noauto" in features)):

	# remove PORTAGE_ACTUAL_DISTDIR once cvs/svn is supported via SRC_URI
	if (mydo != "setup" and "noauto" not in features) or mydo == "unpack":
		orig_distdir = mysettings["DISTDIR"]
		mysettings["PORTAGE_ACTUAL_DISTDIR"] = orig_distdir
		edpath = mysettings["DISTDIR"] = \
			os.path.join(mysettings["PORTAGE_BUILDDIR"], "distdir")
		if os.path.exists(edpath):
			if os.path.isdir(edpath) and not os.path.islink(edpath):
				shutil.rmtree(edpath)
				print "!!! Failed reseting ebuild distdir path, " + edpath
		apply_secpass_permissions(edpath, uid=portage_uid, mode=0755)
			# Populate the per-build distdir with symlinks into the real one.
			os.symlink(os.path.join(orig_distdir, file),
				os.path.join(edpath, file))
			print "!!! Failed symlinking in '%s' to ebuild distdir" % file

	#initial dep checks complete; time to process main commands

	# Sandbox may be disabled when a non-root user builds and the ebuild
	# does not restrict userpriv.
	nosandbox = (("userpriv" in features) and \
		("usersandbox" not in features) and \
		("userpriv" not in mysettings["RESTRICT"]) and \
		("nouserpriv" not in mysettings["RESTRICT"]))
	if nosandbox and ("userpriv" not in features or \
		"userpriv" in mysettings["RESTRICT"] or \
		"nouserpriv" in mysettings["RESTRICT"]):
		nosandbox = ("sandbox" not in features and \
			"usersandbox" not in features)

	sesandbox = mysettings.selinux_enabled() and \
		"sesandbox" in mysettings.features
	ebuild_sh = EBUILD_SH_BINARY + " %s"
	misc_sh = MISC_SH_BINARY + " dyn_%s"

	# args are for the to spawn function
	"depend": {"cmd":ebuild_sh, "args":{"droppriv":1, "free":0, "sesandbox":0}},
	"setup":  {"cmd":ebuild_sh, "args":{"droppriv":0, "free":1, "sesandbox":0}},
	"unpack": {"cmd":ebuild_sh, "args":{"droppriv":1, "free":0, "sesandbox":sesandbox}},
	"compile":{"cmd":ebuild_sh, "args":{"droppriv":1, "free":nosandbox, "sesandbox":sesandbox}},
	"test":   {"cmd":ebuild_sh, "args":{"droppriv":1, "free":nosandbox, "sesandbox":sesandbox}},
	"install":{"cmd":ebuild_sh, "args":{"droppriv":0, "free":0, "sesandbox":sesandbox}},
	"rpm":    {"cmd":misc_sh, "args":{"droppriv":0, "free":0, "sesandbox":0}},
	"package":{"cmd":misc_sh, "args":{"droppriv":0, "free":0, "sesandbox":0}},

	# merge the deps in so we have again a 'full' actionmap
	# be glad when this can die.
	for x in actionmap.keys():
		if len(actionmap_deps.get(x, [])):
			actionmap[x]["dep"] = ' '.join(actionmap_deps[x])

	if mydo in actionmap.keys():
			portage_util.ensure_dirs(
				os.path.join(mysettings["PKGDIR"], mysettings["CATEGORY"]))
			portage_util.ensure_dirs(
				os.path.join(mysettings["PKGDIR"], "All"))
		retval = spawnebuild(mydo,
			actionmap, mysettings, debug, logfile=logfile)
	elif mydo=="qmerge":
		# check to ensure install was run. this *only* pops up when users
		# forget it and are using ebuild
		if not os.path.exists(
			os.path.join(mysettings["PORTAGE_BUILDDIR"], ".installed")):
			writemsg("!!! mydo=qmerge, but install phase hasn't been ran\n",
		# qmerge is a special phase that implies noclean.
		if "noclean" not in mysettings.features:
			mysettings.features.append("noclean")
		#qmerge is specifically not supposed to do a runtime dep check
			mysettings["CATEGORY"], mysettings["PF"], mysettings["D"],
			os.path.join(mysettings["PORTAGE_BUILDDIR"], "build-info"),
			myroot, mysettings, myebuild=mysettings["EBUILD"], mytree=tree,
			mydbapi=mydbapi, vartree=vartree, prev_mtimes=prev_mtimes)
		retval = spawnebuild("install", actionmap, mysettings, debug,
			alwaysdep=1, logfile=logfile)
		if retval == os.EX_OK:
			retval = merge(mysettings["CATEGORY"], mysettings["PF"],
				mysettings["D"], os.path.join(mysettings["PORTAGE_BUILDDIR"],
				"build-info"), myroot, mysettings,
				myebuild=mysettings["EBUILD"], mytree=tree, mydbapi=mydbapi,
				vartree=vartree, prev_mtimes=prev_mtimes)
		print "!!! Unknown mydo:",mydo

	if retval != os.EX_OK and tree == "porttree":
		# The failing ebuild may live in an overlay; tell the user which one.
		for i in xrange(len(mydbapi.porttrees)-1):
			t = mydbapi.porttrees[i+1]
			if myebuild.startswith(t):
				# Display the non-canonical path, in case it's different, to
				# prevent confusion.
				overlays = mysettings["PORTDIR_OVERLAY"].split()
				writemsg("!!! This ebuild is from an overlay: '%s'\n" % \
					overlays[i], noiselevel=-1)

			portage_locks.unlockdir(builddir_lock)

		# Make sure that DISTDIR is restored to its normal value before we return!
		if "PORTAGE_ACTUAL_DISTDIR" in mysettings:
			mysettings["DISTDIR"] = mysettings["PORTAGE_ACTUAL_DISTDIR"]
			del mysettings["PORTAGE_ACTUAL_DISTDIR"]

			# Discard empty log files.
			if os.stat(logfile).st_size == 0:

		if mydo in ("digest", "manifest", "help"):
			# If necessary, depend phase has been triggered by aux_get calls
			# and the exemption is no longer needed.
			_doebuild_manifest_exempt_depend -= 1
def movefile(src,dest,newmtime=None,sstat=None,mysettings=None):
	"""moves a file from src to dest, preserving all permissions and attributes; mtime will
	be preserved even when moving across filesystems.  Returns true on success and false on
	failure.  Move is atomic."""
	#print "movefile("+str(src)+","+str(dest)+","+str(newmtime)+","+str(sstat)+")"

	if mysettings is None:
		# Fall back to the module-level settings instance.
		mysettings = settings
	selinux_enabled = mysettings.selinux_enabled()
	except SystemExit, e:
	except Exception, e:
		print "!!! Stating source file failed... movefile()"

		dstat=os.lstat(dest)
	except (OSError, IOError):
		# dest does not exist yet; stat its parent directory instead.
		dstat=os.lstat(os.path.dirname(dest))

	# Check that we can actually unset schg etc flags...
	# Clear the flags on source and destination; we'll reinstate them after merging
	if destexists and dstat.st_flags != 0:
		if bsd_chflags.lchflags(dest, 0) < 0:
			writemsg("!!! Couldn't clear flags on file being merged: \n ",
	# We might have an immutable flag on the parent dir; save and clear.
	pflags=bsd_chflags.lgetflags(os.path.dirname(dest))
	bsd_chflags.lchflags(os.path.dirname(dest), 0)

	if (destexists and bsd_chflags.lhasproblems(dest) > 0) or \
		bsd_chflags.lhasproblems(os.path.dirname(dest)) > 0:
		# This is bad: we can't merge the file with these flags set.
		writemsg("!!! Can't merge file "+dest+" because of flags set\n",

	if stat.S_ISLNK(dstat[stat.ST_MODE]):
		except SystemExit, e:
		except Exception, e:

	if stat.S_ISLNK(sstat[stat.ST_MODE]):
		target=os.readlink(src)
		# Strip the image directory prefix so the installed symlink points
		# at the live filesystem location rather than into ${D}.
		if mysettings and mysettings["D"]:
			if target.find(mysettings["D"])==0:
				target=target[len(mysettings["D"]):]
		if destexists and not stat.S_ISDIR(dstat[stat.ST_MODE]):
			sid = selinux.get_lsid(src)
			selinux.secure_symlink(target,dest,sid)
			os.symlink(target,dest)
			lchown(dest,sstat[stat.ST_UID],sstat[stat.ST_GID])
			return os.lstat(dest)[stat.ST_MTIME]
		except SystemExit, e:
		except Exception, e:
			print "!!! failed to properly create symlink:"
			print "!!!",dest,"->",target

	# Same-device move: a plain rename is atomic and cheap.
	if sstat[stat.ST_DEV]==dstat[stat.ST_DEV] or selinux_enabled:
			ret=selinux.secure_rename(src,dest)
			ret=os.rename(src,dest)
		except SystemExit, e:
		except Exception, e:
			if e[0]!=errno.EXDEV:
				# Some random error.
				print "!!! Failed to move",src,"to",dest
			# Invalid cross-device-link 'bind' mounted or actually Cross-Device

	if stat.S_ISREG(sstat[stat.ST_MODE]):
		try: # For safety copy then move it over.
				selinux.secure_copy(src,dest+"#new")
				selinux.secure_rename(dest+"#new",dest)
				shutil.copyfile(src,dest+"#new")
				os.rename(dest+"#new",dest)
		except SystemExit, e:
		except Exception, e:
			print '!!! copy',src,'->',dest,'failed.'
	#we don't yet handle special, so we need to fall back to /bin/mv
		a=commands.getstatusoutput(MOVE_BINARY+" -c -f "+"'"+src+"' '"+dest+"'")
		a=commands.getstatusoutput(MOVE_BINARY+" -f "+"'"+src+"' '"+dest+"'")
		print "!!! Failed to move special file:"
		print "!!! '"+src+"' to '"+dest+"'"
		return None # failure

	if stat.S_ISLNK(sstat[stat.ST_MODE]):
		lchown(dest,sstat[stat.ST_UID],sstat[stat.ST_GID])
		os.chown(dest,sstat[stat.ST_UID],sstat[stat.ST_GID])
		os.chmod(dest, stat.S_IMODE(sstat[stat.ST_MODE])) # Sticky is reset on chown
	except SystemExit, e:
	except Exception, e:
		print "!!! Failed to chown/chmod/unlink in movefile()"

		os.utime(dest,(newmtime,newmtime))
		os.utime(dest, (sstat[stat.ST_ATIME], sstat[stat.ST_MTIME]))
		newmtime=sstat[stat.ST_MTIME]

	# Restore the flags we saved before moving
	if pflags and bsd_chflags.lchflags(os.path.dirname(dest), pflags) < 0:
		writemsg("!!! Couldn't restore flags (%s) on '%s'\n" % \
			(str(pflags), os.path.dirname(dest)), noiselevel=-1)
def merge(mycat, mypkg, pkgloc, infloc, myroot, mysettings, myebuild=None,
	mytree=None, mydbapi=None, vartree=None, prev_mtimes=None):
	# Merge a built package image into myroot via a dblink instance.
	# Requires write access to the target root; otherwise an error is printed.
	if not os.access(myroot, os.W_OK):
		writemsg("Permission denied: access('%s', W_OK)\n" % myroot,
	mylink = dblink(mycat, mypkg, myroot, mysettings, treetype=mytree,
	return mylink.merge(pkgloc, infloc, myroot, myebuild,
		mydbapi=mydbapi, prev_mtimes=prev_mtimes)
def unmerge(cat, pkg, myroot, mysettings, mytrimworld=1, vartree=None, ldpath_mtimes=None):
	# Unmerge an installed package through a dblink on the vartree.
		cat, pkg, myroot, mysettings, treetype="vartree", vartree=vartree)
	retval = mylink.unmerge(trimworld=mytrimworld, cleanup=1,
		ldpath_mtimes=ldpath_mtimes)
	if retval == os.EX_OK:
def getCPFromCPV(mycpv):
	"""Return only the category/package (cp) portion of a cpv string.

	Version information produced by pkgsplit() is discarded.
	"""
	split_result = pkgsplit(mycpv)
	return split_result[0]
def dep_virtual(mysplit, mysettings):
	"""Does virtual dependency conversion: rewrites virtual atoms in a
	parsed dependency list using the configured virtuals mapping."""
	myvirtuals = mysettings.getvirtuals()
		if type(x)==types.ListType:
			# Recurse into nested dependency groups.
			newsplit.append(dep_virtual(x, mysettings))
			mychoices = myvirtuals.get(mykey, None)
				if len(mychoices) == 1:
					# Single provider: substitute it directly.
					a = x.replace(mykey, mychoices[0])
					# blocker needs "and" not "or(||)".
						a.append(x.replace(mykey, y))
def _expand_new_virtuals(mysplit, edebug, mydbapi, mysettings, myroot="/",
	trees=None, **kwargs):
	"""Recursively expand new-style virtuals so as to collapse one or more
	levels of indirection.  In dep_zapdeps, new-style virtuals will be assigned
	zero cost regardless of whether or not they are currently installed. Virtual
	blockers are supported but only when the virtual expands to a single
	atom because it wouldn't necessarily make sense to block all the components
	of a compound virtual.  When more than one new-style virtual is matched,
	the matches are sorted from highest to lowest versions and the atom is
	expanded to || ( highest match ... lowest match )."""

	# According to GLEP 37, RDEPEND is the only dependency type that is valid
	# for new-style virtuals.  Repoman should enforce this.
	dep_keys = ["RDEPEND", "DEPEND", "PDEPEND"]
	def compare_pkgs(a, b):
		# Sort comparator: note the reversed arguments, so higher
		# versions sort first.
		return pkgcmp(b[1], a[1])
	portdb = trees[myroot]["porttree"].dbapi
	if kwargs["use_binaries"]:
		portdb = trees[myroot]["bintree"].dbapi
	myvirtuals = mysettings.getvirtuals()
		elif isinstance(x, list):
			# Recurse into nested dependency groups.
			newsplit.append(_expand_new_virtuals(x, edebug, mydbapi,
				mysettings, myroot=myroot, trees=trees, **kwargs))
		if portage_dep._dep_check_strict and \
			not isvalidatom(x, allow_blockers=True):
			raise portage_exception.ParseError(
				"invalid atom: '%s'" % x)
		mykey = dep_getkey(x)
		if not mykey.startswith("virtual/"):
		mychoices = myvirtuals.get(mykey, [])
		isblocker = x.startswith("!")
		for cpv in portdb.match(match_atom):
			# only use new-style matches
			if cpv.startswith("virtual/"):
				pkgs[cpv] = (cpv, catpkgsplit(cpv)[1:], portdb)
		if kwargs["use_binaries"] and "vartree" in trees[myroot]:
			vardb = trees[myroot]["vartree"].dbapi
			for cpv in vardb.match(match_atom):
				# only use new-style matches
				if cpv.startswith("virtual/"):
					pkgs[cpv] = (cpv, catpkgsplit(cpv)[1:], vardb)
		if not (pkgs or mychoices):
			# This one couldn't be expanded as a new-style virtual.  Old-style
			# virtuals have already been expanded by dep_virtual, so this one
			# is unavailable and dep_zapdeps will identify it as such.  The
			# atom is not eliminated here since it may still represent a
			# dependency that needs to be satisfied.
		if not pkgs and len(mychoices) == 1:
			newsplit.append(x.replace(mykey, mychoices[0]))
		pkgs = pkgs.values()
		pkgs.sort(compare_pkgs) # Prefer higher versions.
			depstring = " ".join(y[2].aux_get(y[0], dep_keys))
				print "Virtual Parent:      ", y[0]
				print "Virtual Depstring:   ", depstring
			mycheck = dep_check(depstring, mydbapi, mysettings, myroot=myroot,
				trees=trees, **kwargs)
				raise portage_exception.ParseError(
					"%s: %s '%s'" % (y[0], mycheck[1], depstring))
				virtual_atoms = [atom for atom in mycheck[1] \
					if not atom.startswith("!")]
				if len(virtual_atoms) == 1:
					# It wouldn't make sense to block all the components of a
					# compound virtual, so only a single atom block is allowed.
					a.append("!" + virtual_atoms[0])
			mycheck[1].append("="+y[0]) # pull in the new-style virtual
			a.append(mycheck[1])
		# Plain old-style virtuals.  New-style virtuals are preferred.
			a.append(x.replace(mykey, y))
		if isblocker and not a:
			# Probably a compound virtual.  Pass the atom through unprocessed.
def dep_eval(deplist):
	"""Evaluate a reduced dependency list of booleans/nested lists."""
	if deplist[0]=="||":
		#or list; we just need one "1"
		for x in deplist[1:]:
			if type(x)==types.ListType:
		#XXX: unless there are no available atoms in the list
		#in which case we need to assume that everything is
		#okay as some ebuilds are relying on an old bug.
		if len(deplist) == 1:
		if type(x)==types.ListType:
4156 def dep_zapdeps(unreduced, reduced, myroot, use_binaries=0, trees=None):
4157 """Takes an unreduced and reduced deplist and removes satisfied dependencies.
4158 Returned deplist contains steps that must be taken to satisfy dependencies."""
4162 writemsg("ZapDeps -- %s\n" % (use_binaries), 2)
4163 if not reduced or unreduced == ["||"] or dep_eval(reduced):
4166 if unreduced[0] != "||":
4168 for dep, satisfied in izip(unreduced, reduced):
4169 if isinstance(dep, list):
4170 unresolved += dep_zapdeps(dep, satisfied, myroot,
4171 use_binaries=use_binaries, trees=trees)
4173 unresolved.append(dep)
4176 # We're at a ( || atom ... ) type level and need to make a choice
4177 deps = unreduced[1:]
4178 satisfieds = reduced[1:]
4180 	# Our preference order is for the first item that:
4181 # a) contains all unmasked packages with the same key as installed packages
4182 # b) contains all unmasked packages
4183 # c) contains masked installed packages
4184 # d) is the first item
4187 preferred_any_slot = []
4188 possible_upgrades = []
4191 # Alias the trees we'll be checking availability against
4193 if "vartree" in trees[myroot]:
4194 vardb = trees[myroot]["vartree"].dbapi
4196 mydbapi = trees[myroot]["bintree"].dbapi
4198 mydbapi = trees[myroot]["porttree"].dbapi
4200 # Sort the deps into preferred (installed) and other
4201 	# with values of [[required_atom], availability]
4202 for dep, satisfied in izip(deps, satisfieds):
4203 if isinstance(dep, list):
4204 atoms = dep_zapdeps(dep, satisfied, myroot,
4205 use_binaries=use_binaries, trees=trees)
4211 other.append((atoms, None, False))
4214 all_available = True
4217 avail_pkg = best(mydbapi.match(atom))
4219 avail_slot = "%s:%s" % (dep_getkey(atom),
4220 mydbapi.aux_get(avail_pkg, ["SLOT"])[0])
4221 elif not avail_pkg and use_binaries:
4222 # With --usepkgonly, count installed packages as "available".
4223 # Note that --usepkgonly currently has no package.mask support.
4225 avail_pkg = best(vardb.match(atom))
4227 avail_slot = "%s:%s" % (dep_getkey(atom),
4228 vardb.aux_get(avail_pkg, ["SLOT"])[0])
4230 all_available = False
4233 versions[avail_slot] = avail_pkg
4235 this_choice = (atoms, versions, all_available)
4237 # The "all installed" criterion is not version or slot specific.
4238 # If any version of a package is installed then we assume that it
4239 # is preferred over other possible packages choices.
4240 all_installed = True
4241 for atom in set([dep_getkey(atom) for atom in atoms]):
4242 # New-style virtuals have zero cost to install.
4243 if not vardb.match(atom) and not atom.startswith("virtual/"):
4244 all_installed = False
4246 all_installed_slots = False
4248 all_installed_slots = True
4249 for slot_atom in versions:
4250 # New-style virtuals have zero cost to install.
4251 if not vardb.match(slot_atom) and \
4252 not slot_atom.startswith("virtual/"):
4253 all_installed_slots = False
4256 if all_installed_slots:
4257 preferred.append(this_choice)
4259 preferred_any_slot.append(this_choice)
4261 possible_upgrades.append(this_choice)
4263 other.append(this_choice)
4265 # Compare the "all_installed" choices against the "all_available" choices
4266 # for possible missed upgrades. The main purpose of this code is to find
4267 # upgrades of new-style virtuals since _expand_new_virtuals() expands them
4268 # into || ( highest version ... lowest version ). We want to prefer the
4269 # highest all_available version of the new-style virtual when there is a
4270 # lower all_installed version.
4271 preferred.extend(preferred_any_slot)
4272 preferred.extend(possible_upgrades)
4273 possible_upgrades = preferred[1:]
4274 for possible_upgrade in possible_upgrades:
4275 atoms, versions, all_available = possible_upgrade
4276 myslots = set(versions)
4277 for other_choice in preferred:
4278 if possible_upgrade is other_choice:
4279 # possible_upgrade will not be promoted, so move on
4281 o_atoms, o_versions, o_all_available = other_choice
4282 intersecting_slots = myslots.intersection(o_versions)
4283 if not intersecting_slots:
4286 has_downgrade = False
4287 for myslot in intersecting_slots:
4288 myversion = versions[myslot]
4289 o_version = o_versions[myslot]
4290 if myversion != o_version:
4291 if myversion == best([myversion, o_version]):
4294 has_downgrade = True
4296 if has_upgrade and not has_downgrade:
4297 preferred.remove(possible_upgrade)
4298 o_index = preferred.index(other_choice)
4299 preferred.insert(o_index, possible_upgrade)
4302 # preferred now contains a) and c) from the order above with
4303 # the masked flag differentiating the two. other contains b)
4304 # and d) so adding other to preferred will give us a suitable
4305 # list to iterate over.
4306 preferred.extend(other)
4308 for allow_masked in (False, True):
4309 for atoms, versions, all_available in preferred:
4310 if all_available or allow_masked:
4313 assert(False) # This point should not be reachable
def dep_expand(mydep, mydb=None, use_cache=1, settings=None):
	# Expand a dependency atom into a fully-qualified "category/package"
	# form while preserving any operator prefix (>=, ~, !, ...) and any
	# version/slot postfix text.
	# NOTE(review): the lines that bind orig_dep from mydep are elided in
	# this excerpt; orig_dep presumably holds the original atom string.
	mydep = dep_getcpv(orig_dep)
	myindex = orig_dep.index(mydep)
	prefix = orig_dep[:myindex]
	postfix = orig_dep[myindex+len(mydep):]
	# Expand only the bare cpv portion, then re-attach prefix/postfix.
	return prefix + cpv_expand(
		mydep, mydb=mydb, use_cache=use_cache, settings=settings) + postfix
def dep_check(depstring, mydbapi, mysettings, use="yes", mode=None, myuse=None,
	use_cache=1, use_binaries=0, myroot="/", trees=None):
	"""Takes a depend string and parses the condition.

	Returns a two-element list: [1, atom_list] on success (the selected
	dependency atoms) or [0, error_message] on failure.
	NOTE(review): several lines (if/else and try headers among them) are
	elided in this excerpt; the indentation of the surviving lines reflects
	the original nesting.
	"""
	edebug = mysettings.get("PORTAGE_DEBUG", None) == "1"
	#check_config_instance(mysettings)
		# Fall back to the global tree database when none was passed in.
		trees = globals()["db"]
			myusesplit = mysettings["USE"].split()
			# We've been given useflags to use.
			#print "USE FLAGS PASSED IN."
			#if "bindist" in myusesplit:
			#	print "BINDIST is set!"
			#	print "BINDIST NOT set."
		#we are being run by autouse(), don't consult USE vars yet.
		# WE ALSO CANNOT USE SETTINGS
	#convert parenthesis to sublists
	mysplit = portage_dep.paren_reduce(depstring)
	useforce.add(mysettings["ARCH"])
		# This masking/forcing is only for repoman. In other cases, relevant
		# masking/forcing should have already been applied via
		# config.regenerate(). Also, binary or installed packages may have
		# been built with flags that are now masked, and it would be
		# inconsistent to mask them now. Additionally, myuse may consist of
		# flags from a parent package that is being merged to a $ROOT that is
		# different from the one that mysettings represents.
		mymasks.update(mysettings.usemask)
		mymasks.update(mysettings.archlist())
		mymasks.discard(mysettings["ARCH"])
		useforce.update(mysettings.useforce)
		useforce.difference_update(mymasks)
		mysplit = portage_dep.use_reduce(mysplit, uselist=myusesplit,
			masklist=mymasks, matchall=(use=="all"), excludeall=useforce)
	except portage_exception.InvalidDependString, e:
	# Do the || conversions
	mysplit=portage_dep.dep_opconvert(mysplit)
		#dependencies were reduced to nothing
	# Recursively expand new-style virtuals so as to
	# collapse one or more levels of indirection.
		mysplit = _expand_new_virtuals(mysplit, edebug, mydbapi, mysettings,
			use=use, mode=mode, myuse=myuse, use_cache=use_cache,
			use_binaries=use_binaries, myroot=myroot, trees=trees)
	except portage_exception.ParseError, e:
	# Reduce each atom to True/False (satisfied or not).
	mysplit2=dep_wordreduce(mysplit2,mysettings,mydbapi,mode,use_cache=use_cache)
	if mysplit2 is None:
		return [0,"Invalid token"]
	# Debug tracing of the reduction pipeline (noiselevel 1).
	writemsg("\n\n\n", 1)
	writemsg("mysplit: %s\n" % (mysplit), 1)
	writemsg("mysplit2: %s\n" % (mysplit2), 1)
	# Choose one alternative from each || group.
	myzaps = dep_zapdeps(mysplit, mysplit2, myroot,
		use_binaries=use_binaries, trees=trees)
	mylist = flatten(myzaps)
	writemsg("myzaps: %s\n" % (myzaps), 1)
	writemsg("mylist: %s\n" % (mylist), 1)
	writemsg("mydict: %s\n" % (mydict), 1)
	return [1,mydict.keys()]
def dep_wordreduce(mydeplist,mysettings,mydbapi,mode,use_cache=1):
	"Reduces the deplist to ones and zeros"
	# Each atom in the (possibly nested) dep list is replaced by a boolean
	# recording whether it is currently satisfied; operator tokens ("||")
	# are left in place for dep_zapdeps.
	# NOTE(review): several control-flow lines (else/try headers, returns)
	# are elided in this excerpt.
	deplist=mydeplist[:]
	for mypos in xrange(len(deplist)):
		if type(deplist[mypos])==types.ListType:
			# Nested group: reduce it recursively.
			deplist[mypos]=dep_wordreduce(deplist[mypos],mysettings,mydbapi,mode,use_cache=use_cache)
		elif deplist[mypos]=="||":
			# -- the following lines sit under an elided "else:" branch --
			mykey = dep_getkey(deplist[mypos])
			if mysettings and mysettings.pprovideddict.has_key(mykey) and \
				match_from_list(deplist[mypos], mysettings.pprovideddict[mykey]):
			elif mydbapi is None:
				# Assume nothing is satisfied. This forces dep_zapdeps to
				# return all of the deps that have been selected
				# (excluding those satisfied by package.provided).
				deplist[mypos] = False
					mydep=mydbapi.xmatch(mode,deplist[mypos])
					mydep=mydbapi.match(deplist[mypos],use_cache=use_cache)
				# Blockers ("!...") invert the satisfied flag.
				if deplist[mypos][0]=="!":
			#encountered invalid string
def cpv_getkey(mycpv):
	# Return the "category/package" key for a cpv such as
	# "sys-apps/foo-1.0" -> "sys-apps/foo".
	# NOTE(review): lines handling other pkgsplit/split results are elided
	# in this excerpt.
	myslash=mycpv.split("/")
	mysplit=pkgsplit(myslash[-1])
	return myslash[0]+"/"+mysplit[0]
def key_expand(mykey, mydb=None, use_cache=1, settings=None):
	# Expand a bare package name or cat/pkg key into a fully-qualified key,
	# consulting the configured categories and the virtuals mappings.
	# NOTE(review): the branch headers (len(mysplit) checks) and some
	# return statements are elided in this excerpt.
	mysplit=mykey.split("/")
	if settings is None:
		settings = globals()["settings"]
	virts = settings.getvirtuals("/")
	virts_p = settings.get_virts_p("/")
		if mydb and type(mydb)==types.InstanceType:
			for x in settings.categories:
				if mydb.cp_list(x+"/"+mykey,use_cache=use_cache):
		if virts_p.has_key(mykey):
			return(virts_p[mykey][0])
		return "null/"+mykey
		if type(mydb)==types.InstanceType:
			if (not mydb.cp_list(mykey,use_cache=use_cache)) and virts and virts.has_key(mykey):
				return virts[mykey][0]
def cpv_expand(mycpv, mydb=None, use_cache=1, settings=None):
	"""Given a string (packagename or virtual) expand it into a valid
	cat/package string. Virtuals use the mydb to determine which provided
	virtual is a valid choice and defaults to the first element when there
	are no installed/available candidates."""
	# NOTE(review): several branch headers and assignments are elided in
	# this excerpt; indentation reflects the original nesting.
	myslash=mycpv.split("/")
	mysplit=pkgsplit(myslash[-1])
	if settings is None:
		settings = globals()["settings"]
	virts = settings.getvirtuals("/")
	virts_p = settings.get_virts_p("/")
		# this is illegal case.
	elif len(myslash)==2:
			mykey=myslash[0]+"/"+mysplit[0]
		if mydb and virts and mykey in virts:
			writemsg("mydb.__class__: %s\n" % (mydb.__class__), 1)
			if hasattr(mydb, "cp_list"):
				if not mydb.cp_list(mykey, use_cache=use_cache):
					writemsg("virts[%s]: %s\n" % (str(mykey),virts[mykey]), 1)
					mykey_orig = mykey[:]
					# Prefer the first provider that is actually present.
					for vkey in virts[mykey]:
						if mydb.cp_list(vkey,use_cache=use_cache):
							writemsg("virts chosen: %s\n" % (mykey), 1)
					if mykey == mykey_orig:
						# No provider found; fall back to the first one.
						mykey=virts[mykey][0]
						writemsg("virts defaulted: %s\n" % (mykey), 1)
		#we only perform virtual expansion if we are passed a dbapi
		#specific cpv, no category, ie. "foo-1.0"
			for x in settings.categories:
				if mydb.cp_list(x+"/"+myp,use_cache=use_cache):
					matches.append(x+"/"+myp)
		if len(matches) > 1:
			virtual_name_collision = False
			if len(matches) == 2:
					if not x.startswith("virtual/"):
						# Assume that the non-virtual is desired. This helps
						# avoid the ValueError for invalid deps that come from
						# installed packages (during reverse blocker detection,
						virtual_name_collision = True
			if not virtual_name_collision:
				raise ValueError, matches
		if not mykey and type(mydb)!=types.ListType:
			if virts_p.has_key(myp):
				mykey=virts_p[myp][0]
			#again, we only perform virtual expansion if we have a dbapi (not a list)
		if mysplit[2]=="r0":
			# Suppress the default "-r0" revision suffix.
			return mykey+"-"+mysplit[1]
			return mykey+"-"+mysplit[1]+"-"+mysplit[2]
def getmaskingreason(mycpv, settings=None, portdb=None):
	# Return the package.mask comment block that applies to mycpv, or None.
	# NOTE(review): several lines (guards, comment accumulation, returns)
	# are elided in this excerpt.
	from portage_util import grablines
	if settings is None:
		settings = globals()["settings"]
		portdb = globals()["portdb"]
	mysplit = catpkgsplit(mycpv)
		raise ValueError("invalid CPV: %s" % mycpv)
	if not portdb.cpv_exists(mycpv):
		raise KeyError("CPV %s does not exist" % mycpv)
	mycp=mysplit[0]+"/"+mysplit[1]
	# XXX- This is a temporary duplicate of code from the config constructor.
	locations = [os.path.join(settings["PORTDIR"], "profiles")]
	locations.extend(settings.profiles)
	for ov in settings["PORTDIR_OVERLAY"].split():
		profdir = os.path.join(normalize_path(ov), "profiles")
		if os.path.isdir(profdir):
			locations.append(profdir)
	locations.append(os.path.join(settings["PORTAGE_CONFIGROOT"],
		USER_CONFIG_PATH.lstrip(os.path.sep)))
	pmasklists = [grablines(os.path.join(x, "package.mask"), recursive=1) for x in locations]
	while pmasklists: # stack_lists doesn't preserve order so it can't be used
		pmasklines.extend(pmasklists.pop(0))
	if settings.pmaskdict.has_key(mycp):
		for x in settings.pmaskdict[mycp]:
			if mycpv in portdb.xmatch("match-all", x):
				# Scan package.mask lines for the comment preceding atom x.
				for i in xrange(len(pmasklines)):
					l = pmasklines[i].strip()
						comment_valid = i + 1
						if comment_valid != i:
					elif comment_valid != -1:
						# Apparently this comment applies to multiple masks, so
						# it remains valid until a blank line is encountered.
def getmaskingstatus(mycpv, settings=None, portdb=None):
	# Return a list of reasons why mycpv is masked (empty list if visible):
	# "profile", "package.mask", keyword masks, EAPI problems, etc.
	# NOTE(review): several lines (guards, loop headers, returns) are
	# elided in this excerpt.
	if settings is None:
		settings = globals()["settings"]
		portdb = globals()["portdb"]
	mysplit = catpkgsplit(mycpv)
		raise ValueError("invalid CPV: %s" % mycpv)
	if not portdb.cpv_exists(mycpv):
		raise KeyError("CPV %s does not exist" % mycpv)
	mycp=mysplit[0]+"/"+mysplit[1]
	# profile (package.mask inside profiles) checking
	revmaskdict=settings.prevmaskdict
	if revmaskdict.has_key(mycp):
		for x in revmaskdict[mycp]:
			if not match_to_list(mycpv, [myatom]):
				rValue.append("profile")
	# package.mask checking
	maskdict=settings.pmaskdict
	unmaskdict=settings.punmaskdict
	if maskdict.has_key(mycp):
		for x in maskdict[mycp]:
			if mycpv in portdb.xmatch("match-all", x):
				# package.unmask entries override a mask.
				if unmaskdict.has_key(mycp):
					for z in unmaskdict[mycp]:
						if mycpv in portdb.xmatch("match-all",z):
					rValue.append("package.mask")
		# keywords checking
		mygroups, eapi = portdb.aux_get(mycpv, ["KEYWORDS", "EAPI"])
		# The "depend" phase apparently failed for some reason. An associated
		# error message will have already been printed to stderr.
		return ["corruption"]
	if not eapi_is_supported(eapi):
		return ["required EAPI %s, supported EAPI %s" % (eapi, portage_const.EAPI)]
	mygroups = mygroups.split()
	pgroups = settings["ACCEPT_KEYWORDS"].split()
	myarch = settings["ARCH"]
	if pgroups and myarch not in pgroups:
		"""For operating systems other than Linux, ARCH is not necessarily a
		valid keyword."""
		myarch = pgroups[0].lstrip("~")
	pkgdict = settings.pkeywordsdict
	cp = dep_getkey(mycpv)
	if pkgdict.has_key(cp):
		matches = match_to_list(mycpv, pkgdict[cp].keys())
		for match in matches:
			pgroups.extend(pkgdict[cp][match])
		# Apply "-kw" removals while building the effective keyword list.
		if x != "-*" and x.startswith("-"):
				inc_pgroups.remove(x[1:])
		if x not in inc_pgroups:
			inc_pgroups.append(x)
	pgroups = inc_pgroups
	for keyword in pgroups:
		if keyword in mygroups:
			# Determine which kind of keyword mask applies.
			elif gp=="-"+myarch:
			elif gp=="~"+myarch:
		rValue.append(kmask+" keyword")
	def __init__(self, root="/", virtual=None, clone=None, settings=None):
		# Create a portage tree wrapper; the clone parameter is deprecated.
		# NOTE(review): the clone/else branch headers and a few assignments
		# are elided in this excerpt.
			writemsg("portagetree.__init__(): deprecated " + \
				"use of clone parameter\n", noiselevel=-1)
			self.root=clone.root
			self.portroot=clone.portroot
			self.pkglines=clone.pkglines
			if settings is None:
				settings = globals()["settings"]
			self.settings = settings
			self.portroot=settings["PORTDIR"]
			self.virtual=virtual
			self.dbapi = portdbapi(
				settings["PORTDIR"], mysettings=settings)
	def dep_bestmatch(self,mydep):
		"compatibility method"
		# Return the highest-version visible match for mydep.
		# NOTE(review): the return statements are elided in this excerpt.
		mymatch=self.dbapi.xmatch("bestmatch-visible",mydep)
	def dep_match(self,mydep):
		"compatibility method"
		# Return all visible matches for mydep.
		# NOTE(review): the return statements are elided in this excerpt.
		mymatch=self.dbapi.xmatch("match-visible",mydep)
4746 def exists_specific(self,cpv):
4747 return self.dbapi.cpv_exists(cpv)
4749 def getallnodes(self):
4750 """new behavior: these are all *unmasked* nodes. There may or may not be available
4751 masked package for nodes in this nodes list."""
4752 return self.dbapi.cp_all()
	def getname(self,pkgname):
		"returns file location for this particular package (DEPRECATED)"
		# NOTE(review): guard lines for an empty pkgname are elided here.
		mysplit=pkgname.split("/")
		psplit=pkgsplit(mysplit[1])
		# Path layout: <portroot>/<category>/<pkg-name>/<pkg-name-ver>.ebuild
		return self.portroot+"/"+mysplit[0]+"/"+psplit[0]+"/"+mysplit[1]+".ebuild"
	def resolve_specific(self,myspec):
		# Expand a specific cpv string into a fully-qualified cpv, appending
		# the revision suffix when present.
		# NOTE(review): the unparsable-myspec guard and the final return are
		# elided in this excerpt.
		cps=catpkgsplit(myspec)
		mykey = key_expand(cps[0]+"/"+cps[1], mydb=self.dbapi,
			settings=self.settings)
		mykey=mykey+"-"+cps[2]
			mykey=mykey+"-"+cps[3]
4773 def depcheck(self,mycheck,use="yes",myusesplit=None):
4774 return dep_check(mycheck,self.dbapi,use=use,myuse=myusesplit)
	def getslot(self,mycatpkg):
		"Get a slot for a catpkg; assume it exists."
		# NOTE(review): try/return scaffolding is elided in this excerpt;
		# on failure an empty slot string is presumably returned.
			myslot=self.dbapi.aux_get(mycatpkg,["SLOT"])[0]
		except SystemExit, e:
		except Exception, e:
	def close_caches(self):
		# No-op hook in the base class; subclasses with caches override it.
		# NOTE(review): the method body is elided in this excerpt.
	def cp_list(self,cp,use_cache=1):
		# NOTE(review): body elided; the following two lines belong to the
		# elided cpv_all() definition, which aggregates cp_list() results.
		for cp in self.cp_all():
			cpv_list.extend(self.cp_list(cp))
4804 def aux_get(self,mycpv,mylist):
4805 "stub code for returning auxiliary db information, such as SLOT, DEPEND, etc."
4806 'input: "sys-apps/foo-1.0",["SLOT","DEPEND","HOMEPAGE"]'
4807 'return: ["0",">=sys-libs/bar-1.0","http://www.foo.com"] or [] if mycpv not found'
4808 raise NotImplementedError
	def match(self,origdep,use_cache=1):
		# Expand the dependency atom and match it against this db's
		# cp_list, filtering by slot when the atom carries one.
		# NOTE(review): the final "return mylist" is elided in this excerpt.
		mydep = dep_expand(origdep, mydb=self, settings=self.settings)
		mykey=dep_getkey(mydep)
		mylist = match_from_list(mydep,self.cp_list(mykey,use_cache=use_cache))
		myslot = portage_dep.dep_getslot(mydep)
		if myslot is not None:
			mylist = [cpv for cpv in mylist \
				if self.aux_get(cpv, ["SLOT"])[0] == myslot]
4820 def match2(self,mydep,mykey,mylist):
4821 writemsg("DEPRECATED: dbapi.match2\n")
4822 match_from_list(mydep,mylist)
	def invalidentry(self, mypath):
		# Handle a bogus entry found in the package database directory:
		# stale lockfiles are removed, incomplete merges are reported, and
		# anything else is flagged as an invalid db entry.
		# NOTE(review): try/except and else scaffolding is elided here.
		if re.search("portage_lockfile$",mypath):
			if not os.environ.has_key("PORTAGE_MASTER_PID"):
				writemsg("Lockfile removed: %s\n" % mypath, 1)
				portage_locks.unlockfile((mypath,None,None))
				# Nothing we can do about it. We're probably sandboxed.
		elif re.search(".*/-MERGING-(.*)",mypath):
			if os.path.exists(mypath):
				writemsg(red("INCOMPLETE MERGE:")+" "+mypath+"\n", noiselevel=-1)
			writemsg("!!! Invalid db entry: %s\n" % mypath, noiselevel=-1)
class fakedbapi(dbapi):
	"This is a dbapi to use for the emptytree function. It's empty, but things can be added to it."
	# In-memory package database: cpvdict maps cpv -> metadata dict and
	# cpdict maps cp -> list of cpvs.
	# NOTE(review): several lines of this class are elided in this excerpt;
	# indentation of surviving lines reflects the original nesting.
	def __init__(self, settings=None):
		if settings is None:
			settings = globals()["settings"]
		self.settings = settings
		self._match_cache = {}

	def _clear_cache(self):
		# Drop memoized match() results (called when contents change).
		if self._match_cache:
			self._match_cache = {}

	def match(self, origdep, use_cache=1):
		# Memoized wrapper around dbapi.match().
		result = self._match_cache.get(origdep, None)
		if result is not None:
		result = dbapi.match(self, origdep, use_cache=use_cache)
		self._match_cache[origdep] = result

	def cpv_exists(self,mycpv):
		return self.cpvdict.has_key(mycpv)

	def cp_list(self,mycp,use_cache=1):
		if not self.cpdict.has_key(mycp):
		return self.cpdict[mycp]

		# (elided cp_all definition) aggregate all cpvs per cp key
		for x in self.cpdict.keys():
			returnme.extend(self.cpdict[x])

		# (elided cpv_all definition)
		return self.cpvdict.keys()

	def cpv_inject(self, mycpv, metadata=None):
		"""Adds a cpv from the list of available packages."""
		mycp=cpv_getkey(mycpv)
		self.cpvdict[mycpv] = metadata
			myslot = metadata.get("SLOT", None)
		if myslot and mycp in self.cpdict:
			# If necessary, remove another package in the same SLOT.
			for cpv in self.cpdict[mycp]:
				other_metadata = self.cpvdict[cpv]
				if myslot == other_metadata.get("SLOT", None):
					self.cpv_remove(cpv)
		if mycp not in self.cpdict:
			self.cpdict[mycp] = []
		if not mycpv in self.cpdict[mycp]:
			self.cpdict[mycp].append(mycpv)

	def cpv_remove(self,mycpv):
		"""Removes a cpv from the list of available packages."""
		mycp=cpv_getkey(mycpv)
		if self.cpvdict.has_key(mycpv):
			del self.cpvdict[mycpv]
		if not self.cpdict.has_key(mycp):
		while mycpv in self.cpdict[mycp]:
			del self.cpdict[mycp][self.cpdict[mycp].index(mycpv)]
		# Drop the cp key entirely once its last cpv is gone.
		if not len(self.cpdict[mycp]):
			del self.cpdict[mycp]

	def aux_get(self, mycpv, wants):
		if not self.cpv_exists(mycpv):
			raise KeyError(mycpv)
		metadata = self.cpvdict[mycpv]
			# No metadata recorded: return empty strings for every key.
			return ["" for x in wants]
		return [metadata.get(x, "") for x in wants]

	def aux_update(self, cpv, values):
		self.cpvdict[cpv].update(values)
class bindbapi(fakedbapi):
	# dbapi for binary packages (.tbz2): metadata is read from the xpak
	# segment of the package file, or from the remote package index for
	# remote packages.
	# NOTE(review): several lines of this class are elided in this excerpt;
	# indentation of surviving lines reflects the original nesting.
	def __init__(self, mybintree=None, settings=None):
		self.bintree = mybintree
		if settings is None:
			settings = globals()["settings"]
		self.settings = settings
		self._match_cache = {}
		# Selectively cache metadata in order to optimize dep matching.
		self._aux_cache_keys = set(["SLOT"])
		self._aux_cache = {}

	def match(self, *pargs, **kwargs):
		# Ensure the binary tree index is loaded before matching.
		if self.bintree and not self.bintree.populated:
			self.bintree.populate()
		return fakedbapi.match(self, *pargs, **kwargs)

	def aux_get(self,mycpv,wants):
		if self.bintree and not self.bintree.populated:
			self.bintree.populate()
		# Serve entirely from the small cache when it covers the request.
		if not set(wants).difference(self._aux_cache_keys):
			aux_cache = self._aux_cache.get(mycpv)
			if aux_cache is not None:
				return [aux_cache[x] for x in wants]
		mysplit = mycpv.split("/")
		tbz2name = mysplit[1]+".tbz2"
		if self.bintree and not self.bintree.isremote(mycpv):
			# Local package: read metadata from the xpak segment.
			tbz2 = xpak.tbz2(self.bintree.getname(mycpv))
			getitem = tbz2.getfile
			getitem = self.bintree.remotepkgs[tbz2name].get
		mykeys = self._aux_cache_keys.union(wants)
			# myval is None if the key doesn't exist
			# or the tbz2 is corrupt.
				mydata[x] = " ".join(myval.split())
		if "EAPI" in mykeys:
			if not mydata.setdefault("EAPI", "0"):
				mydata["EAPI"] = "0"
		for x in self._aux_cache_keys:
			aux_cache[x] = mydata.get(x, "")
		self._aux_cache[mycpv] = aux_cache
		return [mydata.get(x, "") for x in wants]

	def aux_update(self, cpv, values):
		# Rewrite metadata inside the binary package's xpak segment.
		if not self.bintree.populated:
			self.bintree.populate()
		tbz2path = self.bintree.getname(cpv)
		if not os.path.exists(tbz2path):
		mytbz2 = xpak.tbz2(tbz2path)
		mydata = mytbz2.get_data()
		mydata.update(values)
		mytbz2.recompose_mem(xpak.xpak_mem(mydata))

	def cp_list(self, *pargs, **kwargs):
		if not self.bintree.populated:
			self.bintree.populate()
		return fakedbapi.cp_list(self, *pargs, **kwargs)

		# (elided cpv_all definition)
		if not self.bintree.populated:
			self.bintree.populate()
		return fakedbapi.cpv_all(self)
5003 class vardbapi(dbapi):
	def __init__(self, root, categories=None, settings=None, vartree=None):
		# Database of installed packages, backed by the on-disk VDB at
		# root + VDB_PATH.
		# NOTE(review): a few lines (e.g. the self.root assignment and the
		# cpcache init) are elided in this excerpt.
		#cache for category directory mtimes
		self.mtdircache = {}
		#cache for dependency checks
		self.matchcache = {}
		#cache for cp_list results
		self.blockers = None
		if settings is None:
			settings = globals()["settings"]
		self.settings = settings
		if categories is None:
			categories = settings.categories
		self.categories = categories[:]
			vartree = globals()["db"][root]["vartree"]
		self.vartree = vartree
		# Metadata keys cached persistently for fast dep calculations.
		self._aux_cache_keys = set(["SLOT", "COUNTER", "PROVIDE", "USE",
			"IUSE", "DEPEND", "RDEPEND", "PDEPEND"])
		self._aux_cache = None
		self._aux_cache_version = "1"
		self._aux_cache_filename = os.path.join(self.root,
			CACHE_PATH.lstrip(os.path.sep), "vdb_metadata.pickle")
5029 def cpv_exists(self,mykey):
5030 "Tells us whether an actual ebuild exists on disk (no masking)"
5031 return os.path.exists(self.root+VDB_PATH+"/"+mykey)
	def cpv_counter(self,mycpv):
		"This method will grab the COUNTER. Returns a counter value."
		# NOTE(review): try/except/else scaffolding and several assignments
		# are elided in this excerpt; indentation reflects the original
		# nesting.
			return long(self.aux_get(mycpv, ["COUNTER"])[0])
		except (KeyError, ValueError):
		cdir=self.root+VDB_PATH+"/"+mycpv
		cpath=self.root+VDB_PATH+"/"+mycpv+"/COUNTER"

		# We write our new counter value to a new file that gets moved into
		# place to avoid filesystem corruption on XFS (unexpected reboot.)
		if os.path.exists(cpath):
			cfile=open(cpath, "r")
				counter=long(cfile.readline())
				print "portage: COUNTER for",mycpv,"was corrupted; resetting to value of 0"
		elif os.path.exists(cdir):
			# COUNTER file missing but the package dir exists: try to repair.
			mys = pkgsplit(mycpv)
			myl = self.match(mys[0],use_cache=0)
					# Only one package... Counter doesn't matter.
					write_atomic(cpath, "1")
				except SystemExit, e:
				except Exception, e:
					writemsg("!!! COUNTER file is missing for "+str(mycpv)+" in /var/db.\n",
					writemsg("!!! Please run %s/fix-db.py or\n" % PORTAGE_BIN_PATH,
					writemsg("!!! unmerge this exact version.\n", noiselevel=-1)
					writemsg("!!! %s\n" % e, noiselevel=-1)
				writemsg("!!! COUNTER file is missing for "+str(mycpv)+" in /var/db.\n",
				writemsg("!!! Please run %s/fix-db.py or\n" % PORTAGE_BIN_PATH,
				writemsg("!!! remerge the package.\n", noiselevel=-1)
			# update new global counter file
			write_atomic(cpath, str(counter))
5087 def cpv_inject(self,mycpv):
5088 "injects a real package into our on-disk database; assumes mycpv is valid and doesn't already exist"
5089 os.makedirs(self.root+VDB_PATH+"/"+mycpv)
5090 counter = self.counter_tick(self.root, mycpv=mycpv)
5091 # write local package counter so that emerge clean does the right thing
5092 write_atomic(os.path.join(self.root, VDB_PATH, mycpv, "COUNTER"), str(counter))
	def isInjected(self,mycpv):
		# A package counts as "injected" when it has an INJECTED marker file
		# or lacks a CONTENTS file in its VDB entry.
		# NOTE(review): the return statements are elided in this excerpt.
		if self.cpv_exists(mycpv):
			if os.path.exists(self.root+VDB_PATH+"/"+mycpv+"/INJECTED"):
			if not os.path.exists(self.root+VDB_PATH+"/"+mycpv+"/CONTENTS"):
	def move_ent(self,mylist):
		# Rename an installed package (category/name move) on disk,
		# updating its VDB directory, ebuild file, PF and CATEGORY files.
		# NOTE(review): the unpacking of mylist into origcp/newcp and
		# several guard/continue lines are elided in this excerpt.
		for cp in [origcp,newcp]:
			if not (isvalidatom(cp) and isjustname(cp)):
				raise portage_exception.InvalidPackageName(cp)
		origmatches=self.match(origcp,use_cache=0)
		for mycpv in origmatches:
			mycpsplit=catpkgsplit(mycpv)
			mynewcpv=newcp+"-"+mycpsplit[2]
			mynewcat=newcp.split("/")[0]
			# Append the revision unless it is the implicit "-r0".
			if mycpsplit[3]!="r0":
				mynewcpv += "-"+mycpsplit[3]
			mycpsplit_new = catpkgsplit(mynewcpv)
			origpath=self.root+VDB_PATH+"/"+mycpv
			if not os.path.exists(origpath):
			writemsg_stdout("@")
			if not os.path.exists(self.root+VDB_PATH+"/"+mynewcat):
				#create the directory
				os.makedirs(self.root+VDB_PATH+"/"+mynewcat)
			newpath=self.root+VDB_PATH+"/"+mynewcpv
			if os.path.exists(newpath):
				#dest already exists; keep this puppy where it is.
			os.rename(origpath, newpath)
			# We need to rename the ebuild now.
			old_pf = catsplit(mycpv)[1]
			new_pf = catsplit(mynewcpv)[1]
			if new_pf != old_pf:
					os.rename(os.path.join(newpath, old_pf + ".ebuild"),
						os.path.join(newpath, new_pf + ".ebuild"))
				# A missing ebuild file is tolerated (ENOENT only).
				if e.errno != errno.ENOENT:
			write_atomic(os.path.join(newpath, "PF"), new_pf+"\n")
			write_atomic(os.path.join(newpath, "CATEGORY"), mynewcat+"\n")
			fixdbentries([mylist], newpath)
5149 def update_ents(self, update_iter):
5150 """Run fixdbentries on all installed packages (time consuming). Like
5151 fixpackages, this should be run from a helper script and display
5152 a progress indicator."""
5153 dbdir = os.path.join(self.root, VDB_PATH)
5154 for catdir in listdir(dbdir):
5155 catdir = dbdir+"/"+catdir
5156 if os.path.isdir(catdir):
5157 for pkgdir in listdir(catdir):
5158 pkgdir = catdir+"/"+pkgdir
5159 if os.path.isdir(pkgdir):
5160 fixdbentries(update_iter, pkgdir)
	def move_slot_ent(self,mylist):
		# Rewrite the SLOT file of installed packages matching an atom.
		# NOTE(review): the unpacking of mylist (pkg, origslot, newslot) and
		# several guard/continue lines are elided in this excerpt.
		if not isvalidatom(pkg):
			raise portage_exception.InvalidAtom(pkg)
		origmatches=self.match(pkg,use_cache=0)
		for mycpv in origmatches:
			origpath=self.root+VDB_PATH+"/"+mycpv
			if not os.path.exists(origpath):
			slot=grabfile(origpath+"/SLOT");
			# Only rewrite when the recorded slot matches origslot.
			if (slot[0]!=origslot):
			writemsg_stdout("s")
			write_atomic(os.path.join(origpath, "SLOT"), newslot+"\n")
	def cp_list(self,mycp,use_cache=1):
		# List installed cpvs for a cp key, with an mtime-validated cache
		# over the category directory.
		# NOTE(review): try/except scaffolding, the cache-hit return, and
		# several continue lines are elided in this excerpt.
		mysplit=mycp.split("/")
		if mysplit[0] == '*':
			mysplit[0] = mysplit[0][1:]
			mystat=os.stat(self.root+VDB_PATH+"/"+mysplit[0])[stat.ST_MTIME]
		if use_cache and self.cpcache.has_key(mycp):
			cpc=self.cpcache[mycp]
		list=listdir(self.root+VDB_PATH+"/"+mysplit[0],EmptyOnError=1)
			# Skip hidden entries and "-MERGING-" leftovers.
			if x.startswith("."):
				#writemsg(red("INCOMPLETE MERGE:")+str(x[len("-MERGING-"):])+"\n")
				self.invalidentry(self.root+VDB_PATH+"/"+mysplit[0]+"/"+x)
			if len(mysplit) > 1:
				if ps[0]==mysplit[1]:
					returnme.append(mysplit[0]+"/"+x)
			self.cpcache[mycp]=[mystat,returnme]
		elif self.cpcache.has_key(mycp):
			del self.cpcache[mycp]
	def cpv_all(self,use_cache=1):
		# Walk the on-disk VDB and return every installed cpv.
		# NOTE(review): the returnme initialization, the subpath binding and
		# several guard lines are elided in this excerpt.
		basepath = self.root+VDB_PATH+"/"
		for x in self.categories:
			for y in listdir(basepath+x,EmptyOnError=1):
				if y.startswith("."):
				# -MERGING- should never be a cpv, nor should files.
				if os.path.isdir(basepath+subpath) and (pkgsplit(y) is not None):
					returnme += [subpath]
	def cp_all(self,use_cache=1):
		# Derive the set of installed cp keys from cpv_all().
		# NOTE(review): the dict initialization, guard lines and the final
		# return are elided in this excerpt.
		mylist = self.cpv_all(use_cache=use_cache)
			mysplit=catpkgsplit(y)
				self.invalidentry(self.root+VDB_PATH+"/"+y)
			d[mysplit[0]+"/"+mysplit[1]] = None
	def checkblockers(self,origdep):
		# NOTE(review): body elided in this excerpt; appears related to
		# blocker (!atom) checking.
	def match(self,origdep,use_cache=1):
		"caching match function"
		# Matches are cached per category, validated against the category
		# directory's mtime.
		# NOTE(review): the dep_expand call header, try/except scaffolding
		# and the cache-bypass return are elided in this excerpt.
			origdep, mydb=self, use_cache=use_cache, settings=self.settings)
		mykey=dep_getkey(mydep)
		mycat=mykey.split("/")[0]
			# Non-cached path: invalidate this category's caches first.
			if self.matchcache.has_key(mycat):
				del self.mtdircache[mycat]
				del self.matchcache[mycat]
			mymatch = match_from_list(mydep,
				self.cp_list(mykey, use_cache=use_cache))
			myslot = portage_dep.dep_getslot(mydep)
			if myslot is not None:
				mymatch = [cpv for cpv in mymatch \
					if self.aux_get(cpv, ["SLOT"])[0] == myslot]
			curmtime=os.stat(self.root+VDB_PATH+"/"+mycat)[stat.ST_MTIME]
		except (IOError, OSError):
		# Invalidate the category cache when the directory changed.
		if not self.matchcache.has_key(mycat) or not self.mtdircache[mycat]==curmtime:
			self.mtdircache[mycat]=curmtime
			self.matchcache[mycat]={}
		if not self.matchcache[mycat].has_key(mydep):
			mymatch=match_from_list(mydep,self.cp_list(mykey,use_cache=use_cache))
			myslot = portage_dep.dep_getslot(mydep)
			if myslot is not None:
				mymatch = [cpv for cpv in mymatch \
					if self.aux_get(cpv, ["SLOT"])[0] == myslot]
			self.matchcache[mycat][mydep]=mymatch
		# Return a copy so callers cannot mutate the cache.
		return self.matchcache[mycat][mydep][:]
5290 def findname(self, mycpv):
5291 return self.root+VDB_PATH+"/"+str(mycpv)+"/"+mycpv.split("/")[1]+".ebuild"
	def flush_cache(self):
		"""If the current user has permission and the internal aux_get cache has
		been updated, save it to disk and mark it unmodified.  This is called
		by emerge after it has loaded the full vdb for use in dependency
		calculations.  Currently, the cache is only written if the user has
		superuser privileges (since that's required to obtain a lock), but all
		users have read access and benefit from faster metadata lookups (as
		long as at least part of the cache is still valid)."""
		# NOTE(review): the privilege check, try header and except body are
		# elided in this excerpt.
		if self._aux_cache is not None and \
			self._aux_cache["modified"] and \
			# Prune cache entries for packages no longer installed.
			valid_nodes = set(self.cpv_all())
			for cpv in self._aux_cache["packages"].keys():
				if cpv not in valid_nodes:
					del self._aux_cache["packages"][cpv]
			del self._aux_cache["modified"]
			f = atomic_ofstream(self._aux_cache_filename)
			# Protocol -1 selects the highest available pickle protocol.
			cPickle.dump(self._aux_cache, f, -1)
			portage_util.apply_secpass_permissions(
				self._aux_cache_filename, gid=portage_gid, mode=0644)
		except (IOError, OSError), e:
			self._aux_cache["modified"] = False
	def aux_get(self, mycpv, wants):
		"""This automatically caches selected keys that are frequently needed
		by emerge for dependency calculations.  The cached metadata is
		considered valid if the mtime of the package directory has not changed
		since the data was cached.  The cache is stored in a pickled dict
		object with the following format:

		{version:"1", "packages":{cpv1:(mtime,{k1,v1, k2,v2, ...}), cpv2...}}

		If an error occurs while loading the cache pickle or the version is
		unrecognized, the cache will simple be recreated from scratch (it is
		completely disposable).
		"""
		# NOTE(review): try/except and if/else scaffolding lines are elided
		# in this excerpt; indentation reflects the original nesting.
		if not self._aux_cache_keys.intersection(wants):
			# Nothing cacheable requested; read straight from disk.
			return self._aux_get(mycpv, wants)
		if self._aux_cache is None:
				f = open(self._aux_cache_filename)
				mypickle = cPickle.Unpickler(f)
				# Forbid resolution of global names while unpickling
				# (defense against a tampered cache file).
				mypickle.find_global = None
				self._aux_cache = mypickle.load()
			except (IOError, OSError, EOFError, cPickle.UnpicklingError):
			# Discard the cache when it is missing, malformed or from an
			# unknown version; it is disposable.
			if not self._aux_cache or \
				not isinstance(self._aux_cache, dict) or \
				self._aux_cache.get("version") != self._aux_cache_version or \
				not self._aux_cache.get("packages"):
				self._aux_cache = {"version":self._aux_cache_version}
				self._aux_cache["packages"] = {}
			self._aux_cache["modified"] = False
		mydir = os.path.join(self.root, VDB_PATH, mycpv)
			mydir_stat = os.stat(mydir)
			if e.errno != errno.ENOENT:
			raise KeyError(mycpv)
		mydir_mtime = long(mydir_stat.st_mtime)
		pkg_data = self._aux_cache["packages"].get(mycpv)
			cache_mtime, metadata = pkg_data
			cache_valid = cache_mtime == mydir_mtime
			cache_incomplete = self._aux_cache_keys.difference(metadata)
			if cache_incomplete:
				# Allow self._aux_cache_keys to change without a cache version
				# bump and efficiently recycle partial cache whenever possible.
				pull_me = cache_incomplete.union(wants)
				pull_me = set(wants).difference(self._aux_cache_keys)
			mydata.update(metadata)
			pull_me = self._aux_cache_keys.union(wants)
			# pull any needed data and cache it
			aux_keys = list(pull_me)
			for k, v in izip(aux_keys, self._aux_get(mycpv, aux_keys)):
				for aux_key in self._aux_cache_keys:
					cache_data[aux_key] = mydata[aux_key]
				self._aux_cache["packages"][mycpv] = (mydir_mtime, cache_data)
				self._aux_cache["modified"] = True
		return [mydata[x] for x in wants]
	def _aux_get(self, mycpv, wants):
		# Read the requested metadata values straight from the package's
		# VDB directory, one file per key; whitespace is normalized.
		# NOTE(review): try/except scaffolding and the results handling are
		# elided in this excerpt.
		mydir = os.path.join(self.root, VDB_PATH, mycpv)
			if not stat.S_ISDIR(os.stat(mydir).st_mode):
				raise KeyError(mycpv)
			if e.errno == errno.ENOENT:
				raise KeyError(mycpv)
				myf = open(os.path.join(mydir, x), "r")
				myd = " ".join(myd.split())
			# An empty EAPI file implicitly means EAPI 0.
			if x == "EAPI" and not myd:
5418 def aux_update(self, cpv, values):
5419 cat, pkg = cpv.split("/")
5420 mylink = dblink(cat, pkg, self.root, self.settings,
5421 treetype="vartree", vartree=self.vartree)
5422 if not mylink.exists():
5424 for k, v in values.iteritems():
5425 mylink.setfile(k, v)
def counter_tick(self, myroot, mycpv=None):
    """Increment the global package COUNTER and return the new value.

    Convenience wrapper that delegates to counter_tick_core() with
    incrementing enabled.
    """
    new_value = self.counter_tick_core(myroot, incrementing=1, mycpv=mycpv)
    return new_value
def get_counter_tick_core(self, myroot, mycpv=None):
    """Return the counter value the next tick would produce.

    Reads the current global COUNTER without incrementing it on disk
    (incrementing=0) and returns that value plus one.
    """
    current = self.counter_tick_core(myroot, incrementing=0, mycpv=mycpv)
    return current + 1
def counter_tick_core(self, myroot, incrementing=1, mycpv=None):
    "This method will grab the next COUNTER value and record it back to the global file. Returns new counter value."
    # Global counter file lives under the edb cache, keyed off myroot.
    cpath = myroot + "var/cache/edb/counter"
    # Scan installed versions of this package to find the highest
    # COUNTER already recorded (sanity floor for the global counter).
    # NOTE(review): try/else scaffolding around this loop is elided.
    mysplit = pkgsplit(mycpv)
    for x in self.match(mysplit[0], use_cache=0):
        old_counter = long(self.aux_get(x, ["COUNTER"])[0])
        writemsg("COUNTER '%d' '%s'\n" % (old_counter, x), 1)
    except (ValueError, KeyError): # valueError from long(), KeyError from aux_get
        writemsg("!!! BAD COUNTER in '%s'\n" % (x), noiselevel=-1)
    if old_counter > min_counter:
        min_counter = old_counter
    # We write our new counter value to a new file that gets moved into
    # place to avoid filesystem corruption.
    # Shell pipeline used as a fallback to reconstruct the counter from
    # the per-package COUNTER files when the global file is bad/missing.
    find_counter = ("find '%s' -type f -name COUNTER | " + \
        "while read f; do echo $(<\"${f}\"); done | " + \
        "sort -n | tail -n1") % os.path.join(self.root, VDB_PATH)
    if os.path.exists(cpath):
        cfile = open(cpath, "r")
        counter = long(cfile.readline())
    # Corrupt global counter file: regenerate from per-package files.
    except (ValueError, OverflowError):
        counter = long(commands.getoutput(find_counter).strip())
        writemsg("!!! COUNTER was corrupted; resetting to value of %d\n" % counter,
    # Fallback regeneration itself failed: per-package data is corrupt.
    except (ValueError, OverflowError):
        writemsg("!!! COUNTER data is corrupt in pkg db. The values need to be\n",
        writemsg("!!! corrected/normalized so that portage can operate properly.\n",
        writemsg("!!! A simple solution is not yet available so try #gentoo on IRC.\n")
        # Global counter file missing entirely: regenerate or initialize.
        counter = long(commands.getoutput(find_counter).strip())
        writemsg("!!! Global counter missing. Regenerated from counter files to: %s\n" % counter,
    except ValueError: # Value Error for long(), probably others for commands.getoutput
        writemsg("!!! Initializing global counter.\n", noiselevel=-1)
    # Never let the global counter fall below the highest installed
    # per-package COUNTER; jump well past it if it did.
    if counter < min_counter:
        counter = min_counter + 1000
    if incrementing or changed:
        # update new global counter file
        write_atomic(cpath, str(counter))
class vartree(object):
    "this tree will scan a var/db/pkg database located at root (passed to init)"

    def __init__(self, root="/", virtual=None, clone=None, categories=None,
        # NOTE(review): the signature continuation and the clone-branch
        # scaffolding are elided in this view; the clone parameter is
        # deprecated and copies the other instance's state.
        writemsg("vartree.__init__(): deprecated " + \
            "use of clone parameter\n", noiselevel=-1)
        self.root = clone.root[:]
        self.dbapi = copy.deepcopy(clone.dbapi)
        self.settings = config(clone=clone.settings)
        # Non-clone path: fall back to the module-global settings object.
        if settings is None:
            settings = globals()["settings"]
        self.settings = settings # for key_expand calls
        if categories is None:
            categories = settings.categories
        self.dbapi = vardbapi(self.root, categories=categories,
            settings=settings, vartree=self)

    # NOTE(review): zap() and inject() bodies are elided in this view.
    def zap(self, mycpv):
    def inject(self, mycpv):

    def get_provide(self, mycpv):
        # Compute the list of cat/pkg virtuals PROVIDEd by an installed
        # package, honoring its recorded USE flags.
        # NOTE(review): try/except scaffolding and the returns are elided.
        mylines, myuse = self.dbapi.aux_get(mycpv, ["PROVIDE", "USE"])
        myuse = myuse.split()
        # Reduce conditional PROVIDE syntax against the package's USE.
        mylines = flatten(portage_dep.use_reduce(portage_dep.paren_reduce(mylines), uselist=myuse))
        for myprovide in mylines:
            mys = catpkgsplit(myprovide)
            mys = myprovide.split("/")
            myprovides += [mys[0] + "/" + mys[1]]
    except SystemExit, e:
    except Exception, e:
        # Best-effort: report the parse failure and continue.
        mydir = os.path.join(self.root, VDB_PATH, mycpv)
        writemsg("\nParse Error reading PROVIDE and USE in '%s'\n" % mydir,
        writemsg("Possibly Invalid: '%s'\n" % str(mylines),
        writemsg("Exception: %s\n\n" % str(e), noiselevel=-1)

    def get_all_provides(self):
        # Build a mapping of virtual -> list of installed providers by
        # walking every installed cpv.  (dict init / return elided.)
        for node in self.getallcpv():
            for mykey in self.get_provide(node):
                if myprovides.has_key(mykey):
                    myprovides[mykey] += [node]
                    myprovides[mykey] = [node]

    def dep_bestmatch(self, mydep, use_cache=1):
        "compatibility method -- all matches, not just visible ones"
        #mymatch=best(match(dep_expand(mydep,self.dbapi),self.dbapi))
        mymatch = best(self.dbapi.match(
            dep_expand(mydep, mydb=self.dbapi, settings=self.settings),
            use_cache=use_cache))

    def dep_match(self, mydep, use_cache=1):
        "compatibility method -- we want to see all matches, not just visible ones"
        #mymatch=match(mydep,self.dbapi)
        mymatch = self.dbapi.match(mydep, use_cache=use_cache)
def exists_specific(self, cpv):
    """Return whether the exact cpv is present in the vardb.

    Pure delegation to the underlying dbapi's cpv_exists().
    """
    db = self.dbapi
    return db.cpv_exists(cpv)
def getallcpv(self):
    """Temporary function, probably to be renamed --- gets a list of all
    category/package-versions installed on the system."""
    all_cpvs = self.dbapi.cpv_all()
    return all_cpvs
def getallnodes(self):
    """New behavior: these are all *unmasked* nodes.  There may or may
    not be an available masked package for nodes in this list."""
    nodes = self.dbapi.cp_all()
    return nodes
def exists_specific_cat(self, cpv, use_cache=1):
    # Check for an installed package in a specific category, expanding
    # the key against the vardb first.
    # NOTE(review): the loop over the category listing and the returns
    # are elided in this view; 'a' and 'x' are bound by elided lines.
    cpv = key_expand(cpv, mydb=self.dbapi, use_cache=use_cache,
        settings=self.settings)
    mylist = listdir(self.root+VDB_PATH+"/"+a[0], EmptyOnError=1)
    # Entries that fail pkgsplit are reported as invalid vdb entries.
    self.dbapi.invalidentry(self.root+VDB_PATH+"/"+a[0]+"/"+x)
def getebuildpath(self, fullpackage):
    """Return the path of the ebuild saved in the VDB for an installed
    package, i.e. ${ROOT}${VDB_PATH}/${CATEGORY}/${PF}/${PF}.ebuild."""
    category, package = fullpackage.split("/")
    return "%s%s/%s/%s.ebuild" % (self.root, VDB_PATH, fullpackage, package)
def getnode(self, mykey, use_cache=1):
    # Collect [cpv, catpkgsplit-style list] entries for every installed
    # version matching the cat/pkg key.
    # NOTE(review): loop scaffolding and the return are elided.
    mykey = key_expand(mykey, mydb=self.dbapi, use_cache=use_cache,
        settings=self.settings)
    mysplit = mykey.split("/")
    mydirlist = listdir(self.root+VDB_PATH+"/"+mysplit[0], EmptyOnError=1)
    mypsplit = pkgsplit(x)
    # Unsplittable entries are flagged as invalid vdb entries.
    self.dbapi.invalidentry(self.root+VDB_PATH+"/"+mysplit[0]+"/"+x)
    if mypsplit[0] == mysplit[1]:
        appendme = [mysplit[0]+"/"+x, [mysplit[0], mypsplit[0], mypsplit[1], mypsplit[2]]]
        returnme.append(appendme)

def getslot(self, mycatpkg):
    "Get a slot for a catpkg; assume it exists."
    # NOTE(review): the try/except wrapper around this lookup is elided.
    return self.dbapi.aux_get(mycatpkg, ["SLOT"])[0]

def hasnode(self, mykey, use_cache):
    """Does the particular node (cat/pkg key) exist?"""
    # Same directory scan as getnode(), but only tests for existence.
    # NOTE(review): loop scaffolding and the returns are elided.
    mykey = key_expand(mykey, mydb=self.dbapi, use_cache=use_cache,
        settings=self.settings)
    mysplit = mykey.split("/")
    mydirlist = listdir(self.root+VDB_PATH+"/"+mysplit[0], EmptyOnError=1)
    mypsplit = pkgsplit(x)
    self.dbapi.invalidentry(self.root+VDB_PATH+"/"+mysplit[0]+"/"+x)
    if mypsplit[0] == mysplit[1]:

# Module-level tuple of metadata cache keys (opening of the assignment
# is elided in this view).  Order matters: it mirrors the cache format.
    'DEPEND', 'RDEPEND', 'SLOT', 'SRC_URI',
    'RESTRICT', 'HOMEPAGE', 'LICENSE', 'DESCRIPTION',
    'KEYWORDS', 'INHERITED', 'IUSE', 'CDEPEND',
    'PDEPEND', 'PROVIDE', 'EAPI',
    'UNUSED_01', 'UNUSED_02', 'UNUSED_03', 'UNUSED_04',
    'UNUSED_05', 'UNUSED_06', 'UNUSED_07',
auxdbkeylen = len(auxdbkeys)

def close_portdbapi_caches():
    # Flush/close the metadata caches of every live portdbapi instance.
    # NOTE(review): the loop body is elided in this view.
    for i in portdbapi.portdbapi_instances:
class portdbapi(dbapi):
    """this tree will scan a portage directory located at root (passed to init)"""
    # All live instances, so close_portdbapi_caches() can reach them.
    portdbapi_instances = []

    def __init__(self, porttree_root, mysettings=None):
        portdbapi.portdbapi_instances.append(self)
        self.mysettings = mysettings
        # Fallback when no settings were passed: clone the module-global
        # config.  (The surrounding if/else lines are elided here.)
        self.mysettings = config(clone=settings)
        self._categories = set(self.mysettings.categories)
        # This is strictly for use in aux_get() doebuild calls when metadata
        # is generated by the depend phase. It's safest to use a clone for
        # this purpose because doebuild makes many changes to the config
        # instance that is passed in.
        self.doebuild_settings = config(clone=self.mysettings)
        # GPG Manifest verification state; configured below from FEATURES.
        self.manifestVerifyLevel = None
        self.manifestVerifier = None
        self.manifestCache = {} # {location: [stat, md5]}
        self.manifestMissingCache = []
        if "gpg" in self.mysettings.features:
            self.manifestVerifyLevel = portage_gpg.EXISTS
            if "strict" in self.mysettings.features:
                self.manifestVerifyLevel = portage_gpg.MARGINAL
                self.manifestVerifier = portage_gpg.FileChecker(self.mysettings["PORTAGE_GPG_DIR"], "gentoo.gpg", minimumTrust=self.manifestVerifyLevel)
            elif "severe" in self.mysettings.features:
                self.manifestVerifyLevel = portage_gpg.TRUSTED
                self.manifestVerifier = portage_gpg.FileChecker(self.mysettings["PORTAGE_GPG_DIR"], "gentoo.gpg", requireSignedRing=True, minimumTrust=self.manifestVerifyLevel)
                # NOTE(review): an else-branch line appears to be elided
                # before this default FileChecker construction.
                self.manifestVerifier = portage_gpg.FileChecker(self.mysettings["PORTAGE_GPG_DIR"], "gentoo.gpg", minimumTrust=self.manifestVerifyLevel)
        #self.root=settings["PORTDIR"]
        self.porttree_root = os.path.realpath(porttree_root)
        self.depcachedir = self.mysettings.depcachedir[:]
        # Optional tmpfs used for sandboxed metadata generation; the
        # warning bodies for these checks are elided in this view.
        self.tmpfs = self.mysettings["PORTAGE_TMPFS"]
        if self.tmpfs and not os.path.exists(self.tmpfs):
        if self.tmpfs and not os.access(self.tmpfs, os.W_OK):
        if self.tmpfs and not os.access(self.tmpfs, os.R_OK):
        self.eclassdb = eclass_cache.cache(self.porttree_root,
            overlays=self.mysettings["PORTDIR_OVERLAY"].split())
        self.metadbmodule = self.mysettings.load_best_module("portdbapi.metadbmodule")
        #if the portdbapi is "frozen", then we assume that we can cache everything (that no updates to it are happening)
        # Main tree first, then overlays, all realpath-normalized.
        self.porttrees = [self.porttree_root] + \
            [os.path.realpath(t) for t in self.mysettings["PORTDIR_OVERLAY"].split()]
        self.auxdbmodule = self.mysettings.load_best_module("portdbapi.auxdbmodule")
        self._init_cache_dirs()
        # XXX: REMOVE THIS ONCE UNUSED_0 IS YANKED FROM auxdbkeys
        filtered_auxdbkeys = filter(lambda x: not x.startswith("UNUSED_0"), auxdbkeys)
        # Overlay-style cache: read-only on-disk db layered under an
        # in-memory volatile db.  (The selecting conditional is elided.)
        from cache import metadata_overlay, volatile
        for x in self.porttrees:
            db_ro = self.auxdbmodule(self.depcachedir, x,
                filtered_auxdbkeys, gid=portage_gid, readonly=True)
            self.auxdb[x] = metadata_overlay.database(
                self.depcachedir, x, filtered_auxdbkeys,
                gid=portage_gid, db_rw=volatile.database,
        # Plain per-tree cache database (alternate branch).
        for x in self.porttrees:
            # location, label, auxdbkeys
            self.auxdb[x] = self.auxdbmodule(
                self.depcachedir, x, filtered_auxdbkeys, gid=portage_gid)
        # Selectively cache metadata in order to optimize dep matching.
        self._aux_cache_keys = set(["EAPI", "KEYWORDS", "SLOT"])
        self._aux_cache = {}

    def _init_cache_dirs(self):
        """Create /var/cache/edb/dep and adjust permissions for the
        portage group (docstring truncated in this view)."""
        for mydir in (self.depcachedir,):
            if portage_util.ensure_dirs(mydir, gid=portage_gid, mode=dirmode, mask=modemask):
                writemsg("Adjusting permissions recursively: '%s'\n" % mydir,
            # onerror callback body (context elided in this view):
            raise # bail out on the first error that occurs during recursion
            if not apply_recursive_permissions(mydir,
                gid=portage_gid, dirmode=dirmode, dirmask=modemask,
                filemode=filemode, filemask=modemask, onerror=onerror):
                raise portage_exception.OperationNotPermitted(
                    "Failed to apply recursive permissions for the portage group.")
        # Permission problems here are non-fatal (handler elided).
        except portage_exception.PortageException, e:

    def close_caches(self):
        # Sync every per-tree metadata cache to disk.
        # NOTE(review): trailing lines of this method are elided.
        for x in self.auxdb.keys():
            self.auxdb[x].sync()

    def flush_cache(self):
        # NOTE(review): the loop body is elided in this view.
        for x in self.auxdb.values():

    def finddigest(self, mycpv):
        # Return the path of the old-style digest file for mycpv,
        # derived from the ebuild's location.
        # NOTE(review): try/except scaffolding is elided.
        mydig = self.findname2(mycpv)[0]
        # Strip the ebuild filename, keeping the package directory.
        mydigs = mydig.split("/")[:-1]
        mydig = "/".join(mydigs)
        mysplit = mycpv.split("/")
        return mydig+"/files/digest-"+mysplit[-1]
def findname(self, mycpv):
    """Return just the ebuild path for mycpv (first element of the
    (path, overlay) pair produced by findname2())."""
    result = self.findname2(mycpv)
    return result[0]
def findname2(self, mycpv, mytree=None):
    """
    Returns the location of the CPV, and what overlay it was in.
    Searches overlays first, then PORTDIR; this allows us to return the first
    matching file. As opposed to starting in portdir and then doing overlays
    second, we would have to exhaustively search the overlays until we found
    the match.  (docstring closing reconstructed; some lines elided)
    """
    # NOTE(review): validation, the search loop and the returns are
    # elided; 'x' below is the tree root being probed.
    mysplit = mycpv.split("/")
    psplit = pkgsplit(mysplit[1])
    mytrees = self.porttrees[:]
    file = x+"/"+mysplit[0]+"/"+psplit[0]+"/"+mysplit[1]+".ebuild"
    if os.access(file, os.R_OK):

def aux_get(self, mycpv, mylist, mytree=None):
    "stub code for returning auxilliary db information, such as SLOT, DEPEND, etc."
    'input: "sys-apps/foo-1.0",["SLOT","DEPEND","HOMEPAGE"]'
    'return: ["0",">=sys-libs/bar-1.0","http://www.foo.com"] or raise KeyError if error'
    # Fast path: when every requested key is in the small in-memory
    # cache (EAPI/KEYWORDS/SLOT), answer from it directly.
    if not mytree and not set(mylist).difference(self._aux_cache_keys):
        aux_cache = self._aux_cache.get(mycpv)
        if aux_cache is not None:
            return [aux_cache[x] for x in mylist]
    global auxdbkeys, auxdbkeylen
    cat, pkg = mycpv.split("/", 1)
    myebuild, mylocation = self.findname2(mycpv, mytree)
    # No ebuild found for this cpv: report and raise.
    writemsg("!!! aux_get(): ebuild path for '%(cpv)s' not specified:\n" % {"cpv":mycpv},
    # NOTE(review): noiselevel=1 looks inconsistent with the -1 used for
    # other error messages in this file — verify against upstream.
    writemsg("!!! %s\n" % myebuild, noiselevel=1)
    raise KeyError(mycpv)
    # Optional GPG verification of the package's Manifest.
    myManifestPath = "/".join(myebuild.split("/")[:-1])+"/Manifest"
    if "gpg" in self.mysettings.features:
        mys = portage_gpg.fileStats(myManifestPath)
        # Skip re-verification when the cached stat/md5 still matches.
        if (myManifestPath in self.manifestCache) and \
           (self.manifestCache[myManifestPath] == mys):
        elif self.manifestVerifier:
            if not self.manifestVerifier.verify(myManifestPath):
                # Verification failed the desired level.
                raise portage_exception.UntrustedSignature, "Untrusted Manifest: %(manifest)s" % {"manifest":myManifestPath}
        # Paranoia for "severe": the Manifest must not have changed
        # between the two stats taken during verification.
        if ("severe" in self.mysettings.features) and \
           (mys != portage_gpg.fileStats(myManifestPath)):
            raise portage_exception.SecurityViolation, "Manifest changed: %(manifest)s" % {"manifest":myManifestPath}
    except portage_exception.InvalidSignature, e:
        # Invalid signature is fatal only under strict/severe.
        if ("strict" in self.mysettings.features) or \
           ("severe" in self.mysettings.features):
        writemsg("!!! INVALID MANIFEST SIGNATURE DETECTED: %(manifest)s\n" % {"manifest":myManifestPath})
    except portage_exception.MissingSignature, e:
        if ("severe" in self.mysettings.features):
        if ("strict" in self.mysettings.features):
            # Warn only once per Manifest path.
            if myManifestPath not in self.manifestMissingCache:
                writemsg("!!! WARNING: Missing signature in: %(manifest)s\n" % {"manifest":myManifestPath})
                self.manifestMissingCache.insert(0, myManifestPath)
    except (OSError, portage_exception.FileNotFound), e:
        if ("strict" in self.mysettings.features) or \
           ("severe" in self.mysettings.features):
            raise portage_exception.SecurityViolation, "Error in verification of signatures: %(errormsg)s" % {"errormsg":str(e)}
        writemsg("!!! Manifest is missing or inaccessable: %(manifest)s\n" % {"manifest":myManifestPath},
    # mtime of the ebuild drives cache validity below.
    if os.access(myebuild, os.R_OK):
        emtime = os.stat(myebuild)[stat.ST_MTIME]
        writemsg("!!! aux_get(): ebuild for '%(cpv)s' does not exist at:\n" % {"cpv":mycpv},
        writemsg("!!! %s\n" % myebuild,
    # Consult the per-tree metadata cache; regenerate when the cached
    # mtime or the recorded eclass data no longer match.
    mydata = self.auxdb[mylocation][mycpv]
    if emtime != long(mydata.get("_mtime_", 0)):
    elif len(mydata.get("_eclasses_", [])) > 0:
        doregen = not self.eclassdb.is_eclass_data_valid(mydata["_eclasses_"])
    # Drop the stale cache entry (context elided in this view).
    try: del self.auxdb[mylocation][mycpv]
    except KeyError: pass
    writemsg("auxdb is valid: "+str(not doregen)+" "+str(pkg)+"\n", 2)
    writemsg("doregen: %s %s\n" % (doregen, mycpv), 2)
    writemsg("Generating cache entry(0) for: "+str(myebuild)+"\n", 1)
    # Regenerate metadata by running the ebuild's "depend" phase.
    self.doebuild_settings.reset()
    myret = doebuild(myebuild, "depend",
        self.doebuild_settings["ROOT"], self.doebuild_settings,
        dbkey=mydata, tree="porttree", mydbapi=self)
    if myret != os.EX_OK:
        raise KeyError(mycpv)
    if "EAPI" not in mydata or not mydata["EAPI"].strip():
        mydata["EAPI"] = "0"
    if not eapi_is_supported(mydata["EAPI"]):
        # if newer version, wipe everything and negate eapi
        eapi = mydata["EAPI"]
        map(lambda x: mydata.setdefault(x, ""), auxdbkeys)
        mydata["EAPI"] = "-"+eapi
    # Record resolved eclass data in place of raw INHERITED.
    if mydata.get("INHERITED", False):
        mydata["_eclasses_"] = self.eclassdb.get_eclass_data(mydata["INHERITED"].split())
        mydata["_eclasses_"] = {}
        del mydata["INHERITED"]
    mydata["_mtime_"] = emtime
    self.auxdb[mylocation][mycpv] = mydata
    if not mydata.setdefault("EAPI", "0"):
        mydata["EAPI"] = "0"
    #finally, we look at our internal cache entry and return the requested data.
    # INHERITED is synthesized back from the _eclasses_ mapping.
    if x == "INHERITED":
        returnme.append(' '.join(mydata.get("_eclasses_", {}).keys()))
        returnme.append(mydata.get(x, ""))
    # Refresh the small in-memory cache used by the fast path above.
    for x in self._aux_cache_keys:
        aux_cache[x] = mydata.get(x, "")
    self._aux_cache[mycpv] = aux_cache
def getfetchlist(self, mypkg, useflags=None, mysettings=None, all=0, mytree=None):
    # Resolve SRC_URI for mypkg into ([uris], [filenames]), honoring
    # USE-conditional groups unless all=1 (matchall).
    if mysettings is None:
        mysettings = self.mysettings
    # NOTE(review): try/except around this aux_get is elided.
    myuris = self.aux_get(mypkg, ["SRC_URI"], mytree=mytree)[0]
    print red("getfetchlist():")+" aux_get() error reading "+mypkg+"; aborting."
    if useflags is None:
        useflags = mysettings["USE"].split()
    myurilist = portage_dep.paren_reduce(myuris)
    myurilist = portage_dep.use_reduce(myurilist, uselist=useflags, matchall=all)
    newuris = flatten(myurilist)
    # Collect distinct basenames (loop scaffolding elided).
    mya = os.path.basename(x)
    if not mya in myfiles:
    return [newuris, myfiles]

def getfetchsizes(self, mypkg, useflags=None, debug=0):
    # returns a filename:size dictionnary of remaining downloads
    myebuild = self.findname(mypkg)
    pkgdir = os.path.dirname(myebuild)
    mf = Manifest(pkgdir, self.mysettings["DISTDIR"])
    checksums = mf.getDigests()
    if debug: print "[empty/missing/bad digest]: "+mypkg
    if useflags is None:
        myuris, myfiles = self.getfetchlist(mypkg, all=1)
        # else-branch (conditional line elided in this view):
        myuris, myfiles = self.getfetchlist(mypkg, useflags=useflags)
    #XXX: maybe this should be improved: take partial downloads
    # into account? check checksums?
    for myfile in myfiles:
        if myfile not in checksums:
            writemsg("[bad digest]: missing %s for %s\n" % (myfile, mypkg))
        file_path = os.path.join(self.mysettings["DISTDIR"], myfile)
        # NOTE(review): try/except around this stat is elided.
        mystat = os.stat(file_path)
        existing_size = mystat.st_size
        remaining_size = int(checksums[myfile]["size"]) - existing_size
        if remaining_size > 0:
            # Assume the download is resumable.
            filesdict[myfile] = remaining_size
        elif remaining_size < 0:
            # The existing file is too large and therefore corrupt.
            filesdict[myfile] = int(checksums[myfile]["size"])

def fetch_check(self, mypkg, useflags=None, mysettings=None, all=False):
    # Verify already-fetched distfiles for mypkg against the Manifest;
    # returns per-file failures.  (Guards/returns elided in this view.)
    useflags = mysettings["USE"].split()
    myuri, myfiles = self.getfetchlist(mypkg, useflags=useflags, mysettings=mysettings, all=all)
    myebuild = self.findname(mypkg)
    pkgdir = os.path.dirname(myebuild)
    mf = Manifest(pkgdir, self.mysettings["DISTDIR"])
    mysums = mf.getDigests()
    if not mysums or x not in mysums:
        reason = "digest missing"
        ok, reason = portage_checksum.verify_all(
            os.path.join(self.mysettings["DISTDIR"], x), mysums[x])
    except portage_exception.FileNotFound, e:
        reason = "File Not Found: '%s'" % str(e)
    failures[x] = reason

def cpv_exists(self, mykey):
    "Tells us whether an actual ebuild exists on disk (no masking)"
    cps2 = mykey.split("/")
    cps = catpkgsplit(mykey, silent=0)
    # NOTE(review): the surrounding branches and returns are elided.
    if self.findname(cps[0]+"/"+cps2[1]):

# cp_all (def line elided in this view): walk every category in every
# configured tree and accumulate cat/pkg keys.
    "returns a list of all keys in our tree"
    for x in self.mysettings.categories:
        for oroot in self.porttrees:
            for y in listdir(oroot+"/"+x, EmptyOnError=1, ignorecvs=1, dirsonly=1):

def p_list(self, mycp):
    # List package names (PF) for a cat/pkg across all trees.
    # NOTE(review): accumulator/return lines are elided.
    for oroot in self.porttrees:
        for x in listdir(oroot+"/"+mycp, EmptyOnError=1, ignorecvs=1):
            if x[-7:] == ".ebuild":

def cp_list(self, mycp, use_cache=1, mytree=None):
    # List all cpvs for a cat/pkg key across the configured trees.
    mysplit = mycp.split("/")
    invalid_category = mysplit[0] not in self._categories
    mytrees = self.porttrees
    for oroot in mytrees:
        for x in listdir(oroot+"/"+mycp, EmptyOnError=1, ignorecvs=1):
            if x.endswith(".ebuild"):
                # pkgsplit failure path (scaffolding elided):
                writemsg("\nInvalid ebuild name: %s\n" % \
                    os.path.join(oroot, mycp, x), noiselevel=-1)
                # Dict used as an ordered-unique set of cpvs.
                d[mysplit[0]+"/"+pf] = None
    if invalid_category and d:
        writemsg(("\n!!! '%s' has a category that is not listed in " + \
            "/etc/portage/categories\n") % mycp, noiselevel=-1)
# Fragment of freeze(): initialize the per-level xcache dictionaries
# (the enclosing def and loop body are elided in this view).
for x in ["list-visible", "bestmatch-visible", "match-visible", "match-all"]:

def xmatch(self, level, origdep, mydep=None, mykey=None, mylist=None):
    "caching match function; very trick stuff"
    #if no updates are being made to the tree, we can consult our xcache...
    # Cache hit returns a copy so callers can't mutate the cache.
    return self.xcache[level][origdep][:]
    #this stuff only runs on first call of xmatch()
    #create mydep, mykey from origdep
    mydep = dep_expand(origdep, mydb=self, settings=self.mysettings)
    mykey = dep_getkey(mydep)
    if level == "list-visible":
        #a list of all visible packages, not called directly (just by xmatch())
        #myval=self.visible(self.cp_list(mykey))
        myval = self.gvisible(self.visible(self.cp_list(mykey)))
    elif level == "bestmatch-visible":
        #dep match -- best match of all visible packages
        myval = best(self.xmatch("match-visible", None, mydep=mydep, mykey=mykey))
        #get all visible matches (from xmatch()), then choose the best one
    elif level == "bestmatch-list":
        #dep match -- find best match but restrict search to sublist
        myval = best(match_from_list(mydep, mylist))
        #no point is calling xmatch again since we're not caching list deps
    elif level == "match-list":
        #dep match -- find all matches but restrict search to sublist (used in 2nd half of visible())
        myval = match_from_list(mydep, mylist)
    elif level == "match-visible":
        #dep match -- find all visible matches
        myval = match_from_list(mydep,
            self.xmatch("list-visible", mykey, mydep=mykey, mykey=mykey))
        #get all visible packages, then get the matching ones
    elif level == "match-all":
        #match *all* visible *and* masked packages
        myval = match_from_list(mydep, self.cp_list(mykey))
        # Unknown level (else-branch line elided in this view):
        print "ERROR: xmatch doesn't handle", level, "query!"
    # Post-filter by SLOT when the dep carries a slot restriction.
    myslot = portage_dep.dep_getslot(mydep)
    if myslot is not None:
        if self.aux_get(cpv, ["SLOT"])[0] == myslot:
            slotmatches.append(cpv)
        pass # ebuild masked by corruption
    # Only cache when frozen and the level isn't list-restricted.
    if self.frozen and (level not in ["match-list", "bestmatch-list"]):
        self.xcache[level][mydep] = myval
        if origdep and origdep != mydep:
            self.xcache[level][origdep] = myval
def match(self, mydep, use_cache=1):
    """Return all visible packages matching mydep.

    use_cache is accepted for interface compatibility; caching is
    handled inside xmatch().
    """
    level = "match-visible"
    return self.xmatch(level, mydep)
def visible(self, mylist):
    """two functions in one. Accepts a list of cpv values and uses the package.mask *and*
    packages file to remove invisible entries, returning remaining items. This function assumes
    that all entries in mylist have the same category and package name."""
    # NOTE(review): many scaffolding lines (loops, early returns, the
    # newlist accumulator) are elided in this view.
    if (mylist is None) or (len(mylist) == 0):
    #first, we mask out packages in the package.mask file
    cpv = catpkgsplit(mykey)
    print "visible(): invalid cat/pkg-v:", mykey
    mycp = cpv[0]+"/"+cpv[1]
    maskdict = self.mysettings.pmaskdict
    unmaskdict = self.mysettings.punmaskdict
    if maskdict.has_key(mycp):
        for x in maskdict[mycp]:
            mymatches = self.xmatch("match-all", x)
            if mymatches is None:
                #error in package.mask file; print warning and continue:
                print "visible(): package.mask entry \""+x+"\" is invalid, ignoring..."
            # package.unmask entries can override a mask hit.
            if unmaskdict.has_key(mycp):
                for z in unmaskdict[mycp]:
                    mymatches_unmask = self.xmatch("match-all", z)
                    if y in mymatches_unmask:
    # Second stage: profile "packages" file restrictions.
    revmaskdict = self.mysettings.prevmaskdict
    if revmaskdict.has_key(mycp):
        for x in revmaskdict[mycp]:
            #important: only match against the still-unmasked entries...
            #notice how we pass "newlist" to the xmatch() call below....
            #Without this, ~ deps in the packages files are broken.
            mymatches = self.xmatch("match-list", x, mylist=newlist)
            if mymatches is None:
                #error in packages file; print warning and continue:
                print "emerge: visible(): profile packages entry \""+x+"\" is invalid, ignoring..."
            while pos < len(newlist):
                if newlist[pos] not in mymatches:

def gvisible(self, mylist):
    "strip out group-masked (not in current group) entries"
    # Filter by ACCEPT_KEYWORDS plus per-package keyword overrides
    # (package.keywords); also requires a supported EAPI.
    accept_keywords = self.mysettings["ACCEPT_KEYWORDS"].split()
    pkgdict = self.mysettings.pkeywordsdict
    for mycpv in mylist:
        keys, eapi = self.aux_get(mycpv, ["KEYWORDS", "EAPI"])
        except portage_exception.PortageException, e:
            writemsg("!!! Error: aux_get('%s', ['KEYWORDS', 'EAPI'])\n" % \
                mycpv, noiselevel=-1)
            writemsg("!!! %s\n" % str(e), noiselevel=-1)
        mygroups = keys.split()
        # Repoman may modify this attribute as necessary.
        pgroups = accept_keywords[:]
        cp = dep_getkey(mycpv)
        if pkgdict.has_key(cp):
            matches = match_to_list(mycpv, pkgdict[cp].keys())
            for atom in matches:
                pgroups.extend(pkgdict[cp][atom])
        # Resolve incremental -kw / kw entries into inc_pgroups.
        # The -* special case should be removed once the tree
        # is clean of KEYWORDS=-* crap
        if x != "-*" and x.startswith("-"):
            inc_pgroups.remove(x[1:])
        if x not in inc_pgroups:
            inc_pgroups.append(x)
        pgroups = inc_pgroups
        writemsg("--- WARNING: Package '%s' uses '*' keyword.\n" % mycpv,
        # Wildcard acceptance: ~* / * / ** handling.
        if not match and ((hastesting and "~*" in pgroups) or (hasstable and "*" in pgroups) or "**" in pgroups):
        if match and eapi_is_supported(eapi):
            newlist.append(mycpv)
class binarytree(object):
    "this tree scans for a list of all packages available in PKGDIR"

    def __init__(self, root, pkgdir, virtual=None, settings=None, clone=None):
        # Deprecated clone path: share (not copy) the other instance's
        # state.  (Branch scaffolding elided in this view.)
        writemsg("binarytree.__init__(): deprecated " + \
            "use of clone parameter\n", noiselevel=-1)
        # XXX This isn't cloning. It's an instance of the same thing.
        self.root = clone.root
        self.pkgdir = clone.pkgdir
        self.dbapi = clone.dbapi
        self.populated = clone.populated
        self.tree = clone.tree
        self.remotepkgs = clone.remotepkgs
        self.invalids = clone.invalids
        self.settings = clone.settings
        # Normal construction path:
        #self.pkgdir=settings["PKGDIR"]
        self.pkgdir = normalize_path(pkgdir)
        self.dbapi = bindbapi(self, settings=settings)
        self.settings = settings
        # cpv -> relative path under PKGDIR, maintained by getname()
        # and the _move_* helpers.
        self._pkg_paths = {}

    def move_ent(self, mylist):
        # Apply a "move" package update (origcp -> newcp) to all
        # matching binary packages, rewriting their xpak metadata and
        # renaming the .tbz2 files.  (Several lines elided.)
        if not self.populated:
        for cp in [origcp, newcp]:
            if not (isvalidatom(cp) and isjustname(cp)):
                raise portage_exception.InvalidPackageName(cp)
        origcat = origcp.split("/")[0]
        mynewcat = newcp.split("/")[0]
        origmatches = self.dbapi.cp_list(origcp)
        for mycpv in origmatches:
            mycpsplit = catpkgsplit(mycpv)
            mynewcpv = newcp+"-"+mycpsplit[2]
            if mycpsplit[3] != "r0":
                mynewcpv += "-"+mycpsplit[3]
            myoldpkg = mycpv.split("/")[1]
            mynewpkg = mynewcpv.split("/")[1]
            if (mynewpkg != myoldpkg) and os.path.exists(self.getname(mynewcpv)):
                writemsg("!!! Cannot update binary: Destination exists.\n",
                writemsg("!!! "+mycpv+" -> "+mynewcpv+"\n", noiselevel=-1)
            tbz2path = self.getname(mycpv)
            if os.path.exists(tbz2path) and not os.access(tbz2path, os.W_OK):
                writemsg("!!! Cannot update readonly binary: "+mycpv+"\n",
            #print ">>> Updating data in:",mycpv
            writemsg_stdout("%")
            mytbz2 = xpak.tbz2(tbz2path)
            mydata = mytbz2.get_data()
            updated_items = update_dbentries([mylist], mydata)
            mydata.update(updated_items)
            mydata["CATEGORY"] = mynewcat+"\n"
            if mynewpkg != myoldpkg:
                mydata[mynewpkg+".ebuild"] = mydata[myoldpkg+".ebuild"]
                del mydata[myoldpkg+".ebuild"]
                mydata["PF"] = mynewpkg + "\n"
            mytbz2.recompose_mem(xpak.xpak_mem(mydata))
            self.dbapi.cpv_remove(mycpv)
            del self._pkg_paths[mycpv]
            new_path = self.getname(mynewcpv)
            self._pkg_paths[mynewcpv] = os.path.join(
                *new_path.split(os.path.sep)[-2:])
            # NOTE(review): this compares a path string against the
            # xpak.tbz2 instance, so it is presumably always true;
            # looks like it was meant to be `new_path != tbz2path` —
            # confirm against upstream before changing.
            if new_path != mytbz2:
                os.makedirs(os.path.dirname(new_path))
                if e.errno != errno.EEXIST:
                os.rename(tbz2path, new_path)
                self._remove_symlink(mycpv)
                if new_path.split(os.path.sep)[-2] == "All":
                    self._create_symlink(mynewcpv)
            self.dbapi.cpv_inject(mynewcpv)
def _remove_symlink(self, cpv):
    """Remove a ${PKGDIR}/${CATEGORY}/${PF}.tbz2 symlink and also remove
    the ${PKGDIR}/${CATEGORY} directory if empty. The file will not be
    removed if os.path.islink() returns False."""
    mycat, mypkg = catsplit(cpv)
    mylink = os.path.join(self.pkgdir, mycat, mypkg + ".tbz2")
    if os.path.islink(mylink):
        """Only remove it if it's really a link so that this method never
        removes a real package that was placed here to avoid a collision."""
    # Best-effort removal of the now-possibly-empty category dir;
    # ENOENT/ENOTEMPTY/EEXIST are expected and ignored.
    # NOTE(review): try/except scaffolding is elided in this view.
    os.rmdir(os.path.join(self.pkgdir, mycat))
    if e.errno not in (errno.ENOENT,
        errno.ENOTEMPTY, errno.EEXIST):

def _create_symlink(self, cpv):
    """Create a ${PKGDIR}/${CATEGORY}/${PF}.tbz2 symlink (and
    ${PKGDIR}/${CATEGORY} directory, if necessary). Any file that may
    exist in the location of the symlink will first be removed."""
    mycat, mypkg = catsplit(cpv)
    full_path = os.path.join(self.pkgdir, mycat, mypkg + ".tbz2")
    # Ensure the category directory exists (EEXIST tolerated).
    os.makedirs(os.path.dirname(full_path))
    if e.errno != errno.EEXIST:
    # Clear any pre-existing file at the symlink location.
    os.unlink(full_path)
    if e.errno != errno.ENOENT:
    # Relative link into the shared All/ directory.
    os.symlink(os.path.join("..", "All", mypkg + ".tbz2"), full_path)
def move_slot_ent(self, mylist):
    # Apply a "slotmove" update to all matching binary packages,
    # rewriting the SLOT entry in their xpak metadata.
    # NOTE(review): several scaffolding lines are elided in this view.
    if not self.populated:
    if not isvalidatom(pkg):
        raise portage_exception.InvalidAtom(pkg)
    origmatches = self.dbapi.match(pkg)
    for mycpv in origmatches:
        mycpsplit = catpkgsplit(mycpv)
        myoldpkg = mycpv.split("/")[1]
        tbz2path = self.getname(mycpv)
        if os.path.exists(tbz2path) and not os.access(tbz2path, os.W_OK):
            writemsg("!!! Cannot update readonly binary: "+mycpv+"\n",
        #print ">>> Updating data in:",mycpv
        mytbz2 = xpak.tbz2(tbz2path)
        mydata = mytbz2.get_data()
        slot = mydata["SLOT"]
        # Only rewrite when the package's current slot matches origslot.
        if (slot[0] != origslot):
        writemsg_stdout("S")
        mydata["SLOT"] = newslot+"\n"
        mytbz2.recompose_mem(xpak.xpak_mem(mydata))

def update_ents(self, update_iter):
    # Apply a batch of db entry updates to every binary package.
    if len(update_iter) == 0:
    if not self.populated:
    for mycpv in self.dbapi.cp_all():
        tbz2path = self.getname(mycpv)
        if os.path.exists(tbz2path) and not os.access(tbz2path, os.W_OK):
            writemsg("!!! Cannot update readonly binary: "+mycpv+"\n",
        #print ">>> Updating binary data:",mycpv
        writemsg_stdout("*")
        mytbz2 = xpak.tbz2(tbz2path)
        mydata = mytbz2.get_data()
        updated_items = update_dbentries(update_iter, mydata)
        # Only rewrite the archive when something actually changed.
        if len(updated_items) > 0:
            mydata.update(updated_items)
            mytbz2.recompose_mem(xpak.xpak_mem(mydata))
def prevent_collision(self, cpv):
    """Make sure that the file location ${PKGDIR}/All/${PF}.tbz2 is safe to
    use for a given cpv. If a collision will occur with an existing
    package from another category, the existing package will be bumped to
    ${PKGDIR}/${CATEGORY}/${PF}.tbz2 so that both can coexist."""
    if not self.populated:
        # Try to avoid the population routine when possible, so that
        # FEATURES=buildpkg doesn't always force population.
        mycat, mypkg = catsplit(cpv)
        myfile = mypkg + ".tbz2"
        full_path = os.path.join(self.pkgdir, "All", myfile)
        # NOTE(review): the early-return bodies of these guards are
        # elided in this view.
        if not os.path.exists(full_path):
        tbz2_cat = xpak.tbz2(full_path).getfile("CATEGORY")
        if tbz2_cat and tbz2_cat.strip() == mycat:
    full_path = self.getname(cpv)
    if "All" == full_path.split(os.path.sep)[-2]:
    """Move a colliding package if it exists. Code below this point only
    executes in rare cases."""
    mycat, mypkg = catsplit(cpv)
    myfile = mypkg + ".tbz2"
    mypath = os.path.join("All", myfile)
    dest_path = os.path.join(self.pkgdir, mypath)
    if os.path.exists(dest_path):
        # For invalid packages, other_cat could be None.
        other_cat = xpak.tbz2(dest_path).getfile("CATEGORY")
        other_cat = other_cat.strip()
        self._move_from_all(other_cat + "/" + mypkg)
    """The file may or may not exist. Move it if necessary and update
    internal state for future calls to getname()."""
    self._move_to_all(cpv)

def _move_to_all(self, cpv):
    """If the file exists, move it. Whether or not it exists, update state
    for future getname() calls."""
    mycat , mypkg = catsplit(cpv)
    myfile = mypkg + ".tbz2"
    src_path = os.path.join(self.pkgdir, mycat, myfile)
    # NOTE(review): try/except around this lstat is elided.
    mystat = os.lstat(src_path)
    if mystat and stat.S_ISREG(mystat.st_mode):
        # Ensure All/ exists (EEXIST tolerated), then move the package
        # there and leave a category symlink behind.
        os.makedirs(os.path.join(self.pkgdir, "All"))
        if e.errno != errno.EEXIST:
        os.rename(src_path, os.path.join(self.pkgdir, "All", myfile))
        self._create_symlink(cpv)
    self._pkg_paths[cpv] = os.path.join("All", myfile)

def _move_from_all(self, cpv):
    """Move a package from ${PKGDIR}/All/${PF}.tbz2 to
    ${PKGDIR}/${CATEGORY}/${PF}.tbz2 and update state from getname calls."""
    self._remove_symlink(cpv)
    mycat , mypkg = catsplit(cpv)
    myfile = mypkg + ".tbz2"
    mypath = os.path.join(mycat, myfile)
    dest_path = os.path.join(self.pkgdir, mypath)
    # Ensure the category directory exists (EEXIST tolerated;
    # try/except scaffolding elided in this view).
    os.makedirs(os.path.dirname(dest_path))
    if e.errno != errno.EEXIST:
    os.rename(os.path.join(self.pkgdir, "All", myfile), dest_path)
    self._pkg_paths[cpv] = mypath
6566 def populate(self, getbinpkgs=0,getbinpkgsonly=0):
6567 "populates the binarytree"
6568 if (not os.path.isdir(self.pkgdir) and not getbinpkgs):
6571 categories = set(self.settings.categories)
6573 if not getbinpkgsonly:
6575 dirs = listdir(self.pkgdir, dirsonly=True, EmptyOnError=True)
6579 dirs.insert(0, "All")
6581 for myfile in listdir(os.path.join(self.pkgdir, mydir)):
6582 if not myfile.endswith(".tbz2"):
6584 mypath = os.path.join(mydir, myfile)
6585 full_path = os.path.join(self.pkgdir, mypath)
6586 if os.path.islink(full_path):
6588 mytbz2 = xpak.tbz2(full_path)
6589 # For invalid packages, mycat could be None.
6590 mycat = mytbz2.getfile("CATEGORY")
6591 mypf = mytbz2.getfile("PF")
6593 if not mycat or not mypf:
6594 #old-style or corrupt package
6595 writemsg("!!! Invalid binary package: '%s'\n" % full_path,
6597 writemsg("!!! This binary package is not " + \
6598 "recoverable and should be deleted.\n",
6600 self.invalids.append(mypkg)
6602 mycat = mycat.strip()
6603 if mycat != mydir and mydir != "All":
6605 if mypkg != mypf.strip():
6607 mycpv = mycat + "/" + mypkg
6608 if mycpv in pkg_paths:
6609 # All is first, so it's preferred.
6611 if mycat not in categories:
6612 writemsg(("!!! Binary package has an " + \
6613 "unrecognized category: '%s'\n") % full_path,
6615 writemsg(("!!! '%s' has a category that is not" + \
6616 " listed in /etc/portage/categories\n") % mycpv,
6619 pkg_paths[mycpv] = mypath
6620 self.dbapi.cpv_inject(mycpv)
6621 self._pkg_paths = pkg_paths
6623 if getbinpkgs and not self.settings["PORTAGE_BINHOST"]:
6624 writemsg(red("!!! PORTAGE_BINHOST unset, but use is requested.\n"),
6628 self.settings["PORTAGE_BINHOST"] and not self.remotepkgs:
6630 chunk_size = long(self.settings["PORTAGE_BINHOST_CHUNKSIZE"])
6633 except (ValueError, KeyError):
6636 writemsg(green("Fetching binary packages info...\n"))
6637 self.remotepkgs = getbinpkg.dir_get_metadata(
6638 self.settings["PORTAGE_BINHOST"], chunk_size=chunk_size)
6639 writemsg(green(" -- DONE!\n\n"))
6641 for mypkg in self.remotepkgs.keys():
6642 if not self.remotepkgs[mypkg].has_key("CATEGORY"):
6643 #old-style or corrupt package
6644 writemsg("!!! Invalid remote binary package: "+mypkg+"\n",
6646 del self.remotepkgs[mypkg]
6648 mycat=self.remotepkgs[mypkg]["CATEGORY"].strip()
6649 fullpkg=mycat+"/"+mypkg[:-5]
6650 if mycat not in categories:
6651 writemsg(("!!! Remote binary package has an " + \
6652 "unrecognized category: '%s'\n") % fullpkg,
6654 writemsg(("!!! '%s' has a category that is not" + \
6655 " listed in /etc/portage/categories\n") % fullpkg,
6658 mykey=dep_getkey(fullpkg)
6660 # invalid tbz2's can hurt things.
6661 #print "cpv_inject("+str(fullpkg)+")"
6662 self.dbapi.cpv_inject(fullpkg)
6663 #print " -- Injected"
6664 except SystemExit, e:
6667 writemsg("!!! Failed to inject remote binary package:"+str(fullpkg)+"\n",
6669 del self.remotepkgs[mypkg]
def inject(self,cpv):
	"""Record *cpv* in the underlying binary-package dbapi.

	@param cpv: category/package-version identifier to register
	@return: whatever dbapi.cpv_inject() returns for this cpv
	"""
	result = self.dbapi.cpv_inject(cpv)
	return result
6676 def exists_specific(self,cpv):
6677 if not self.populated:
6679 return self.dbapi.match(
6680 dep_expand("="+cpv, mydb=self.dbapi, settings=self.settings))
6682 def dep_bestmatch(self,mydep):
6683 "compatibility method -- all matches, not just visible ones"
6684 if not self.populated:
6687 writemsg("mydep: %s\n" % mydep, 1)
6688 mydep = dep_expand(mydep, mydb=self.dbapi, settings=self.settings)
6689 writemsg("mydep: %s\n" % mydep, 1)
6690 mykey=dep_getkey(mydep)
6691 writemsg("mykey: %s\n" % mykey, 1)
6692 mymatch=best(match_from_list(mydep,self.dbapi.cp_list(mykey)))
6693 writemsg("mymatch: %s\n" % mymatch, 1)
6698 def getname(self,pkgname):
6699 """Returns a file location for this package. The default location is
6700 ${PKGDIR}/All/${PF}.tbz2, but will be ${PKGDIR}/${CATEGORY}/${PF}.tbz2
6701 in the rare event of a collision. The prevent_collision() method can
6702 be called to ensure that ${PKGDIR}/All/${PF}.tbz2 is available for a
6704 if not self.populated:
6707 mypath = self._pkg_paths.get(mycpv, None)
6709 return os.path.join(self.pkgdir, mypath)
6710 mycat, mypkg = catsplit(mycpv)
6711 mypath = os.path.join("All", mypkg + ".tbz2")
6712 if mypath in self._pkg_paths.values():
6713 mypath = os.path.join(mycat, mypkg + ".tbz2")
6714 self._pkg_paths[mycpv] = mypath # cache for future lookups
6715 return os.path.join(self.pkgdir, mypath)
6717 def isremote(self,pkgname):
6718 "Returns true if the package is kept remotely."
6719 mysplit=pkgname.split("/")
6720 remote = (not os.path.exists(self.getname(pkgname))) and self.remotepkgs.has_key(mysplit[1]+".tbz2")
def get_use(self,pkgname):
	"""Return the USE flags recorded for *pkgname* as a list of strings.

	A remotely-kept package is answered from the cached remote metadata
	index; a local package is answered by reading USE out of the tbz2's
	xpak segment.
	"""
	pv = pkgname.split("/")[1]
	if self.isremote(pkgname):
		# Copy the cached string before splitting, exactly as the
		# historical code did ([:] defensive copy).
		use_string = self.remotepkgs[pv + ".tbz2"]["USE"][:]
		return use_string.split()
	local_pkg = xpak.tbz2(self.getname(pkgname))
	return local_pkg.getfile("USE").split()
6730 def gettbz2(self,pkgname):
6731 "fetches the package from a remote site, if necessary."
6732 print "Fetching '"+str(pkgname)+"'"
6733 mysplit = pkgname.split("/")
6734 tbz2name = mysplit[1]+".tbz2"
6735 if not self.isremote(pkgname):
6736 if (tbz2name not in self.invalids):
6739 writemsg("Resuming download of this tbz2, but it is possible that it is corrupt.\n",
6741 mydest = self.pkgdir+"/All/"
6743 os.makedirs(mydest, 0775)
6744 except (OSError, IOError):
6746 return getbinpkg.file_get(
6747 self.settings["PORTAGE_BINHOST"] + "/" + tbz2name,
6748 mydest, fcmd=self.settings["RESUMECOMMAND"])
6750 def getslot(self,mycatpkg):
6751 "Get a slot for a catpkg; assume it exists."
6754 myslot=self.dbapi.aux_get(mycatpkg,["SLOT"])[0]
6755 except SystemExit, e:
6757 except Exception, e:
6763 This class provides an interface to the installed package database
6764 At present this is implemented as a text backend in /var/db/pkg.
6766 def __init__(self, cat, pkg, myroot, mysettings, treetype=None,
6769 Creates a DBlink object for a given CPV.
6770 The given CPV may not be present in the database already.
6772 @param cat: Category
6774 @param pkg: Package (PV)
6776 @param myroot: Typically ${ROOT}
6777 @type myroot: String (Path)
6778 @param mysettings: Typically portage.config
6779 @type mysettings: An instance of portage.config
6780 @param treetype: one of ['porttree','bintree','vartree']
6781 @type treetype: String
6782 @param vartree: an instance of vartree corresponding to myroot.
6783 @type vartree: vartree
6788 self.mycpv = self.cat+"/"+self.pkg
6789 self.mysplit = pkgsplit(self.mycpv)
6790 self.treetype = treetype
6793 vartree = db[myroot]["vartree"]
6794 self.vartree = vartree
6796 self.dbroot = normalize_path(os.path.join(myroot, VDB_PATH))
6797 self.dbcatdir = self.dbroot+"/"+cat
6798 self.dbpkgdir = self.dbcatdir+"/"+pkg
6799 self.dbtmpdir = self.dbcatdir+"/-MERGING-"+pkg
6800 self.dbdir = self.dbpkgdir
6802 self._lock_vdb = None
6804 self.settings = mysettings
6805 if self.settings==1:
6809 protect_obj = portage_util.ConfigProtect(myroot,
6810 mysettings.get("CONFIG_PROTECT","").split(),
6811 mysettings.get("CONFIG_PROTECT_MASK","").split())
6812 self.updateprotect = protect_obj.updateprotect
6813 self._config_protect = protect_obj
6814 self._installed_instance = None
6815 self.contentscache=[]
6816 self._contents_inodes = None
6820 raise AssertionError("Lock already held.")
6821 # At least the parent needs to exist for the lock file.
6822 portage_util.ensure_dirs(self.dbroot)
6823 self._lock_vdb = portage_locks.lockdir(self.dbroot)
6827 portage_locks.unlockdir(self._lock_vdb)
6828 self._lock_vdb = None
6831 "return path to location of db information (for >>> informational display)"
6835 "does the db entry exist? boolean."
6836 return os.path.exists(self.dbdir)
6839 "create the skeleton db directory structure. No contents, virtuals, provides or anything. Also will create /var/db/pkg if necessary."
6841 This function should never get called (there is no reason to use it).
6843 # XXXXX Delete this eventually
6844 raise Exception, "This is bad. Don't use it."
6845 if not os.path.exists(self.dbdir):
6846 os.makedirs(self.dbdir)
6850 Remove this entry from the database
6852 if not os.path.exists(self.dbdir):
6855 for x in listdir(self.dbdir):
6856 os.unlink(self.dbdir+"/"+x)
6857 os.rmdir(self.dbdir)
6859 print "!!! Unable to remove db entry for this package."
6860 print "!!! It is possible that a directory is in this one. Portage will still"
6861 print "!!! register this package as installed as long as this directory exists."
6862 print "!!! You may delete this directory with 'rm -Rf "+self.dbdir+"'"
def clearcontents(self):
	"""
	For a given db entry (self), erase the CONTENTS values.

	Removal is attempted directly (EAFP) instead of the old
	exists()-then-unlink() sequence, which was racy: another process
	could remove the file between the check and the unlink and turn a
	no-op into an OSError.
	"""
	contents_path = os.path.join(self.dbdir, "CONTENTS")
	try:
		os.unlink(contents_path)
	except OSError:
		# ENOENT (already gone / never existed) is fine -- there is
		# nothing to erase.  Re-raise genuine failures such as EACCES.
		# (exists() re-check keeps this compatible with both the old
		# py2 and modern py3 except syntax.)
		if os.path.exists(contents_path):
			raise
6874 def getcontents(self):
6876 Get the installed files of a given package (aka what that package installed)
6878 if not os.path.exists(self.dbdir+"/CONTENTS"):
6880 if self.contentscache != []:
6881 return self.contentscache
6883 myc=open(self.dbdir+"/CONTENTS","r")
6884 mylines=myc.readlines()
6887 contents_file = os.path.join(self.dbdir, "CONTENTS")
6889 for line in mylines:
6891 if null_byte in line:
6892 # Null bytes are a common indication of corruption.
6893 writemsg("!!! Null byte found in contents " + \
6894 "file, line %d: '%s'\n" % (pos, contents_file),
6897 mydat = line.split()
6898 # we do this so we can remove from non-root filesystems
6899 # (use the ROOT var to allow maintenance on other partitions)
6901 mydat[1] = normalize_path(os.path.join(
6902 self.myroot, mydat[1].lstrip(os.path.sep)))
6904 #format: type, mtime, md5sum
6905 pkgfiles[" ".join(mydat[1:-2])]=[mydat[0], mydat[-1], mydat[-2]]
6906 elif mydat[0]=="dir":
6908 pkgfiles[" ".join(mydat[1:])]=[mydat[0] ]
6909 elif mydat[0]=="sym":
6910 #format: type, mtime, dest
6912 if (x >= 13) and (mydat[-1][-1]==')'): # Old/Broken symlink entry
6913 mydat = mydat[:-10]+[mydat[-10:][stat.ST_MTIME][:-1]]
6914 writemsg("FIXED SYMLINK LINE: %s\n" % mydat, 1)
6924 pkgfiles[" ".join(mydat[1:splitter])]=[mydat[0], mydat[-1], " ".join(mydat[(splitter+1):-1])]
6925 elif mydat[0]=="dev":
6927 pkgfiles[" ".join(mydat[1:])]=[mydat[0] ]
6928 elif mydat[0]=="fif":
6930 pkgfiles[" ".join(mydat[1:])]=[mydat[0]]
6933 except (KeyError,IndexError):
6934 print "portage: CONTENTS line",pos,"corrupt!"
6935 self.contentscache=pkgfiles
6938 def unmerge(self, pkgfiles=None, trimworld=1, cleanup=1,
6939 ldpath_mtimes=None):
6942 Unmerges a given package (CPV)
6947 @param pkgfiles: files to unmerge (generally self.getcontents() )
6948 @type pkgfiles: Dictionary
6949 @param trimworld: Remove CPV from world file if True, not if False
6950 @type trimworld: Boolean
6951 @param cleanup: cleanup to pass to doebuild (see doebuild)
6952 @type cleanup: Boolean
6953 @param ldpath_mtimes: mtimes to pass to env_update (see env_update)
6954 @type ldpath_mtimes: Dictionary
6957 1. os.EX_OK if everything went well.
6958 2. return code of the failed phase (for prerm, postrm, cleanrm)
6961 The caller must ensure that lockdb() and unlockdb() are called
6962 before and after this method.
6965 contents = self.getcontents()
6966 # Now, don't assume that the name of the ebuild is the same as the
6967 # name of the dir; the package may have been moved.
6969 mystuff = listdir(self.dbdir, EmptyOnError=1)
6971 if x.endswith(".ebuild"):
6972 myebuildpath = os.path.join(self.dbdir, self.pkg + ".ebuild")
6973 if x[:-7] != self.pkg:
6974 # Clean up after vardbapi.move_ent() breakage in
6975 # portage versions before 2.1.2
6976 os.rename(os.path.join(self.dbdir, x), myebuildpath)
6977 write_atomic(os.path.join(self.dbdir, "PF"), self.pkg+"\n")
6980 self.settings.load_infodir(self.dbdir)
6983 doebuild_environment(myebuildpath, "prerm", self.myroot,
6984 self.settings, 0, 0, self.vartree.dbapi)
6985 except portage_exception.UnsupportedAPIException, e:
6986 # Sometimes this happens due to corruption of the EAPI file.
6987 writemsg("!!! FAILED prerm: %s\n" % \
6988 os.path.join(self.dbdir, "EAPI"), noiselevel=-1)
6989 writemsg("%s\n" % str(e), noiselevel=-1)
6991 catdir = os.path.dirname(self.settings["PORTAGE_BUILDDIR"])
6992 portage_util.ensure_dirs(os.path.dirname(catdir),
6993 uid=portage_uid, gid=portage_gid, mode=070, mask=0)
6994 builddir_lock = None
6998 catdir_lock = portage_locks.lockdir(catdir)
6999 portage_util.ensure_dirs(catdir,
7000 uid=portage_uid, gid=portage_gid,
7002 builddir_lock = portage_locks.lockdir(
7003 self.settings["PORTAGE_BUILDDIR"])
7005 portage_locks.unlockdir(catdir_lock)
7008 # Eventually, we'd like to pass in the saved ebuild env here...
7009 retval = doebuild(myebuildpath, "prerm", self.myroot,
7010 self.settings, cleanup=cleanup, use_cache=0,
7011 mydbapi=self.vartree.dbapi, tree="vartree",
7012 vartree=self.vartree)
7013 # XXX: Decide how to handle failures here.
7014 if retval != os.EX_OK:
7015 writemsg("!!! FAILED prerm: %s\n" % retval, noiselevel=-1)
7018 self._unmerge_pkgfiles(pkgfiles)
7021 retval = doebuild(myebuildpath, "postrm", self.myroot,
7022 self.settings, use_cache=0, tree="vartree",
7023 mydbapi=self.vartree.dbapi, vartree=self.vartree)
7025 # process logs created during pre/postrm
7026 elog_process(self.mycpv, self.settings)
7028 # XXX: Decide how to handle failures here.
7029 if retval != os.EX_OK:
7030 writemsg("!!! FAILED postrm: %s\n" % retval, noiselevel=-1)
7032 doebuild(myebuildpath, "cleanrm", self.myroot, self.settings,
7033 tree="vartree", mydbapi=self.vartree.dbapi,
7034 vartree=self.vartree)
7038 portage_locks.unlockdir(builddir_lock)
7040 if myebuildpath and not catdir_lock:
7041 # Lock catdir for removal if empty.
7042 catdir_lock = portage_locks.lockdir(catdir)
7048 if e.errno not in (errno.ENOENT,
7049 errno.ENOTEMPTY, errno.EEXIST):
7052 portage_locks.unlockdir(catdir_lock)
7053 env_update(target_root=self.myroot, prev_mtimes=ldpath_mtimes,
7057 def _unmerge_pkgfiles(self, pkgfiles):
7060 Unmerges the contents of a package from the liveFS
7061 Removes the VDB entry for self
7063 @param pkgfiles: typically self.getcontents()
7064 @type pkgfiles: Dictionary { filename: [ 'type', '?', 'md5sum' ] }
7071 writemsg_stdout("No package files given... Grabbing a set.\n")
7072 pkgfiles=self.getcontents()
7075 mykeys=pkgfiles.keys()
7079 #process symlinks second-to-last, directories last.
7081 modprotect="/lib/modules/"
7082 for objkey in mykeys:
7083 obj = normalize_path(objkey)
7088 statobj = os.stat(obj)
7093 lstatobj = os.lstat(obj)
7094 except (OSError, AttributeError):
7096 islink = lstatobj is not None and stat.S_ISLNK(lstatobj.st_mode)
7099 #we skip this if we're dealing with a symlink
7100 #because os.stat() will operate on the
7101 #link target rather than the link itself.
7102 writemsg_stdout("--- !found "+str(pkgfiles[objkey][0])+ " %s\n" % obj)
7104 # next line includes a tweak to protect modules from being unmerged,
7105 # but we don't protect modules from being overwritten if they are
7106 # upgraded. We effectively only want one half of the config protection
7107 # functionality for /lib/modules. For portage-ng both capabilities
7108 # should be able to be independently specified.
7109 if obj.startswith(modprotect):
7110 writemsg_stdout("--- cfgpro %s %s\n" % (pkgfiles[objkey][0], obj))
7113 lmtime=str(lstatobj[stat.ST_MTIME])
7114 if (pkgfiles[objkey][0] not in ("dir","fif","dev")) and (lmtime != pkgfiles[objkey][1]):
7115 writemsg_stdout("--- !mtime %s %s\n" % (pkgfiles[objkey][0], obj))
7118 if pkgfiles[objkey][0]=="dir":
7119 if statobj is None or not stat.S_ISDIR(statobj.st_mode):
7120 writemsg_stdout("--- !dir %s %s\n" % ("dir", obj))
7123 elif pkgfiles[objkey][0]=="sym":
7125 writemsg_stdout("--- !sym %s %s\n" % ("sym", obj))
7129 writemsg_stdout("<<< %s %s\n" % ("sym",obj))
7130 except (OSError,IOError),e:
7131 writemsg_stdout("!!! %s %s\n" % ("sym",obj))
7132 elif pkgfiles[objkey][0]=="obj":
7133 if statobj is None or not stat.S_ISREG(statobj.st_mode):
7134 writemsg_stdout("--- !obj %s %s\n" % ("obj", obj))
7138 mymd5 = portage_checksum.perform_md5(obj, calc_prelink=1)
7139 except portage_exception.FileNotFound, e:
7140 # the file has disappeared between now and our stat call
7141 writemsg_stdout("--- !obj %s %s\n" % ("obj", obj))
7144 # string.lower is needed because db entries used to be in upper-case. The
7145 # string.lower allows for backwards compatibility.
7146 if mymd5 != pkgfiles[objkey][2].lower():
7147 writemsg_stdout("--- !md5 %s %s\n" % ("obj", obj))
7150 if statobj.st_mode & (stat.S_ISUID | stat.S_ISGID):
7151 # Always blind chmod 0 before unlinking to avoid race conditions.
7153 if statobj.st_nlink > 1:
7154 writemsg("setXid: "+str(statobj.st_nlink-1)+ \
7155 " hardlinks to '%s'\n" % obj)
7157 except (OSError,IOError),e:
7159 writemsg_stdout("<<< %s %s\n" % ("obj",obj))
7160 elif pkgfiles[objkey][0]=="fif":
7161 if not stat.S_ISFIFO(lstatobj[stat.ST_MODE]):
7162 writemsg_stdout("--- !fif %s %s\n" % ("fif", obj))
7164 writemsg_stdout("--- %s %s\n" % ("fif",obj))
7165 elif pkgfiles[objkey][0]=="dev":
7166 writemsg_stdout("--- %s %s\n" % ("dev",obj))
7174 writemsg_stdout("<<< %s %s\n" % ("dir",obj))
7175 except (OSError, IOError):
7176 writemsg_stdout("--- !empty dir %s\n" % obj)
7178 #remove self from vartree database so that our own virtual gets zapped if we're the last node
7179 self.vartree.zap(self.mycpv)
7181 def isowner(self,filename,destroot):
7183 Check if filename is a new file or belongs to this package
7184 (for this or a previous version)
7192 1. True if this package owns the file.
7193 2. False if this package does not own the file.
7195 destfile = normalize_path(
7196 os.path.join(destroot, filename.lstrip(os.path.sep)))
7198 mylstat = os.lstat(destfile)
7199 except (OSError, IOError):
7202 pkgfiles = self.getcontents()
7203 if pkgfiles and filename in pkgfiles:
7206 if self._contents_inodes is None:
7207 self._contents_inodes = set()
7211 self._contents_inodes.add((lstat.st_dev, lstat.st_ino))
7214 if (mylstat.st_dev, mylstat.st_ino) in self._contents_inodes:
7219 def isprotected(self, filename):
7220 """In cases where an installed package in the same slot owns a
7221 protected file that will be merged, bump the mtime on the installed
7222 file in order to ensure that it isn't unmerged."""
7223 if not self._config_protect.isprotected(filename):
7225 if self._installed_instance is None:
7227 mydata = self._installed_instance.getcontents().get(filename, None)
7231 # Bump the mtime in order to ensure that the old config file doesn't
7232 # get unmerged. The user will have an opportunity to merge the new
7233 # config with the old one.
7235 os.utime(filename, None)
7237 if e.errno != errno.ENOENT:
7240 # The file has disappeared, so it's not protected.
7244 def treewalk(self, srcroot, destroot, inforoot, myebuild, cleanup=0,
7245 mydbapi=None, prev_mtimes=None):
7248 This function does the following:
7250 Collision Protection.
7251 calls doebuild(mydo=pkg_preinst)
7252 Merges the package to the livefs
7253 unmerges old version (if required)
7254 calls doebuild(mydo=pkg_postinst)
7257 @param srcroot: Typically this is ${D}
7258 @type srcroot: String (Path)
7259 @param destroot: Path to merge to (usually ${ROOT})
7260 @type destroot: String (Path)
7261 @param inforoot: root of the vardb entry ?
7262 @type inforoot: String (Path)
7263 @param myebuild: path to the ebuild that we are processing
7264 @type myebuild: String (Path)
7265 @param mydbapi: dbapi which is handed to doebuild.
7266 @type mydbapi: portdbapi instance
7267 @param prev_mtimes: { Filename:mtime } mapping for env_update
7268 @type prev_mtimes: Dictionary
7274 secondhand is a list of symlinks that have been skipped due to their target
7275 not existing; we will merge these symlinks at a later time.
7277 if not os.path.isdir(srcroot):
7278 writemsg("!!! Directory Not Found: D='%s'\n" % srcroot,
7282 if not os.path.exists(self.dbcatdir):
7283 os.makedirs(self.dbcatdir)
7286 for v in self.vartree.dbapi.cp_list(self.mysplit[0]):
7287 otherversions.append(v.split("/")[1])
7289 slot_matches = self.vartree.dbapi.match(
7290 "%s:%s" % (self.mysplit[0], self.settings["SLOT"]))
7292 # Used by self.isprotected().
7293 self._installed_instance = dblink(self.cat,
7294 catsplit(slot_matches[0])[1], destroot, self.settings,
7295 vartree=self.vartree)
7297 # check for package collisions
7298 if "collision-protect" in self.settings.features:
7299 collision_ignore = set([normalize_path(myignore) for myignore in \
7300 self.settings.get("COLLISION_IGNORE", "").split()])
7301 myfilelist = listdir(srcroot, recursive=1, filesonly=1, followSymlinks=False)
7303 # the linkcheck only works if we are in srcroot
7306 mysymlinks = filter(os.path.islink, listdir(srcroot, recursive=1, filesonly=0, followSymlinks=False))
7307 myfilelist.extend(mysymlinks)
7308 mysymlinked_directories = [s + os.path.sep for s in mysymlinks]
7313 starttime=time.time()
7319 if self.pkg in otherversions:
7320 otherversions.remove(self.pkg) # we already checked this package
7322 myslot = self.settings["SLOT"]
7323 for v in otherversions:
7324 # only allow versions with same slot to overwrite files
7325 if myslot == self.vartree.dbapi.aux_get("/".join((self.cat, v)), ["SLOT"])[0]:
7327 dblink(self.cat, v, destroot, self.settings,
7328 vartree=self.vartree))
7332 print green("*")+" checking "+str(len(myfilelist))+" files for package collisions"
7333 for f in myfilelist:
7335 # listdir isn't intelligent enough to exclude symlinked dirs,
7336 # so we have to do it ourself
7337 for s in mysymlinked_directories:
7345 print str(i)+" files checked ..."
7349 for ver in [self]+mypkglist:
7350 if (ver.isowner(f, destroot) or ver.isprotected(f)):
7354 collisions.append(f)
7355 print "existing file "+f+" is not owned by this package"
7357 if collision_ignore:
7358 if f in collision_ignore:
7361 for myignore in collision_ignore:
7362 if f.startswith(myignore + os.path.sep):
7365 #print green("*")+" spent "+str(time.time()-starttime)+" seconds checking for file collisions"
7367 print red("*")+" This package is blocked because it wants to overwrite"
7368 print red("*")+" files belonging to other packages (see messages above)."
7369 print red("*")+" If you have no clue what this is all about report it "
7370 print red("*")+" as a bug for this package on http://bugs.gentoo.org"
7372 print red("package "+self.cat+"/"+self.pkg+" NOT merged")
7375 print "Searching all installed packages for file collisions..."
7376 print "Press Ctrl-C to Stop"
7378 """ Note: The isowner calls result in a stat call for *every*
7379 single installed file, since the inode numbers are used to work
7380 around the problem of ambiguous paths caused by symlinked files
7381 and/or directories. Though it is slow, it is as accurate as
7384 for cpv in self.vartree.dbapi.cpv_all():
7385 cat, pkg = catsplit(cpv)
7386 mylink = dblink(cat, pkg, destroot, self.settings,
7387 vartree=self.vartree)
7389 for f in collisions:
7390 if mylink.isowner(f, destroot):
7391 mycollisions.append(f)
7394 print " * %s:" % cpv
7396 for f in mycollisions:
7398 os.path.join(destroot, f.lstrip(os.path.sep))
7401 print "None of the installed packages claim the above file(s)."
7409 if os.stat(srcroot).st_dev == os.stat(destroot).st_dev:
7410 """ The merge process may move files out of the image directory,
7411 which causes invalidation of the .installed flag."""
7413 os.unlink(os.path.join(
7414 os.path.dirname(normalize_path(srcroot)), ".installed"))
7416 if e.errno != errno.ENOENT:
7420 # get old contents info for later unmerging
7421 oldcontents = self.getcontents()
7423 self.dbdir = self.dbtmpdir
7425 if not os.path.exists(self.dbtmpdir):
7426 os.makedirs(self.dbtmpdir)
7428 writemsg_stdout(">>> Merging %s %s %s\n" % (self.mycpv,"to",destroot))
7430 # run preinst script
7431 if myebuild is None:
7432 myebuild = os.path.join(inforoot, self.pkg + ".ebuild")
7433 a = doebuild(myebuild, "preinst", destroot, self.settings, cleanup=cleanup,
7434 use_cache=0, tree=self.treetype, mydbapi=mydbapi,
7435 vartree=self.vartree)
7437 # XXX: Decide how to handle failures here.
7439 writemsg("!!! FAILED preinst: "+str(a)+"\n", noiselevel=-1)
7442 # copy "info" files (like SLOT, CFLAGS, etc.) into the database
7443 for x in listdir(inforoot):
7444 self.copyfile(inforoot+"/"+x)
7446 # get current counter value (counter_tick also takes care of incrementing it)
7447 # XXX Need to make this destroot, but it needs to be initialized first. XXX
7448 # XXX bis: leads to some invalidentry() call through cp_all().
7449 counter = self.vartree.dbapi.counter_tick(self.myroot, mycpv=self.mycpv)
7450 # write local package counter for recording
7451 lcfile = open(self.dbtmpdir+"/COUNTER","w")
7452 lcfile.write(str(counter))
7455 # open CONTENTS file (possibly overwriting old one) for recording
7456 outfile=open(self.dbtmpdir+"/CONTENTS","w")
7458 self.updateprotect()
7460 #if we have a file containing previously-merged config file md5sums, grab it.
7461 conf_mem_file = os.path.join(destroot, CONFIG_MEMORY_FILE)
7462 cfgfiledict = grabdict(conf_mem_file)
7463 if self.settings.has_key("NOCONFMEM"):
7464 cfgfiledict["IGNORE"]=1
7466 cfgfiledict["IGNORE"]=0
7468 # Timestamp for files being merged. Use time() - 1 in order to prevent
7469 # a collision with timestamps that are bumped by the utime() call
7470 # inside isprotected(). This ensures that the new and old config have
7471 # different timestamps (for the benefit of programs like rsync that
7472 # that need distiguishable timestamps to detect file changes).
7473 mymtime = long(time.time() - 1)
7475 # set umask to 0 for merging; back up umask, save old one in prevmask (since this is a global change)
7476 prevmask = os.umask(0)
7479 # we do a first merge; this will recurse through all files in our srcroot but also build up a
7480 # "second hand" of symlinks to merge later
7481 if self.mergeme(srcroot,destroot,outfile,secondhand,"",cfgfiledict,mymtime):
7484 # now, it's time for dealing our second hand; we'll loop until we can't merge anymore. The rest are
7485 # broken symlinks. We'll merge them too.
7487 while len(secondhand) and len(secondhand)!=lastlen:
7488 # clear the thirdhand. Anything from our second hand that
7489 # couldn't get merged will be added to thirdhand.
7492 self.mergeme(srcroot,destroot,outfile,thirdhand,secondhand,cfgfiledict,mymtime)
7495 lastlen=len(secondhand)
7497 # our thirdhand now becomes our secondhand. It's ok to throw
7498 # away secondhand since thirdhand contains all the stuff that
7499 # couldn't be merged.
7500 secondhand = thirdhand
7503 # force merge of remaining symlinks (broken or circular; oh well)
7504 self.mergeme(srcroot,destroot,outfile,None,secondhand,cfgfiledict,mymtime)
7509 #if we opened it, close it
7513 if os.path.exists(self.dbpkgdir):
7514 writemsg_stdout(">>> Safely unmerging already-installed instance...\n")
7515 self.dbdir = self.dbpkgdir
7516 self.unmerge(oldcontents, trimworld=0, ldpath_mtimes=prev_mtimes)
7517 self.dbdir = self.dbtmpdir
7518 writemsg_stdout(">>> Original instance of package unmerged safely.\n")
7520 # We hold both directory locks.
7521 self.dbdir = self.dbpkgdir
7523 movefile(self.dbtmpdir, self.dbpkgdir, mysettings=self.settings)
7524 contents = self.getcontents()
7526 #write out our collection of md5sums
7527 if cfgfiledict.has_key("IGNORE"):
7528 del cfgfiledict["IGNORE"]
7530 my_private_path = os.path.join(destroot, PRIVATE_PATH)
7531 if not os.path.exists(my_private_path):
7532 os.makedirs(my_private_path)
7533 os.chown(my_private_path, os.getuid(), portage_gid)
7534 os.chmod(my_private_path, 02770)
7536 writedict(cfgfiledict, conf_mem_file)
7540 a = doebuild(myebuild, "postinst", destroot, self.settings, use_cache=0,
7541 tree=self.treetype, mydbapi=mydbapi, vartree=self.vartree)
7543 # XXX: Decide how to handle failures here.
7545 writemsg("!!! FAILED postinst: "+str(a)+"\n", noiselevel=-1)
7549 for v in otherversions:
7550 if pkgcmp(catpkgsplit(self.pkg)[1:], catpkgsplit(v)[1:]) < 0:
7553 #update environment settings, library paths. DO NOT change symlinks.
7554 env_update(makelinks=(not downgrade),
7555 target_root=self.settings["ROOT"], prev_mtimes=prev_mtimes,
7557 #dircache may break autoclean because it remembers the -MERGING-pkg file
7559 if dircache.has_key(self.dbcatdir):
7560 del dircache[self.dbcatdir]
7561 writemsg_stdout(">>> %s %s\n" % (self.mycpv,"merged."))
7563 # Process ebuild logfiles
7564 elog_process(self.mycpv, self.settings)
7565 if "noclean" not in self.settings.features:
7566 doebuild(myebuild, "clean", destroot, self.settings,
7567 tree=self.treetype, mydbapi=mydbapi, vartree=self.vartree)
7570 def mergeme(self,srcroot,destroot,outfile,secondhand,stufftomerge,cfgfiledict,thismtime):
7573 This function handles actual merging of the package contents to the livefs.
7574 It also handles config protection.
7576 @param srcroot: Where are we copying files from (usually ${D})
7577 @type srcroot: String (Path)
7578 @param destroot: Typically ${ROOT}
7579 @type destroot: String (Path)
7580 @param outfile: File to log operations to
7581 @type outfile: File Object
7582 @param secondhand: A set of items to merge in pass two (usually
7583 or symlinks that point to non-existing files that may get merged later)
7584 @type secondhand: List
7585 @param stufftomerge: Either a diretory to merge, or a list of items.
7586 @type stufftomerge: String or List
7587 @param cfgfiledict: { File:mtime } mapping for config_protected files
7588 @type cfgfiledict: Dictionary
7589 @param thismtime: The current time (typically long(time.time())
7590 @type thismtime: Long
7591 @rtype: None or Boolean
7597 from os.path import sep, join
7598 srcroot = normalize_path(srcroot).rstrip(sep) + sep
7599 destroot = normalize_path(destroot).rstrip(sep) + sep
7600 # this is supposed to merge a list of files. There will be 2 forms of argument passing.
7601 if type(stufftomerge)==types.StringType:
7602 #A directory is specified. Figure out protection paths, listdir() it and process it.
7603 mergelist = listdir(join(srcroot, stufftomerge))
7606 mergelist=stufftomerge
7609 mysrc = join(srcroot, offset, x)
7610 mydest = join(destroot, offset, x)
7611 # myrealdest is mydest without the $ROOT prefix (makes a difference if ROOT!="/")
7612 myrealdest = join(sep, offset, x)
7613 # stat file once, test using S_* macros many times (faster that way)
7615 mystat=os.lstat(mysrc)
7618 writemsg(red("!!! ERROR: There appears to be ")+bold("FILE SYSTEM CORRUPTION.")+red(" A file that is listed\n"))
7619 writemsg(red("!!! as existing is not capable of being stat'd. If you are using an\n"))
7620 writemsg(red("!!! experimental kernel, please boot into a stable one, force an fsck,\n"))
7621 writemsg(red("!!! and ensure your filesystem is in a sane state. ")+bold("'shutdown -Fr now'\n"))
7622 writemsg(red("!!! File: ")+str(mysrc)+"\n", noiselevel=-1)
7623 writemsg(red("!!! Error: ")+str(e)+"\n", noiselevel=-1)
7625 except Exception, e:
7627 writemsg(red("!!! ERROR: An unknown error has occurred during the merge process.\n"))
7628 writemsg(red("!!! A stat call returned the following error for the following file:"))
7629 writemsg( "!!! Please ensure that your filesystem is intact, otherwise report\n")
7630 writemsg( "!!! this as a portage bug at bugs.gentoo.org. Append 'emerge info'.\n")
7631 writemsg( "!!! File: "+str(mysrc)+"\n", noiselevel=-1)
7632 writemsg( "!!! Error: "+str(e)+"\n", noiselevel=-1)
7636 mymode=mystat[stat.ST_MODE]
7637 # handy variables; mydest is the target object on the live filesystems;
7638 # mysrc is the source object in the temporary install dir
7640 mydmode = os.lstat(mydest).st_mode
7642 if e.errno != errno.ENOENT:
7645 #dest file doesn't exist
7648 if stat.S_ISLNK(mymode):
7649 # we are merging a symbolic link
7650 myabsto=abssymlink(mysrc)
7651 if myabsto.startswith(srcroot):
7652 myabsto=myabsto[len(srcroot):]
7653 myabsto = myabsto.lstrip(sep)
7654 myto=os.readlink(mysrc)
7655 if self.settings and self.settings["D"]:
7656 if myto.startswith(self.settings["D"]):
7657 myto=myto[len(self.settings["D"]):]
7658 # myrealto contains the path of the real file to which this symlink points.
7659 # we can simply test for existence of this file to see if the target has been merged yet
7660 myrealto = normalize_path(os.path.join(destroot, myabsto))
7663 if not stat.S_ISLNK(mydmode):
7664 if stat.S_ISDIR(mydmode):
7665 # directory in the way: we can't merge a symlink over a directory
7666 # we won't merge this, continue with next file...
7669 if os.path.exists(mysrc) and stat.S_ISDIR(os.stat(mysrc)[stat.ST_MODE]):
7670 # Kill file blocking installation of symlink to dir #71787
7672 elif self.isprotected(mydest):
7673 # Use md5 of the target in ${D} if it exists...
7675 newmd5 = portage_checksum.perform_md5(
7676 join(srcroot, myabsto))
7677 except portage_exception.FileNotFound:
7678 # Maybe the target is merged already.
7680 newmd5 = portage_checksum.perform_md5(
7682 except portage_exception.FileNotFound:
7684 mydest = new_protect_filename(mydest,newmd5=newmd5)
7686 # if secondhand is None it means we're operating in "force" mode and should not create a second hand.
7687 if (secondhand!=None) and (not os.path.exists(myrealto)):
7688 # either the target directory doesn't exist yet or the target file doesn't exist -- or
7689 # the target is a broken symlink. We will add this file to our "second hand" and merge
7691 secondhand.append(mysrc[len(srcroot):])
7693 # unlinking no longer necessary; "movefile" will overwrite symlinks atomically and correctly
7694 mymtime=movefile(mysrc,mydest,newmtime=thismtime,sstat=mystat, mysettings=self.settings)
7696 writemsg_stdout(">>> %s -> %s\n" % (mydest, myto))
7697 outfile.write("sym "+myrealdest+" -> "+myto+" "+str(mymtime)+"\n")
7699 print "!!! Failed to move file."
7700 print "!!!",mydest,"->",myto
7702 elif stat.S_ISDIR(mymode):
7703 # we are merging a directory
7705 # destination exists
7708 # Save then clear flags on dest.
7709 dflags=bsd_chflags.lgetflags(mydest)
7710 if dflags != 0 and bsd_chflags.lchflags(mydest, 0) < 0:
7711 writemsg("!!! Couldn't clear flags on '"+mydest+"'.\n",
7714 if not os.access(mydest, os.W_OK):
7715 pkgstuff = pkgsplit(self.pkg)
7716 writemsg("\n!!! Cannot write to '"+mydest+"'.\n", noiselevel=-1)
7717 writemsg("!!! Please check permissions and directories for broken symlinks.\n")
7718 writemsg("!!! You may start the merge process again by using ebuild:\n")
7719 writemsg("!!! ebuild "+self.settings["PORTDIR"]+"/"+self.cat+"/"+pkgstuff[0]+"/"+self.pkg+".ebuild merge\n")
7720 writemsg("!!! And finish by running this: env-update\n\n")
7723 if stat.S_ISLNK(mydmode) or stat.S_ISDIR(mydmode):
7724 # a symlink to an existing directory will work for us; keep it:
7725 writemsg_stdout("--- %s/\n" % mydest)
7727 bsd_chflags.lchflags(mydest, dflags)
7729 # a non-directory and non-symlink-to-directory. Won't work for us. Move out of the way.
7730 if movefile(mydest,mydest+".backup", mysettings=self.settings) is None:
7732 print "bak",mydest,mydest+".backup"
7733 #now create our directory
7734 if self.settings.selinux_enabled():
7735 sid = selinux.get_sid(mysrc)
7736 selinux.secure_mkdir(mydest,sid)
7740 bsd_chflags.lchflags(mydest, dflags)
7741 os.chmod(mydest,mystat[0])
7742 os.chown(mydest,mystat[4],mystat[5])
7743 writemsg_stdout(">>> %s/\n" % mydest)
7745 #destination doesn't exist
7746 if self.settings.selinux_enabled():
7747 sid = selinux.get_sid(mysrc)
7748 selinux.secure_mkdir(mydest,sid)
7751 os.chmod(mydest,mystat[0])
7752 os.chown(mydest,mystat[4],mystat[5])
7753 writemsg_stdout(">>> %s/\n" % mydest)
7754 outfile.write("dir "+myrealdest+"\n")
7755 # recurse and merge this directory
7756 if self.mergeme(srcroot, destroot, outfile, secondhand,
7757 join(offset, x), cfgfiledict, thismtime):
7759 elif stat.S_ISREG(mymode):
7760 # we are merging a regular file
7761 mymd5=portage_checksum.perform_md5(mysrc,calc_prelink=1)
7762 # calculate config file protection stuff
7763 mydestdir=os.path.dirname(mydest)
7767 # destination file exists
7768 if stat.S_ISDIR(mydmode):
7769 # install of destination is blocked by an existing directory with the same name
7771 writemsg_stdout("!!! %s\n" % mydest)
7772 elif stat.S_ISREG(mydmode) or (stat.S_ISLNK(mydmode) and os.path.exists(mydest) and stat.S_ISREG(os.stat(mydest)[stat.ST_MODE])):
7774 # install of destination is blocked by an existing regular file,
7775 # or by a symlink to an existing regular file;
7776 # now, config file management may come into play.
7777 # we only need to tweak mydest if cfg file management is in play.
7778 if self.isprotected(mydest):
7779 # we have a protection path; enable config file management.
7780 destmd5=portage_checksum.perform_md5(mydest,calc_prelink=1)
7782 #file already in place; simply update mtimes of destination
7783 os.utime(mydest,(thismtime,thismtime))
7787 if mymd5 == cfgfiledict.get(myrealdest, [None])[0]:
7788 """ An identical update has previously been
7789 merged. Skip it unless the user has chosen
7792 moveme = cfgfiledict["IGNORE"]
7793 cfgprot = cfgfiledict["IGNORE"]
7798 # Merging a new file, so update confmem.
7799 cfgfiledict[myrealdest] = [mymd5]
7800 elif destmd5 == cfgfiledict.get(myrealdest, [None])[0]:
7801 """A previously remembered update has been
7802 accepted, so it is removed from confmem."""
7803 del cfgfiledict[myrealdest]
7805 mydest = new_protect_filename(mydest, newmd5=mymd5)
7807 # whether config protection or not, we merge the new file the
7808 # same way. Unless moveme=0 (blocking directory)
7810 mymtime=movefile(mysrc,mydest,newmtime=thismtime,sstat=mystat, mysettings=self.settings)
7815 mymtime = long(time.time())
7816 # We need to touch the destination so that on --update the
7817 # old package won't yank the file with it. (non-cfgprot related)
7818 os.utime(mydest, (mymtime, mymtime))
7820 if self.settings["USERLAND"] == "Darwin" and myrealdest[-2:] == ".a":
7822 # XXX kludge, can be killed when portage stops relying on
7823 # md5+mtime, and uses refcounts
7824 # alright, we've fooled w/ mtime on the file; this pisses off static archives
7825 # basically internal mtime != file's mtime, so the linker (falsely) thinks
7826 # the archive is stale, and needs to have it's toc rebuilt.
7828 myf = open(mydest, "r+")
7830 # ar mtime field is digits padded with spaces, 12 bytes.
7831 lms=str(thismtime+5).ljust(12)
7834 if magic != "!<arch>\n":
7835 # not an archive (dolib.a from portage.py makes it here fex)
7838 st = os.stat(mydest)
7839 while myf.tell() < st.st_size - 12:
7846 # skip uid/gid/mperm
7849 # read the archive member's size
7850 x=long(myf.read(10))
7852 # skip the trailing newlines, and add the potential
7853 # extra padding byte if it's not an even size
7854 myf.seek(x + 2 + (x % 2),1)
7856 # and now we're at the end. yay.
7858 mymd5 = portage_checksum.perform_md5(mydest, calc_prelink=1)
7859 os.utime(mydest,(thismtime,thismtime))
7863 outfile.write("obj "+myrealdest+" "+mymd5+" "+str(mymtime)+"\n")
7864 writemsg_stdout("%s %s\n" % (zing,mydest))
7866 # we are merging a fifo or device node
7869 # destination doesn't exist
7870 if movefile(mysrc,mydest,newmtime=thismtime,sstat=mystat, mysettings=self.settings)!=None:
7874 if stat.S_ISFIFO(mymode):
7875 outfile.write("fif %s\n" % myrealdest)
7877 outfile.write("dev %s\n" % myrealdest)
7878 writemsg_stdout(zing+" "+mydest+"\n")
def merge(self, mergeroot, inforoot, myroot, myebuild=None, cleanup=0,
	mydbapi=None, prev_mtimes=None):
	"""Merge this package instance into myroot.

	Thin wrapper: all of the real work is delegated to self.treewalk()
	with the same arguments (note that treewalk takes myroot before
	inforoot).  Returns whatever treewalk() returns.
	NOTE(review): two original lines between the signature and the
	return are not visible in this view.
	"""
	return self.treewalk(mergeroot, myroot, inforoot, myebuild,
		cleanup=cleanup, mydbapi=mydbapi, prev_mtimes=prev_mtimes)
def getstring(self,name):
	"""Return the contents of the named dbdir file with all runs of
	whitespace collapsed to single spaces.

	@param name: file name relative to self.dbdir
	@return: normalized contents, or "" if the file does not exist
	"""
	filepath = self.dbdir + "/" + name
	if not os.path.exists(filepath):
		return ""
	# Close the handle even if read() raises (the original left the
	# file open on an exception path).
	myfile = open(filepath, "r")
	try:
		mydata = myfile.read().split()
	finally:
		myfile.close()
	return " ".join(mydata)
def copyfile(self,fname):
	"""Copy an arbitrary file into this package's dbdir, keeping its
	basename."""
	destination = self.dbdir + "/" + os.path.basename(fname)
	shutil.copyfile(fname, destination)
def getfile(self,fname):
	"""Return the raw contents of the named dbdir file.

	@param fname: file name relative to self.dbdir
	@return: file contents as a string, or "" if the file is absent
	"""
	filepath = self.dbdir + "/" + fname
	if not os.path.exists(filepath):
		return ""
	# try/finally guarantees the handle is released even if read()
	# raises (the original had no exception-safe close).
	myfile = open(filepath, "r")
	try:
		return myfile.read()
	finally:
		myfile.close()
def setfile(self,fname,data):
	"""Atomically replace the named dbdir file with the given data."""
	target = os.path.join(self.dbdir, fname)
	write_atomic(target, data)
def getelements(self,ename):
	"""Return the whitespace-separated tokens stored in a dbdir file.

	@param ename: file name relative to self.dbdir
	@return: flat list of tokens; [] if the file does not exist
	"""
	filepath = self.dbdir + "/" + ename
	if not os.path.exists(filepath):
		return []
	myelement = open(filepath, "r")
	try:
		mylines = myelement.readlines()
	finally:
		myelement.close()
	mylist = []
	for x in mylines:
		# split() already discards the trailing newline; the old
		# x[:-1].split() silently chopped the last character of the
		# final token when the file had no trailing newline.
		mylist.extend(x.split())
	return mylist
def setelements(self,mylist,ename):
	"""Write mylist to the named dbdir file, one element per line.

	@param mylist: iterable of strings to store
	@param ename: file name relative to self.dbdir
	"""
	myelement = open(self.dbdir + "/" + ename, "w")
	try:
		for x in mylist:
			myelement.write(x + "\n")
	finally:
		# Guarantee the handle is closed even if a write fails.
		myelement.close()
def isregular(self):
	"""Is this a regular package (does it have a CATEGORY file?  A dblink
	can be virtual *and* regular)."""
	category_path = self.dbdir + "/CATEGORY"
	return os.path.exists(category_path)
class FetchlistDict(UserDict.DictMixin):
	"""This provide a mapping interface to retrieve fetch lists.  It's used
	to allow portage_manifest.Manifest to access fetch lists via a standard
	mapping interface rather than use the dbapi directly."""
	def __init__(self, pkgdir, settings, mydbapi):
		"""pkgdir is a directory containing ebuilds and settings is passed into
		portdbapi.getfetchlist for __getitem__ calls."""
		self.pkgdir = pkgdir
		# "category/package" derived from the last two path components.
		self.cp = os.sep.join(pkgdir.split(os.sep)[-2:])
		self.settings = settings
		# Repository root: two levels up from the package directory.
		self.mytree = os.path.realpath(os.path.dirname(os.path.dirname(pkgdir)))
		self.portdb = mydbapi
	def __getitem__(self, pkg_key):
		"""Returns the complete fetch list for a given package."""
		return self.portdb.getfetchlist(pkg_key, mysettings=self.settings,
			all=True, mytree=self.mytree)[1]
	def has_key(self, pkg_key):
		"""Returns true if the given package exists within pkgdir."""
		return pkg_key in self.keys()
	def keys(self):
		"""Returns keys for all packages within pkgdir"""
		# Restored method header: the docstring/body existed but the
		# "def keys(self):" line was missing, leaving the class broken.
		return self.portdb.cp_list(self.cp, mytree=self.mytree)
def pkgmerge(mytbz2, myroot, mysettings, mydbapi=None, vartree=None, prev_mtimes=None):
	"""will merge a .tbz2 file, returning a list of runtime dependencies
	that must be satisfied, or None if there was a merge error.  This
	code assumes the package exists."""
	# NOTE(review): numerous original lines are elided in this view; the
	# comments below describe only what the visible code demonstrates.
	# Fall back to the legacy global trees when the caller passed None.
		mydbapi = db[myroot]["bintree"].dbapi
		vartree = db[myroot]["vartree"]
	if mytbz2[-5:]!=".tbz2":
		print "!!! Not a .tbz2 file"
	builddir_lock = None
		""" Don't lock the tbz2 file because the filesystem could be readonly or
		shared by a cluster."""
		#tbz2_lock = portage_locks.lockfile(mytbz2, wantnewlockfile=1)
		# ${PN}-${PV} comes from the tbz2 file name; the category comes
		# from the xpak metadata embedded in the binary package.
		mypkg = os.path.basename(mytbz2)[:-5]
		xptbz2 = xpak.tbz2(mytbz2)
		mycat = xptbz2.getfile("CATEGORY")
		# Abort if the binary package carries no CATEGORY metadata.
		writemsg("!!! CATEGORY info missing from info chunk, aborting...\n",
		mycat = mycat.strip()

		# These are the same directories that would be used at build time.
		builddir = os.path.join(
			mysettings["PORTAGE_TMPDIR"], "portage", mycat, mypkg)
		catdir = os.path.dirname(builddir)
		pkgloc = os.path.join(builddir, "image")
		infloc = os.path.join(builddir, "build-info")
		myebuild = os.path.join(
			infloc, os.path.basename(mytbz2)[:-4] + "ebuild")
		# Create and lock the category dir, then the build dir; the catdir
		# lock is released once the builddir lock is held.
		portage_util.ensure_dirs(os.path.dirname(catdir),
			uid=portage_uid, gid=portage_gid, mode=070, mask=0)
		catdir_lock = portage_locks.lockdir(catdir)
		portage_util.ensure_dirs(catdir,
			uid=portage_uid, gid=portage_gid, mode=070, mask=0)
		builddir_lock = portage_locks.lockdir(builddir)
		portage_locks.unlockdir(catdir_lock)
		# Remove any stale build directory; a missing one is not an error.
			shutil.rmtree(builddir)
		except (IOError, OSError), e:
			if e.errno != errno.ENOENT:
		for mydir in (builddir, pkgloc, infloc):
			portage_util.ensure_dirs(mydir, uid=portage_uid,
				gid=portage_gid, mode=0755)
		writemsg_stdout(">>> Extracting info\n")
		xptbz2.unpackinfo(infloc)
		mysettings.load_infodir(infloc)
		# Store the md5sum in the vdb.
		fp = open(os.path.join(infloc, "BINPKGMD5"), "w")
		fp.write(str(portage_checksum.perform_md5(mytbz2))+"\n")

		debug = mysettings.get("PORTAGE_DEBUG", "") == "1"

		# Eventually we'd like to pass in the saved ebuild env here.
		retval = doebuild(myebuild, "setup", myroot, mysettings, debug=debug,
			tree="bintree", mydbapi=mydbapi, vartree=vartree)
		if retval != os.EX_OK:
			writemsg("!!! Setup failed: %s\n" % retval, noiselevel=-1)

		# Unpack the image through an external bzip2 | tar pipeline.
		writemsg_stdout(">>> Extracting %s\n" % mypkg)
		retval = portage_exec.spawn_bash(
			"bzip2 -dqc -- '%s' | tar -xp -C '%s' -f -" % (mytbz2, pkgloc),
			env=mysettings.environ())
		if retval != os.EX_OK:
			writemsg("!!! Error Extracting '%s'\n" % mytbz2, noiselevel=-1)
		#portage_locks.unlockfile(tbz2_lock)

		# Hand the unpacked image over to dblink for the actual merge.
		mylink = dblink(mycat, mypkg, myroot, mysettings, vartree=vartree,
		retval = mylink.merge(pkgloc, infloc, myroot, myebuild, cleanup=0,
			mydbapi=mydbapi, prev_mtimes=prev_mtimes)
		# Cleanup: release the tbz2 lock (if ever taken), remove the build
		# directory, then release its lock.
			portage_locks.unlockfile(tbz2_lock)
				shutil.rmtree(builddir)
			except (IOError, OSError), e:
				if e.errno != errno.ENOENT:
			portage_locks.unlockdir(builddir_lock)
					# Lock catdir for removal if empty.
					catdir_lock = portage_locks.lockdir(catdir)
						# Ignore "missing" / "not empty" when pruning catdir.
						if e.errno not in (errno.ENOENT,
							errno.ENOTEMPTY, errno.EEXIST):
					portage_locks.unlockdir(catdir_lock)
def deprecated_profile_check():
	# Warn the user when the selected profile is marked deprecated and
	# print the suggested replacement plus any upgrade instructions.
	# NOTE(review): a few original lines (early return, writemsg keyword
	# continuations) are elided in this view.
	if not os.access(DEPRECATED_PROFILE_FILE, os.R_OK):
	deprecatedfile = open(DEPRECATED_PROFILE_FILE, "r")
	dcontent = deprecatedfile.readlines()
	deprecatedfile.close()
	# First line names the replacement profile; any remaining lines are
	# human-readable upgrade steps.
	newprofile = dcontent[0]
	writemsg(red("\n!!! Your current profile is deprecated and not supported anymore.\n"),
	writemsg(red("!!! Please upgrade to the following profile if possible:\n"),
	writemsg(8*" "+green(newprofile)+"\n", noiselevel=-1)
	if len(dcontent) > 1:
		writemsg("To upgrade do the following steps:\n", noiselevel=-1)
		for myline in dcontent[1:]:
			writemsg(myline, noiselevel=-1)
		writemsg("\n\n", noiselevel=-1)
# gets virtual package settings
def getvirtuals(myroot):
	# DEPRECATED module-level wrapper kept for backward compatibility;
	# delegates to the legacy global `settings` object.
	writemsg("--- DEPRECATED call to getvirtual\n")
	return settings.getvirtuals(myroot)
def commit_mtimedb(mydict=None, filename=None):
	# Persist an mtimedb dictionary to disk as a pickled plain dict.
	# NOTE(review): the lines handling the mydict-is-None case (delegating
	# to the global mtimedb object) are elided in this view.
	if "mtimedb" not in globals() or mtimedb is None:
	if filename is None:
		filename = mtimedbfile
	mydict["version"] = VERSION
	d = {} # for full backward compat, pickle it as a plain dict object.
	# Write atomically, then tighten ownership/permissions; I/O errors on
	# this path are caught below.
	f = atomic_ofstream(filename)
	cPickle.dump(d, f, -1)
	portage_util.apply_secpass_permissions(filename, uid=uid, gid=portage_gid, mode=0664)
	except (IOError, OSError), e:
# NOTE(review): the enclosing "def portageexit():" header is not visible
# in this view; these lines are the body of that exit hook.
global uid,portage_gid,portdb,db
# Only flush caches when running with privileges and outside the
# ebuild sandbox.
if secpass and not os.environ.has_key("SANDBOX_ACTIVE"):
	close_portdbapi_caches()
# Register the hook so it runs at interpreter exit.
atexit_register(portageexit)
def global_updates(mysettings, trees, prev_mtimes):
	"""
	Perform new global updates if they exist in $PORTDIR/profiles/updates/.

	@param mysettings: A config instance for ROOT="/".
	@type mysettings: config
	@param trees: A dictionary containing portage trees.
	@param prev_mtimes: A dictionary containing mtimes of files located in
		$PORTDIR/profiles/updates/.
	@type prev_mtimes: dict
	@rtype: None or List
	@return: None if no were no updates, otherwise a list of update commands
		that have been performed.
	"""
	# NOTE(review): several original lines (else branches, accumulator
	# initializations, final return) are elided in this view.
	# only do this if we're root and not running repoman/ebuild digest
	if secpass < 2 or "SANDBOX_ACTIVE" in os.environ:
	updpath = os.path.join(mysettings["PORTDIR"], "profiles", "updates")
	# fixpackages ignores the remembered mtimes and reprocesses all files.
	if mysettings["PORTAGE_CALLER"] == "fixpackages":
		update_data = grab_updates(updpath)
		update_data = grab_updates(updpath, prev_mtimes)
	except portage_exception.DirectoryNotFound:
		writemsg("--- 'profiles/updates' is empty or not available. Empty portage tree?\n")
	if len(update_data) > 0:
		do_upgrade_packagesmessage = 0
		for mykey, mystat, mycontent in update_data:
			writemsg_stdout("\n\n")
			writemsg_stdout(green("Performing Global Updates: ")+bold(mykey)+"\n")
			writemsg_stdout("(Could take a couple of minutes if you have a lot of binary packages.)\n")
			writemsg_stdout("  "+bold(".")+"='update pass'  "+bold("*")+"='binary update'  "+bold("@")+"='/var/db move'\n"+"  "+bold("s")+"='/var/db SLOT move' "+bold("S")+"='binary SLOT move' "+bold("p")+"='update /etc/portage/package.*'\n")
			valid_updates, errors = parse_updates(mycontent)
			myupd.extend(valid_updates)
			# One progress dot per accepted update directive.
			writemsg_stdout(len(valid_updates) * "." + "\n")
			if len(errors) == 0:
				# Update our internal mtime since we
				# processed all of our directives.
				timestamps[mykey] = long(mystat.st_mtime)
				# On parse errors, report them and leave the mtime alone so
				# the file is reprocessed on the next run.
				writemsg("%s\n" % msg, noiselevel=-1)
		# Apply the accepted moves to /etc/portage config files.
		update_config_files("/",
			mysettings.get("CONFIG_PROTECT","").split(),
			mysettings.get("CONFIG_PROTECT_MASK","").split(),
		# Rebuild the bintree, then replay each move/slotmove against both
		# the installed-package db and the binary package tree.
		trees["/"]["bintree"] = binarytree("/", mysettings["PKGDIR"],
			settings=mysettings)
		for update_cmd in myupd:
			if update_cmd[0] == "move":
				trees["/"]["vartree"].dbapi.move_ent(update_cmd)
				trees["/"]["bintree"].move_ent(update_cmd)
			elif update_cmd[0] == "slotmove":
				trees["/"]["vartree"].dbapi.move_slot_ent(update_cmd)
				trees["/"]["bintree"].move_slot_ent(update_cmd)

		# The above global updates proceed quickly, so they
		# are considered a single mtimedb transaction.
		if len(timestamps) > 0:
			# We do not update the mtime in the mtimedb
			# until after _all_ of the above updates have
			# been processed because the mtimedb will
			# automatically commit when killed by ctrl C.
			for mykey, mtime in timestamps.iteritems():
				prev_mtimes[mykey] = mtime

		# We gotta do the brute force updates for these now.
		if mysettings["PORTAGE_CALLER"] == "fixpackages" or \
		"fixpackages" in mysettings.features:
			trees["/"]["bintree"].update_ents(myupd)
			do_upgrade_packagesmessage = 1

		# Update progress above is indicated by characters written to stdout so
		# we print a couple new lines here to separate the progress output from
		# what follows.
		if do_upgrade_packagesmessage and \
			listdir(os.path.join(mysettings["PKGDIR"], "All"), EmptyOnError=1):
			writemsg_stdout(" ** Skipping packages. Run 'fixpackages' or set it in FEATURES to fix the")
			writemsg_stdout("\n    tbz2's in the packages directory. "+bold("Note: This can take a very long time."))
			writemsg_stdout("\n")
8222 #continue setting up other trees
class MtimeDB(dict):
	# A dict persisted to `filename` (via commit_mtimedb).  Keeps a deep
	# copy of the clean on-disk state so commit() can skip no-op writes.
	def __init__(self, filename):
		self.filename = filename
		self._load(filename)

	def _load(self, filename):
		# Unpickle with find_global disabled so no arbitrary classes can
		# be instantiated from a stale or tampered file.
			mypickle = cPickle.Unpickler(f)
			mypickle.find_global = None
		except (IOError, OSError, EOFError, cPickle.UnpicklingError):
			# Migrate the legacy "old" key to "updates".
			d["updates"] = d["old"]
		# Guarantee the expected keys exist with sane defaults.
		d.setdefault("starttime", 0)
		d.setdefault("version", "")
		for k in ("info", "ldpath", "updates"):
		# Whitelist of keys allowed to persist in the mtimedb.
		mtimedbkeys = set(("info", "ldpath", "resume", "resume_backup",
			"starttime", "updates", "version"))
			if k not in mtimedbkeys:
				writemsg("Deleting invalid mtimedb key: %s\n" % str(k))
		self._clean_data = copy.deepcopy(d)
		# commit(): nothing to do when no backing file was configured.
		# NOTE(review): the "def commit(self):" header is elided in this view.
		if not self.filename:
		# Only commit if the internal state has changed.
		if d != self._clean_data:
			commit_mtimedb(mydict=d, filename=self.filename)
			self._clean_data = copy.deepcopy(d)
def create_trees(config_root=None, target_root=None, trees=None):
	# Build (or rebuild) the per-root trees dictionary.
	# NOTE(review): the lines handling a None `trees` argument are elided
	# in this view.
		# clean up any existing portdbapi instances
		for myroot in trees:
			portdb = trees[myroot]["porttree"].dbapi
			portdb.close_caches()
			portdbapi.portdbapi_instances.remove(portdb)
			del trees[myroot]["porttree"], myroot, portdb

	settings = config(config_root=config_root, target_root=target_root,
		config_incrementals=portage_const.INCREMENTALS)

	myroots = [(settings["ROOT"], settings)]
	if settings["ROOT"] != "/":
		# Build a second config (default roots) so "/" gets trees too.
		settings = config(config_root=None, target_root=None,
			config_incrementals=portage_const.INCREMENTALS)
		myroots.append((settings["ROOT"], settings))

	for myroot, mysettings in myroots:
		# Each tree type is created lazily on first access.
		trees[myroot] = portage_util.LazyItemsDict(trees.get(myroot, None))
		trees[myroot].addLazySingleton("virtuals", mysettings.getvirtuals, myroot)
		trees[myroot].addLazySingleton(
			"vartree", vartree, myroot, categories=mysettings.categories,
			settings=mysettings)
		trees[myroot].addLazySingleton("porttree",
			portagetree, myroot, settings=mysettings)
		trees[myroot].addLazySingleton("bintree",
			binarytree, myroot, mysettings["PKGDIR"], settings=mysettings)
8308 # Initialization of legacy globals. No functions/classes below this point
8309 # please! When the above functions and classes become independent of the
8310 # below global variables, it will be possible to make the below code
8311 # conditional on a backward compatibility flag (backward compatibility could
8312 # be disabled via an environment variable, for example). This will enable new
8313 # code that is aware of this flag to import portage without the unnecessary
8314 # overhead (and other issues!) of initializing the legacy globals.
def init_legacy_globals():
	# Populate the module-level legacy globals (settings, db, portdb, ...)
	# for backward compatibility with external portage consumers.
	global db, settings, root, portdb, selinux_enabled, mtimedbfile, mtimedb, \
	archlist, features, groups, pkglines, thirdpartymirrors, usedefaults, \
	profiledir, flushmtimedb

	# Portage needs to ensure a sane umask for the files it creates.

	# Roots come from the environment, defaulting to "/".
	for k, envvar in (("config_root", "PORTAGE_CONFIGROOT"), ("target_root", "ROOT")):
		kwargs[k] = os.environ.get(envvar, "/")

	db = create_trees(**kwargs)

	settings = db["/"]["vartree"].settings
	portdb = db["/"]["porttree"].dbapi

		# Prefer the non-"/" root's settings/portdb when one exists.
		settings = db[myroot]["vartree"].settings
		portdb = db[myroot]["porttree"].dbapi

	root = settings["ROOT"]

	mtimedbfile = os.path.join("/", CACHE_PATH.lstrip(os.path.sep), "mtimedb")
	mtimedb = MtimeDB(mtimedbfile)

	# ========================================================================
	# These attributes should not be used
	# within Portage under any circumstances.
	# ========================================================================
	archlist = settings.archlist()
	features = settings.features
	groups = settings["ACCEPT_KEYWORDS"].split()
	pkglines = settings.packages
	selinux_enabled = settings.selinux_enabled()
	thirdpartymirrors = settings.thirdpartymirrors()
	usedefaults = settings.use_defs
	if os.path.isdir(PROFILE_PATH):
		profiledir = PROFILE_PATH
	def flushmtimedb(record):
		# Deprecated no-op stub kept only for API compatibility.
		writemsg("portage.flushmtimedb() is DEPRECATED\n")
	# ========================================================================
	# These attributes should not be used
	# within Portage under any circumstances.
	# ========================================================================
# The PORTAGE_LEGACY_GLOBALS environment variable is reserved for internal
# use within Portage. External use of this variable is unsupported because
# it is experimental and its behavior is likely to change.
# Initialize the legacy globals at import time unless the caller opted out.
if "PORTAGE_LEGACY_GLOBALS" not in os.environ:
	init_legacy_globals()
8377 # ============================================================================
8378 # ============================================================================