1 # portage.py -- core Portage functionality
2 # Copyright 1998-2004 Gentoo Foundation
3 # Distributed under the terms of the GNU General Public License v2
7 VERSION="$Rev$"[6:-2] + "-svn"
9 # ===========================================================================
10 # START OF IMPORTS -- START OF IMPORTS -- START OF IMPORTS -- START OF IMPORT
11 # ===========================================================================
16 print "Failed to import sys! Something is _VERY_ wrong with python."
20 import copy, errno, os, re, shutil, time, types
24 import pickle as cPickle
28 from time import sleep
29 from random import shuffle
31 if getattr(__builtins__, "set", None) is None:
32 from sets import Set as set
33 from itertools import chain, izip
34 except ImportError, e:
35 sys.stderr.write("\n\n")
36 sys.stderr.write("!!! Failed to complete python imports. These are internal modules for\n")
37 sys.stderr.write("!!! python and failure here indicates that you have a problem with python\n")
38 sys.stderr.write("!!! itself and thus portage is not able to continue processing.\n\n")
40 sys.stderr.write("!!! You might consider starting python with verbose flags to see what has\n")
41 sys.stderr.write("!!! gone wrong. Here is the information we got for this exception:\n")
42 sys.stderr.write(" "+str(e)+"\n\n");
46 # XXX: This should get renamed to bsd_chflags, I think.
53 from cache.cache_errors import CacheError
58 from portage_dep import dep_getcpv, dep_getkey, get_operator, \
59 isjustname, isspecific, isvalidatom, \
60 match_from_list, match_to_list, best_match_to_list
62 # XXX: This needs to get cleaned up.
64 from output import bold, colorize, green, red, yellow
67 from portage_const import VDB_PATH, PRIVATE_PATH, CACHE_PATH, DEPCACHE_PATH, \
68 USER_CONFIG_PATH, MODULES_FILE_PATH, CUSTOM_PROFILE_PATH, PORTAGE_BASE_PATH, \
69 PORTAGE_BIN_PATH, PORTAGE_PYM_PATH, PROFILE_PATH, LOCALE_DATA_PATH, \
70 EBUILD_SH_BINARY, SANDBOX_BINARY, BASH_BINARY, \
71 MOVE_BINARY, PRELINK_BINARY, WORLD_FILE, MAKE_CONF_FILE, MAKE_DEFAULTS_FILE, \
72 DEPRECATED_PROFILE_FILE, USER_VIRTUALS_FILE, EBUILD_SH_ENV_FILE, \
73 INVALID_ENV_FILE, CUSTOM_MIRRORS_FILE, CONFIG_MEMORY_FILE,\
74 INCREMENTALS, EAPI, MISC_SH_BINARY
76 from portage_data import ostype, lchown, userland, secpass, uid, wheelgid, \
77 portage_uid, portage_gid, userpriv_groups
78 from portage_manifest import Manifest
81 from portage_util import atomic_ofstream, apply_secpass_permissions, apply_recursive_permissions, \
82 dump_traceback, getconfig, grabdict, grabdict_package, grabfile, grabfile_package, \
83 map_dictlist_vals, new_protect_filename, normalize_path, \
84 pickle_read, pickle_write, stack_dictlist, stack_dicts, stack_lists, \
85 unique_array, varexpand, writedict, writemsg, writemsg_stdout, write_atomic
86 import portage_exception
90 from portage_exec import atexit_register, run_exitfuncs
91 from portage_locks import unlockfile,unlockdir,lockfile,lockdir
92 import portage_checksum
93 from portage_checksum import perform_md5,perform_checksum,prelink_capable
95 from portage_localization import _
96 from portage_update import dep_transform, fixdbentries, grab_updates, \
97 parse_updates, update_config_files, update_dbentries
99 # Need these functions directly in portage namespace to not break every external tool in existence
100 from portage_versions import best, catpkgsplit, catsplit, pkgcmp, \
101 pkgsplit, vercmp, ververify
103 # endversion and endversion_keys are for backward compatibility only.
104 from portage_versions import endversion_keys
105 from portage_versions import suffix_value as endversion
107 except ImportError, e:
108 sys.stderr.write("\n\n")
109 sys.stderr.write("!!! Failed to complete portage imports. There are internal modules for\n")
110 sys.stderr.write("!!! portage and failure here indicates that you have a problem with your\n")
111 sys.stderr.write("!!! installation of portage. Please try a rescue portage located in the\n")
112 sys.stderr.write("!!! portage tree under '/usr/portage/sys-apps/portage/files/' (default).\n")
113 sys.stderr.write("!!! There is a README.RESCUE file that details the steps required to perform\n")
114 sys.stderr.write("!!! a recovery of portage.\n")
115 sys.stderr.write(" "+str(e)+"\n\n")
120 import portage_selinux as selinux
122 writemsg("!!! SELinux not loaded: %s\n" % str(e), noiselevel=-1)
127 # ===========================================================================
128 # END OF IMPORTS -- END OF IMPORTS -- END OF IMPORTS -- END OF IMPORTS -- END
129 # ===========================================================================
133 modname = ".".join(name.split(".")[:-1])
134 mod = __import__(modname)
135 components = name.split('.')
136 for comp in components[1:]:
137 mod = getattr(mod, comp)
def best_from_dict(key, top_dict, key_order, EmptyOnError=1, FullCopy=1, AllowEmpty=1):
	"""Return the value of ``key`` from the first sub-dict of ``top_dict``
	(tried in ``key_order``) that contains it.  With FullCopy the value is
	deep-copied before being returned; raises KeyError when no sub-dict
	provides the key.  (EmptyOnError/AllowEmpty handling not shown here.)"""
		if top_dict.has_key(x) and top_dict[x].has_key(key):
			# Deep copy so callers cannot mutate the stored value in place.
			return copy.deepcopy(top_dict[x][key])
			return top_dict[x][key]
	raise KeyError, "Key not found in list; '%s'" % key
153 "this fixes situations where the current directory doesn't exist"
156 except OSError: #dir doesn't exist
def abssymlink(symlink):
	"This reads symlinks, resolving the relative symlinks, and returning the absolute."
	mylink=os.readlink(symlink)
		# Relative target: resolve it against the directory holding the symlink.
		mydir=os.path.dirname(symlink)
		mylink=mydir+"/"+mylink
	# normpath collapses any "." / ".." components introduced above.
	return os.path.normpath(mylink)
def cacheddir(my_original_path, ignorecvs, ignorelist, EmptyOnError, followSymlinks=True):
	"""Cached directory listing: stat ``my_original_path``, reuse the entry
	in the module-level ``dircache`` when the directory mtime is unchanged,
	otherwise re-list it and classify each entry (regular file / dir /
	symlink).  Returns (names, types) filtered by ignorecvs/ignorelist."""
	global cacheHit,cacheMiss,cacheStale
	mypath = normalize_path(my_original_path)
	if dircache.has_key(mypath):
		cached_mtime, list, ftype = dircache[mypath]
		# Cache miss: sentinel mtime forces a re-scan below.
		cached_mtime, list, ftype = -1, [], []
		pathstat = os.stat(mypath)
		if stat.S_ISDIR(pathstat[stat.ST_MODE]):
			mtime = pathstat[stat.ST_MTIME]
			raise portage_exception.DirectoryNotFound(mypath)
	except (IOError,OSError,portage_exception.PortageException):
	# Python returns mtime in seconds, so if it was changed in the last few seconds, it could be invalid
	if mtime != cached_mtime or time.time() - mtime < 4:
		if dircache.has_key(mypath):
		list = os.listdir(mypath)
				# stat() follows symlinks; lstat() classifies the link itself.
				pathstat = os.stat(mypath+"/"+x)
				pathstat = os.lstat(mypath+"/"+x)
			if stat.S_ISREG(pathstat[stat.ST_MODE]):
			elif stat.S_ISDIR(pathstat[stat.ST_MODE]):
			elif stat.S_ISLNK(pathstat[stat.ST_MODE]):
		except (IOError, OSError):
		# Remember the fresh listing for subsequent calls.
		dircache[mypath] = mtime, list, ftype
	for x in range(0, len(list)):
		# NOTE(review): with ignorecvs set, entries not starting with ".#"
		# pass straight through without the ignorelist check — confirm intended.
		if(ignorecvs and (len(list[x]) > 2) and (list[x][:2]!=".#")):
			ret_list.append(list[x])
			ret_ftype.append(ftype[x])
		elif (list[x] not in ignorelist):
			ret_list.append(list[x])
			ret_ftype.append(ftype[x])
	writemsg("cacheddirStats: H:%d/M:%d/S:%d\n" % (cacheHit, cacheMiss, cacheStale),10)
	return ret_list, ret_ftype
def listdir(mypath, recursive=False, filesonly=False, ignorecvs=False, ignorelist=[], followSymlinks=True,
	EmptyOnError=False, dirsonly=False):
	"""
	Portage-specific implementation of os.listdir

	@param mypath: Path whose contents you wish to list
	@type mypath: String
	@param recursive: Recursively scan directories contained within mypath
	@type recursive: Boolean
	@param filesonly: Only return files, not more directories
	@type filesonly: Boolean
	@param ignorecvs: Ignore CVS directories ('CVS','.svn','SCCS')
	@type ignorecvs: Boolean
	@param ignorelist: List of filenames/directories to exclude
	@type ignorelist: List
	@param followSymlinks: Follow Symlink'd files and directories
	@type followSymlinks: Boolean
	@param EmptyOnError: Return [] if an error occurs.
	@type EmptyOnError: Boolean
	@param dirsonly: Only return directories.
	@type dirsonly: Boolean
	@rtype: List
	@returns: A list of files and directories (or just files or just directories) or an empty list.
	"""
	# The mtime-cached listing also classifies entries by type (see cacheddir).
	list, ftype = cacheddir(mypath, ignorecvs, ignorelist, EmptyOnError, followSymlinks)

	# Fast path: plain listing, no filtering or recursion requested.
	if not (filesonly or dirsonly or recursive):
			# ftype code 1 means directory; skip VCS metadata dirs when asked.
			if ftype[x]==1 and not (ignorecvs and os.path.basename(list[x]) in ('CVS','.svn','SCCS')):
				l,f = cacheddir(mypath+"/"+list[x], ignorecvs, ignorelist, EmptyOnError,
				# Re-prefix the child entries with their parent directory.
				for y in range(0,len(l)):
					l[y]=list[x]+"/"+l[y]
		for x in range(0,len(ftype)):
			rlist=rlist+[list[x]]
		for x in range(0, len(ftype)):
			rlist = rlist + [list[x]]
def flatten(mytokens):
	"""this function now turns a [1,[2,3]] list into
	a [1,2,3] list and returns it."""
		# Recursively splice nested lists into the result.
		if type(x)==types.ListType:
			newlist.extend(flatten(x))
304 #beautiful directed graph object
308 """Create an empty digraph"""
310 # { node : ( { child : priority } , { parent : priority } ) }
	def add(self, node, parent, priority=0):
		"""Adds the specified node with the specified parent.

		If the dep is a soft-dep and the node already has a hard
		relationship to the parent, the relationship is left as hard."""

		# Register the node itself on first sight; nodes maps
		# node -> ({child: priority}, {parent: priority}).
		if node not in self.nodes:
			self.nodes[node] = ({}, {})
			self.order.append(node)

		# The parent is a node in its own right too.
		if parent not in self.nodes:
			self.nodes[parent] = ({}, {})
			self.order.append(parent)

		# Keep the strongest (highest) priority for an existing edge.
		if parent in self.nodes[node][1]:
			if priority > self.nodes[node][1][parent]:
				self.nodes[node][1][parent] = priority
			self.nodes[node][1][parent] = priority

		# Mirror the edge in the parent's child map.
		if node in self.nodes[parent][0]:
			if priority > self.nodes[parent][0][node]:
				self.nodes[parent][0][node] = priority
			self.nodes[parent][0][node] = priority
	def remove(self, node):
		"""Removes the specified node from the digraph, also removing
		any ties to other nodes in the digraph. Raises KeyError if the
		node doesn't exist."""

		if node not in self.nodes:
		# Drop the back-references both directions: this node out of each
		# parent's child map, and out of each child's parent map.
		for parent in self.nodes[node][1]:
			del self.nodes[parent][0][node]
		for child in self.nodes[node][0]:
			del self.nodes[child][1][node]
		self.order.remove(node)
359 def contains(self, node):
360 """Checks if the digraph contains mynode"""
361 return node in self.nodes
364 """Return a list of all nodes in the graph"""
	def child_nodes(self, node, ignore_priority=None):
		"""Return all children of the specified node.

		When ignore_priority is given, only children whose edge priority
		is strictly greater than ignore_priority are collected."""
		if ignore_priority is None:
			# No filtering: hand back the child keys directly.
			return self.nodes[node][0].keys()
		for child, priority in self.nodes[node][0].iteritems():
			if priority > ignore_priority:
				children.append(child)
377 def parent_nodes(self, node):
378 """Return all parents of the specified node"""
379 return self.nodes[node][1].keys()
	def leaf_nodes(self, ignore_priority=None):
		"""Return all nodes that have no children.

		If ignore_soft_deps is True, soft deps are not counted as
		children in calculations."""

		# Scan in insertion order; a node stops being a leaf as soon as one
		# child edge exceeds the ignore_priority threshold.
		for node in self.order:
			for child in self.nodes[node][0]:
				if self.nodes[node][0][child] > ignore_priority:
				leaf_nodes.append(node)
	def root_nodes(self, ignore_priority=None):
		"""Return all nodes that have no parents.

		If ignore_soft_deps is True, soft deps are not counted as
		parents in calculations."""

		# Mirror image of leaf_nodes(): a node is a root unless some parent
		# edge exceeds the ignore_priority threshold.
		for node in self.order:
			for parent in self.nodes[node][1]:
				if self.nodes[node][1][parent] > ignore_priority:
				root_nodes.append(node)
416 """Checks if the digraph is empty"""
417 return len(self.nodes) == 0
421 clone.nodes = copy.deepcopy(self.nodes)
422 clone.order = self.order[:]
425 # Backward compatibility
428 allzeros = leaf_nodes
433 def delnode(self, node):
440 leaf_nodes = self.leaf_nodes()
	def hasallzeros(self, ignore_priority=None):
		"""True when the leaf set covers the whole graph — presumably the
		comparison completes against the total node count; confirm."""
		return len(self.leaf_nodes(ignore_priority=ignore_priority)) == \
	def debug_print(self):
		"""Dump every node and its child edges (with priorities) to stdout;
		debugging aid only."""
		for node in self.nodes:
			if self.nodes[node][0]:
				print "(no children)"
			for child in self.nodes[node][0]:
				# Show the priority stored on the node -> child edge.
				print "(%s)" % self.nodes[node][0][child]
461 _elog_atexit_handlers = []
def elog_process(cpv, mysettings):
	"""Collect the per-phase log snippets for ``cpv`` from ${T}/logging,
	filter them by PORTAGE_ELOG_CLASSES, and dispatch the combined log to
	each module named in PORTAGE_ELOG_SYSTEM (elog_modules.mod_<name>)."""
	mylogfiles = listdir(mysettings["T"]+"/logging/")
	# shortcut for packages without any messages
	if len(mylogfiles) == 0:
	# exploit listdir() file order so we process log entries in chronological order
	my_elog_classes = set(mysettings.get("PORTAGE_ELOG_CLASSES", "").split())
		# Log file names have the form "<phase>.<msgtype>".
		msgfunction, msgtype = f.split(".")
		if msgtype.upper() not in my_elog_classes \
			and msgtype.lower() not in my_elog_classes:
		if msgfunction not in portage_const.EBUILD_PHASES:
			writemsg("!!! can't process invalid log file: %s\n" % f,
		if not msgfunction in mylogentries:
			mylogentries[msgfunction] = []
		msgcontent = open(mysettings["T"]+"/logging/"+f, "r").readlines()
		mylogentries[msgfunction].append((msgtype, msgcontent))

	# in case the filters matched all messages
	if len(mylogentries) == 0:

	# generate a single string with all log messages
	for phase in portage_const.EBUILD_PHASES:
		if not phase in mylogentries:
		for msgtype,msgcontent in mylogentries[phase]:
			fulllog += "%s: %s\n" % (msgtype, phase)
			for line in msgcontent:

	# pass the processing to the individual modules
	logsystems = mysettings["PORTAGE_ELOG_SYSTEM"].split()
		# - is nicer than _ for module names, so allow people to use it.
		s = s.replace("-", "_")
		# FIXME: ugly ad.hoc import code
		# TODO: implement a common portage module loader
			logmodule = __import__("elog_modules.mod_"+s)
			m = getattr(logmodule, "mod_"+s)
			def timeout_handler(signum, frame):
				raise portage_exception.PortageException(
					"Timeout in elog_process for system '%s'" % s)
			# Guard against a hung module with SIGALRM.
			signal.signal(signal.SIGALRM, timeout_handler)
			# Timeout after one minute (in case something like the mail
			m.process(mysettings, cpv, mylogentries, fulllog)
			# Register each module's finalize hook exactly once.
			if hasattr(m, "finalize") and not m.finalize in _elog_atexit_handlers:
				_elog_atexit_handlers.append(m.finalize)
				atexit_register(m.finalize, mysettings)
		except (ImportError, AttributeError), e:
			writemsg("!!! Error while importing logging modules " + \
				"while loading \"mod_%s\":\n" % str(s))
			writemsg("%s\n" % str(e), noiselevel=-1)
		except portage_exception.PortageException, e:
			writemsg("%s\n" % str(e), noiselevel=-1)

	# clean logfiles to avoid repetitions
		os.unlink(os.path.join(mysettings["T"], "logging", f))
539 #parse /etc/env.d and generate /etc/profile.env
def env_update(makelinks=1, target_root=None, prev_mtimes=None, contents=None):
	"""Regenerate ROOT/etc/profile.env, ROOT/etc/csh.env, ld.so.conf (and,
	when needed, the ld.so cache and prelink.conf) from the files under
	ROOT/etc/env.d.  ldconfig is only re-run when the relevant library
	directory mtimes (tracked in prev_mtimes) indicate a change."""
	if target_root is None:
	if prev_mtimes is None:
		prev_mtimes = mtimedb["ldpath"]
	envd_dir = os.path.join(target_root, "etc", "env.d")
	portage_util.ensure_dirs(envd_dir, mode=0755)
	fns = listdir(envd_dir, EmptyOnError=1)
		# Only honor files named with a two-digit prefix; skip hidden files,
		# editor backups, and .bak copies.
		if not x[0].isdigit() or not x[1].isdigit():
		if x.startswith(".") or x.endswith("~") or x.endswith(".bak"):
	# Variables merged by joining values with a space vs. a colon.
	space_separated = set(["CONFIG_PROTECT", "CONFIG_PROTECT_MASK"])
	colon_separated = set(["ADA_INCLUDE_PATH", "ADA_OBJECTS_PATH",
		"CLASSPATH", "INFODIR", "INFOPATH", "KDEDIRS", "LDPATH", "MANPATH",
		"PATH", "PKG_CONFIG_PATH", "PRELINK_PATH", "PRELINK_PATH_MASK",
		"PYTHONPATH", "ROOTPATH"])
		file_path = os.path.join(envd_dir, x)
			myconfig = getconfig(file_path, expand=False)
		except portage_exception.ParseError, e:
			writemsg("!!! '%s'\n" % str(e), noiselevel=-1)
			# broken symlink or file removed by a concurrent process
			writemsg("!!! File Not Found: '%s'\n" % file_path, noiselevel=-1)
		config_list.append(myconfig)
		# env.d files may extend the separator classes themselves.
		if "SPACE_SEPARATED" in myconfig:
			space_separated.update(myconfig["SPACE_SEPARATED"].split())
			del myconfig["SPACE_SEPARATED"]
		if "COLON_SEPARATED" in myconfig:
			colon_separated.update(myconfig["COLON_SEPARATED"].split())
			del myconfig["COLON_SEPARATED"]
	# Merge the cumulative variables across all env.d files.
	for var in space_separated:
		for myconfig in config_list:
				mylist.extend(filter(None, myconfig[var].split()))
				del myconfig[var] # prepare for env.update(myconfig)
			env[var] = " ".join(mylist)
		specials[var] = mylist
	for var in colon_separated:
		for myconfig in config_list:
				mylist.extend(filter(None, myconfig[var].split(":")))
				del myconfig[var] # prepare for env.update(myconfig)
			env[var] = ":".join(mylist)
		specials[var] = mylist
	for myconfig in config_list:
		"""Cumulative variables have already been deleted from myconfig so that
		they won't be overwritten by this dict.update call."""
	ldsoconf_path = os.path.join(target_root, "etc", "ld.so.conf")
		myld = open(ldsoconf_path)
		myldlines=myld.readlines()
			#each line has at least one char (a newline)
	except (IOError, OSError), e:
		if e.errno != errno.ENOENT:
	ld_cache_update=False
	newld = specials["LDPATH"]
		#ld.so.conf needs updating and ldconfig needs to be run
		myfd = atomic_ofstream(ldsoconf_path)
		myfd.write("# ld.so.conf autogenerated by env-update; make all changes to\n")
		myfd.write("# contents of /etc/env.d directory\n")
		for x in specials["LDPATH"]:

	# Update prelink.conf if we are prelink-enabled
		newprelink = atomic_ofstream(
			os.path.join(target_root, "etc", "prelink.conf"))
		newprelink.write("# prelink.conf autogenerated by env-update; make all changes to\n")
		newprelink.write("# contents of /etc/env.d directory\n")

		for x in ["/bin","/sbin","/usr/bin","/usr/sbin","/lib","/usr/lib"]:
			newprelink.write("-l "+x+"\n");
		for x in specials["LDPATH"]+specials["PATH"]+specials["PRELINK_PATH"]:
			for y in specials["PRELINK_PATH_MASK"]:
			newprelink.write("-h "+x+"\n")
		for x in specials["PRELINK_PATH_MASK"]:
			newprelink.write("-b "+x+"\n")

	# Portage stores mtimes with 1 second granularity but in >=python-2.5 finer
	# granularity is possible. In order to avoid the potential ambiguity of
	# mtimes that differ by less than 1 second, sleep here if any of the
	# directories have been modified during the current second.
	sleep_for_mtime_granularity = False
	current_time = long(time.time())
	mtime_changed = False
	for lib_dir in portage_util.unique_array(specials["LDPATH"]+['usr/lib','usr/lib64','usr/lib32','lib','lib64','lib32']):
		x = os.path.join(target_root, lib_dir.lstrip(os.sep))
			newldpathtime = long(os.stat(x).st_mtime)
			lib_dirs.add(normalize_path(x))
			if oe.errno == errno.ENOENT:
				# ignore this path because it doesn't exist
		if newldpathtime == current_time:
			sleep_for_mtime_granularity = True
			if prev_mtimes[x] == newldpathtime:
			prev_mtimes[x] = newldpathtime
			prev_mtimes[x] = newldpathtime
		ld_cache_update = True
		not ld_cache_update and \
		contents is not None:
		# See if the merged package actually installed anything into a
		# library directory before paying for an ldconfig run.
		libdir_contents_changed = False
		for mypath, mydata in contents.iteritems():
			if mydata[0] not in ("obj","sym"):
			head, tail = os.path.split(mypath)
				libdir_contents_changed = True
		if not libdir_contents_changed:

	# Only run ldconfig as needed
	if (ld_cache_update or makelinks):
		# ldconfig has very different behaviour between FreeBSD and Linux
		if ostype=="Linux" or ostype.lower().endswith("gnu"):
			# We can't update links if we haven't cleaned other versions first, as
			# an older package installed ON TOP of a newer version will cause ldconfig
			# to overwrite the symlinks we just made. -X means no links. After 'clean'
			# we can safely create links.
			writemsg(">>> Regenerating %setc/ld.so.cache...\n" % target_root)
				commands.getstatusoutput("cd / ; /sbin/ldconfig -r '%s'" % target_root)
				commands.getstatusoutput("cd / ; /sbin/ldconfig -X -r '%s'" % target_root)
		elif ostype in ("FreeBSD","DragonFly"):
			writemsg(">>> Regenerating %svar/run/ld-elf.so.hints...\n" % target_root)
			commands.getstatusoutput(
				"cd / ; /sbin/ldconfig -elf -i -f '%svar/run/ld-elf.so.hints' '%setc/ld.so.conf'" % \
				(target_root, target_root))

	del specials["LDPATH"]

	penvnotice = "# THIS FILE IS AUTOMATICALLY GENERATED BY env-update.\n"
	penvnotice += "# DO NOT EDIT THIS FILE. CHANGES TO STARTUP PROFILES\n"
	cenvnotice = penvnotice[:]
	penvnotice += "# GO INTO /etc/profile NOT /etc/profile.env\n\n"
	cenvnotice += "# GO INTO /etc/csh.cshrc NOT /etc/csh.env\n\n"

	#create /etc/profile.env for bash support
	outfile = atomic_ofstream(os.path.join(target_root, "etc", "profile.env"))
	outfile.write(penvnotice)

	env_keys = [ x for x in env if x != "LDPATH" ]
		outfile.write("export %s='%s'\n" % (x, env[x]))

	#create /etc/csh.env for (t)csh support
	outfile = atomic_ofstream(os.path.join(target_root, "etc", "csh.env"))
	outfile.write(cenvnotice)
		outfile.write("setenv %s '%s'\n" % (x, env[x]))

	# Wait out the current second so the next mtime snapshot is unambiguous.
	if sleep_for_mtime_granularity:
		while current_time == long(time.time()):
def ExtractKernelVersion(base_dir):
	"""
	Try to figure out what kernel version we are running
	@param base_dir: Path to sources (usually /usr/src/linux)
	@type base_dir: string
	@rtype: tuple( version[string], error[string])
	@returns:
	1. tuple( version[string], error[string])
	Either version or error is populated (but never both)
	"""
	pathname = os.path.join(base_dir, 'Makefile')
		f = open(pathname, 'r')
	except OSError, details:
		return (None, str(details))
	except IOError, details:
		return (None, str(details))

	# Only the first few Makefile lines carry the version variables.
			lines.append(f.readline())
	except OSError, details:
		return (None, str(details))
	except IOError, details:
		return (None, str(details))

	lines = [l.strip() for l in lines]

	#XXX: The following code relies on the ordering of vars within the Makefile
		# split on the '=' then remove annoying whitespace
		items = line.split("=")
		items = [i.strip() for i in items]
		if items[0] == 'VERSION' or \
			items[0] == 'PATCHLEVEL':
		elif items[0] == 'SUBLEVEL':
		elif items[0] == 'EXTRAVERSION' and \
			items[-1] != items[0]:

	# Grab a list of files named localversion* and sort them
	localversions = os.listdir(base_dir)
	# Iterate backwards so removal by index stays valid.
	for x in range(len(localversions)-1,-1,-1):
		if localversions[x][:12] != "localversion":

	# Append the contents of each to the version string, stripping ALL whitespace
	for lv in localversions:
		version += "".join( " ".join( grabfile( base_dir+ "/" + lv ) ).split() )

	# Check the .config for a CONFIG_LOCALVERSION and append that too, also stripping whitespace
	kernelconfig = getconfig(base_dir+"/.config")
	if kernelconfig and kernelconfig.has_key("CONFIG_LOCALVERSION"):
		version += "".join(kernelconfig["CONFIG_LOCALVERSION"].split())

	return (version,None)
def autouse(myvartree, use_cache=1, mysettings=None):
	"""
	autouse returns a list of USE variables auto-enabled to packages being installed

	@param myvartree: Instance of the vartree class (from /var/db/pkg...)
	@type myvartree: vartree
	@param use_cache: read values from cache
	@type use_cache: Boolean
	@param mysettings: Instance of config
	@type mysettings: config
	@rtype: string
	@returns: A string containing a list of USE variables that are enabled via use.defaults
	"""
	if mysettings is None:
		mysettings = settings
	if mysettings.profile_path is None:
	# use_defs maps USE flag -> list of dep atoms that must all be installed.
	usedefaults = mysettings.use_defs
	for myuse in usedefaults:
		for mydep in usedefaults[myuse]:
			# NOTE(review): hardcodes use_cache=True instead of passing the
			# use_cache parameter — confirm intended.
			if not myvartree.dep_match(mydep,use_cache=True):
			myusevars += " "+myuse
def check_config_instance(test):
	"""Validate that ``test`` is a portage.config instance.

	@param test: object to validate
	@raises TypeError: if test is false or is not an instance whose class
		reports itself as 'portage.config'
	"""
	# Class-name comparison (rather than isinstance) keeps instances from a
	# reloaded portage module acceptable, matching the original check.
	if not test or (str(test.__class__) != 'portage.config'):
		# Exception-instance form: identical behavior to the old
		# "raise TypeError, msg" statement, but also valid syntax under
		# Python 3 (the comma form was removed by PEP 3109).
		raise TypeError("Invalid type for config object: %s" % test.__class__)
877 This class encompasses the main portage configuration. Data is pulled from
878 ROOT/PORTDIR/profiles/, from ROOT/etc/make.profile incrementally through all
879 parent profiles as well as from ROOT/PORTAGE_CONFIGROOT/* for user specified
882 Generally if you need data like USE flags, FEATURES, environment variables,
883 virtuals ...etc you look in here.
886 def __init__(self, clone=None, mycpv=None, config_profile_path=None,
887 config_incrementals=None, config_root=None, target_root=None,
890 @param clone: If provided, init will use deepcopy to copy by value the instance.
891 @type clone: Instance of config class.
892 @param mycpv: CPV to load up (see setcpv), this is the same as calling init with mycpv=None
893 and then calling instance.setcpv(mycpv).
895 @param config_profile_path: Configurable path to the profile (usually PROFILE_PATH from portage_const)
896 @type config_profile_path: String
897 @param config_incrementals: List of incremental variables (usually portage_const.INCREMENTALS)
898 @type config_incrementals: List
899 @param config_root: path to read local config from (defaults to "/", see PORTAGE_CONFIGROOT)
900 @type config_root: String
901 @param target_root: __init__ override of $ROOT env variable.
902 @type target_root: String
903 @param local_config: Enables loading of local config (/etc/portage); used most by repoman to
904 ignore local config (keywording and unmasking)
905 @type local_config: Boolean
908 debug = os.environ.get("PORTAGE_DEBUG") == "1"
910 self.already_in_regenerate = 0
915 self.modifiedkeys = []
920 self.dirVirtuals = None
923 # Virtuals obtained from the vartree
924 self.treeVirtuals = {}
925 # Virtuals by user specification. Includes negatives.
926 self.userVirtuals = {}
927 # Virtual negatives from user specifications.
928 self.negVirtuals = {}
930 self.user_profile_dir = None
931 self.local_config = local_config
934 self.incrementals = copy.deepcopy(clone.incrementals)
935 self.profile_path = copy.deepcopy(clone.profile_path)
936 self.user_profile_dir = copy.deepcopy(clone.user_profile_dir)
937 self.local_config = copy.deepcopy(clone.local_config)
939 self.module_priority = copy.deepcopy(clone.module_priority)
940 self.modules = copy.deepcopy(clone.modules)
942 self.depcachedir = copy.deepcopy(clone.depcachedir)
944 self.packages = copy.deepcopy(clone.packages)
945 self.virtuals = copy.deepcopy(clone.virtuals)
947 self.treeVirtuals = copy.deepcopy(clone.treeVirtuals)
948 self.userVirtuals = copy.deepcopy(clone.userVirtuals)
949 self.negVirtuals = copy.deepcopy(clone.negVirtuals)
951 self.use_defs = copy.deepcopy(clone.use_defs)
952 self.usemask = copy.deepcopy(clone.usemask)
953 self.usemask_list = copy.deepcopy(clone.usemask_list)
954 self.pusemask_list = copy.deepcopy(clone.pusemask_list)
955 self.useforce = copy.deepcopy(clone.useforce)
956 self.useforce_list = copy.deepcopy(clone.useforce_list)
957 self.puseforce_list = copy.deepcopy(clone.puseforce_list)
958 self.puse = copy.deepcopy(clone.puse)
959 self.make_defaults_use = copy.deepcopy(clone.make_defaults_use)
960 self.pkgprofileuse = copy.deepcopy(clone.pkgprofileuse)
961 self.mycpv = copy.deepcopy(clone.mycpv)
963 self.configlist = copy.deepcopy(clone.configlist)
964 self.lookuplist = self.configlist[:]
965 self.lookuplist.reverse()
967 "env.d": self.configlist[0],
968 "pkginternal": self.configlist[1],
969 "globals": self.configlist[2],
970 "defaults": self.configlist[3],
971 "conf": self.configlist[4],
972 "pkg": self.configlist[5],
973 "auto": self.configlist[6],
974 "backupenv": self.configlist[7],
975 "env": self.configlist[8] }
976 self.profiles = copy.deepcopy(clone.profiles)
977 self.backupenv = self.configdict["backupenv"]
978 self.pusedict = copy.deepcopy(clone.pusedict)
979 self.categories = copy.deepcopy(clone.categories)
980 self.pkeywordsdict = copy.deepcopy(clone.pkeywordsdict)
981 self.pmaskdict = copy.deepcopy(clone.pmaskdict)
982 self.punmaskdict = copy.deepcopy(clone.punmaskdict)
983 self.prevmaskdict = copy.deepcopy(clone.prevmaskdict)
984 self.pprovideddict = copy.deepcopy(clone.pprovideddict)
985 self.dirVirtuals = copy.deepcopy(clone.dirVirtuals)
986 self.treeVirtuals = copy.deepcopy(clone.treeVirtuals)
987 self.features = copy.deepcopy(clone.features)
990 # backupenv is for calculated incremental variables.
991 self.backupenv = os.environ.copy()
993 def check_var_directory(varname, var):
994 if not os.path.isdir(var):
995 writemsg(("!!! Error: %s='%s' is not a directory. " + \
996 "Please correct this.\n") % (varname, var),
998 raise portage_exception.DirectoryNotFound(var)
1000 if config_root is None:
1004 normalize_path(config_root).rstrip(os.path.sep) + os.path.sep
1006 check_var_directory("PORTAGE_CONFIGROOT", config_root)
1008 self.depcachedir = DEPCACHE_PATH
1010 if not config_profile_path:
1011 config_profile_path = \
1012 os.path.join(config_root, PROFILE_PATH.lstrip(os.path.sep))
1013 if os.path.isdir(config_profile_path):
1014 self.profile_path = config_profile_path
1016 self.profile_path = None
1018 self.profile_path = config_profile_path[:]
1020 if not config_incrementals:
1021 writemsg("incrementals not specified to class config\n")
1022 self.incrementals = copy.deepcopy(portage_const.INCREMENTALS)
1024 self.incrementals = copy.deepcopy(config_incrementals)
1026 self.module_priority = ["user","default"]
1028 self.modules["user"] = getconfig(
1029 os.path.join(config_root, MODULES_FILE_PATH.lstrip(os.path.sep)))
1030 if self.modules["user"] is None:
1031 self.modules["user"] = {}
1032 self.modules["default"] = {
1033 "portdbapi.metadbmodule": "cache.metadata.database",
1034 "portdbapi.auxdbmodule": "cache.flat_hash.database",
1040 # back up our incremental variables:
1042 # configlist will contain: [ env.d, globals, defaults, conf, pkg, auto, backupenv, env ]
1043 self.configlist.append({})
1044 self.configdict["env.d"] = self.configlist[-1]
1046 self.configlist.append({})
1047 self.configdict["pkginternal"] = self.configlist[-1]
1049 # The symlink might not exist or might not be a symlink.
1050 if self.profile_path is None:
1054 def addProfile(currentPath):
1055 parentsFile = os.path.join(currentPath, "parent")
1056 if os.path.exists(parentsFile):
1057 parents = grabfile(parentsFile)
1059 raise portage_exception.ParseError(
1060 "Empty parent file: '%s'" % parents_file)
1061 for parentPath in parents:
1062 parentPath = normalize_path(os.path.join(
1063 currentPath, parentPath))
1064 if os.path.exists(parentPath):
1065 addProfile(parentPath)
1067 raise portage_exception.ParseError(
1068 "Parent '%s' not found: '%s'" % \
1069 (parentPath, parentsFile))
1070 self.profiles.append(currentPath)
1071 addProfile(os.path.realpath(self.profile_path))
1073 custom_prof = os.path.join(
1074 config_root, CUSTOM_PROFILE_PATH.lstrip(os.path.sep))
1075 if os.path.exists(custom_prof):
1076 self.user_profile_dir = custom_prof
1077 self.profiles.append(custom_prof)
1080 self.packages_list = [grabfile_package(os.path.join(x, "packages")) for x in self.profiles]
1081 self.packages = stack_lists(self.packages_list, incremental=1)
1082 del self.packages_list
1083 #self.packages = grab_stacked("packages", self.profiles, grabfile, incremental_lines=1)
1086 self.prevmaskdict={}
1087 for x in self.packages:
1088 mycatpkg=dep_getkey(x)
1089 if not self.prevmaskdict.has_key(mycatpkg):
1090 self.prevmaskdict[mycatpkg]=[x]
1092 self.prevmaskdict[mycatpkg].append(x)
1094 # get profile-masked use flags -- INCREMENTAL Child over parent
1095 self.usemask_list = [grabfile(os.path.join(x, "use.mask")) \
1096 for x in self.profiles]
1097 self.usemask = set(stack_lists(
1098 self.usemask_list, incremental=True))
1099 use_defs_lists = [grabdict(os.path.join(x, "use.defaults")) for x in self.profiles]
1100 self.use_defs = stack_dictlist(use_defs_lists, incremental=True)
1103 self.pusemask_list = []
1104 rawpusemask = [grabdict_package(
1105 os.path.join(x, "package.use.mask")) \
1106 for x in self.profiles]
1107 for i in xrange(len(self.profiles)):
1109 for k, v in rawpusemask[i].iteritems():
1110 cpdict.setdefault(dep_getkey(k), {})[k] = v
1111 self.pusemask_list.append(cpdict)
1114 self.pkgprofileuse = []
1115 rawprofileuse = [grabdict_package(
1116 os.path.join(x, "package.use"), juststrings=True) \
1117 for x in self.profiles]
1118 for i in xrange(len(self.profiles)):
1120 for k, v in rawprofileuse[i].iteritems():
1121 cpdict.setdefault(dep_getkey(k), {})[k] = v
1122 self.pkgprofileuse.append(cpdict)
1125 self.useforce_list = [grabfile(os.path.join(x, "use.force")) \
1126 for x in self.profiles]
1127 self.useforce = set(stack_lists(
1128 self.useforce_list, incremental=True))
1130 self.puseforce_list = []
1131 rawpuseforce = [grabdict_package(
1132 os.path.join(x, "package.use.force")) \
1133 for x in self.profiles]
1134 for i in xrange(len(self.profiles)):
1136 for k, v in rawpuseforce[i].iteritems():
1137 cpdict.setdefault(dep_getkey(k), {})[k] = v
1138 self.puseforce_list.append(cpdict)
1142 self.mygcfg = getconfig(os.path.join(config_root, "etc", "make.globals"))
1144 if self.mygcfg is None:
1146 except SystemExit, e:
1148 except Exception, e:
1151 writemsg("!!! %s\n" % (e), noiselevel=-1)
1152 if not isinstance(e, EnvironmentError):
1153 writemsg("!!! Incorrect multiline literals can cause " + \
1154 "this. Do not use them.\n", noiselevel=-1)
1156 self.configlist.append(self.mygcfg)
1157 self.configdict["globals"]=self.configlist[-1]
1159 self.make_defaults_use = []
1163 mygcfg_dlists = [getconfig(os.path.join(x, "make.defaults")) for x in self.profiles]
1164 for cfg in mygcfg_dlists:
1166 self.make_defaults_use.append(cfg.get("USE", ""))
1168 self.make_defaults_use.append("")
1169 self.mygcfg = stack_dicts(mygcfg_dlists, incrementals=portage_const.INCREMENTALS, ignore_none=1)
1170 #self.mygcfg = grab_stacked("make.defaults", self.profiles, getconfig)
1171 if self.mygcfg is None:
1173 except SystemExit, e:
1175 except Exception, e:
1178 writemsg("!!! %s\n" % (e), noiselevel=-1)
1179 if not isinstance(e, EnvironmentError):
1180 writemsg("!!! 'rm -Rf /usr/portage/profiles; " + \
1181 "emerge sync' may fix this. If it does\n",
1183 writemsg("!!! not then please report this to " + \
1184 "bugs.gentoo.org and, if possible, a dev\n",
1186 writemsg("!!! on #gentoo (irc.freenode.org)\n",
1189 self.configlist.append(self.mygcfg)
1190 self.configdict["defaults"]=self.configlist[-1]
1193 self.mygcfg = getconfig(
1194 os.path.join(config_root, MAKE_CONF_FILE.lstrip(os.path.sep)),
1195 allow_sourcing=True)
1196 if self.mygcfg is None:
1198 except SystemExit, e:
1200 except Exception, e:
1203 writemsg("!!! %s\n" % (e), noiselevel=-1)
1204 if not isinstance(e, EnvironmentError):
1205 writemsg("!!! Incorrect multiline literals can cause " + \
1206 "this. Do not use them.\n", noiselevel=-1)
1209 # Allow ROOT setting to come from make.conf if it's not overridden
1210 # by the constructor argument (from the calling environment). As a
1211 # special exception for a very common use case, config_root == "/"
1212 # implies that ROOT in make.conf should be ignored. That way, the
1213 # user can chroot into $ROOT and the ROOT setting in make.conf will
1214 # be automatically ignored (unless config_root is other than "/").
1215 if config_root != "/" and \
1216 target_root is None and "ROOT" in self.mygcfg:
1217 target_root = self.mygcfg["ROOT"]
1219 self.configlist.append(self.mygcfg)
1220 self.configdict["conf"]=self.configlist[-1]
1222 self.configlist.append({})
1223 self.configdict["pkg"]=self.configlist[-1]
1226 self.configlist.append({})
1227 self.configdict["auto"]=self.configlist[-1]
1229 self.configlist.append(self.backupenv) # XXX Why though?
1230 self.configdict["backupenv"]=self.configlist[-1]
1232 self.configlist.append(os.environ.copy())
1233 self.configdict["env"]=self.configlist[-1]
1236 # make lookuplist for loading package.*
1237 self.lookuplist=self.configlist[:]
1238 self.lookuplist.reverse()
1240 # Blacklist vars that could interfere with portage internals.
1241 for blacklisted in ["PKGUSE", "PORTAGE_CONFIGROOT", "ROOT"]:
1242 for cfg in self.lookuplist:
1244 del cfg[blacklisted]
1247 del blacklisted, cfg
1249 if target_root is None:
1253 normalize_path(target_root).rstrip(os.path.sep) + os.path.sep
1255 check_var_directory("ROOT", target_root)
1258 os.path.join(target_root, "etc", "profile.env"), expand=False)
1259 # env_d will be None if profile.env doesn't exist.
1261 self.configdict["env.d"].update(env_d)
1262 # Remove duplicate values so they don't override updated
1263 # profile.env values later (profile.env is reloaded in each
1264 # call to self.regenerate).
1265 for cfg in (self.configdict["backupenv"],
1266 self.configdict["env"]):
1267 for k, v in env_d.iteritems():
1275 self["PORTAGE_CONFIGROOT"] = config_root
1276 self.backup_changes("PORTAGE_CONFIGROOT")
1277 self["ROOT"] = target_root
1278 self.backup_changes("ROOT")
1281 self.pkeywordsdict = {}
1282 self.punmaskdict = {}
1283 abs_user_config = os.path.join(config_root,
1284 USER_CONFIG_PATH.lstrip(os.path.sep))
1286 # locations for "categories" and "arch.list" files
1287 locations = [os.path.join(self["PORTDIR"], "profiles")]
1288 pmask_locations = [os.path.join(self["PORTDIR"], "profiles")]
1289 pmask_locations.extend(self.profiles)
1291 """ repoman controls PORTDIR_OVERLAY via the environment, so no
1292 special cases are needed here."""
1293 overlay_profiles = []
1294 for ov in self["PORTDIR_OVERLAY"].split():
1295 ov = normalize_path(ov)
1296 profiles_dir = os.path.join(ov, "profiles")
1297 if os.path.isdir(profiles_dir):
1298 overlay_profiles.append(profiles_dir)
1299 locations += overlay_profiles
1301 pmask_locations.extend(overlay_profiles)
1304 locations.append(abs_user_config)
1305 pmask_locations.append(abs_user_config)
1306 pusedict = grabdict_package(
1307 os.path.join(abs_user_config, "package.use"), recursive=1)
1308 for key in pusedict.keys():
1309 cp = dep_getkey(key)
1310 if not self.pusedict.has_key(cp):
1311 self.pusedict[cp] = {}
1312 self.pusedict[cp][key] = pusedict[key]
1315 pkgdict = grabdict_package(
1316 os.path.join(abs_user_config, "package.keywords"),
1318 for key in pkgdict.keys():
1319 # default to ~arch if no specific keyword is given
1320 if not pkgdict[key]:
1322 if self.configdict["defaults"] and self.configdict["defaults"].has_key("ACCEPT_KEYWORDS"):
1323 groups = self.configdict["defaults"]["ACCEPT_KEYWORDS"].split()
1326 for keyword in groups:
1327 if not keyword[0] in "~-":
1328 mykeywordlist.append("~"+keyword)
1329 pkgdict[key] = mykeywordlist
1330 cp = dep_getkey(key)
1331 if not self.pkeywordsdict.has_key(cp):
1332 self.pkeywordsdict[cp] = {}
1333 self.pkeywordsdict[cp][key] = pkgdict[key]
1336 pkgunmasklines = grabfile_package(
1337 os.path.join(abs_user_config, "package.unmask"),
1339 for x in pkgunmasklines:
1340 mycatpkg=dep_getkey(x)
1341 if self.punmaskdict.has_key(mycatpkg):
1342 self.punmaskdict[mycatpkg].append(x)
1344 self.punmaskdict[mycatpkg]=[x]
1346 #getting categories from an external file now
1347 categories = [grabfile(os.path.join(x, "categories")) for x in locations]
1348 self.categories = stack_lists(categories, incremental=1)
1351 archlist = [grabfile(os.path.join(x, "arch.list")) for x in locations]
1352 archlist = stack_lists(archlist, incremental=1)
1353 self.configdict["conf"]["PORTAGE_ARCHLIST"] = " ".join(archlist)
1357 for x in pmask_locations:
1358 pkgmasklines.append(grabfile_package(
1359 os.path.join(x, "package.mask"), recursive=1))
1360 pkgmasklines = stack_lists(pkgmasklines, incremental=1)
1363 for x in pkgmasklines:
1364 mycatpkg=dep_getkey(x)
1365 if self.pmaskdict.has_key(mycatpkg):
1366 self.pmaskdict[mycatpkg].append(x)
1368 self.pmaskdict[mycatpkg]=[x]
1370 pkgprovidedlines = [grabfile(os.path.join(x, "package.provided")) for x in self.profiles]
1371 pkgprovidedlines = stack_lists(pkgprovidedlines, incremental=1)
1372 has_invalid_data = False
1373 for x in range(len(pkgprovidedlines)-1, -1, -1):
1374 myline = pkgprovidedlines[x]
1375 if not isvalidatom("=" + myline):
1376 writemsg("Invalid package name in package.provided:" + \
1377 " %s\n" % myline, noiselevel=-1)
1378 has_invalid_data = True
1379 del pkgprovidedlines[x]
1381 cpvr = catpkgsplit(pkgprovidedlines[x])
1382 if not cpvr or cpvr[0] == "null":
1383 writemsg("Invalid package name in package.provided: "+pkgprovidedlines[x]+"\n",
1385 has_invalid_data = True
1386 del pkgprovidedlines[x]
1388 if cpvr[0] == "virtual":
1389 writemsg("Virtual package in package.provided: %s\n" % \
1390 myline, noiselevel=-1)
1391 has_invalid_data = True
1392 del pkgprovidedlines[x]
1394 if has_invalid_data:
1395 writemsg("See portage(5) for correct package.provided usage.\n",
1397 self.pprovideddict = {}
1398 for x in pkgprovidedlines:
1402 mycatpkg=dep_getkey(x)
1403 if self.pprovideddict.has_key(mycatpkg):
1404 self.pprovideddict[mycatpkg].append(x)
1406 self.pprovideddict[mycatpkg]=[x]
1408 # reasonable defaults; this is important as without USE_ORDER,
1409 # USE will always be "" (nothing set)!
1410 if "USE_ORDER" not in self:
1411 self.backupenv["USE_ORDER"] = "env:pkg:conf:defaults:pkginternal"
1413 self["PORTAGE_GID"] = str(portage_gid)
1414 self.backup_changes("PORTAGE_GID")
1416 if self.get("PORTAGE_DEPCACHEDIR", None):
1417 self.depcachedir = self["PORTAGE_DEPCACHEDIR"]
1418 self["PORTAGE_DEPCACHEDIR"] = self.depcachedir
1419 self.backup_changes("PORTAGE_DEPCACHEDIR")
1421 overlays = self.get("PORTDIR_OVERLAY","").split()
1425 ov = normalize_path(ov)
1426 if os.path.isdir(ov):
1429 writemsg("!!! Invalid PORTDIR_OVERLAY" + \
1430 " (not a dir): '%s'\n" % ov, noiselevel=-1)
1431 self["PORTDIR_OVERLAY"] = " ".join(new_ov)
1432 self.backup_changes("PORTDIR_OVERLAY")
1434 if "CBUILD" not in self and "CHOST" in self:
1435 self["CBUILD"] = self["CHOST"]
1436 self.backup_changes("CBUILD")
1438 self["PORTAGE_BIN_PATH"] = PORTAGE_BIN_PATH
1439 self.backup_changes("PORTAGE_BIN_PATH")
1440 self["PORTAGE_PYM_PATH"] = PORTAGE_PYM_PATH
1441 self.backup_changes("PORTAGE_PYM_PATH")
1443 for var in ("PORTAGE_INST_UID", "PORTAGE_INST_GID"):
1445 self[var] = str(int(self.get(var, "0")))
1447 writemsg(("!!! %s='%s' is not a valid integer. " + \
1448 "Falling back to '0'.\n") % (var, self[var]),
1451 self.backup_changes(var)
1454 self.features = portage_util.unique_array(self["FEATURES"].split())
1456 if "gpg" in self.features:
1457 if not os.path.exists(self["PORTAGE_GPG_DIR"]) or \
1458 not os.path.isdir(self["PORTAGE_GPG_DIR"]):
1459 writemsg(colorize("BAD", "PORTAGE_GPG_DIR is invalid." + \
1460 " Removing gpg from FEATURES.\n"), noiselevel=-1)
1461 self.features.remove("gpg")
1463 if not portage_exec.sandbox_capable and \
1464 ("sandbox" in self.features or "usersandbox" in self.features):
1465 if self.profile_path is not None and \
1466 os.path.realpath(self.profile_path) == \
1467 os.path.realpath(PROFILE_PATH):
1468 """ Don't show this warning when running repoman and the
1469 sandbox feature came from a profile that doesn't belong to
1471 writemsg(colorize("BAD", "!!! Problem with sandbox" + \
1472 " binary. Disabling...\n\n"), noiselevel=-1)
1473 if "sandbox" in self.features:
1474 self.features.remove("sandbox")
1475 if "usersandbox" in self.features:
1476 self.features.remove("usersandbox")
1478 self.features.sort()
1479 self["FEATURES"] = " ".join(self.features)
1480 self.backup_changes("FEATURES")
	def _init_dirs(self):
		"""
		Create a few directories that are critical to portage operation
		# Nothing can be created if ROOT itself is not writable
		# (read-only filesystem or unprivileged user).
		if not os.access(self["ROOT"], os.W_OK):
		# ROOT-relative path -> (gid, mode, modemask); a gid of -1 leaves
		# group ownership untouched.  Modes are Python 2 octal literals.
		"tmp" :(-1, 01777, 0),
		"var/tmp" :(-1, 01777, 0),
		"var/lib/portage" :(portage_gid, 02750, 02),
		"var/cache/edb" :(portage_gid, 0755, 02)
		for mypath, (gid, mode, modemask) in dir_mode_map.iteritems():
		mydir = os.path.join(self["ROOT"], mypath)
		portage_util.ensure_dirs(mydir, gid=gid, mode=mode, mask=modemask)
		# Failures are reported via writemsg rather than raised; directory
		# setup is best-effort here.
		except portage_exception.PortageException, e:
		writemsg("!!! Directory initialization failed: '%s'\n" % mydir,
		writemsg("!!! %s\n" % str(e),
		"""Validate miscellaneous settings and display warnings if necessary.
		(This code was previously in the global scope of portage.py)"""
		# Warn about ACCEPT_KEYWORDS entries that name no known arch.
		groups = self["ACCEPT_KEYWORDS"].split()
		archlist = self.archlist()
		writemsg("--- 'profiles/arch.list' is empty or not available. Empty portage tree?\n")
		for group in groups:
		# Leading '-' entries are negations and are always allowed.
		if group not in archlist and group[0] != '-':
		writemsg("!!! INVALID ACCEPT_KEYWORDS: %s\n" % str(group),
		# A non-symlink profile without a "parent" file (while a real tree
		# exists) will break most merges; warn loudly but do not fail.
		abs_profile_path = os.path.join(self["PORTAGE_CONFIGROOT"],
			PROFILE_PATH.lstrip(os.path.sep))
		if not os.path.islink(abs_profile_path) and \
			not os.path.exists(os.path.join(abs_profile_path, "parent")) and \
			os.path.exists(os.path.join(self["PORTDIR"], "profiles")):
		writemsg("\a\n\n!!! %s is not a symlink and will probably prevent most merges.\n" % abs_profile_path,
		writemsg("!!! It should point into a profile within %s/profiles/\n" % self["PORTDIR"])
		writemsg("!!! (You can safely ignore this message when syncing. It's harmless.)\n\n\n")
		# Deprecation notice for the old /etc/portage/virtuals location.
		abs_user_virtuals = os.path.join(self["PORTAGE_CONFIGROOT"],
			USER_VIRTUALS_FILE.lstrip(os.path.sep))
		if os.path.exists(abs_user_virtuals):
		writemsg("\n!!! /etc/portage/virtuals is deprecated in favor of\n")
		writemsg("!!! /etc/portage/profile/virtuals. Please move it to\n")
		writemsg("!!! this new location.\n\n")
1542 def loadVirtuals(self,root):
1543 """Not currently used by portage."""
1544 writemsg("DEPRECATED: portage.config.loadVirtuals\n")
1545 self.getvirtuals(root)
	def load_best_module(self,property_string):
		# Resolve the highest-priority module name configured for
		# property_string, then import it.
		best_mod = best_from_dict(property_string,self.modules,self.module_priority)
		mod = load_mod(best_mod)
		# Import failure is reported with a traceback dump; the error
		# handling structure around load_mod is elided in this view —
		# confirm against upstream.
		dump_traceback(red("Error: Failed to import module '%s'") % best_mod, noiselevel=0)
	def modifying(self):
		# Mutation guard: raises when the configuration must not be
		# changed.  NOTE(review): the guard condition itself is elided
		# here — presumably a locked-flag check; confirm against upstream.
		raise Exception, "Configuration is locked."
	def backup_changes(self,key=None):
		# Persist the current value of `key` from the "env" layer into
		# self.backupenv so it survives reset(); deepcopy guards against
		# later mutation of the stored value.
		if key and self.configdict["env"].has_key(key):
		self.backupenv[key] = copy.deepcopy(self.configdict["env"][key])
		# Raised when key is missing/falsy (the else branch header is
		# elided in this view).
		raise KeyError, "No such key defined in environment: %s" % key
	def reset(self,keeping_pkg=0,use_cache=1):
		Restore environment from self.backupenv, call self.regenerate()
		@param keeping_pkg: Should we keep the set_cpv() data or delete it.
		@type keeping_pkg: Boolean
		@param use_cache: Should self.regenerate use the cache or not
		@type use_cache: Boolean
		# Rebuild the "env" layer from the saved backup.
		self.configdict["env"].clear()
		self.configdict["env"].update(self.backupenv)
		self.modifiedkeys = []
		# Unless keeping_pkg was requested, drop per-package state and
		# restore the profile defaults/masks computed at construction.
		self.configdict["pkg"].clear()
		self.configdict["pkginternal"].clear()
		self.configdict["defaults"]["USE"] = \
			" ".join(self.make_defaults_use)
		self.usemask = set(stack_lists(
			self.usemask_list, incremental=True))
		self.useforce = set(stack_lists(
			self.useforce_list, incremental=True))
		self.regenerate(use_cache=use_cache)
	def load_infodir(self,infodir):
		# Load per-package build metadata (vardb infodir files) into the
		# "pkg" config layer.  Existing pkg entries are cleared first.
		if self.configdict.has_key("pkg"):
		for x in self.configdict["pkg"].keys():
		del self.configdict["pkg"][x]
		writemsg("No pkg setup for settings instance?\n",
		if os.path.exists(infodir):
		if os.path.exists(infodir+"/environment"):
		self.configdict["pkg"]["PORT_ENV_FILE"] = infodir+"/environment"
		# Only ALL-CAPS filenames are treated as variables.
		myre = re.compile('^[A-Z]+$')
		for filename in listdir(infodir,filesonly=1,EmptyOnError=1):
		if myre.match(filename):
		file_path = os.path.join(infodir, filename)
		mydata = open(file_path).read().strip()
		# Oversized values are skipped except for USE, which may
		# legitimately be long.
		if len(mydata) < 2048 or filename == "USE":
		if null_byte in mydata:
		writemsg("!!! Null byte found in metadata " + \
			"file: '%s'\n" % file_path, noiselevel=-1)
		if filename == "USE":
		# "-*" prefix makes the recorded binpkg USE authoritative.
		binpkg_flags = "-* " + mydata
		self.configdict["pkg"][filename] = binpkg_flags
		self.configdict["env"][filename] = mydata
		self.configdict["pkg"][filename] = mydata
		self.configdict["env"][filename] = mydata
		# CATEGORY is important because it's used in doebuild
		# to infer the cpv. If it's corrupted, it leads to
		# strange errors later on, so we'll validate it and
		# print a warning if necessary.
		if filename == "CATEGORY":
		matchobj = re.match("[-a-zA-Z0-9_.+]+", mydata)
		if not matchobj or matchobj.start() != 0 or \
			matchobj.end() != len(mydata):
		writemsg("!!! CATEGORY file is corrupt: %s\n" % \
			os.path.join(infodir, filename), noiselevel=-1)
		# Unreadable files are reported and skipped, not fatal.
		except (OSError, IOError):
		writemsg("!!! Unable to read file: %s\n" % infodir+"/"+filename,
	def setcpv(self, mycpv, use_cache=1, mydb=None):
		"""
		Load a particular CPV into the config, this lets us see the
		Default USE flags for a particular ebuild as well as the USE
		flags from package.use.
		@param mycpv: A cpv to load
		@param use_cache: Enables caching
		@type use_cache: Boolean
		@param mydb: a dbapi instance that supports aux_get with the IUSE key.
		@type mydb: dbapi or derivative.
		# Short-circuit when the same cpv is already loaded.
		if self.mycpv == mycpv:
		cp = dep_getkey(mycpv)
		# IUSE defaults: "+flag" entries in IUSE become package-internal
		# USE defaults (the leading '+' is stripped).
		pkginternaluse = " ".join([x[1:] \
			for x in mydb.aux_get(mycpv, ["IUSE"])[0].split() \
			if x.startswith("+")])
		if pkginternaluse != self.configdict["pkginternal"].get("USE", ""):
		self.configdict["pkginternal"]["USE"] = pkginternaluse
		# Per-profile defaults, with package.use entries for the best
		# matching atom appended after each profile's make.defaults USE.
		for i in xrange(len(self.profiles)):
		defaults.append(self.make_defaults_use[i])
		cpdict = self.pkgprofileuse[i].get(cp, None)
		best_match = best_match_to_list(self.mycpv, cpdict.keys())
		defaults.append(cpdict[best_match])
		defaults = " ".join(defaults)
		if defaults != self.configdict["defaults"].get("USE",""):
		self.configdict["defaults"]["USE"] = defaults
		# use.force + package.use.force, stacked per profile.
		for i in xrange(len(self.profiles)):
		useforce.append(self.useforce_list[i])
		cpdict = self.puseforce_list[i].get(cp, None)
		best_match = best_match_to_list(self.mycpv, cpdict.keys())
		useforce.append(cpdict[best_match])
		useforce = set(stack_lists(useforce, incremental=True))
		if useforce != self.useforce:
		self.useforce = useforce
		# use.mask + package.use.mask, stacked per profile.
		for i in xrange(len(self.profiles)):
		usemask.append(self.usemask_list[i])
		cpdict = self.pusemask_list[i].get(cp, None)
		best_match = best_match_to_list(self.mycpv, cpdict.keys())
		usemask.append(cpdict[best_match])
		usemask = set(stack_lists(usemask, incremental=True))
		if usemask != self.usemask:
		self.usemask = usemask
		# User package.use (PUSE) for the best matching atom.
		if self.pusedict.has_key(cp):
		self.pusekey = best_match_to_list(self.mycpv, self.pusedict[cp].keys())
		self.puse = " ".join(self.pusedict[cp][self.pusekey])
		if oldpuse != self.puse:
		self.configdict["pkg"]["PKGUSE"] = self.puse[:] # For saving to PUSE file
		self.configdict["pkg"]["USE"] = self.puse[:] # this gets appended to USE
		# CATEGORY is essential for doebuild calls
		self.configdict["pkg"]["CATEGORY"] = mycpv.split("/")[0]
		# Recompute USE etc. while keeping the freshly loaded pkg layer.
		self.reset(keeping_pkg=1,use_cache=use_cache)
	def setinst(self,mycpv,mydbapi):
		# Ensure virtuals have been computed at least once.
		if len(self.virtuals) == 0:
		# Grab the virtuals this package provides and add them into the tree virtuals.
		provides = mydbapi.aux_get(mycpv, ["PROVIDE"])[0]
		if isinstance(mydbapi, portdbapi):
		myuse = mydbapi.aux_get(mycpv, ["USE"])[0]
		# Reduce PROVIDE with the package's USE flags to the concrete
		# list of provided virtual atoms.
		virts = flatten(portage_dep.use_reduce(portage_dep.paren_reduce(provides), uselist=myuse.split()))
		cp = dep_getkey(mycpv)
		virt = dep_getkey(virt)
		if not self.treeVirtuals.has_key(virt):
		self.treeVirtuals[virt] = []
		# XXX: Is this bad? -- It's a permanent modification
		if cp not in self.treeVirtuals[virt]:
		self.treeVirtuals[virt].append(cp)
		# Recompile the merged virtuals mapping with the new provider.
		self.virtuals = self.__getvirtuals_compile()
	def regenerate(self,useonly=0,use_cache=1):
		"""
		This involves regenerating valid USE flags, re-expanding USE_EXPAND flags
		re-stacking USE flags (-flag and -*), as well as any other INCREMENTAL
		variables. This also updates the env.d configdict; useful in case an ebuild
		changes the environment.
		If FEATURES has already stacked, it is not stacked twice.
		@param useonly: Only regenerate USE flags (not any other incrementals)
		@type useonly: Boolean
		@param use_cache: Enable Caching (only for autouse)
		@type use_cache: Boolean
		# Re-entrancy guard: autouse() can call back into regenerate().
		if self.already_in_regenerate:
		# XXX: THIS REALLY NEEDS TO GET FIXED. autouse() loops.
		writemsg("!!! Looping in regenerate.\n",1)
		self.already_in_regenerate = 1
		# We grab the latest profile.env here since it changes frequently.
		self.configdict["env.d"].clear()
		os.path.join(self["ROOT"], "etc", "profile.env"), expand=False)
		# env_d will be None if profile.env doesn't exist.
		self.configdict["env.d"].update(env_d)
		# useonly limits stacking to USE; otherwise all INCREMENTALS.
		myincrementals=["USE"]
		myincrementals = self.incrementals
		myincrementals = set(myincrementals)
		# If self.features exists, it has already been stacked and may have
		# been mutated, so don't stack it again or else any mutations will be
		if "FEATURES" in myincrementals and hasattr(self, "features"):
		myincrementals.remove("FEATURES")
		if "USE" in myincrementals:
		# Process USE last because it depends on USE_EXPAND which is also
		myincrementals.remove("USE")
		# Stack each remaining incremental across the config layers
		# (all but the final/original-environment layer).
		for mykey in myincrementals:
		mydbs=self.configlist[:-1]
		if mykey not in curdb:
		#variables are already expanded
		mysplit = curdb[mykey].split()
		# "-*" is a special "minus" var that means "unset all settings".
		# so USE="-* gnome" will have *just* gnome enabled.
		# Not legal. People assume too much. Complain.
		writemsg(red("USE flags should not start with a '+': %s\n" % x),
		# "-flag" removes a previously accumulated flag.
		if (x[1:] in myflags):
		del myflags[myflags.index(x[1:])]
		# We got here, so add it now.
		if x not in myflags:
		#store setting in last element of configlist, the original environment:
		if myflags or mykey in self:
		self.configlist[-1][mykey] = " ".join(myflags)
		# Do the USE calculation last because it depends on USE_EXPAND.
		if "auto" in self["USE_ORDER"].split(":"):
		self.configdict["auto"]["USE"] = autouse(
			vartree(root=self["ROOT"], categories=self.categories,
			use_cache=use_cache, mysettings=self)
		self.configdict["auto"]["USE"] = ""
		# Expand USE_EXPAND vars (e.g. LINGUAS=en -> linguas_en) into
		# protected flags that survive "-*".
		use_expand_protected = []
		use_expand = self.get("USE_EXPAND", "").split()
		for var in use_expand:
		var_lower = var.lower()
		for x in self.get(var, "").split():
		# Any incremental USE_EXPAND variables have already been
		# processed, so leading +/- operators are invalid here.
		writemsg(colorize("BAD", "Invalid '+' operator in " + \
			"non-incremental variable '%s': '%s'\n" % (var, x)),
		writemsg(colorize("BAD", "Invalid '-' operator in " + \
			"non-incremental variable '%s': '%s'\n" % (var, x)),
		mystr = var_lower + "_" + x
		if mystr not in use_expand_protected:
		use_expand_protected.append(mystr)
		# Build the USE stacking order from USE_ORDER (reversed so the
		# highest-priority source is applied last).
		for x in self["USE_ORDER"].split(":"):
		if x in self.configdict:
		self.uvlist.append(self.configdict[x])
		self.uvlist.reverse()
		myflags = use_expand_protected[:]
		for curdb in self.uvlist:
		if "USE" not in curdb:
		mysplit = curdb["USE"].split()
		# "-*" resets to only the protected USE_EXPAND flags.
		myflags = use_expand_protected[:]
		writemsg(colorize("BAD", "USE flags should not start " + \
			"with a '+': %s\n" % x), noiselevel=-1)
		myflags.remove(x[1:])
		if x not in myflags:
		myflags = set(myflags)
		# use.force flags are always on; masked flags are filtered out.
		myflags.update(self.useforce)
		# FEATURES=test should imply USE=test
		if "test" in self.configlist[-1].get("FEATURES","").split():
		usesplit = [ x for x in myflags if \
			x not in self.usemask]
		# Use the calculated USE flags to regenerate the USE_EXPAND flags so
		# that they are consistent.
		for var in use_expand:
		prefix = var.lower() + "_"
		prefix_len = len(prefix)
		expand_flags = set([ x[prefix_len:] for x in usesplit \
			if x.startswith(prefix) ])
		var_split = self.get(var, "").split()
		# Preserve the order of var_split because it can matter for things
		var_split = [ x for x in var_split if x in expand_flags ]
		var_split.extend(expand_flags.difference(var_split))
		if var_split or var in self:
		# Don't export empty USE_EXPAND vars unless the user config
		# exports them as empty. This is required for vars such as
		# LINGUAS, where unset and empty have different meanings.
		self[var] = " ".join(var_split)
		# Pre-Pend ARCH variable to USE settings so '-*' in env doesn't kill arch.
		if self.configdict["defaults"].has_key("ARCH"):
		if self.configdict["defaults"]["ARCH"]:
		if self.configdict["defaults"]["ARCH"] not in usesplit:
		usesplit.insert(0,self.configdict["defaults"]["ARCH"])
		self.configlist[-1]["USE"]= " ".join(usesplit)
		self.already_in_regenerate = 0
	def get_virts_p(self, myroot):
		# Build (and presumably cache in self.virts_p) a mapping keyed by
		# the package-name half of each virtual ("virtual/foo" -> "foo").
		virts = self.getvirtuals(myroot)
		myvkeys = virts.keys()
		vkeysplit = x.split("/")
		# First provider list wins for a given short name.
		if not self.virts_p.has_key(vkeysplit[1]):
		self.virts_p[vkeysplit[1]] = virts[x]
	def getvirtuals(self, myroot=None):
		"""myroot is now ignored because, due to caching, it has always been
		broken for all but the first call."""
		myroot = self["ROOT"]
		# Cached result returned early (the guard line is elided here).
		return self.virtuals
		# Gather and validate the "virtuals" file from every profile.
		for x in self.profiles:
		virtuals_file = os.path.join(x, "virtuals")
		virtuals_dict = grabdict(virtuals_file)
		for k in virtuals_dict.keys():
		# Keys must be plain cat/pkg virtual atoms (no version parts).
		if not isvalidatom(k) or dep_getkey(k) != k:
		writemsg("--- Invalid virtuals atom in %s: %s\n" % \
			(virtuals_file, k), noiselevel=-1)
		del virtuals_dict[k]
		myvalues = virtuals_dict[k]
		if x.startswith("-"):
		# allow incrementals
		if not isvalidatom(myatom):
		writemsg("--- Invalid atom in %s: %s\n" % \
			(virtuals_file, x), noiselevel=-1)
		del virtuals_dict[k]
		virtuals_list.append(virtuals_dict)
		self.dirVirtuals = stack_dictlist(virtuals_list, incremental=True)
		for virt in self.dirVirtuals:
		# Preference for virtuals decreases from left to right.
		self.dirVirtuals[virt].reverse()
		# Repoman does not use user or tree virtuals.
		if self.local_config and not self.treeVirtuals:
		temp_vartree = vartree(myroot, None,
			categories=self.categories, settings=self)
		# Reduce the provides into a list by CP.
		self.treeVirtuals = map_dictlist_vals(getCPFromCPV,temp_vartree.get_all_provides())
		self.virtuals = self.__getvirtuals_compile()
		return self.virtuals
	def __getvirtuals_compile(self):
		"""Stack installed and profile virtuals. Preference for virtuals
		decreases from left to right.
		Order of preference:
		1. installed and in profile
		# Virtuals by profile+tree preferences.
		# ptVirtuals collects providers that are both installed and listed
		# in a profile; these take priority in the final stack.
		for virt, installed_list in self.treeVirtuals.iteritems():
		profile_list = self.dirVirtuals.get(virt, None)
		if not profile_list:
		for cp in installed_list:
		if cp in profile_list:
		ptVirtuals.setdefault(virt, [])
		ptVirtuals[virt].append(cp)
		# Final order: profile+tree, tree-only, then profile-only
		# (continuation of this call is elided in this view).
		virtuals = stack_dictlist([ptVirtuals, self.treeVirtuals,
	def __delitem__(self,mykey):
		# Remove mykey from every layer in which it appears (deletion body
		# is elided in this view).
		for x in self.lookuplist:
	def __getitem__(self,mykey):
		# Return the first match for mykey found while scanning the
		# layered lookuplist (highest-priority layer first).
		for x in self.lookuplist:
		writemsg("!!! lookuplist is null.\n")
		elif x.has_key(mykey):
	def has_key(self,mykey):
		# True when any config layer defines mykey (return statements are
		# elided in this view).
		for x in self.lookuplist:
			if x.has_key(mykey):
2057 def __contains__(self, mykey):
2058 """Called to implement membership test operators (in and not in)."""
2059 return bool(self.has_key(mykey))
	def setdefault(self, k, x=None):
		# dict.setdefault-style accessor over the layered config —
		# presumably returns the existing value or stores/returns x; body
		# elided in this view, confirm against upstream.
	def get(self, k, x=None):
		# dict.get-style accessor over the layered config — presumably
		# returns self[k] when present, else x; body elided in this view.
		# Union of keys across all layers, de-duplicated (enclosing def
		# line is elided in this view — presumably keys()).
		return unique_array(flatten([x.keys() for x in self.lookuplist]))
	def __setitem__(self,mykey,myvalue):
		"set a value; will be thrown away at reset() time"
		# Only string values are allowed; the config serializes to an
		# environment, so non-strings are programming errors.
		if type(myvalue) != types.StringType:
		raise ValueError("Invalid type being used as a value: '%s': '%s'" % (str(mykey),str(myvalue)))
		# Track modified keys and write into the transient "env" layer.
		self.modifiedkeys += [mykey]
		self.configdict["env"][mykey]=myvalue
		"return our locally-maintained environment"
		# Flatten the layered config into a plain dict, warning about
		# (and presumably skipping) any non-string values.
		for x in self.keys():
		if not isinstance(myvalue, basestring):
		writemsg("!!! Non-string value in config: %s=%s\n" % \
			(x, myvalue), noiselevel=-1)
		# Ensure HOME is set for build processes; fall back to
		# BUILD_PREFIX when absent.
		if not mydict.has_key("HOME") and mydict.has_key("BUILD_PREFIX"):
		writemsg("*** HOME not set. Setting to "+mydict["BUILD_PREFIX"]+"\n")
		mydict["HOME"]=mydict["BUILD_PREFIX"][:]
2101 def thirdpartymirrors(self):
2102 if getattr(self, "_thirdpartymirrors", None) is None:
2103 profileroots = [os.path.join(self["PORTDIR"], "profiles")]
2104 for x in self["PORTDIR_OVERLAY"].split():
2105 profileroots.insert(0, os.path.join(x, "profiles"))
2106 thirdparty_lists = [grabdict(os.path.join(x, "thirdpartymirrors")) for x in profileroots]
2107 self._thirdpartymirrors = stack_dictlist(thirdparty_lists, incremental=True)
2108 return self._thirdpartymirrors
		# Each arch contributes both its stable ("x86") and testing
		# ("~x86") keyword (enclosing def line is elided in this view —
		# presumably archlist()).
		return flatten([[myarch, "~" + myarch] \
			for myarch in self["PORTAGE_ARCHLIST"].split()])
	def selinux_enabled(self):
		# Lazily determine (and cache) whether SELinux support is active:
		# requires "selinux" in USE, an importable selinux module, and the
		# kernel reporting SELinux enabled.
		if getattr(self, "_selinux_enabled", None) is None:
		self._selinux_enabled = 0
		if "selinux" in self["USE"].split():
		if "selinux" in globals():
		if selinux.is_selinux_enabled() == 1:
		self._selinux_enabled = 1
		self._selinux_enabled = 0
		writemsg("!!! SELinux module not found. Please verify that it was installed.\n",
		self._selinux_enabled = 0
		# Drop the imported module again when SELinux is not in use.
		if self._selinux_enabled == 0:
		del sys.modules["selinux"]
		return self._selinux_enabled
2134 # XXX This would be to replace getstatusoutput completely.
2135 # XXX Issue: cannot block execution. Deadlock condition.
def spawn(mystring, mysettings, debug=0, free=0, droppriv=0, sesandbox=0, **keywords):
	"""
	Spawn a subprocess with extra portage-specific options.
	Sandbox: Sandbox means the spawned process will be limited in its ability t
	read and write files (normally this means it is restricted to ${IMAGE}/)
	SElinux Sandbox: Enables sandboxing on SElinux
	Reduced Privileges: Drops privilages such that the process runs as portage:portage
	Notes: os.system cannot be used because it messes with signal handling. Instead we
	use the portage_exec spawn* family of functions.
	This function waits for the process to terminate.
	@param mystring: Command to run
	@type mystring: String
	@param mysettings: Either a Dict of Key,Value pairs or an instance of portage.config
	@type mysettings: Dictionary or config instance
	@param debug: Ignored
	@type debug: Boolean
	@param free: Enable sandboxing for this process
	@param droppriv: Drop to portage:portage when running this command
	@type droppriv: Boolean
	@param sesandbox: Enable SELinux Sandboxing (toggles a context switch)
	@type sesandbox: Boolean
	@param keywords: Extra options encoded as a dict, to be passed to spawn
	@type keywords: Dictionary
	1. The return code of the spawned process.
	# Accept either a plain dict or a config instance for mysettings.
	if type(mysettings) == types.DictType:
	keywords["opt_name"]="[ %s ]" % "portage"
	check_config_instance(mysettings)
	env=mysettings.environ()
	keywords["opt_name"]="[%s]" % mysettings["PF"]
	# The default policy for the sesandbox domain only allows entry (via exec)
	# from shells and from binaries that belong to portage (the number of entry
	# points is minimized). The "tee" binary is not among the allowed entry
	# points, so it is spawned outside of the sesandbox domain and reads from a
	# pipe between two domains.
	logfile = keywords.get("logfile")
	del keywords["logfile"]
	fd_pipes = keywords.get("fd_pipes")
	if fd_pipes is None:
	fd_pipes = {0:0, 1:1, 2:2}
	elif 1 not in fd_pipes or 2 not in fd_pipes:
	raise ValueError(fd_pipes)
	# tee duplicates the child's stdout/stderr into the logfile.
	mypids.extend(portage_exec.spawn(('tee', '-i', '-a', logfile),
		returnpid=True, fd_pipes={0:pr, 1:fd_pipes[1], 2:fd_pipes[2]}))
	keywords["fd_pipes"] = fd_pipes
	features = mysettings.features
	# XXX: Negative RESTRICT word
	# Only drop privileges when userpriv is enabled and RESTRICT does
	# not forbid it.
	droppriv=(droppriv and ("userpriv" in features) and not \
		(("nouserpriv" in mysettings["RESTRICT"].split()) or \
		("userpriv" in mysettings["RESTRICT"].split())))
	if droppriv and not uid and portage_gid and portage_uid:
	keywords.update({"uid":portage_uid,"gid":portage_gid,"groups":userpriv_groups,"umask":002})
	# Sandboxing is skipped ("free") when FEATURES does not request it.
	free=((droppriv and "usersandbox" not in features) or \
		(not droppriv and "sandbox" not in features and "usersandbox" not in features))
	keywords["opt_name"] += " bash"
	spawn_func = portage_exec.spawn_bash
	keywords["opt_name"] += " sandbox"
	spawn_func = portage_exec.spawn_sandbox
	# Switch into the SELinux sandbox execution context for the child.
	con = selinux.getcontext()
	con = con.replace(mysettings["PORTAGE_T"], mysettings["PORTAGE_SANDBOX_T"])
	selinux.setexec(con)
	returnpid = keywords.get("returnpid")
	keywords["returnpid"] = True
	mypids.extend(spawn_func(mystring, env=env, **keywords))
	# Restore the default execution context after spawning.
	selinux.setexec(None)
	# Reap children; on failure, terminate any that are still running.
	retval = os.waitpid(pid, 0)[1]
	portage_exec.spawned_pids.remove(pid)
	if retval != os.EX_OK:
	if os.waitpid(pid, os.WNOHANG) == (0,0):
	os.kill(pid, signal.SIGTERM)
	portage_exec.spawned_pids.remove(pid)
	# Encode a non-signal exit status the way os.system would.
	return (retval & 0xff) << 8
2255 def fetch(myuris, mysettings, listonly=0, fetchonly=0, locks_in_subdir=".locks",use_locks=1, try_mirrors=1):
2256 "fetch files. Will use digest file if available."
2258 features = mysettings.features
2259 # 'nomirror' is bad/negative logic. You Restrict mirroring, not no-mirroring.
2260 if ("mirror" in mysettings["RESTRICT"].split()) or \
2261 ("nomirror" in mysettings["RESTRICT"].split()):
2262 if ("mirror" in features) and ("lmirror" not in features):
2263 # lmirror should allow you to bypass mirror restrictions.
2264 # XXX: This is not a good thing, and is temporary at best.
2265 print ">>> \"mirror\" mode desired and \"mirror\" restriction found; skipping fetch."
2268 thirdpartymirrors = mysettings.thirdpartymirrors()
2270 check_config_instance(mysettings)
2272 custommirrors = grabdict(os.path.join(mysettings["PORTAGE_CONFIGROOT"],
2273 CUSTOM_MIRRORS_FILE.lstrip(os.path.sep)), recursive=1)
2277 if listonly or ("distlocks" not in features):
2281 if "skiprocheck" in features:
2284 if not os.access(mysettings["DISTDIR"],os.W_OK) and fetch_to_ro:
2286 writemsg(red("!!! For fetching to a read-only filesystem, " + \
2287 "locking should be turned off.\n"), noiselevel=-1)
2288 writemsg("!!! This can be done by adding -distlocks to " + \
2289 "FEATURES in /etc/make.conf\n", noiselevel=-1)
2292 # local mirrors are always added
2293 if custommirrors.has_key("local"):
2294 mymirrors += custommirrors["local"]
2296 if ("nomirror" in mysettings["RESTRICT"].split()) or \
2297 ("mirror" in mysettings["RESTRICT"].split()):
2298 # We don't add any mirrors.
2302 mymirrors += [x.rstrip("/") for x in mysettings["GENTOO_MIRRORS"].split() if x]
2304 mydigests = Manifest(
2305 mysettings["O"], mysettings["DISTDIR"]).getTypeDigests("DIST")
2308 for x in range(len(mymirrors)-1,-1,-1):
2309 if mymirrors[x] and mymirrors[x][0]=='/':
2310 fsmirrors += [mymirrors[x]]
2313 restrict_fetch = "fetch" in mysettings["RESTRICT"].split()
2314 custom_local_mirrors = custommirrors.get("local", [])
2316 # With fetch restriction, a normal uri may only be fetched from
2317 # custom local mirrors (if available). A mirror:// uri may also
2318 # be fetched from specific mirrors (effectively overriding fetch
2319 # restriction, but only for specific mirrors).
2320 locations = custom_local_mirrors
2322 locations = mymirrors
2325 primaryuri_indexes={}
2326 for myuri in myuris:
2327 myfile=os.path.basename(myuri)
2328 if not filedict.has_key(myfile):
2330 for y in range(0,len(locations)):
2331 filedict[myfile].append(locations[y]+"/distfiles/"+myfile)
2332 if myuri[:9]=="mirror://":
2333 eidx = myuri.find("/", 9)
2335 mirrorname = myuri[9:eidx]
2337 # Try user-defined mirrors first
2338 if custommirrors.has_key(mirrorname):
2339 for cmirr in custommirrors[mirrorname]:
2340 filedict[myfile].append(cmirr+"/"+myuri[eidx+1:])
2341 # remove the mirrors we tried from the list of official mirrors
2342 if cmirr.strip() in thirdpartymirrors[mirrorname]:
2343 thirdpartymirrors[mirrorname].remove(cmirr)
2344 # now try the official mirrors
2345 if thirdpartymirrors.has_key(mirrorname):
2346 shuffle(thirdpartymirrors[mirrorname])
2348 for locmirr in thirdpartymirrors[mirrorname]:
2349 filedict[myfile].append(locmirr+"/"+myuri[eidx+1:])
2351 if not filedict[myfile]:
2352 writemsg("No known mirror by the name: %s\n" % (mirrorname))
2354 writemsg("Invalid mirror definition in SRC_URI:\n", noiselevel=-1)
2355 writemsg(" %s\n" % (myuri), noiselevel=-1)
2358 # Only fetch from specific mirrors is allowed.
2360 if "primaryuri" in mysettings["RESTRICT"].split():
2361 # Use the source site first.
2362 if primaryuri_indexes.has_key(myfile):
2363 primaryuri_indexes[myfile] += 1
2365 primaryuri_indexes[myfile] = 0
2366 filedict[myfile].insert(primaryuri_indexes[myfile], myuri)
2368 filedict[myfile].append(myuri)
2375 for var_name in ("FETCHCOMMAND", "RESUMECOMMAND"):
2376 if not mysettings.get(var_name, None):
2384 if "distlocks" in features:
2385 distdir_dirs.append(".locks")
2388 for x in distdir_dirs:
2389 mydir = os.path.join(mysettings["DISTDIR"], x)
2390 if portage_util.ensure_dirs(mydir, gid=portage_gid, mode=dirmode, mask=modemask):
2391 writemsg("Adjusting permissions recursively: '%s'\n" % mydir,
2394 raise # bail out on the first error that occurs during recursion
2395 if not apply_recursive_permissions(mydir,
2396 gid=portage_gid, dirmode=dirmode, dirmask=modemask,
2397 filemode=filemode, filemask=modemask, onerror=onerror):
2398 raise portage_exception.OperationNotPermitted(
2399 "Failed to apply recursive permissions for the portage group.")
2400 except portage_exception.PortageException, e:
2401 if not os.path.isdir(mysettings["DISTDIR"]):
2402 writemsg("!!! %s\n" % str(e), noiselevel=-1)
2403 writemsg("!!! Directory Not Found: DISTDIR='%s'\n" % mysettings["DISTDIR"], noiselevel=-1)
2404 writemsg("!!! Fetching will fail!\n", noiselevel=-1)
2407 not fetch_to_ro and \
2408 not os.access(mysettings["DISTDIR"], os.W_OK):
2409 writemsg("!!! No write access to '%s'\n" % mysettings["DISTDIR"],
2413 if can_fetch and use_locks and locks_in_subdir:
2414 distlocks_subdir = os.path.join(mysettings["DISTDIR"], locks_in_subdir)
2415 if not os.access(distlocks_subdir, os.W_OK):
2416 writemsg("!!! No write access to write to %s. Aborting.\n" % distlocks_subdir,
2419 del distlocks_subdir
2420 for myfile in filedict.keys():
2424 1 partially downloaded
2425 2 completely downloaded
2427 myfile_path = os.path.join(mysettings["DISTDIR"], myfile)
2431 writemsg_stdout("\n", noiselevel=-1)
2433 if use_locks and can_fetch:
2435 file_lock = portage_locks.lockfile(mysettings["DISTDIR"]+"/"+locks_in_subdir+"/"+myfile,wantnewlockfile=1)
2437 file_lock = portage_locks.lockfile(mysettings["DISTDIR"]+"/"+myfile,wantnewlockfile=1)
2440 if fsmirrors and not os.path.exists(myfile_path):
2441 for mydir in fsmirrors:
2442 mirror_file = os.path.join(mydir, myfile)
2444 shutil.copyfile(mirror_file, myfile_path)
2445 writemsg(_("Local mirror has file:" + \
2446 " %(file)s\n" % {"file":myfile}))
2448 except (IOError, OSError), e:
2449 if e.errno != errno.ENOENT:
2454 mystat = os.stat(myfile_path)
2456 if e.errno != errno.ENOENT:
2461 apply_secpass_permissions(
2462 myfile_path, gid=portage_gid, mode=0664, mask=02,
2464 except portage_exception.PortageException, e:
2465 if not os.access(myfile_path, os.R_OK):
2466 writemsg("!!! Failed to adjust permissions:" + \
2467 " %s\n" % str(e), noiselevel=-1)
2468 if myfile not in mydigests:
2469 # We don't have a digest, but the file exists. We must
2470 # assume that it is fully downloaded.
2473 if mystat.st_size < mydigests[myfile]["size"] and \
2475 fetched = 1 # Try to resume this download.
2477 verified_ok, reason = portage_checksum.verify_all(
2478 myfile_path, mydigests[myfile])
2480 writemsg("!!! Previously fetched" + \
2481 " file: '%s'\n" % myfile, noiselevel=-1)
2482 writemsg("!!! Reason: %s\n" % reason[0],
2484 writemsg(("!!! Got: %s\n" + \
2485 "!!! Expected: %s\n") % \
2486 (reason[1], reason[2]), noiselevel=-1)
2487 if can_fetch and not restrict_fetch:
2488 writemsg("Refetching...\n\n",
2490 os.unlink(myfile_path)
2492 eout = output.EOutput()
2494 mysettings.get("PORTAGE_QUIET", None) == "1"
2495 for digest_name in mydigests[myfile]:
2497 "%s %s ;-)" % (myfile, digest_name))
2499 continue # fetch any remaining files
2501 for loc in filedict[myfile]:
2503 writemsg_stdout(loc+" ", noiselevel=-1)
2505 # allow different fetchcommands per protocol
2506 protocol = loc[0:loc.find("://")]
2507 if mysettings.has_key("FETCHCOMMAND_"+protocol.upper()):
2508 fetchcommand=mysettings["FETCHCOMMAND_"+protocol.upper()]
2510 fetchcommand=mysettings["FETCHCOMMAND"]
2511 if mysettings.has_key("RESUMECOMMAND_"+protocol.upper()):
2512 resumecommand=mysettings["RESUMECOMMAND_"+protocol.upper()]
2514 resumecommand=mysettings["RESUMECOMMAND"]
2516 fetchcommand=fetchcommand.replace("${DISTDIR}",mysettings["DISTDIR"])
2517 resumecommand=resumecommand.replace("${DISTDIR}",mysettings["DISTDIR"])
2522 writemsg("!!! File %s isn't fetched but unable to get it.\n" % myfile,
2525 writemsg("!!! File %s isn't fully fetched, but unable to complete it\n" % myfile,
2527 for var_name in ("FETCHCOMMAND", "RESUMECOMMAND"):
2528 if not mysettings.get(var_name, None):
2529 writemsg(("!!! %s is unset. It should " + \
2530 "have been defined in /etc/make.globals.\n") \
2531 % var_name, noiselevel=-1)
2537 #we either need to resume or start the download
2538 #you can't use "continue" when you're inside a "try" block
2541 writemsg(">>> Resuming download...\n")
2542 locfetch=resumecommand
2545 locfetch=fetchcommand
2546 writemsg_stdout(">>> Downloading '%s'\n" % \
2547 re.sub(r'//(.+):.+@(.+)/',r'//\1:*password*@\2/', loc))
2548 myfetch=locfetch.replace("${URI}",loc)
2549 myfetch=myfetch.replace("${FILE}",myfile)
2552 if "userfetch" in mysettings.features and \
2553 os.getuid() == 0 and portage_gid and portage_uid:
2554 spawn_keywords.update({
2555 "uid" : portage_uid,
2556 "gid" : portage_gid,
2557 "groups" : userpriv_groups,
2562 if mysettings.selinux_enabled():
2563 con = selinux.getcontext()
2564 con = con.replace(mysettings["PORTAGE_T"], mysettings["PORTAGE_FETCH_T"])
2565 selinux.setexec(con)
2567 myret = portage_exec.spawn_bash(myfetch,
2568 env=mysettings.environ(), **spawn_keywords)
2570 if mysettings.selinux_enabled():
2571 selinux.setexec(None)
2575 apply_secpass_permissions(myfile_path,
2576 gid=portage_gid, mode=0664, mask=02)
2577 except portage_exception.FileNotFound, e:
2579 except portage_exception.PortageException, e:
2580 if not os.access(myfile_path, os.R_OK):
2581 writemsg("!!! Failed to adjust permissions:" + \
2582 " %s\n" % str(e), noiselevel=-1)
2584 if mydigests!=None and mydigests.has_key(myfile):
2586 mystat = os.stat(myfile_path)
2588 if e.errno != errno.ENOENT:
2593 # no exception? file exists. let digestcheck() report
2594 # an appropriately for size or checksum errors
2595 if (mystat[stat.ST_SIZE]<mydigests[myfile]["size"]):
2596 # Fetch failed... Try the next one... Kill 404 files though.
2597 if (mystat[stat.ST_SIZE]<100000) and (len(myfile)>4) and not ((myfile[-5:]==".html") or (myfile[-4:]==".htm")):
2598 html404=re.compile("<title>.*(not found|404).*</title>",re.I|re.M)
2599 if html404.search(open(mysettings["DISTDIR"]+"/"+myfile).read()):
2601 os.unlink(mysettings["DISTDIR"]+"/"+myfile)
2602 writemsg(">>> Deleting invalid distfile. (Improper 404 redirect from server.)\n")
2605 except (IOError, OSError):
2613 # File is the correct size--check the checksums for the fetched
2614 # file NOW, for those users who don't have a stable/continuous
2615 # net connection. This way we have a chance to try to download
2616 # from another mirror...
2617 verified_ok,reason = portage_checksum.verify_all(mysettings["DISTDIR"]+"/"+myfile, mydigests[myfile])
2620 writemsg("!!! Fetched file: "+str(myfile)+" VERIFY FAILED!\n",
2622 writemsg("!!! Reason: "+reason[0]+"\n",
2624 writemsg("!!! Got: %s\n!!! Expected: %s\n" % \
2625 (reason[1], reason[2]), noiselevel=-1)
2626 writemsg("Removing corrupt distfile...\n", noiselevel=-1)
2627 os.unlink(mysettings["DISTDIR"]+"/"+myfile)
2630 eout = output.EOutput()
2631 eout.quiet = mysettings.get("PORTAGE_QUIET", None) == "1"
2632 for x_key in mydigests[myfile].keys():
2633 eout.ebegin("%s %s ;-)" % (myfile, x_key))
2641 elif mydigests!=None:
2642 writemsg("No digest file available and download failed.\n\n",
2645 if use_locks and file_lock:
2646 portage_locks.unlockfile(file_lock)
2649 writemsg_stdout("\n", noiselevel=-1)
2652 print "\n!!!", mysettings["CATEGORY"] + "/" + \
2653 mysettings["PF"], "has fetch restriction turned on."
2654 print "!!! This probably means that this " + \
2655 "ebuild's files must be downloaded"
2656 print "!!! manually. See the comments in" + \
2657 " the ebuild for more information.\n"
2658 spawn(EBUILD_SH_BINARY + " nofetch", mysettings)
2661 elif not filedict[myfile]:
2662 writemsg("Warning: No mirrors available for file" + \
2663 " '%s'\n" % (myfile), noiselevel=-1)
2665 writemsg("!!! Couldn't download '%s'. Aborting.\n" % myfile,
# digestgen(): (re)create the package Manifest, fetching any distfiles whose
# required hashes ("size" + MANIFEST2_REQUIRED_HASH) are missing.
# Parameters: myarchives -- distfiles that must have entries; mysettings --
# portage config (uses "O", "DISTDIR", "ROOT", features); overwrite /
# manifestonly -- ignored (see docstring); myportdb -- portdbapi used for
# fetch lists.
# NOTE(review): original line numbers jump in this chunk, so statements
# (initializers for distfiles_map/missing_files/auto_assumed, try/finally
# framing, return values) are elided; confirm against the full file.
2670 def digestgen(myarchives, mysettings, overwrite=1, manifestonly=0, myportdb=None):
2672 Generates a digest file if missing. Assumes all files are available.
2673 DEPRECATED: this now only is a compability wrapper for
2674 portage_manifest.Manifest()
2675 NOTE: manifestonly and overwrite are useless with manifest2 and
2676 are therefore ignored."""
2677 if myportdb is None:
2678 writemsg("Warning: myportdb not specified to digestgen\n")
# Suspend doebuild()'s Manifest enforcement while we regenerate it
# (decremented again at the bottom of this function).
2681 global _doebuild_manifest_exempt_depend
2683 _doebuild_manifest_exempt_depend += 1
# Map each distfile basename to the cpvs whose SRC_URI references it.
2685 fetchlist_dict = FetchlistDict(mysettings["O"], mysettings, myportdb)
2686 for cpv in fetchlist_dict:
2688 for myfile in fetchlist_dict[cpv]:
2689 distfiles_map.setdefault(myfile, []).append(cpv)
2690 except portage_exception.InvalidDependString, e:
2691 writemsg("!!! %s\n" % str(e), noiselevel=-1)
2692 writemsg("!!! Invalid SRC_URI for '%s'.\n" % cpv, noiselevel=-1)
2695 mf = Manifest(mysettings["O"], mysettings["DISTDIR"],
2696 fetchlist_dict=fetchlist_dict)
2697 # Don't require all hashes since that can trigger excessive
2698 # fetches when sufficient digests already exist. To ease transition
2699 # while Manifest 1 is being removed, only require hashes that will
2700 # exist before and after the transition.
2701 required_hash_types = set()
2702 required_hash_types.add("size")
2703 required_hash_types.add(portage_const.MANIFEST2_REQUIRED_HASH)
2704 dist_hashes = mf.fhashdict.get("DIST", {})
# A file needs (re)hashing when it has no entry or lacks a required
# hash type.
2705 missing_hashes = set()
2706 for myfile in distfiles_map:
2707 myhashes = dist_hashes.get(myfile)
2709 missing_hashes.add(myfile)
2711 if required_hash_types.difference(myhashes):
2712 missing_hashes.add(myfile)
# Files needing hashes that are absent from DISTDIR must be fetched
# first; stat errors other than ENOENT propagate (elided code).
2715 for myfile in missing_hashes:
2717 os.stat(os.path.join(mysettings["DISTDIR"], myfile))
2719 if e.errno != errno.ENOENT:
2722 missing_files.append(myfile)
2724 mytree = os.path.realpath(os.path.dirname(
2725 os.path.dirname(mysettings["O"])))
2726 fetch_settings = config(clone=mysettings)
2727 debug = mysettings.get("PORTAGE_DEBUG") == "1"
2728 for myfile in missing_files:
2730 for cpv in distfiles_map[myfile]:
2731 myebuild = os.path.join(mysettings["O"],
2732 catsplit(cpv)[1] + ".ebuild")
2733 # for RESTRICT=fetch, mirror, etc...
2734 doebuild_environment(myebuild, "fetch",
2735 mysettings["ROOT"], fetch_settings,
# Fetch only the URIs whose basename matches the missing file.
2737 alluris, aalist = myportdb.getfetchlist(
2738 cpv, mytree=mytree, all=True,
2739 mysettings=fetch_settings)
2740 myuris = [uri for uri in alluris \
2741 if os.path.basename(uri) == myfile]
2742 if fetch(myuris, fetch_settings):
2746 writemsg(("!!! File %s doesn't exist, can't update " + \
2747 "Manifest\n") % myfile, noiselevel=-1)
2749 writemsg_stdout(">>> Creating Manifest for %s\n" % mysettings["O"])
2751 mf.create(requiredDistfiles=myarchives,
2752 assumeDistHashesSometimes=True,
2753 assumeDistHashesAlways=(
2754 "assume-digests" in mysettings.features))
2755 except portage_exception.FileNotFound, e:
2756 writemsg(("!!! File %s doesn't exist, can't update " + \
2757 "Manifest\n") % e, noiselevel=-1)
2759 mf.write(sign=False)
# Without FEATURES=assume-digests, report DIST entries whose files are
# not present locally (their hashes were assumed from the old Manifest).
2760 if "assume-digests" not in mysettings.features:
2761 distlist = mf.fhashdict.get("DIST", {}).keys()
2764 for filename in distlist:
2765 if not os.path.exists(
2766 os.path.join(mysettings["DISTDIR"], filename)):
2767 auto_assumed.append(filename)
2769 mytree = os.path.realpath(
2770 os.path.dirname(os.path.dirname(mysettings["O"])))
# cp = "category/package", the last two path components of O.
2771 cp = os.path.sep.join(mysettings["O"].split(os.path.sep)[-2:])
2772 pkgs = myportdb.cp_list(cp, mytree=mytree)
2774 writemsg_stdout(" digest.assumed" + output.colorize("WARN",
2775 str(len(auto_assumed)).rjust(18)) + "\n")
2776 for pkg_key in pkgs:
2777 fetchlist = myportdb.getfetchlist(pkg_key,
2778 mysettings=mysettings, all=True, mytree=mytree)[1]
2779 pv = pkg_key.split("/")[1]
2780 for filename in auto_assumed:
2781 if filename in fetchlist:
2783 " digest-%s::%s\n" % (pv, filename))
# Re-enable Manifest enforcement (paired with the increment above;
# presumably inside a finally: block -- the framing lines are elided).
2786 _doebuild_manifest_exempt_depend -= 1
# digestParseFile(): legacy digest-file reader, now delegating to Manifest.
# The package directory is inferred from the path shape:
#   .../<pkgdir>/files/digest-<pv>  -> strip the last two components;
#   .../<pkgdir>/Manifest           -> strip the last component.
# Returns the Manifest's digest dict (filename -> {hashname: value}).
# NOTE(review): original line numbers jump (2801 -> 2803, 2805 -> 2807), so
# the fallback for other path shapes and the `global settings` statement are
# not visible in this chunk.
2788 def digestParseFile(myfilename, mysettings=None):
2789 """(filename) -- Parses a given file for entries matching:
2790 <checksumkey> <checksum_hex_string> <filename> <filesize>
2791 Ignores lines that don't start with a valid checksum identifier
2792 and returns a dict with the filenames as keys and {checksumkey:checksum}
2794 DEPRECATED: this function is now only a compability wrapper for
2795 portage_manifest.Manifest()."""
2797 mysplit = myfilename.split(os.sep)
2798 if mysplit[-2] == "files" and mysplit[-1].startswith("digest-"):
2799 pkgdir = os.sep + os.sep.join(mysplit[:-2]).strip(os.sep)
2800 elif mysplit[-1] == "Manifest":
2801 pkgdir = os.sep + os.sep.join(mysplit[:-1]).strip(os.sep)
# Fall back to the global settings object when no config was supplied.
2803 if mysettings is None:
2805 mysettings = config(clone=settings)
2807 return Manifest(pkgdir, mysettings["DISTDIR"]).getDigests()
# digestcheck(): verify the package Manifest -- EBUILD, AUX and MISC entries,
# the DIST entries for `myfiles`, and finally flag any ebuild or files/
# entry that exists on disk but is absent from the Manifest.
# Parameters: myfiles -- distfiles to check; mysettings -- portage config
# (uses "O", "DISTDIR", "PORTAGE_QUIET"); strict / justmanifest -- behavior
# not visible here (their uses are in elided lines).
# NOTE(review): original line numbers jump throughout (e.g. 2818 -> 2822,
# 2837 -> 2841), so return statements, the try framing, and several error
# paths are elided from this chunk; confirm against the full file.
2809 def digestcheck(myfiles, mysettings, strict=0, justmanifest=0):
2810 """Verifies checksums. Assumes all files have been downloaded.
2811 DEPRECATED: this is now only a compability wrapper for
2812 portage_manifest.Manifest()."""
2815 pkgdir = mysettings["O"]
2816 manifest_path = os.path.join(pkgdir, "Manifest")
2817 if not os.path.exists(manifest_path):
2818 writemsg("!!! Manifest file not found: '%s'\n" % manifest_path,
2822 mf = Manifest(pkgdir, mysettings["DISTDIR"])
# eout provides the ebegin/eend style progress output; silenced by
# PORTAGE_QUIET=1.
2823 eout = output.EOutput()
2824 eout.quiet = mysettings.get("PORTAGE_QUIET", None) == "1"
2826 eout.ebegin("checking ebuild checksums ;-)")
2827 mf.checkTypeHashes("EBUILD")
2829 eout.ebegin("checking auxfile checksums ;-)")
2830 mf.checkTypeHashes("AUX")
2832 eout.ebegin("checking miscfile checksums ;-)")
2833 mf.checkTypeHashes("MISC", ignoreMissingFiles=True)
# Per-distfile verification (loop header over `myfiles` is elided).
2836 eout.ebegin("checking %s ;-)" % f)
2837 mf.checkFileHashes(mf.findFile(f), f)
2841 writemsg("\n!!! Missing digest for %s\n" % str(e), noiselevel=-1)
2843 except portage_exception.FileNotFound, e:
2845 writemsg("\n!!! A file listed in the Manifest could not be found: %s\n" % str(e),
2848 except portage_exception.DigestException, e:
# DigestException.value carries (filename, hashname, got, expected).
2850 writemsg("\n!!! Digest verification failed:\n", noiselevel=-1)
2851 writemsg("!!! %s\n" % e.value[0], noiselevel=-1)
2852 writemsg("!!! Reason: %s\n" % e.value[1], noiselevel=-1)
2853 writemsg("!!! Got: %s\n" % e.value[2], noiselevel=-1)
2854 writemsg("!!! Expected: %s\n" % e.value[3], noiselevel=-1)
2856 # Make sure that all of the ebuilds are actually listed in the Manifest.
2857 for f in os.listdir(pkgdir):
2858 if f.endswith(".ebuild") and not mf.hasFile("EBUILD", f):
2859 writemsg("!!! A file is not listed in the Manifest: '%s'\n" % \
2860 os.path.join(pkgdir, f), noiselevel=-1)
2862 """ epatch will just grab all the patches out of a directory, so we have to
2863 make sure there aren't any foreign files that it might grab."""
2864 filesdir = os.path.join(pkgdir, "files")
# Walk files/ and flag anything that is neither an AUX Manifest entry
# nor an old digest-* file; hidden entries and CVS dirs are skipped.
2865 for parent, dirs, files in os.walk(filesdir):
2867 if d.startswith(".") or d == "CVS":
2870 if f.startswith("."):
2872 f = os.path.join(parent, f)[len(filesdir) + 1:]
2873 file_type = mf.findFile(f)
2874 if file_type != "AUX" and not f.startswith("digest-"):
2875 writemsg("!!! A file is not listed in the Manifest: '%s'\n" % \
2876 os.path.join(filesdir, f), noiselevel=-1)
2880 # parse actionmap to spawn ebuild with the appropriate args
# spawnebuild(): run the ebuild phase `mydo` (recursively running its "dep"
# prerequisite first unless FEATURES=noauto), then, after a successful
# "install" phase, normalize ownership of the image and run the QA checks.
# Parameters: mydo -- phase name; actionmap -- phase -> {"cmd", "args",
# optional "dep"}; mysettings -- portage config; debug/alwaysdep/logfile --
# forwarded to spawn()/recursion.
# NOTE(review): original line numbers jump (2886-7, 2892, 2914-16, 2918,
# 2920, 2923, 2926+), so early returns and the myuid/mygid assignments are
# elided from this chunk.
2881 def spawnebuild(mydo,actionmap,mysettings,debug,alwaysdep=0,logfile=None):
2882 if alwaysdep or "noauto" not in mysettings.features:
2883 # process dependency first
2884 if "dep" in actionmap[mydo].keys():
2885 retval=spawnebuild(actionmap[mydo]["dep"],actionmap,mysettings,debug,alwaysdep=alwaysdep,logfile=logfile)
2888 kwargs = actionmap[mydo]["args"]
# EBUILD_PHASE is exported for the duration of the phase, then cleared.
2889 mysettings["EBUILD_PHASE"] = mydo
2890 phase_retval = spawn(actionmap[mydo]["cmd"] % mydo, mysettings, debug=debug, logfile=logfile, **kwargs)
2891 mysettings["EBUILD_PHASE"] = ""
2893 if not kwargs["droppriv"] and secpass >= 2:
2894 """ Privileged phases may have left files that need to be made
2895 writable to a less privileged user."""
2896 apply_recursive_permissions(mysettings["T"],
2897 uid=portage_uid, gid=portage_gid, dirmode=070, dirmask=0,
2898 filemode=060, filemask=0)
2900 if phase_retval == os.EX_OK:
2901 if mydo == "install":
2902 # User and group bits that match the "portage" user or group are
2903 # automatically mapped to PORTAGE_INST_UID and PORTAGE_INST_GID if
2904 # necessary. The chown system call may clear S_ISUID and S_ISGID
2905 # bits, so those bits are restored if necessary.
2906 inst_uid = int(mysettings["PORTAGE_INST_UID"])
2907 inst_gid = int(mysettings["PORTAGE_INST_GID"])
# Walk the whole image (D) and fix up every dir and file.
2908 for parent, dirs, files in os.walk(mysettings["D"]):
2909 for fname in chain(dirs, files):
2910 fpath = os.path.join(parent, fname)
2911 mystat = os.lstat(fpath)
2912 if mystat.st_uid != portage_uid and \
2913 mystat.st_gid != portage_gid:
2917 if mystat.st_uid == portage_uid:
2919 if mystat.st_gid == portage_gid:
2921 apply_secpass_permissions(fpath, uid=myuid, gid=mygid,
2922 mode=mystat.st_mode, stat_cached=mystat,
# Post-install QA checks run via misc-functions.sh.
2924 mycommand = " ".join([MISC_SH_BINARY, "install_qa_check", "install_symlink_html_docs"])
2925 qa_retval = spawn(mycommand, mysettings, debug=debug, logfile=logfile, **kwargs)
2927 writemsg("!!! install_qa_check failed; exiting.\n",
def eapi_is_supported(eapi):
	"""Return True only when `eapi` matches the single EAPI value that this
	version of portage implements (portage_const.EAPI); both sides are
	compared as whitespace-stripped strings."""
	supported = str(portage_const.EAPI).strip()
	offered = str(eapi).strip()
	return offered == supported
# doebuild_environment(): populate `mysettings` with every variable the
# ebuild environment needs for phase `mydo` of `myebuild`: package identity
# (P/PN/PV/PR/PVR/PF, CATEGORY), paths (O, FILESDIR, PORTAGE_BUILDDIR,
# WORKDIR, D, T, HOME, ...), PATH, kernel version (KV/KVERS), restriction
# data (PORTAGE_RESTRICT) and color map. Raises IncorrectParameter for an
# unparseable ebuild path and UnsupportedAPIException for an unknown EAPI.
# NOTE(review): original line numbers jump in places (e.g. 2947 -> 2949,
# 3042 -> 3044), so some guard conditions and else-branches are elided from
# this chunk; confirm against the full file.
2936 def doebuild_environment(myebuild, mydo, myroot, mysettings, debug, use_cache, mydbapi):
2938 ebuild_path = os.path.abspath(myebuild)
2939 pkg_dir = os.path.dirname(ebuild_path)
# CATEGORY comes from the pkg config layer when present, otherwise from
# the parent directory name of the package dir.
2941 if mysettings.configdict["pkg"].has_key("CATEGORY"):
2942 cat = mysettings.configdict["pkg"]["CATEGORY"]
2944 cat = os.path.basename(normalize_path(os.path.join(pkg_dir, "..")))
# mypv = ebuild filename minus the ".ebuild" suffix (7 chars).
2945 mypv = os.path.basename(ebuild_path)[:-7]
2946 mycpv = cat+"/"+mypv
2947 mysplit=pkgsplit(mypv,silent=0)
2949 raise portage_exception.IncorrectParameter(
2950 "Invalid ebuild path: '%s'" % myebuild)
2952 if mydo != "depend":
2953 """For performance reasons, setcpv only triggers reset when it
2954 detects a package-specific change in config. For the ebuild
2955 environment, a reset call is forced in order to ensure that the
2956 latest env.d variables are used."""
2957 mysettings.reset(use_cache=use_cache)
2958 mysettings.setcpv(mycpv, use_cache=use_cache, mydb=mydbapi)
2960 mysettings["EBUILD_PHASE"] = mydo
2962 mysettings["PORTAGE_MASTER_PID"] = str(os.getpid())
2964 # We are disabling user-specific bashrc files.
2965 mysettings["BASH_ENV"] = INVALID_ENV_FILE
2967 if debug: # Otherwise it overrides emerge's settings.
2968 # We have no other way to set debug... debug can't be passed in
2969 # due to how it's coded... Don't overwrite this so we can use it.
2970 mysettings["PORTAGE_DEBUG"] = "1"
2972 mysettings["ROOT"] = myroot
2973 mysettings["STARTDIR"] = getcwd()
2975 mysettings["EBUILD"] = ebuild_path
2976 mysettings["O"] = pkg_dir
2977 mysettings.configdict["pkg"]["CATEGORY"] = cat
2978 mysettings["FILESDIR"] = pkg_dir+"/files"
2979 mysettings["PF"] = mypv
2981 mysettings["ECLASSDIR"] = mysettings["PORTDIR"]+"/eclass"
# Sandbox log name derived from the cpv with "/" flattened.
2982 mysettings["SANDBOX_LOG"] = mycpv.replace("/", "_-_")
2984 mysettings["PROFILE_PATHS"] = "\n".join(mysettings.profiles)+"\n"+CUSTOM_PROFILE_PATH
# pkgsplit() yields (PN, PV, PR).
2985 mysettings["P"] = mysplit[0]+"-"+mysplit[1]
2986 mysettings["PN"] = mysplit[0]
2987 mysettings["PV"] = mysplit[1]
2988 mysettings["PR"] = mysplit[2]
2990 if portage_util.noiselimit < 0:
2991 mysettings["PORTAGE_QUIET"] = "1"
# Outside the "depend" phase the package metadata is already cached, so
# pull EAPI/INHERITED/SLOT/RESTRICT from the dbapi and validate EAPI.
2993 if mydo != "depend":
2994 eapi, mysettings["INHERITED"], mysettings["SLOT"], mysettings["RESTRICT"] = \
2995 mydbapi.aux_get(mycpv, ["EAPI", "INHERITED", "SLOT", "RESTRICT"])
2996 if not eapi_is_supported(eapi):
2997 # can't do anything with this.
2998 raise portage_exception.UnsupportedAPIException(mycpv, eapi)
# PORTAGE_RESTRICT = RESTRICT with USE-conditionals resolved.
2999 mysettings["PORTAGE_RESTRICT"] = " ".join(flatten(
3000 portage_dep.use_reduce(portage_dep.paren_reduce(
3001 mysettings["RESTRICT"]), uselist=mysettings["USE"].split())))
# PVR omits the "-r0" suffix for revision zero.
3003 if mysplit[2] == "r0":
3004 mysettings["PVR"]=mysplit[1]
3006 mysettings["PVR"]=mysplit[1]+"-"+mysplit[2]
# Guarantee the portage bin dir is on PATH for helper scripts.
3008 if mysettings.has_key("PATH"):
3009 mysplit=mysettings["PATH"].split(":")
3012 if PORTAGE_BIN_PATH not in mysplit:
3013 mysettings["PATH"]=PORTAGE_BIN_PATH+":"+mysettings["PATH"]
3015 # Sandbox needs cannonical paths.
3016 mysettings["PORTAGE_TMPDIR"] = os.path.realpath(
3017 mysettings["PORTAGE_TMPDIR"])
3018 mysettings["BUILD_PREFIX"] = mysettings["PORTAGE_TMPDIR"]+"/portage"
3019 mysettings["PKG_TMPDIR"] = mysettings["PORTAGE_TMPDIR"]+"/binpkgs"
3021 # Package {pre,post}inst and {pre,post}rm may overlap, so they must have separate
3022 # locations in order to prevent interference.
3023 if mydo in ("unmerge", "prerm", "postrm", "cleanrm"):
3024 mysettings["PORTAGE_BUILDDIR"] = os.path.join(
3025 mysettings["PKG_TMPDIR"],
3026 mysettings["CATEGORY"], mysettings["PF"])
3028 mysettings["PORTAGE_BUILDDIR"] = os.path.join(
3029 mysettings["BUILD_PREFIX"],
3030 mysettings["CATEGORY"], mysettings["PF"])
3032 mysettings["HOME"] = os.path.join(mysettings["PORTAGE_BUILDDIR"], "homedir")
3033 mysettings["WORKDIR"] = os.path.join(mysettings["PORTAGE_BUILDDIR"], "work")
3034 mysettings["D"] = os.path.join(mysettings["PORTAGE_BUILDDIR"], "image") + os.sep
3035 mysettings["T"] = os.path.join(mysettings["PORTAGE_BUILDDIR"], "temp")
3037 mysettings["PORTAGE_BASHRC"] = os.path.join(
3038 mysettings["PORTAGE_CONFIGROOT"], EBUILD_SH_ENV_FILE.lstrip(os.path.sep))
3040 #set up KV variable -- DEP SPEEDUP :: Don't waste time. Keep var persistent.
3041 if (mydo!="depend") or not mysettings.has_key("KV"):
3042 mykv,err1=ExtractKernelVersion(os.path.join(myroot, "usr/src/linux"))
3044 # Regular source tree
3045 mysettings["KV"]=mykv
# KVERS from the running kernel (the os.uname() call is in an elided
# line -- presumably myso = os.uname(); TODO confirm).
3049 if (mydo!="depend") or not mysettings.has_key("KVERS"):
3051 mysettings["KVERS"]=myso[1]
3053 # Allow color.map to control colors associated with einfo, ewarn, etc...
3055 for c in ("GOOD", "WARN", "BAD", "HILITE", "BRACKET"):
3056 mycolors.append("%s=$'%s'" % (c, output.codes[c]))
3057 mysettings["PORTAGE_COLORMAP"] = "\n".join(mycolors)
# prepare_build_dirs(): create and permission the build-time directory tree
# for the current package (PORTAGE_BUILDDIR, HOME, T, WORKDIR, PKG_LOGDIR),
# optionally cleaning stale dirs first, set up FEATURES-driven cache dirs
# (ccache/confcache/distcc), parse PORTAGE_WORKDIR_MODE, and configure
# PORTAGE_LOG_FILE from PORT_LOGDIR (or T/build.log).
# Parameters: myroot -- $ROOT (not used in the visible lines); mysettings --
# portage config; cleanup -- truthy to wipe T before building.
# NOTE(review): the embedded original line numbers jump heavily here, so
# try/except framing, several branches and returns are elided from this
# chunk; comments cover only the visible code.
3059 def prepare_build_dirs(myroot, mysettings, cleanup):
3061 clean_dirs = [mysettings["HOME"]]
3063 # We enable cleanup when we want to make sure old cruft (such as the old
3064 # environment) doesn't interfere with the current phase.
3066 clean_dirs.append(mysettings["T"])
# Remove stale dirs; ENOENT is fine, EPERM is reported (other errors
# handled in elided code).
3068 for clean_dir in clean_dirs:
3070 shutil.rmtree(clean_dir)
3072 if errno.ENOENT == oe.errno:
3074 elif errno.EPERM == oe.errno:
3075 writemsg("%s\n" % oe, noiselevel=-1)
3076 writemsg("Operation Not Permitted: rmtree('%s')\n" % \
3077 clean_dir, noiselevel=-1)
# Local helper: mkdir -p with the same ENOENT/EPERM reporting style.
3082 def makedirs(dir_path):
3084 os.makedirs(dir_path)
3086 if errno.EEXIST == oe.errno:
3088 elif errno.EPERM == oe.errno:
3089 writemsg("%s\n" % oe, noiselevel=-1)
3090 writemsg("Operation Not Permitted: makedirs('%s')\n" % \
3091 dir_path, noiselevel=-1)
3097 mysettings["PKG_LOGDIR"] = os.path.join(mysettings["T"], "logging")
# Ensure the two ancestors of PORTAGE_BUILDDIR exist and belong to
# portage:portage.
3099 mydirs = [os.path.dirname(mysettings["PORTAGE_BUILDDIR"])]
3100 mydirs.append(os.path.dirname(mydirs[-1]))
3103 for mydir in mydirs:
3104 portage_util.ensure_dirs(mydir)
3105 portage_util.apply_secpass_permissions(mydir,
3106 gid=portage_gid, uid=portage_uid, mode=070, mask=0)
3107 for dir_key in ("PORTAGE_BUILDDIR", "HOME", "PKG_LOGDIR", "T"):
3108 """These directories don't necessarily need to be group writable.
3109 However, the setup phase is commonly run as a privileged user prior
3110 to the other phases being run by an unprivileged user. Currently,
3111 we use the portage group to ensure that the unprivleged user still
3112 has write access to these directories in any case."""
3113 portage_util.ensure_dirs(mysettings[dir_key], mode=0775)
3114 portage_util.apply_secpass_permissions(mysettings[dir_key],
3115 uid=portage_uid, gid=portage_gid)
3116 except portage_exception.PermissionDenied, e:
3117 writemsg("Permission Denied: %s\n" % str(e), noiselevel=-1)
3119 except portage_exception.OperationNotPermitted, e:
3120 writemsg("Operation Not Permitted: %s\n" % str(e), noiselevel=-1)
3122 except portage_exception.FileNotFound, e:
3123 writemsg("File Not Found: '%s'\n" % str(e), noiselevel=-1)
# Per-FEATURE cache directory descriptors (the enclosing dict literal
# and its feature-name keys are in elided lines): each entry names the
# config variable, a default location, optional subdirs, and whether
# permissions must always be fixed recursively.
3128 "basedir_var":"CCACHE_DIR",
3129 "default_dir":os.path.join(mysettings["PORTAGE_TMPDIR"], "ccache"),
3130 "always_recurse":False},
3132 "basedir_var":"CONFCACHE_DIR",
3133 "default_dir":os.path.join(mysettings["PORTAGE_TMPDIR"], "confcache"),
3134 "always_recurse":True},
3136 "basedir_var":"DISTCC_DIR",
3137 "default_dir":os.path.join(mysettings["BUILD_PREFIX"], ".distcc"),
3138 "subdirs":("lock", "state"),
3139 "always_recurse":True}
3144 for myfeature, kwargs in features_dirs.iteritems():
3145 if myfeature in mysettings.features:
3146 basedir = mysettings[kwargs["basedir_var"]]
3148 basedir = kwargs["default_dir"]
3149 mysettings[kwargs["basedir_var"]] = basedir
3151 mydirs = [mysettings[kwargs["basedir_var"]]]
3152 if "subdirs" in kwargs:
3153 for subdir in kwargs["subdirs"]:
3154 mydirs.append(os.path.join(basedir, subdir))
3155 for mydir in mydirs:
3156 modified = portage_util.ensure_dirs(mydir,
3157 gid=portage_gid, mode=dirmode, mask=modemask)
3158 # To avoid excessive recursive stat calls, we trigger
3159 # recursion when the top level directory does not initially
3160 # match our permission requirements.
3161 if modified or kwargs["always_recurse"]:
3163 writemsg("Adjusting permissions recursively: '%s'\n" % mydir,
3166 raise # The feature is disabled if a single error
3167 # occurs during permissions adjustment.
3168 if not apply_recursive_permissions(mydir,
3169 gid=portage_gid, dirmode=dirmode, dirmask=modemask,
3170 filemode=filemode, filemask=modemask, onerror=onerror):
3171 raise portage_exception.OperationNotPermitted(
3172 "Failed to apply recursive permissions for the portage group.")
# Any failure disables the feature for this run rather than aborting.
3173 except portage_exception.PortageException, e:
3174 mysettings.features.remove(myfeature)
3175 mysettings["FEATURES"] = " ".join(mysettings.features)
3176 writemsg("!!! %s\n" % str(e), noiselevel=-1)
3177 writemsg("!!! Failed resetting perms on %s='%s'\n" % \
3178 (kwargs["basedir_var"], basedir), noiselevel=-1)
3179 writemsg("!!! Disabled FEATURES='%s'\n" % myfeature,
# Parse PORTAGE_WORKDIR_MODE as an octal permission value; on any
# parse error fall back to the previously set workdir_mode default
# (its initialization is in elided lines).
3185 mode = mysettings["PORTAGE_WORKDIR_MODE"]
3187 parsed_mode = int(mode, 8)
3192 if parsed_mode & 07777 != parsed_mode:
3193 raise ValueError("Invalid file mode: %s" % mode)
3195 workdir_mode = parsed_mode
3197 writemsg("!!! PORTAGE_WORKDIR_MODE is unset, using %s.\n" % oct(workdir_mode))
3198 except ValueError, e:
3200 writemsg("%s\n" % e)
3201 writemsg("!!! Unable to parse PORTAGE_WORKDIR_MODE='%s', using %s.\n" % \
3202 (mysettings["PORTAGE_WORKDIR_MODE"], oct(workdir_mode)))
3203 mysettings["PORTAGE_WORKDIR_MODE"] = oct(workdir_mode)
3205 apply_secpass_permissions(mysettings["WORKDIR"],
3206 uid=portage_uid, gid=portage_gid, mode=workdir_mode)
3207 except portage_exception.FileNotFound:
3208 pass # ebuild.sh will create it
# An empty PORT_LOGDIR is treated as unset.
3210 if mysettings.get("PORT_LOGDIR", "") == "":
3211 while "PORT_LOGDIR" in mysettings:
3212 del mysettings["PORT_LOGDIR"]
3213 if "PORT_LOGDIR" in mysettings:
3215 portage_util.ensure_dirs(mysettings["PORT_LOGDIR"],
3216 uid=portage_uid, gid=portage_gid, mode=02770)
3217 except portage_exception.PortageException, e:
3218 writemsg("!!! %s\n" % str(e), noiselevel=-1)
3219 writemsg("!!! Permission issues with PORT_LOGDIR='%s'\n" % \
3220 mysettings["PORT_LOGDIR"], noiselevel=-1)
3221 writemsg("!!! Disabling logging.\n", noiselevel=-1)
3222 while "PORT_LOGDIR" in mysettings:
3223 del mysettings["PORT_LOGDIR"]
# Derive a stable log file name from the mtime of a per-build ".logid"
# marker file, so restarts of the same build reuse one log.
3224 if "PORT_LOGDIR" in mysettings:
3225 logid_path = os.path.join(mysettings["PORTAGE_BUILDDIR"], ".logid")
3226 if not os.path.exists(logid_path):
3227 f = open(logid_path, "w")
3230 logid_time = time.strftime("%Y%m%d-%H%M%S",
3231 time.gmtime(os.stat(logid_path).st_mtime))
3232 mysettings["PORTAGE_LOG_FILE"] = os.path.join(
3233 mysettings["PORT_LOGDIR"], "%s:%s:%s.log" % \
3234 (mysettings["CATEGORY"], mysettings["PF"], logid_time))
3235 del logid_path, logid_time
3237 # When sesandbox is enabled, only log if PORT_LOGDIR is explicitly
3238 # enabled since it is possible that local SELinux security policies
3239 # do not allow ouput to be piped out of the sesandbox domain.
3240 if not (mysettings.selinux_enabled() and \
3241 "sesandbox" in mysettings.features):
3242 mysettings["PORTAGE_LOG_FILE"] = os.path.join(
3243 mysettings["T"], "build.log")
# Module-level state shared with doebuild() for Manifest verification:
# a counter that, while non-zero, exempts the "depend" phase from the
# strict Manifest check (incremented/decremented around the
# digest/manifest/help phases, which may trigger aux_get cache generation).
3245 _doebuild_manifest_exempt_depend = 0
# Path of the most recently verified Manifest file, used to avoid
# re-verifying the same Manifest repeatedly during a cache regen.
3246 _doebuild_manifest_checked = None
# NOTE(review): this view of doebuild() is line-sampled (original line numbers
# show gaps), so several statements' enclosing try/else blocks are not visible.
# Comments below describe only what the visible lines establish.
3248 def doebuild(myebuild, mydo, myroot, mysettings, debug=0, listonly=0,
3249 	fetchonly=0, cleanup=0, dbkey=None, use_cache=1, fetchall=0, tree=None,
3250 	mydbapi=None, vartree=None, prev_mtimes=None):
3253 	Wrapper function that invokes specific ebuild phases through the spawning
3256 	@param myebuild: name of the ebuild to invoke the phase on (CPV)
3257 	@type myebuild: String
3258 	@param mydo: Phase to run
3260 	@param myroot: $ROOT (usually '/', see man make.conf)
3261 	@type myroot: String
3262 	@param mysettings: Portage Configuration
3263 	@type mysettings: instance of portage.config
3264 	@param debug: Turns on various debug information (eg, debug for spawn)
3265 	@type debug: Boolean
3266 	@param listonly: Used to wrap fetch(); passed such that fetch only lists files required.
3267 	@type listonly: Boolean
3268 	@param fetchonly: Used to wrap fetch(); passed such that files are only fetched (no other actions)
3269 	@type fetchonly: Boolean
3270 	@param cleanup: Passed to prepare_build_dirs (TODO: what does it do?)
3271 	@type cleanup: Boolean
3272 	@param dbkey: A dict (usually keys and values from the depend phase, such as KEYWORDS, USE, etc..)
3273 	@type dbkey: Dict or String
3274 	@param use_cache: Enables the cache
3275 	@type use_cache: Boolean
3276 	@param fetchall: Used to wrap fetch(), fetches all URI's (even ones invalid due to USE conditionals)
3277 	@type fetchall: Boolean
3278 	@param tree: Which tree to use ('vartree','porttree','bintree', etc..), defaults to 'porttree'
3280 	@param mydbapi: a dbapi instance to pass to various functions; this should be a portdbapi instance.
3281 	@type mydbapi: portdbapi instance
3282 	@param vartree: A instance of vartree; used for aux_get calls, defaults to db[myroot]['vartree']
3283 	@type vartree: vartree instance
3284 	@param prev_mtimes: A dict of { filename:mtime } keys used by merge() to do config_protection
3285 	@type prev_mtimes: dictionary
3291 	Most errors have an accompanying error message.
3293 	listonly and fetchonly are only really necessary for operations involving 'fetch'
3294 	prev_mtimes are only necessary for merge operations.
3295 	Other variables may not be strictly required, many have defaults that are set inside of doebuild.
3300 		writemsg("Warning: tree not specified to doebuild\n")
3304 	# chunked out deps for each phase, so that ebuild binary can use it
3305 	# to collapse targets down.
3309 	"unpack": ["setup"],
3310 	"compile":["unpack"],
3311 	"test":   ["compile"],
3314 	"package":["install"],
3318 		mydbapi = db[myroot][tree].dbapi
3320 	if vartree is None and mydo in ("merge", "qmerge", "unmerge"):
3321 		vartree = db[myroot]["vartree"]
3323 	features = mysettings.features
3325 	validcommands = ["help","clean","prerm","postrm","cleanrm","preinst","postinst",
3326 	                "config","setup","depend","fetch","digest",
3327 	                "unpack","compile","test","install","rpm","qmerge","merge",
3328 	                "package","unmerge", "manifest"]
3330 	if mydo not in validcommands:
3331 		validcommands.sort()
3332 		writemsg("!!! doebuild: '%s' is not one of the following valid commands:" % mydo,
3334 		for vcount in range(len(validcommands)):
3336 				writemsg("\n!!! ", noiselevel=-1)
3337 			writemsg(validcommands[vcount].ljust(11), noiselevel=-1)
3338 		writemsg("\n", noiselevel=-1)
3341 	if not os.path.exists(myebuild):
3342 		writemsg("!!! doebuild: %s not found for %s\n" % (myebuild, mydo),
3346 	global _doebuild_manifest_exempt_depend
# Strict-mode Manifest verification: before executing any real phase out of
# the porttree, verify ebuild checksums (skipped for the phases that *create*
# digests, and while the depend-exemption counter is raised).
3348 	if "strict" in features and \
3349 		"digest" not in features and \
3350 		tree == "porttree" and \
3351 		mydo not in ("digest", "manifest", "help") and \
3352 		not _doebuild_manifest_exempt_depend:
3353 		# Always verify the ebuild checksums before executing it.
3354 		pkgdir = os.path.dirname(myebuild)
3355 		manifest_path = os.path.join(pkgdir, "Manifest")
3356 		global _doebuild_manifest_checked
3357 		# Avoid checking the same Manifest several times in a row during a
3358 		# regen with an empty cache.
3359 		if _doebuild_manifest_checked != manifest_path:
3360 			if not os.path.exists(manifest_path):
3361 				writemsg("!!! Manifest file not found: '%s'\n" % manifest_path,
3364 			mf = Manifest(pkgdir, mysettings["DISTDIR"])
3366 				mf.checkTypeHashes("EBUILD")
3367 			except portage_exception.FileNotFound, e:
3368 				writemsg("!!! A file listed in the Manifest " + \
3369 					"could not be found: %s\n" % str(e), noiselevel=-1)
3371 			except portage_exception.DigestException, e:
3372 				writemsg("!!! Digest verification failed:\n", noiselevel=-1)
3373 				writemsg("!!! %s\n" % e.value[0], noiselevel=-1)
3374 				writemsg("!!! Reason: %s\n" % e.value[1], noiselevel=-1)
3375 				writemsg("!!! Got: %s\n" % e.value[2], noiselevel=-1)
3376 				writemsg("!!! Expected: %s\n" % e.value[3], noiselevel=-1)
3378 			# Make sure that all of the ebuilds are actually listed in the
3380 			for f in os.listdir(pkgdir):
3381 				if f.endswith(".ebuild") and not mf.hasFile("EBUILD", f):
3382 					writemsg("!!! A file is not listed in the " + \
3383 						"Manifest: '%s'\n" % os.path.join(pkgdir, f),
3386 			_doebuild_manifest_checked = manifest_path
3389 	builddir_lock = None
3391 		if mydo in ("digest", "manifest", "help"):
3392 			# Temporarily exempt the depend phase from manifest checks, in case
3393 			# aux_get calls trigger cache generation.
3394 			_doebuild_manifest_exempt_depend += 1
3396 		doebuild_environment(myebuild, mydo, myroot, mysettings, debug,
# Fast path for the "depend" phase: spawn ebuild.sh with an extra pipe on
# fd 9 and read the metadata back, or dump it to a dbkey file, without
# going through the normal build-directory machinery.
3399 		# get possible slot information from the deps file
3400 		if mydo == "depend":
3401 			writemsg("!!! DEBUG: dbkey: %s\n" % str(dbkey), 2)
3402 			if isinstance(dbkey, dict):
3403 				mysettings["dbkey"] = ""
3405 				fd_pipes = {0:0, 1:1, 2:2, 9:pw}
3406 				mypids = spawn(EBUILD_SH_BINARY + " depend", mysettings,
3407 					fd_pipes=fd_pipes, returnpid=True)
3408 				os.close(pw) # belongs exclusively to the child process now
3412 					mybytes.append(os.read(pr, maxbytes))
3416 				mybytes = "".join(mybytes)
# Child writes one metadata value per line, in auxdbkeys order.
3418 				for k, v in izip(auxdbkeys, mybytes.splitlines()):
3420 				retval = os.waitpid(mypids[0], 0)[1]
3421 				portage_exec.spawned_pids.remove(mypids[0])
3422 				# If it got a signal, return the signal that was sent, but
3423 				# shift in order to distinguish it from a return value. (just
3424 				# like portage_exec.spawn() would do).
3426 					return (retval & 0xff) << 8
3427 				# Otherwise, return its exit code.
3430 				mysettings["dbkey"] = dbkey
3432 				mysettings["dbkey"] = \
3433 					os.path.join(mysettings.depcachedir, "aux_db_key_temp")
3435 			return spawn(EBUILD_SH_BINARY + " depend", mysettings)
3437 		# Validate dependency metadata here to ensure that ebuilds with invalid
3438 		# data are never installed (even via the ebuild command).
3439 		invalid_dep_exempt_phases = \
3440 			set(["clean", "cleanrm", "help", "prerm", "postrm"])
3441 		mycpv = mysettings["CATEGORY"] + "/" + mysettings["PF"]
3442 		dep_keys = ["DEPEND", "RDEPEND", "PDEPEND"]
3443 		metadata = dict(izip(dep_keys, mydbapi.aux_get(mycpv, dep_keys)))
# Minimal stand-in tree wrapping a fakedbapi, only used for dep_check's
# syntax validation below (no real package lookups needed).
3444 		class FakeTree(object):
3445 			def __init__(self, mydb):
3447 		dep_check_trees = {myroot:{}}
3448 		dep_check_trees[myroot]["porttree"] = \
3449 			FakeTree(fakedbapi(settings=mysettings))
3450 		for dep_type in dep_keys:
3451 			mycheck = dep_check(metadata[dep_type], None, mysettings,
3452 				myuse="all", myroot=myroot, trees=dep_check_trees)
3454 				writemsg("%s: %s\n%s\n" % (
3455 					dep_type, metadata[dep_type], mycheck[1]), noiselevel=-1)
3456 				if mydo not in invalid_dep_exempt_phases:
3458 			del dep_type, mycheck
3459 		del mycpv, dep_keys, metadata, FakeTree, dep_check_trees
3461 		if "PORTAGE_TMPDIR" not in mysettings or \
3462 			not os.path.isdir(mysettings["PORTAGE_TMPDIR"]):
3463 			writemsg("The directory specified in your " + \
3464 				"PORTAGE_TMPDIR variable, '%s',\n" % \
3465 				mysettings.get("PORTAGE_TMPDIR", ""), noiselevel=-1)
3466 			writemsg("does not exist.  Please create this directory or " + \
3467 				"correct your PORTAGE_TMPDIR setting.\n", noiselevel=-1)
3470 		# Build directory creation isn't required for any of these.
3471 		if mydo not in ("digest", "fetch", "help", "manifest"):
3472 			mystatus = prepare_build_dirs(myroot, mysettings, cleanup)
3475 			# PORTAGE_LOG_FILE is set above by the prepare_build_dirs() call.
3476 			logfile = mysettings.get("PORTAGE_LOG_FILE", None)
3477 		if mydo == "unmerge":
3478 			return unmerge(mysettings["CATEGORY"],
3479 				mysettings["PF"], myroot, mysettings, vartree=vartree)
3481 		# if any of these are being called, handle them -- running them out of
3482 		# the sandbox -- and stop now.
3483 		if mydo in ["clean","cleanrm"]:
3484 			return spawn(EBUILD_SH_BINARY + " clean", mysettings,
3485 				debug=debug, free=1, logfile=None)
3486 		elif mydo == "help":
3487 			return spawn(EBUILD_SH_BINARY + " " + mydo, mysettings,
3488 				debug=debug, free=1, logfile=logfile)
3489 		elif mydo == "setup":
3490 			infodir = os.path.join(
3491 				mysettings["PORTAGE_BUILDDIR"], "build-info")
3492 			if os.path.isdir(infodir):
3493 				"""Load USE flags for setup phase of a binary package.
3494 				Ideally, the environment.bz2 would be used instead."""
3495 				mysettings.load_infodir(infodir)
3496 			retval = spawn(EBUILD_SH_BINARY + " " + mydo, mysettings,
3497 				debug=debug, free=1, logfile=logfile)
3499 				""" Privileged phases may have left files that need to be made
3500 				writable to a less privileged user."""
3501 				apply_recursive_permissions(mysettings["T"],
3502 					uid=portage_uid, gid=portage_gid, dirmode=070, dirmask=0,
3503 					filemode=060, filemask=0)
3505 		elif mydo == "preinst":
3506 			mysettings["IMAGE"] = mysettings["D"]
3507 			phase_retval = spawn(" ".join((EBUILD_SH_BINARY, mydo)),
3508 				mysettings, debug=debug, free=1, logfile=logfile)
3509 			if phase_retval == os.EX_OK:
3510 				# Post phase logic and tasks that have been factored out of
# misc-functions.sh helpers run after a successful preinst.
3512 				myargs = [MISC_SH_BINARY, "preinst_bsdflags", "preinst_mask",
3513 					"preinst_sfperms", "preinst_selinux_labels",
3514 					"preinst_suid_scan"]
3515 				mysettings["EBUILD_PHASE"] = ""
3516 				phase_retval = spawn(" ".join(myargs),
3517 					mysettings, debug=debug, free=1, logfile=logfile)
3518 				if phase_retval != os.EX_OK:
3519 					writemsg("!!! post preinst failed; exiting.\n",
3521 			del mysettings["IMAGE"]
3523 		elif mydo == "postinst":
3524 			mysettings.load_infodir(mysettings["O"])
3525 			phase_retval = spawn(" ".join((EBUILD_SH_BINARY, mydo)),
3526 				mysettings, debug=debug, free=1, logfile=logfile)
3527 			if phase_retval == os.EX_OK:
3528 				# Post phase logic and tasks that have been factored out of
3530 				myargs = [MISC_SH_BINARY, "postinst_bsdflags"]
3531 				mysettings["EBUILD_PHASE"] = ""
3532 				phase_retval = spawn(" ".join(myargs),
3533 					mysettings, debug=debug, free=1, logfile=logfile)
3534 				if phase_retval != os.EX_OK:
3535 					writemsg("!!! post postinst failed; exiting.\n",
3538 		elif mydo in ["prerm","postrm","config"]:
3539 			mysettings.load_infodir(mysettings["O"])
3540 			return spawn(EBUILD_SH_BINARY + " " + mydo,
3541 				mysettings, debug=debug, free=1, logfile=logfile)
# From here on: phases that need SRC_URI handling. Compute the fetch lists
# (A = USE-conditional, AA = all) and decide what actually needs fetching.
3543 		mycpv = "/".join((mysettings["CATEGORY"], mysettings["PF"]))
3545 		# Make sure we get the correct tree in case there are overlays.
3546 		mytree = os.path.realpath(
3547 			os.path.dirname(os.path.dirname(mysettings["O"])))
3549 			newuris, alist = mydbapi.getfetchlist(
3550 				mycpv, mytree=mytree, mysettings=mysettings)
3551 			alluris, aalist = mydbapi.getfetchlist(
3552 				mycpv, mytree=mytree, all=True, mysettings=mysettings)
3553 		except portage_exception.InvalidDependString, e:
3554 			writemsg("!!! %s\n" % str(e), noiselevel=-1)
3555 			writemsg("!!! Invalid SRC_URI for '%s'.\n" % mycpv, noiselevel=-1)
3558 		mysettings["A"] = " ".join(alist)
3559 		mysettings["AA"] = " ".join(aalist)
3560 		if ("mirror" in features) or fetchall:
3561 			fetchme = alluris[:]
3563 		elif mydo == "digest":
3564 			fetchme = alluris[:]
3566 			# Skip files that we already have digests for.
3567 			mf = Manifest(mysettings["O"], mysettings["DISTDIR"])
3568 			mydigests = mf.getTypeDigests("DIST")
3569 			required_hash_types = set()
3570 			required_hash_types.add("size")
3571 			required_hash_types.add(portage_const.MANIFEST2_REQUIRED_HASH)
# A file is considered fully digested only if every required hash type
# (size + the Manifest2 required hash) is already recorded for it.
3572 			for filename, hashes in mydigests.iteritems():
3573 				if not required_hash_types.difference(hashes):
3574 					checkme = [i for i in checkme if i != filename]
3575 					fetchme = [i for i in fetchme \
3576 						if os.path.basename(i) != filename]
3577 			del filename, hashes
3579 			fetchme = newuris[:]
3582 		# Only try and fetch the files if we are going to need them ...
3583 		# otherwise, if user has FEATURES=noauto and they run `ebuild clean
3584 		# unpack compile install`, we will try and fetch 4 times :/
3585 		need_distfiles = (mydo in ("fetch", "unpack") or \
3586 			mydo not in ("digest", "manifest") and "noauto" not in features)
3587 		if need_distfiles and not fetch(
3588 			fetchme, mysettings, listonly=listonly, fetchonly=fetchonly):
3591 		if mydo == "fetch" and listonly:
# digest/manifest generation; digestgen returns truthiness that is inverted
# into a shell-style exit status here.
3595 		if mydo == "manifest":
3596 			return not digestgen(aalist, mysettings, overwrite=1,
3597 				manifestonly=1, myportdb=mydbapi)
3598 		elif mydo == "digest":
3599 			return not digestgen(aalist, mysettings, overwrite=1,
3601 		elif "digest" in mysettings.features:
3602 			digestgen(aalist, mysettings, overwrite=0, myportdb=mydbapi)
3603 		except portage_exception.PermissionDenied, e:
3604 			writemsg("!!! %s\n" % str(e), noiselevel=-1)
3605 			if mydo in ("digest", "manifest"):
3608 		# See above comment about fetching only when needed
3609 		if not digestcheck(checkme, mysettings, ("strict" in features),
3610 			(mydo not in ["digest","fetch","unpack"] and \
3611 			mysettings.get("PORTAGE_CALLER", None) == "ebuild" and \
3612 			"noauto" in features)):
# Redirect DISTDIR to a per-build "distdir" of symlinks so the sandboxed
# build only sees its own distfiles; the real path is preserved in
# PORTAGE_ACTUAL_DISTDIR and restored in the finally-block at the bottom.
3618 		# remove PORTAGE_ACTUAL_DISTDIR once cvs/svn is supported via SRC_URI
3619 		if (mydo != "setup" and "noauto" not in features) or mydo == "unpack":
3620 			orig_distdir = mysettings["DISTDIR"]
3621 			mysettings["PORTAGE_ACTUAL_DISTDIR"] = orig_distdir
3622 			edpath = mysettings["DISTDIR"] = \
3623 				os.path.join(mysettings["PORTAGE_BUILDDIR"], "distdir")
3624 			if os.path.exists(edpath):
3626 					if os.path.isdir(edpath) and not os.path.islink(edpath):
3627 						shutil.rmtree(edpath)
3631 					print "!!! Failed reseting ebuild distdir path, " + edpath
3634 			apply_secpass_permissions(edpath, uid=portage_uid, mode=0755)
3637 					os.symlink(os.path.join(orig_distdir, file),
3638 						os.path.join(edpath, file))
3640 					print "!!! Failed symlinking in '%s' to ebuild distdir" % file
3643 		#initial dep checks complete; time to process main commands
# Sandbox policy: userpriv without usersandbox (and not RESTRICTed) means
# the build may run without the sandbox wrapper.
3645 		nosandbox = (("userpriv" in features) and \
3646 			("usersandbox" not in features) and \
3647 			("userpriv" not in mysettings["RESTRICT"]) and \
3648 			("nouserpriv" not in mysettings["RESTRICT"]))
3649 		if nosandbox and ("userpriv" not in features or \
3650 			"userpriv" in mysettings["RESTRICT"] or \
3651 			"nouserpriv" in mysettings["RESTRICT"]):
3652 			nosandbox = ("sandbox" not in features and \
3653 				"usersandbox" not in features)
3655 		sesandbox = mysettings.selinux_enabled() and \
3656 			"sesandbox" in mysettings.features
3657 		ebuild_sh = EBUILD_SH_BINARY + " %s"
3658 		misc_sh = MISC_SH_BINARY + " dyn_%s"
3660 		# args are for the to spawn function
# Per-phase spawn configuration: which script runs the phase and with what
# privilege/sandbox settings.
3662 		"depend": {"cmd":ebuild_sh, "args":{"droppriv":1, "free":0,         "sesandbox":0}},
3663 		"setup":  {"cmd":ebuild_sh, "args":{"droppriv":0, "free":1,         "sesandbox":0}},
3664 		"unpack": {"cmd":ebuild_sh, "args":{"droppriv":1, "free":0,         "sesandbox":sesandbox}},
3665 		"compile":{"cmd":ebuild_sh, "args":{"droppriv":1, "free":nosandbox, "sesandbox":sesandbox}},
3666 		"test":   {"cmd":ebuild_sh, "args":{"droppriv":1, "free":nosandbox, "sesandbox":sesandbox}},
3667 		"install":{"cmd":ebuild_sh, "args":{"droppriv":0, "free":0,         "sesandbox":sesandbox}},
3668 		"rpm":    {"cmd":misc_sh,   "args":{"droppriv":0, "free":0,         "sesandbox":0}},
3669 		"package":{"cmd":misc_sh,   "args":{"droppriv":0, "free":0,         "sesandbox":0}},
3672 		# merge the deps in so we have again a 'full' actionmap
3673 		# be glad when this can die.
3674 		for x in actionmap.keys():
3675 			if len(actionmap_deps.get(x, [])):
3676 				actionmap[x]["dep"] = ' '.join(actionmap_deps[x])
3678 		if mydo in actionmap.keys():
3680 				portage_util.ensure_dirs(
3681 					os.path.join(mysettings["PKGDIR"], mysettings["CATEGORY"]))
3682 				portage_util.ensure_dirs(
3683 					os.path.join(mysettings["PKGDIR"], "All"))
3684 			retval = spawnebuild(mydo,
3685 				actionmap, mysettings, debug, logfile=logfile)
3686 		elif mydo=="qmerge":
3687 			# check to ensure install was run.  this *only* pops up when users
3688 			# forget it and are using ebuild
3689 			if not os.path.exists(
3690 				os.path.join(mysettings["PORTAGE_BUILDDIR"], ".installed")):
3691 				writemsg("!!! mydo=qmerge, but install phase hasn't been ran\n",
3694 			# qmerge is a special phase that implies noclean.
3695 			if "noclean" not in mysettings.features:
3696 				mysettings.features.append("noclean")
3697 			#qmerge is specifically not supposed to do a runtime dep check
3699 				mysettings["CATEGORY"], mysettings["PF"], mysettings["D"],
3700 				os.path.join(mysettings["PORTAGE_BUILDDIR"], "build-info"),
3701 				myroot, mysettings, myebuild=mysettings["EBUILD"], mytree=tree,
3702 				mydbapi=mydbapi, vartree=vartree, prev_mtimes=prev_mtimes)
# "merge" phase: run install via spawnebuild first, then merge to the live fs.
3704 			retval = spawnebuild("install", actionmap, mysettings, debug,
3705 				alwaysdep=1, logfile=logfile)
3706 			if retval == os.EX_OK:
3707 				retval = merge(mysettings["CATEGORY"], mysettings["PF"],
3708 					mysettings["D"], os.path.join(mysettings["PORTAGE_BUILDDIR"],
3709 					"build-info"), myroot, mysettings,
3710 					myebuild=mysettings["EBUILD"], mytree=tree, mydbapi=mydbapi,
3711 					vartree=vartree, prev_mtimes=prev_mtimes)
3713 			print "!!! Unknown mydo:",mydo
# On failure, tell the user when the ebuild came from an overlay rather than
# the main tree (porttrees[0] is the main tree; the rest are overlays).
3716 		if retval != os.EX_OK and tree == "porttree":
3717 			for i in xrange(len(mydbapi.porttrees)-1):
3718 				t = mydbapi.porttrees[i+1]
3719 				if myebuild.startswith(t):
3720 					# Display the non-cannonical path, in case it's different, to
3721 					# prevent confusion.
3722 					overlays = mysettings["PORTDIR_OVERLAY"].split()
3724 						writemsg("!!! This ebuild is from an overlay: '%s'\n" % \
3725 							overlays[i], noiselevel=-1)
# Cleanup (runs regardless of phase outcome, given the unlock/restore below).
3733 			portage_locks.unlockdir(builddir_lock)
3735 		# Make sure that DISTDIR is restored to it's normal value before we return!
3736 		if "PORTAGE_ACTUAL_DISTDIR" in mysettings:
3737 			mysettings["DISTDIR"] = mysettings["PORTAGE_ACTUAL_DISTDIR"]
3738 			del mysettings["PORTAGE_ACTUAL_DISTDIR"]
# Presumably empty logfiles are removed here -- the surrounding lines are not
# visible in this view; TODO confirm.
3742 				if os.stat(logfile).st_size == 0:
3747 		if mydo in ("digest", "manifest", "help"):
3748 			# If necessary, depend phase has been triggered by aux_get calls
3749 			# and the exemption is no longer needed.
3750 			_doebuild_manifest_exempt_depend -= 1
# NOTE(review): line-sampled view; some try/else statements enclosing the
# visible lines are missing. Comments describe only what is visible.
3754 def movefile(src,dest,newmtime=None,sstat=None,mysettings=None):
3755 	"""moves a file from src to dest, preserving all permissions and attributes; mtime will
3756 	be preserved even when moving across filesystems.  Returns true on success and false on
3757 	failure.  Move is atomic."""
3758 	#print "movefile("+str(src)+","+str(dest)+","+str(newmtime)+","+str(sstat)+")"
3760 	if mysettings is None:
3762 		mysettings = settings
3763 	selinux_enabled = mysettings.selinux_enabled()
3768 	except SystemExit, e:
3770 	except Exception, e:
3771 		print "!!! Stating source file failed... movefile()"
# Destination stat: fall back to the parent directory when dest is absent,
# so the device comparison below still works.
3777 		dstat=os.lstat(dest)
3778 	except (OSError, IOError):
3779 		dstat=os.lstat(os.path.dirname(dest))
# BSD chflags handling (only reached when bsd_chflags support is active --
# enclosing condition not visible here; TODO confirm).
3783 		# Check that we can actually unset schg etc flags...
3784 		# Clear the flags on source and destination; we'll reinstate them after merging
3785 		if destexists and dstat.st_flags != 0:
3786 			if bsd_chflags.lchflags(dest, 0) < 0:
3787 				writemsg("!!! Couldn't clear flags on file being merged: \n ",
3789 		# We might have an immutable flag on the parent dir; save and clear.
3790 		pflags=bsd_chflags.lgetflags(os.path.dirname(dest))
3792 			bsd_chflags.lchflags(os.path.dirname(dest), 0)
3794 		if (destexists and bsd_chflags.lhasproblems(dest) > 0) or \
3795 			bsd_chflags.lhasproblems(os.path.dirname(dest)) > 0:
3796 			# This is bad: we can't merge the file with these flags set.
3797 			writemsg("!!! Can't merge file "+dest+" because of flags set\n",
3802 		if stat.S_ISLNK(dstat[stat.ST_MODE]):
3806 			except SystemExit, e:
3808 			except Exception, e:
# Symlink source: re-create the link at dest (stripping the ${D} image
# prefix from the target), preserve ownership, return the new lstat mtime.
3811 	if stat.S_ISLNK(sstat[stat.ST_MODE]):
3813 			target=os.readlink(src)
3814 			if mysettings and mysettings["D"]:
3815 				if target.find(mysettings["D"])==0:
3816 					target=target[len(mysettings["D"]):]
3817 			if destexists and not stat.S_ISDIR(dstat[stat.ST_MODE]):
3820 				sid = selinux.get_lsid(src)
3821 				selinux.secure_symlink(target,dest,sid)
3823 				os.symlink(target,dest)
3824 			lchown(dest,sstat[stat.ST_UID],sstat[stat.ST_GID])
3825 			return os.lstat(dest)[stat.ST_MTIME]
3826 		except SystemExit, e:
3828 		except Exception, e:
3829 			print "!!! failed to properly create symlink:"
3830 			print "!!!",dest,"->",target
# Same-device (or SELinux) fast path: a plain rename is atomic.
3835 	if sstat[stat.ST_DEV]==dstat[stat.ST_DEV] or selinux_enabled:
3838 				ret=selinux.secure_rename(src,dest)
3840 				ret=os.rename(src,dest)
3842 		except SystemExit, e:
3844 		except Exception, e:
3845 			if e[0]!=errno.EXDEV:
3846 				# Some random error.
3847 				print "!!! Failed to move",src,"to",dest
3850 			# Invalid cross-device-link 'bind' mounted or actually Cross-Device
# Cross-device: copy to "dest#new" then rename over dest so readers never
# see a partially written file.
3853 		if stat.S_ISREG(sstat[stat.ST_MODE]):
3854 			try: # For safety copy then move it over.
3856 					selinux.secure_copy(src,dest+"#new")
3857 					selinux.secure_rename(dest+"#new",dest)
3859 					shutil.copyfile(src,dest+"#new")
3860 					os.rename(dest+"#new",dest)
3862 			except SystemExit, e:
3864 			except Exception, e:
3865 				print '!!! copy',src,'->',dest,'failed.'
3869 			#we don't yet handle special, so we need to fall back to /bin/mv
3871 				a=commands.getstatusoutput(MOVE_BINARY+" -c -f "+"'"+src+"' '"+dest+"'")
3873 				a=commands.getstatusoutput(MOVE_BINARY+" -f "+"'"+src+"' '"+dest+"'")
3875 				print "!!! Failed to move special file:"
3876 				print "!!! '"+src+"' to '"+dest+"'"
3878 				return None # failure
# Restore ownership and mode after a copy-based move (lchown for symlinks,
# chown+chmod otherwise).
3881 			if stat.S_ISLNK(sstat[stat.ST_MODE]):
3882 				lchown(dest,sstat[stat.ST_UID],sstat[stat.ST_GID])
3884 				os.chown(dest,sstat[stat.ST_UID],sstat[stat.ST_GID])
3885 				os.chmod(dest, stat.S_IMODE(sstat[stat.ST_MODE])) # Sticky is reset on chown
3887 		except SystemExit, e:
3889 		except Exception, e:
3890 			print "!!! Failed to chown/chmod/unlink in movefile()"
# mtime handling: honour an explicit newmtime, otherwise carry over the
# source's atime/mtime and report the preserved mtime to the caller.
3896 		os.utime(dest,(newmtime,newmtime))
3898 			os.utime(dest, (sstat[stat.ST_ATIME], sstat[stat.ST_MTIME]))
3899 			newmtime=sstat[stat.ST_MTIME]
3902 		# Restore the flags we saved before moving
3903 		if pflags and bsd_chflags.lchflags(os.path.dirname(dest), pflags) < 0:
3904 			writemsg("!!! Couldn't restore flags (%s) on '%s'\n" % \
3905 				(str(pflags), os.path.dirname(dest)), noiselevel=-1)
# Merge an installed image (pkgloc, typically ${D}) into the live filesystem
# at myroot via a dblink object; infloc is the build-info directory.
# Returns early (visible writemsg) when myroot is not writable.
3910 def merge(mycat, mypkg, pkgloc, infloc, myroot, mysettings, myebuild=None,
3911 	mytree=None, mydbapi=None, vartree=None, prev_mtimes=None):
3912 	if not os.access(myroot, os.W_OK):
3913 		writemsg("Permission denied: access('%s', W_OK)\n" % myroot,
3916 	mylink = dblink(mycat, mypkg, myroot, mysettings, treetype=mytree,
3918 	return mylink.merge(pkgloc, infloc, myroot, myebuild,
3919 		mydbapi=mydbapi, prev_mtimes=prev_mtimes)
# Unmerge an installed package (cat/pkg) from myroot via a vartree dblink.
# mytrimworld controls whether the world file is trimmed; ldpath_mtimes is
# forwarded so env_update work can be skipped when unchanged.
3921 def unmerge(cat, pkg, myroot, mysettings, mytrimworld=1, vartree=None, ldpath_mtimes=None):
3923 		cat, pkg, myroot, mysettings, treetype="vartree", vartree=vartree)
3927 		retval = mylink.unmerge(trimworld=mytrimworld, cleanup=1,
3928 			ldpath_mtimes=ldpath_mtimes)
3929 		if retval == os.EX_OK:
def getCPFromCPV(mycpv):
	"""Return only the category/package ("cp") portion of a cpv string.

	Delegates the parsing to pkgsplit() and discards the version and
	revision components it produces.
	"""
	split_result = pkgsplit(mycpv)
	return split_result[0]
# NOTE(review): line-sampled view; interior lines are missing.
3940 def dep_virtual(mysplit, mysettings):
3941 	"Does virtual dependency conversion"
3943 	myvirtuals = mysettings.getvirtuals()
# Recurse into nested sublists of the parsed dependency structure.
3945 		if type(x)==types.ListType:
3946 			newsplit.append(dep_virtual(x, mysettings))
3949 			mychoices = myvirtuals.get(mykey, None)
# A single provider can be substituted in place; multiple providers need
# structural expansion.
3951 				if len(mychoices) == 1:
3952 					a = x.replace(mykey, mychoices[0])
3955 					# blocker needs "and" not "or(||)".
3960 						a.append(x.replace(mykey, y))
# NOTE(review): line-sampled view; interior lines are missing.
3966 def _expand_new_virtuals(mysplit, edebug, mydbapi, mysettings, myroot="/",
3967 	trees=None, **kwargs):
3968 	"""Recursively expand new-style virtuals so as to collapse one or more
3969 	levels of indirection.  In dep_zapdeps, new-style virtuals will be assigned
3970 	zero cost regardless of whether or not they are currently installed. Virtual
3971 	blockers are supported but only when the virtual expands to a single
3972 	atom because it wouldn't necessarily make sense to block all the components
3973 	of a compound virtual.  When more than one new-style virtual is matched,
3974 	the matches are sorted from highest to lowest versions and the atom is
3975 	expanded to || ( highest match ... lowest match )."""
3977 	# According to GLEP 37, RDEPEND is the only dependency type that is valid
3978 	# for new-style virtuals.  Repoman should enforce this.
3979 	dep_keys = ["RDEPEND", "DEPEND", "PDEPEND"]
# Sort comparator: reversed pkgcmp gives highest versions first.
3980 	def compare_pkgs(a, b):
3981 		return pkgcmp(b[1], a[1])
3982 	portdb = trees[myroot]["porttree"].dbapi
3983 	if kwargs["use_binaries"]:
3984 		portdb = trees[myroot]["bintree"].dbapi
3985 	myvirtuals = mysettings.getvirtuals()
3990 		elif isinstance(x, list):
3991 			newsplit.append(_expand_new_virtuals(x, edebug, mydbapi,
3992 				mysettings, myroot=myroot, trees=trees, **kwargs))
3994 		if portage_dep._dep_check_strict and \
3995 			not isvalidatom(x, allow_blockers=True):
3996 			raise portage_exception.ParseError(
3997 				"invalid atom: '%s'" % x)
3998 		mykey = dep_getkey(x)
# Only virtual/* atoms are candidates for new-style expansion.
3999 		if not mykey.startswith("virtual/"):
4002 		mychoices = myvirtuals.get(mykey, [])
4003 		isblocker = x.startswith("!")
# Collect candidate providers: new-style virtual packages from the
# port/bin tree, plus installed ones from the vartree when using binaries.
4008 		for cpv in portdb.match(match_atom):
4009 			# only use new-style matches
4010 			if cpv.startswith("virtual/"):
4011 				pkgs[cpv] = (cpv, pkgsplit(cpv), portdb)
4012 		if kwargs["use_binaries"] and "vartree" in trees[myroot]:
4013 			vardb = trees[myroot]["vartree"].dbapi
4014 			for cpv in vardb.match(match_atom):
4015 				# only use new-style matches
4016 				if cpv.startswith("virtual/"):
4019 						pkgs[cpv] = (cpv, pkgsplit(cpv), vardb)
4020 		if not (pkgs or mychoices):
4021 			# This one couldn't be expanded as a new-style virtual.  Old-style
4022 			# virtuals have already been expanded by dep_virtual, so this one
4023 			# is unavailable and dep_zapdeps will identify it as such.  The
4024 			# atom is not eliminated here since it may still represent a
4025 			# dependency that needs to be satisfied.
4028 		if not pkgs and len(mychoices) == 1:
4029 			newsplit.append(x.replace(mykey, mychoices[0]))
4031 		pkgs = pkgs.values()
4032 		pkgs.sort(compare_pkgs) # Prefer higher versions.
# For each provider, pull in its dependency string and validate/expand it
# recursively via dep_check.
4038 			depstring = " ".join(y[2].aux_get(y[0], dep_keys))
4040 				print "Virtual Parent:   ", y[0]
4041 				print "Virtual Depstring:", depstring
4042 			mycheck = dep_check(depstring, mydbapi, mysettings, myroot=myroot,
4043 				trees=trees, **kwargs)
4045 				raise portage_exception.ParseError(
4046 					"%s: %s '%s'" % (y[0], mycheck[1], depstring))
4048 				virtual_atoms = [atom for atom in mycheck[1] \
4049 					if not atom.startswith("!")]
4050 				if len(virtual_atoms) == 1:
4051 					# It wouldn't make sense to block all the components of a
4052 					# compound virtual, so only a single atom block is allowed.
4053 					a.append("!" + virtual_atoms[0])
4055 				mycheck[1].append("="+y[0]) # pull in the new-style virtual
4056 				a.append(mycheck[1])
4057 		# Plain old-style virtuals.  New-style virtuals are preferred.
4059 				a.append(x.replace(mykey, y))
4060 		if isblocker and not a:
4061 			# Probably a compound virtual.  Pass the atom through unprocessed.
# Evaluate an already-reduced dependency list (nested lists of booleans)
# to a single truth value. NOTE(review): line-sampled view.
4067 def dep_eval(deplist):
4070 	if deplist[0]=="||":
4071 		#or list; we just need one "1"
4072 		for x in deplist[1:]:
4073 			if type(x)==types.ListType:
4078 		#XXX: unless there's no available atoms in the list
4079 		#in which case we need to assume that everything is
4080 		#okay as some ebuilds are relying on an old bug.
4081 		if len(deplist) == 1:
# AND semantics for a plain (non-"||") list: recurse into sublists.
4086 			if type(x)==types.ListType:
# NOTE(review): line-sampled view; interior lines are missing.
4093 def dep_zapdeps(unreduced, reduced, myroot, use_binaries=0, trees=None):
4094 	"""Takes an unreduced and reduced deplist and removes satisfied dependencies.
4095 	Returned deplist contains steps that must be taken to satisfy dependencies."""
4099 	writemsg("ZapDeps -- %s\n" % (use_binaries), 2)
# Nothing to do when the reduced list is already satisfied (or empty).
4100 	if not reduced or unreduced == ["||"] or dep_eval(reduced):
# AND-level: recurse into sublists, keep unsatisfied leaf atoms.
4103 	if unreduced[0] != "||":
4105 		for dep, satisfied in izip(unreduced, reduced):
4106 			if isinstance(dep, list):
4107 				unresolved += dep_zapdeps(dep, satisfied, myroot,
4108 					use_binaries=use_binaries, trees=trees)
4110 				unresolved.append(dep)
4113 	# We're at a ( || atom ... ) type level and need to make a choice
4114 	deps = unreduced[1:]
4115 	satisfieds = reduced[1:]
4117 	# Our preference order is for an the first item that:
4118 	# a) contains all unmasked packages with the same key as installed packages
4119 	# b) contains all unmasked packages
4120 	# c) contains masked installed packages
4121 	# d) is the first item
4124 	possible_upgrades = []
4127 	# Alias the trees we'll be checking availability against
4129 	if "vartree" in trees[myroot]:
4130 		vardb = trees[myroot]["vartree"].dbapi
4132 		mydbapi = trees[myroot]["bintree"].dbapi
4134 		mydbapi = trees[myroot]["porttree"].dbapi
4136 	# Sort the deps into preferred (installed) and other
4137 	# with values of [[required_atom], availablility]
4138 	for dep, satisfied in izip(deps, satisfieds):
4139 		if isinstance(dep, list):
4140 			atoms = dep_zapdeps(dep, satisfied, myroot,
4141 				use_binaries=use_binaries, trees=trees)
# Availability check: every atom of the choice must match in mydbapi
# (or, for --usepkgonly, be installed).
4145 		all_available = True
4147 			if not mydbapi.match(atom):
4148 				# With --usepkgonly, count installed packages as "available".
4149 				# Note that --usepkgonly currently has no package.mask support.
4151 				if use_binaries and vardb and vardb.match(atom):
4153 				all_available = False
4158 			preferred.append((atoms, None, all_available))
4161 		""" The package names rather than the exact atoms are used for an
4162 		initial rough match against installed packages.  More specific
4163 		preference selection is handled later via slot and version comparison."""
4164 		all_installed = True
4165 		for atom in set([dep_getkey(atom) for atom in atoms]):
4166 			# New-style virtuals have zero cost to install.
4167 			if not vardb.match(atom) and not atom.startswith("virtual/"):
4168 				all_installed = False
4171 		# Check if the set of atoms will result in a downgrade of
4172 		# an installed package. If they will then don't prefer them
4174 		has_downgrade = False
4176 		if all_installed or all_available:
# For each atom, record the best available package per slot and compare
# against the installed package in that slot to detect downgrades.
4178 				mykey = dep_getkey(atom)
4179 				avail_pkg = best(mydbapi.match(atom))
4182 				avail_slot = "%s:%s" % (mykey,
4183 					mydbapi.aux_get(avail_pkg, ["SLOT"])[0])
4184 				versions[avail_slot] = avail_pkg
4185 				inst_pkg = vardb.match(avail_slot)
4188 				# emerge guarantees 1 package per slot here (highest counter)
4189 				inst_pkg = inst_pkg[0]
4190 				if avail_pkg != inst_pkg and \
4191 					avail_pkg != best([avail_pkg, inst_pkg]):
4192 					has_downgrade = True
# Bucket the choice by the preference order documented above.
4195 		this_choice = (atoms, versions, all_available)
4196 		if not has_downgrade:
4198 				preferred.append(this_choice)
4201 				possible_upgrades.append(this_choice)
4203 			other.append(this_choice)
4205 	# Compare the "all_installed" choices against the "all_available" choices
4206 	# for possible missed upgrades.  The main purpose of this code is to find
4207 	# upgrades of new-style virtuals since _expand_new_virtuals() expands them
4208 	# into || ( highest version ... lowest version ).  We want to prefer the
4209 	# highest all_available version of the new-style virtual when there is a
4210 	# lower all_installed version.
4211 	for possible_upgrade in list(possible_upgrades):
4212 		atoms, versions, all_available = possible_upgrade
4213 		myslots = set(versions)
4214 		for other_choice in preferred:
4215 			o_atoms, o_versions, o_all_available = other_choice
4216 			intersecting_slots = myslots.intersection(o_versions)
4217 			if not intersecting_slots:
# A candidate only jumps ahead when every shared slot is an upgrade and
# none is a downgrade.
4220 			has_downgrade = False
4221 			for myslot in intersecting_slots:
4222 				myversion = versions[myslot]
4223 				o_version = o_versions[myslot]
4224 				if myversion != o_version:
4225 					if myversion == best([myversion, o_version]):
4228 						has_downgrade = True
4230 			if has_upgrade and not has_downgrade:
4231 				o_index = preferred.index(other_choice)
4232 				preferred.insert(o_index, possible_upgrade)
4233 				possible_upgrades.remove(possible_upgrade)
4235 	preferred.extend(possible_upgrades)
4237 	# preferred now contains a) and c) from the order above with
4238 	# the masked flag differentiating the two. other contains b)
4239 	# and d) so adding other to preferred will give us a suitable
4240 	# list to iterate over.
4241 	preferred.extend(other)
# Two passes: first only unmasked (all_available) choices, then allow masked.
4243 	for allow_masked in (False, True):
4244 		for atoms, versions, all_available in preferred:
4245 			if all_available or allow_masked:
4248 	assert(False) # This point should not be reachable
def dep_expand(mydep, mydb=None, use_cache=1, settings=None):
    """Expand the category part of a dependency atom via cpv_expand(),
    re-attaching any operator prefix (e.g. ">=") and version suffix that
    surround the bare cpv.

    NOTE(review): the lines that assign orig_dep (and any early handling of
    the raw atom) are missing from this extract; orig_dep is presumably the
    incoming atom before dep_getcpv() isolates the cpv -- confirm against
    the full source.
    """
    mydep = dep_getcpv(orig_dep)
    # Locate the bare cpv inside the original atom so the surrounding
    # operator text can be preserved around the expanded key.
    myindex = orig_dep.index(mydep)
    prefix = orig_dep[:myindex]
    postfix = orig_dep[myindex+len(mydep):]
    return prefix + cpv_expand(
        mydep, mydb=mydb, use_cache=use_cache, settings=settings) + postfix
def dep_check(depstring, mydbapi, mysettings, use="yes", mode=None, myuse=None,
    use_cache=1, use_binaries=0, myroot="/", trees=None):
    """Takes a depend string and parses the condition."""
    # Returns a 2-element list: [1, <selected atoms>] on success or
    # [0, <error message>] on failure.
    # NOTE(review): several interleaved lines (guards, try: headers, error
    # returns) are missing from this extract; gaps are flagged below.
    edebug = mysettings.get("PORTAGE_DEBUG", None) == "1"
    #check_config_instance(mysettings)
    # NOTE(review): presumably only executed when the caller passed
    # trees=None -- the guard line is missing from this extract.
    trees = globals()["db"]
    myusesplit = mysettings["USE"].split()
    # We've been given useflags to use.
    #print "USE FLAGS PASSED IN."
    #if "bindist" in myusesplit:
    #	print "BINDIST is set!"
    #	print "BINDIST NOT set."
    #we are being run by autouse(), don't consult USE vars yet.
    # WE ALSO CANNOT USE SETTINGS
    #convert parenthesis to sublists
    mysplit = portage_dep.paren_reduce(depstring)
    # NOTE(review): initialization of mymasks/useforce (sets) is missing
    # from this extract.
    useforce.add(mysettings["ARCH"])
    # This masking/forcing is only for repoman. In other cases, relevant
    # masking/forcing should have already been applied via
    # config.regenerate(). Also, binary or installed packages may have
    # been built with flags that are now masked, and it would be
    # inconsistent to mask them now. Additionally, myuse may consist of
    # flags from a parent package that is being merged to a $ROOT that is
    # different from the one that mysettings represents.
    mymasks.update(mysettings.usemask)
    mymasks.update(mysettings.archlist())
    mymasks.discard(mysettings["ARCH"])
    useforce.update(mysettings.useforce)
    useforce.difference_update(mymasks)
    # Drop inactive USE-conditional groups from the parsed dep structure.
    mysplit = portage_dep.use_reduce(mysplit, uselist=myusesplit,
        masklist=mymasks, matchall=(use=="all"), excludeall=useforce)
    # NOTE(review): the matching try: header is missing from this extract.
    except portage_exception.InvalidDependString, e:
    # Do the || conversions
    mysplit=portage_dep.dep_opconvert(mysplit)
    #dependencies were reduced to nothing
    # Recursively expand new-style virtuals so as to
    # collapse one or more levels of indirection.
    mysplit = _expand_new_virtuals(mysplit, edebug, mydbapi, mysettings,
        use=use, mode=mode, myuse=myuse, use_cache=use_cache,
        use_binaries=use_binaries, myroot=myroot, trees=trees)
    # NOTE(review): the matching try: header is missing from this extract.
    except portage_exception.ParseError, e:
    # Mark each leaf atom as satisfied (1) or not (0).
    mysplit2=dep_wordreduce(mysplit2,mysettings,mydbapi,mode,use_cache=use_cache)
    if mysplit2 is None:
        return [0,"Invalid token"]
    writemsg("\n\n\n", 1)
    writemsg("mysplit: %s\n" % (mysplit), 1)
    writemsg("mysplit2: %s\n" % (mysplit2), 1)
    # Pick which branch of each || group should be satisfied.
    myzaps = dep_zapdeps(mysplit, mysplit2, myroot,
        use_binaries=use_binaries, trees=trees)
    mylist = flatten(myzaps)
    writemsg("myzaps: %s\n" % (myzaps), 1)
    writemsg("mylist: %s\n" % (mylist), 1)
    # NOTE(review): construction of mydict (deduplication of mylist) is
    # missing from this extract.
    writemsg("mydict: %s\n" % (mydict), 1)
    return [1,mydict.keys()]
def dep_wordreduce(mydeplist,mysettings,mydbapi,mode,use_cache=1):
    "Reduces the deplist to ones and zeros"
    # Work on a shallow copy so the caller's nested structure is preserved.
    deplist=mydeplist[:]
    for mypos in xrange(len(deplist)):
        if type(deplist[mypos])==types.ListType:
            # Recurse into nested dependency groups.
            deplist[mypos]=dep_wordreduce(deplist[mypos],mysettings,mydbapi,mode,use_cache=use_cache)
        elif deplist[mypos]=="||":
            # NOTE(review): the "||" branch body and the start of the plain
            # atom branch are missing from this extract; the code below
            # evaluates an individual atom.
            mykey = dep_getkey(deplist[mypos])
            if mysettings and mysettings.pprovideddict.has_key(mykey) and \
                match_from_list(deplist[mypos], mysettings.pprovideddict[mykey]):
            elif mydbapi is None:
                # Assume nothing is satisfied. This forces dep_zapdeps to
                # return all of the deps that have been selected
                # (excluding those satisfied by package.provided).
                deplist[mypos] = False
            # NOTE(review): the try/mode dispatch around the two match
            # calls below is missing from this extract.
            mydep=mydbapi.xmatch(mode,deplist[mypos])
            mydep=mydbapi.match(deplist[mypos],use_cache=use_cache)
            if deplist[mypos][0]=="!":
    #encountered invalid string
    # NOTE(review): the return statements (reduced deplist on success,
    # None on an invalid string) are missing from this extract.
def cpv_getkey(mycpv):
    """Return the "cat/pkg" key for a versioned cpv like "cat/pkg-1.0"."""
    myslash=mycpv.split("/")
    mysplit=pkgsplit(myslash[-1])
    # NOTE(review): intervening lines are missing from this extract
    # (presumably handling the no-category and unsplittable cases).
    return myslash[0]+"/"+mysplit[0]
def key_expand(mykey, mydb=None, use_cache=1, settings=None):
    """Expand a short package key ("foo" or "cat/foo") into a full
    "cat/pkg" key, consulting the db's categories and the virtuals
    mappings; falls back to "null/<key>" when nothing matches.
    """
    mysplit=mykey.split("/")
    if settings is None:
        settings = globals()["settings"]
    virts = settings.getvirtuals("/")
    virts_p = settings.get_virts_p("/")
    # NOTE(review): the category-less (len(mysplit)==1) branch header and
    # several interior lines are missing from this extract.
    if mydb and type(mydb)==types.InstanceType:
        for x in settings.categories:
            if mydb.cp_list(x+"/"+mykey,use_cache=use_cache):
    if virts_p.has_key(mykey):
        return(virts_p[mykey][0])
    return "null/"+mykey
    # NOTE(review): lines are missing here; the code below belongs to the
    # fully-qualified "cat/pkg" branch.
    if type(mydb)==types.InstanceType:
        if (not mydb.cp_list(mykey,use_cache=use_cache)) and virts and virts.has_key(mykey):
            return virts[mykey][0]
def cpv_expand(mycpv, mydb=None, use_cache=1, settings=None):
    """Given a string (packagename or virtual) expand it into a valid
    cat/package string. Virtuals use the mydb to determine which provided
    virtual is a valid choice and defaults to the first element when there
    are no installed/available candidates."""
    # NOTE(review): several interleaved lines (branch headers, the myp
    # assignment, matches initialization) are missing from this extract.
    myslash=mycpv.split("/")
    mysplit=pkgsplit(myslash[-1])
    if settings is None:
        settings = globals()["settings"]
    virts = settings.getvirtuals("/")
    virts_p = settings.get_virts_p("/")
    # this is illegal case.
    elif len(myslash)==2:
        mykey=myslash[0]+"/"+mysplit[0]
    if mydb and virts and mykey in virts:
        writemsg("mydb.__class__: %s\n" % (mydb.__class__), 1)
        if type(mydb)==types.InstanceType:
            if not mydb.cp_list(mykey, use_cache=use_cache):
                writemsg("virts[%s]: %s\n" % (str(mykey),virts[mykey]), 1)
                # Remember the requested key so we can tell whether any
                # provider was actually chosen below.
                mykey_orig = mykey[:]
                for vkey in virts[mykey]:
                    if mydb.cp_list(vkey,use_cache=use_cache):
                        writemsg("virts chosen: %s\n" % (mykey), 1)
                if mykey == mykey_orig:
                    # No provider had candidates; fall back to the first
                    # listed provider.
                    mykey=virts[mykey][0]
                    writemsg("virts defaulted: %s\n" % (mykey), 1)
    #we only perform virtual expansion if we are passed a dbapi
    #specific cpv, no category, ie. "foo-1.0"
    # NOTE(review): the category-less branch header and the assignments of
    # myp and matches are missing from this extract.
    for x in settings.categories:
        if mydb.cp_list(x+"/"+myp,use_cache=use_cache):
            matches.append(x+"/"+myp)
    if len(matches) > 1:
        virtual_name_collision = False
        if len(matches) == 2:
            if not x.startswith("virtual/"):
                # Assume that the non-virtual is desired. This helps
                # avoid the ValueError for invalid deps that come from
                # installed packages (during reverse blocker detection,
                # for example).
                virtual_name_collision = True
        if not virtual_name_collision:
            raise ValueError, matches
    if not mykey and type(mydb)!=types.ListType:
        if virts_p.has_key(myp):
            mykey=virts_p[myp][0]
    #again, we only perform virtual expansion if we have a dbapi (not a list)
    # NOTE(review): the version-less return path is missing; the returns
    # below re-attach version (and revision unless it is r0).
    if mysplit[2]=="r0":
        return mykey+"-"+mysplit[1]
    return mykey+"-"+mysplit[1]+"-"+mysplit[2]
def getmaskingreason(mycpv, settings=None, portdb=None):
    """Return the comment block from package.mask that applies to mycpv.

    Raises ValueError for an unparseable cpv and KeyError when the cpv is
    not in the tree. NOTE(review): several guard lines, accumulator
    initializations and the comment-scanning tail are missing from this
    extract; gaps are flagged below.
    """
    from portage_util import grablines
    if settings is None:
        settings = globals()["settings"]
    # NOTE(review): presumably guarded by `if portdb is None:` -- the
    # guard line is missing from this extract.
    portdb = globals()["portdb"]
    mysplit = catpkgsplit(mycpv)
    # NOTE(review): presumably guarded by `if not mysplit:`.
    raise ValueError("invalid CPV: %s" % mycpv)
    if not portdb.cpv_exists(mycpv):
        raise KeyError("CPV %s does not exist" % mycpv)
    mycp=mysplit[0]+"/"+mysplit[1]
    # XXX- This is a temporary duplicate of code from the config constructor.
    locations = [os.path.join(settings["PORTDIR"], "profiles")]
    locations.extend(settings.profiles)
    for ov in settings["PORTDIR_OVERLAY"].split():
        profdir = os.path.join(normalize_path(ov), "profiles")
        if os.path.isdir(profdir):
            locations.append(profdir)
    locations.append(os.path.join(settings["PORTAGE_CONFIGROOT"],
        USER_CONFIG_PATH.lstrip(os.path.sep)))
    pmasklists = [grablines(os.path.join(x, "package.mask"), recursive=1) for x in locations]
    # NOTE(review): initialization of pmasklines is missing from this
    # extract.
    while pmasklists: # stack_lists doesn't preserve order so it can't be used
        pmasklines.extend(pmasklists.pop(0))
    if settings.pmaskdict.has_key(mycp):
        for x in settings.pmaskdict[mycp]:
            if mycpv in portdb.xmatch("match-all", x):
    # Scan the collected package.mask lines for the comment block that
    # precedes the matching atom.
    for i in xrange(len(pmasklines)):
        l = pmasklines[i].strip()
        comment_valid = i + 1
        if comment_valid != i:
        elif comment_valid != -1:
            # Apparently this comment applies to multiple masks, so
            # it remains valid until a blank line is encountered.
def getmaskingstatus(mycpv, settings=None, portdb=None):
    """Return a list of reasons why mycpv is masked (e.g. "profile",
    "package.mask", "~arch keyword"); an empty list means unmasked.

    NOTE(review): numerous interleaved lines (guards, rValue/kmask
    initialization, the "*" / "-*" keyword handling and the final return)
    are missing from this extract; gaps are flagged below.
    """
    if settings is None:
        settings = globals()["settings"]
    # NOTE(review): presumably guarded by `if portdb is None:`.
    portdb = globals()["portdb"]
    mysplit = catpkgsplit(mycpv)
    # NOTE(review): presumably guarded by `if not mysplit:`.
    raise ValueError("invalid CPV: %s" % mycpv)
    if not portdb.cpv_exists(mycpv):
        raise KeyError("CPV %s does not exist" % mycpv)
    mycp=mysplit[0]+"/"+mysplit[1]
    # profile-level package.mask (profile "packages" file) checking
    revmaskdict=settings.prevmaskdict
    if revmaskdict.has_key(mycp):
        for x in revmaskdict[mycp]:
            if not match_to_list(mycpv, [myatom]):
                rValue.append("profile")
    # package.mask checking
    maskdict=settings.pmaskdict
    unmaskdict=settings.punmaskdict
    if maskdict.has_key(mycp):
        for x in maskdict[mycp]:
            if mycpv in portdb.xmatch("match-all", x):
                # A package.unmask entry cancels the mask.
                if unmaskdict.has_key(mycp):
                    for z in unmaskdict[mycp]:
                        if mycpv in portdb.xmatch("match-all",z):
    rValue.append("package.mask")
    # keywords checking
    mygroups, eapi = portdb.aux_get(mycpv, ["KEYWORDS", "EAPI"])
    # The "depend" phase apparently failed for some reason. An associated
    # error message will have already been printed to stderr.
    return ["corruption"]
    if not eapi_is_supported(eapi):
        return ["required EAPI %s, supported EAPI %s" % (eapi, portage_const.EAPI)]
    mygroups = mygroups.split()
    pgroups = settings["ACCEPT_KEYWORDS"].split()
    myarch = settings["ARCH"]
    if pgroups and myarch not in pgroups:
        """For operating systems other than Linux, ARCH is not necessarily a
        valid keyword."""
        myarch = pgroups[0].lstrip("~")
    pkgdict = settings.pkeywordsdict
    cp = dep_getkey(mycpv)
    if pkgdict.has_key(cp):
        matches = match_to_list(mycpv, pkgdict[cp].keys())
        for match in matches:
            pgroups.extend(pkgdict[cp][match])
    # NOTE(review): the incremental-keyword loop header and inc_pgroups
    # initialization are missing from this extract.
    if x != "-*" and x.startswith("-"):
        inc_pgroups.remove(x[1:])
    if x not in inc_pgroups:
        inc_pgroups.append(x)
    pgroups = inc_pgroups
    for keyword in pgroups:
        if keyword in mygroups:
    # NOTE(review): the kmask classification branches are incomplete here.
    elif gp=="-"+myarch:
    elif gp=="~"+myarch:
    rValue.append(kmask+" keyword")
def __init__(self, root="/", virtual=None, clone=None, settings=None):
    """Tree of ebuilds rooted at PORTDIR, backed by a portdbapi.

    NOTE(review): the `if clone:` / `else:` branch headers are missing
    from this extract; the first three assignments belong to the clone
    path, the rest to the normal construction path.
    """
    self.root=clone.root
    self.portroot=clone.portroot
    self.pkglines=clone.pkglines
    if settings is None:
        settings = globals()["settings"]
    self.settings = settings
    self.portroot=settings["PORTDIR"]
    self.virtual=virtual
    self.dbapi = portdbapi(
        settings["PORTDIR"], mysettings=settings)
def dep_bestmatch(self,mydep):
    "compatibility method"
    # Best visible match for the atom via the underlying portdbapi.
    mymatch=self.dbapi.xmatch("bestmatch-visible",mydep)
    # NOTE(review): the return lines are missing from this extract.
def dep_match(self,mydep):
    "compatibility method"
    # All visible matches for the atom via the underlying portdbapi.
    mymatch=self.dbapi.xmatch("match-visible",mydep)
    # NOTE(review): the return lines are missing from this extract.
def exists_specific(self,cpv):
    """Return whether the exact cpv is present in the tree (ignores masking)."""
    # Thin delegation to the underlying dbapi.
    found = self.dbapi.cpv_exists(cpv)
    return found
def getallnodes(self):
    """Return every unmasked cat/pkg node known to the dbapi.

    Nodes in this list may or may not also have masked versions available.
    """
    all_nodes = self.dbapi.cp_all()
    return all_nodes
def getname(self,pkgname):
    "returns file location for this particular package (DEPRECATED)"
    # NOTE(review): guard lines (empty-name handling) are missing from
    # this extract.
    mysplit=pkgname.split("/")
    psplit=pkgsplit(mysplit[1])
    # PORTDIR/<cat>/<pkg>/<pkg-version>.ebuild
    return self.portroot+"/"+mysplit[0]+"/"+psplit[0]+"/"+mysplit[1]+".ebuild"
def resolve_specific(self,myspec):
    """Expand the category of a specific cpv string via key_expand and
    re-attach version and (non-r0) revision."""
    cps=catpkgsplit(myspec)
    # NOTE(review): the `if not cps:` guard is missing from this extract.
    mykey = key_expand(cps[0]+"/"+cps[1], mydb=self.dbapi,
        settings=self.settings)
    mykey=mykey+"-"+cps[2]
    # NOTE(review): presumably guarded by `if cps[3] != "r0":`; the guard
    # and the final return are missing from this extract.
    mykey=mykey+"-"+cps[3]
def depcheck(self,mycheck,use="yes",myusesplit=None):
    """Compatibility wrapper around the module-level dep_check().

    @param mycheck: the DEPEND-style string to evaluate
    @param use: passed through to dep_check (e.g. "yes" or "all")
    @param myusesplit: optional explicit USE flag list
    @return: dep_check()'s [success, payload] list
    """
    # BUG FIX: dep_check() requires mysettings as its third positional
    # argument (see its signature); the old call omitted it, so this
    # method always raised TypeError. Pass this tree's settings object.
    return dep_check(mycheck, self.dbapi, self.settings,
        use=use, myuse=myusesplit)
def getslot(self,mycatpkg):
    "Get a slot for a catpkg; assume it exists."
    # NOTE(review): the try: header, the default return value and the
    # exception-handler bodies are missing from this extract.
    myslot=self.dbapi.aux_get(mycatpkg,["SLOT"])[0]
    except SystemExit, e:
    except Exception, e:
def close_caches(self):
    """Release any caches held by this dbapi.

    NOTE(review): the body is missing from this extract (presumably a
    no-op `pass` in the base class).
    """
def cp_list(self,cp,use_cache=1):
    """Return the list of cpvs available for cat/pkg `cp`.

    NOTE(review): the body is missing from this extract (presumably
    abstract in the base class).
    """
    # NOTE(review): the two lines below belong to cpv_all(); its def line
    # and the cpv_list initialization are missing from this extract.
    for cp in self.cp_all():
        cpv_list.extend(self.cp_list(cp))
def aux_get(self,mycpv,mylist):
    """Stub: fetch auxiliary db information (SLOT, DEPEND, etc.) for a cpv.

    input: "sys-apps/foo-1.0",["SLOT","DEPEND","HOMEPAGE"]
    return: ["0",">=sys-libs/bar-1.0","http://www.foo.com"] or [] if mycpv not found
    """
    # Subclasses (portdbapi, vardbapi, fakedbapi, ...) must override this.
    raise NotImplementedError
def match(self,origdep,use_cache=1):
    """Expand origdep and return the cpvs from cp_list() that match it,
    additionally filtered by the atom's SLOT when one is specified."""
    mydep = dep_expand(origdep, mydb=self, settings=self.settings)
    mykey=dep_getkey(mydep)
    mylist = match_from_list(mydep,self.cp_list(mykey,use_cache=use_cache))
    myslot = portage_dep.dep_getslot(mydep)
    if myslot is not None:
        # Keep only packages installed/available in the requested SLOT.
        mylist = [cpv for cpv in mylist \
            if self.aux_get(cpv, ["SLOT"])[0] == myslot]
    # NOTE(review): the final `return mylist` is missing from this extract.
def match2(self,mydep,mykey,mylist):
    """DEPRECATED: filter mylist down to the cpvs matching mydep.

    @param mydep: dependency atom to match against
    @param mykey: unused; retained for backward call compatibility
    @param mylist: candidate cpv list
    @return: the matching cpvs
    """
    writemsg("DEPRECATED: dbapi.match2\n")
    # BUG FIX: the match result was computed and then discarded, so this
    # method always returned None; return it as callers expect.
    return match_from_list(mydep,mylist)
def invalidentry(self, mypath):
    """Handle a bogus entry found in the package database directory."""
    if re.search("portage_lockfile$",mypath):
        # Stale lock file: clean it up unless a master portage process
        # owns it.
        if not os.environ.has_key("PORTAGE_MASTER_PID"):
            writemsg("Lockfile removed: %s\n" % mypath, 1)
            portage_locks.unlockfile((mypath,None,None))
            # NOTE(review): the else branch and an exception handler are
            # missing from this extract.
            # Nothing we can do about it. We're probably sandboxed.
    elif re.search(".*/-MERGING-(.*)",mypath):
        # Leftover marker from an interrupted merge.
        if os.path.exists(mypath):
            writemsg(red("INCOMPLETE MERGE:")+" "+mypath+"\n", noiselevel=-1)
    # NOTE(review): the final else: header is missing from this extract.
    writemsg("!!! Invalid db entry: %s\n" % mypath, noiselevel=-1)
class fakedbapi(dbapi):
    "This is a dbapi to use for the emptytree function. It's empty, but things can be added to it."
    # In-memory dbapi: cpvdict maps cpv -> metadata dict (or None),
    # cpdict maps cat/pkg -> list of cpvs.
    # NOTE(review): several interleaved lines (dict initializations,
    # returns, cache-clear calls) are missing from this extract; gaps are
    # flagged below.
    def __init__(self, settings=None):
        # NOTE(review): initialization of self.cpvdict / self.cpdict is
        # missing from this extract.
        if settings is None:
            settings = globals()["settings"]
        self.settings = settings
        # Memoized match() results, invalidated on inject/remove.
        self._match_cache = {}

    def _clear_cache(self):
        # Drop memoized match() results.
        if self._match_cache:
            self._match_cache = {}

    def match(self, origdep, use_cache=1):
        # Memoize dbapi.match() per original dep string.
        result = self._match_cache.get(origdep, None)
        if result is not None:
            # NOTE(review): the cache-hit return line is missing from this
            # extract.
        result = dbapi.match(self, origdep, use_cache=use_cache)
        self._match_cache[origdep] = result
        # NOTE(review): the final return line is missing from this extract.

    def cpv_exists(self,mycpv):
        return self.cpvdict.has_key(mycpv)

    def cp_list(self,mycp,use_cache=1):
        if not self.cpdict.has_key(mycp):
            # NOTE(review): the empty-list return is missing from this
            # extract.
        return self.cpdict[mycp]

    # NOTE(review): the cp_all() def line and returnme initialization are
    # missing from this extract.
        for x in self.cpdict.keys():
            returnme.extend(self.cpdict[x])

    # NOTE(review): the cpv_all() def line is missing from this extract.
        return self.cpvdict.keys()

    def cpv_inject(self, mycpv, metadata=None):
        """Adds a cpv from the list of available packages."""
        # NOTE(review): a cache-clear line is missing from this extract.
        mycp=cpv_getkey(mycpv)
        self.cpvdict[mycpv] = metadata
        # NOTE(review): presumably guarded by `if metadata:` -- the guard
        # lines are missing from this extract.
        myslot = metadata.get("SLOT", None)
        if myslot and mycp in self.cpdict:
            # If necessary, remove another package in the same SLOT.
            for cpv in self.cpdict[mycp]:
                # NOTE(review): a guard line is missing here.
                other_metadata = self.cpvdict[cpv]
                if myslot == other_metadata.get("SLOT", None):
                    self.cpv_remove(cpv)
        if mycp not in self.cpdict:
            self.cpdict[mycp] = []
        if not mycpv in self.cpdict[mycp]:
            self.cpdict[mycp].append(mycpv)

    def cpv_remove(self,mycpv):
        """Removes a cpv from the list of available packages."""
        # NOTE(review): a cache-clear line is missing from this extract.
        mycp=cpv_getkey(mycpv)
        if self.cpvdict.has_key(mycpv):
            del	self.cpvdict[mycpv]
        if not self.cpdict.has_key(mycp):
            # NOTE(review): the early return is missing from this extract.
        while mycpv in self.cpdict[mycp]:
            del self.cpdict[mycp][self.cpdict[mycp].index(mycpv)]
        # Drop the cat/pkg entry entirely once its last cpv is gone.
        if not len(self.cpdict[mycp]):
            del self.cpdict[mycp]

    def aux_get(self, mycpv, wants):
        if not self.cpv_exists(mycpv):
            raise KeyError(mycpv)
        metadata = self.cpvdict[mycpv]
        # NOTE(review): presumably guarded by `if not metadata:` -- the
        # guard line is missing from this extract.
            return ["" for x in wants]
        return [metadata.get(x, "") for x in wants]

    def aux_update(self, cpv, values):
        # NOTE(review): a cache-clear line is missing from this extract.
        self.cpvdict[cpv].update(values)
class bindbapi(fakedbapi):
    """dbapi over a binary-package tree (.tbz2 files); lazily populates the
    bintree on first access.

    NOTE(review): several interleaved lines are missing from this extract;
    gaps are flagged below.
    """
    def __init__(self, mybintree=None, settings=None):
        self.bintree = mybintree
        # NOTE(review): initialization lines (e.g. cpvdict/cpdict) are
        # missing from this extract.
        if settings is None:
            settings = globals()["settings"]
        self.settings = settings
        self._match_cache = {}
        # Selectively cache metadata in order to optimize dep matching.
        self._aux_cache_keys = set(["SLOT"])
        self._aux_cache = {}

    def match(self, *pargs, **kwargs):
        # Ensure the binary tree index is loaded before matching.
        if self.bintree and not self.bintree.populated:
            self.bintree.populate()
        return fakedbapi.match(self, *pargs, **kwargs)

    def aux_get(self,mycpv,wants):
        if self.bintree and not self.bintree.populated:
            self.bintree.populate()
        # Serve entirely from the SLOT cache when possible.
        if not set(wants).difference(self._aux_cache_keys):
            aux_cache = self._aux_cache.get(mycpv)
            if aux_cache is not None:
                return [aux_cache[x] for x in wants]
        mysplit = mycpv.split("/")
        # NOTE(review): intervening lines are missing from this extract.
        tbz2name = mysplit[1]+".tbz2"
        if self.bintree and not self.bintree.isremote(mycpv):
            # Local package: read metadata out of the xpak segment.
            tbz2 = xpak.tbz2(self.bintree.getname(mycpv))
            getitem = tbz2.getfile
        # NOTE(review): the else: header is missing -- the line below is
        # the remote-package branch.
        getitem = self.bintree.remotepkgs[tbz2name].get
        # NOTE(review): mydata initialization is missing from this extract.
        mykeys = self._aux_cache_keys.union(wants)
        # NOTE(review): the loop over mykeys calling getitem is missing.
        # myval is None if the key doesn't exist
        # or the tbz2 is corrupt.
        mydata[x] = " ".join(myval.split())
        if "EAPI" in mykeys:
            if not mydata.setdefault("EAPI", "0"):
                mydata["EAPI"] = "0"
        # NOTE(review): the aux_cache initialization (and its guard) is
        # missing from this extract.
        for x in self._aux_cache_keys:
            aux_cache[x] = mydata.get(x, "")
        self._aux_cache[mycpv] = aux_cache
        return [mydata.get(x, "") for x in wants]

    def aux_update(self, cpv, values):
        if not self.bintree.populated:
            self.bintree.populate()
        tbz2path = self.bintree.getname(cpv)
        if not os.path.exists(tbz2path):
            # NOTE(review): the raise line is missing from this extract.
        # Rewrite the xpak metadata segment in place.
        mytbz2 = xpak.tbz2(tbz2path)
        mydata = mytbz2.get_data()
        mydata.update(values)
        mytbz2.recompose_mem(xpak.xpak_mem(mydata))

    def cp_list(self, *pargs, **kwargs):
        if not self.bintree.populated:
            self.bintree.populate()
        return fakedbapi.cp_list(self, *pargs, **kwargs)

    # NOTE(review): the cpv_all() def line is missing from this extract.
        if not self.bintree.populated:
            self.bintree.populate()
        return fakedbapi.cpv_all(self)
class vardbapi(dbapi):
    """dbapi over the installed-package database (ROOT/var/db/pkg).

    NOTE(review): many interleaved lines of this class (try: headers,
    returns, loop headers, variable initializations) are missing from this
    extract; gaps are flagged with NOTE(review) comments below.
    """
    def __init__(self, root, categories=None, settings=None, vartree=None):
        # NOTE(review): the assignment of self.root and self.cpcache is
        # missing from this extract.
        #cache for category directory mtimes
        self.mtdircache = {}
        #cache for dependency checks
        self.matchcache = {}
        #cache for cp_list results
        self.blockers = None
        if settings is None:
            settings = globals()["settings"]
        self.settings = settings
        if categories is None:
            categories = settings.categories
        # Private copy so later mutation of the settings list doesn't
        # affect this instance.
        self.categories = categories[:]
        # NOTE(review): presumably guarded by `if vartree is None:`.
        vartree = globals()["db"][root]["vartree"]
        self.vartree = vartree
        # Metadata keys persisted in the on-disk aux cache (see aux_get).
        self._aux_cache_keys = set(["SLOT", "COUNTER", "PROVIDE", "USE",
            "IUSE", "DEPEND", "RDEPEND", "PDEPEND"])
        self._aux_cache = None
        self._aux_cache_version = "1"
        self._aux_cache_filename = os.path.join(self.root,
            CACHE_PATH.lstrip(os.path.sep), "vdb_metadata.pickle")

    def cpv_exists(self,mykey):
        "Tells us whether an actual ebuild exists on disk (no masking)"
        return os.path.exists(self.root+VDB_PATH+"/"+mykey)

    def cpv_counter(self,mycpv):
        "This method will grab the COUNTER. Returns a counter value."
        # NOTE(review): the try: header is missing from this extract.
        return long(self.aux_get(mycpv, ["COUNTER"])[0])
        # NOTE(review): py2 pitfall -- `except KeyError, ValueError:`
        # catches only KeyError and binds it to the name ValueError; a
        # ValueError raised by long() is NOT caught by this clause.
        except KeyError, ValueError:
        cdir=self.root+VDB_PATH+"/"+mycpv
        cpath=self.root+VDB_PATH+"/"+mycpv+"/COUNTER"
        # We write our new counter value to a new file that gets moved into
        # place to avoid filesystem corruption on XFS (unexpected reboot.)
        if os.path.exists(cpath):
            cfile=open(cpath, "r")
            # NOTE(review): the try: header and handlers around the read
            # are missing from this extract.
            counter=long(cfile.readline())
            print "portage: COUNTER for",mycpv,"was corrupted; resetting to value of 0"
        elif os.path.exists(cdir):
            mys = pkgsplit(mycpv)
            myl = self.match(mys[0],use_cache=0)
            # NOTE(review): intervening lines are missing from this
            # extract.
            # Only one package... Counter doesn't matter.
            write_atomic(cpath, "1")
            except SystemExit, e:
            except Exception, e:
            writemsg("!!! COUNTER file is missing for "+str(mycpv)+" in /var/db.\n",
            writemsg("!!! Please run /usr/lib/portage/bin/fix-db.py or\n",
            writemsg("!!! unmerge this exact version.\n", noiselevel=-1)
            writemsg("!!! %s\n" % e, noiselevel=-1)
            writemsg("!!! COUNTER file is missing for "+str(mycpv)+" in /var/db.\n",
            writemsg("!!! Please run /usr/lib/portage/bin/fix-db.py or\n",
            writemsg("!!! remerge the package.\n", noiselevel=-1)
        # update new global counter file
        write_atomic(cpath, str(counter))
        # NOTE(review): the final return is missing from this extract.

    def cpv_inject(self,mycpv):
        "injects a real package into our on-disk database; assumes mycpv is valid and doesn't already exist"
        os.makedirs(self.root+VDB_PATH+"/"+mycpv)
        counter = self.counter_tick(self.root, mycpv=mycpv)
        # write local package counter so that emerge clean does the right thing
        write_atomic(os.path.join(self.root, VDB_PATH, mycpv, "COUNTER"), str(counter))

    def isInjected(self,mycpv):
        # An injected package has a vdb entry with an INJECTED marker or
        # no CONTENTS file.
        # NOTE(review): the return lines are missing from this extract.
        if self.cpv_exists(mycpv):
            if os.path.exists(self.root+VDB_PATH+"/"+mycpv+"/INJECTED"):
            if not os.path.exists(self.root+VDB_PATH+"/"+mycpv+"/CONTENTS"):

    def move_ent(self,mylist):
        """Move installed packages from one cat/pkg name to another
        (package move update)."""
        # NOTE(review): the unpacking of origcp/newcp from mylist is
        # missing from this extract.
        for cp in [origcp,newcp]:
            if not (isvalidatom(cp) and isjustname(cp)):
                raise portage_exception.InvalidPackageName(cp)
        origmatches=self.match(origcp,use_cache=0)
        # NOTE(review): the empty-match early return is missing.
        for mycpv in origmatches:
            mycpsplit=catpkgsplit(mycpv)
            mynewcpv=newcp+"-"+mycpsplit[2]
            mynewcat=newcp.split("/")[0]
            # Only append the revision when it isn't the implicit r0.
            if mycpsplit[3]!="r0":
                mynewcpv += "-"+mycpsplit[3]
            mycpsplit_new = catpkgsplit(mynewcpv)
            origpath=self.root+VDB_PATH+"/"+mycpv
            # NOTE(review): the continue for a missing origpath is absent
            # from this extract.
            if not os.path.exists(origpath):
            writemsg_stdout("@")
            if not os.path.exists(self.root+VDB_PATH+"/"+mynewcat):
                #create the directory
                os.makedirs(self.root+VDB_PATH+"/"+mynewcat)
            newpath=self.root+VDB_PATH+"/"+mynewcpv
            if os.path.exists(newpath):
                #dest already exists; keep this puppy where it is.
            os.rename(origpath, newpath)
            # We need to rename the ebuild now.
            old_pf = catsplit(mycpv)[1]
            new_pf = catsplit(mynewcpv)[1]
            if new_pf != old_pf:
                # NOTE(review): the try: header around the rename is
                # missing from this extract.
                os.rename(os.path.join(newpath, old_pf + ".ebuild"),
                    os.path.join(newpath, new_pf + ".ebuild"))
                if e.errno != errno.ENOENT:
            write_atomic(os.path.join(newpath, "PF"), new_pf+"\n")
            write_atomic(os.path.join(newpath, "CATEGORY"), mynewcat+"\n")
        fixdbentries([mylist], newpath)

    def update_ents(self, update_iter):
        """Run fixdbentries on all installed packages (time consuming). Like
        fixpackages, this should be run from a helper script and display
        a progress indicator."""
        dbdir = os.path.join(self.root, VDB_PATH)
        for catdir in listdir(dbdir):
            catdir = dbdir+"/"+catdir
            if os.path.isdir(catdir):
                for pkgdir in listdir(catdir):
                    pkgdir = catdir+"/"+pkgdir
                    if os.path.isdir(pkgdir):
                        fixdbentries(update_iter, pkgdir)

    def move_slot_ent(self,mylist):
        """Rewrite the SLOT file of installed packages matching an atom
        (slot move update)."""
        # NOTE(review): the unpacking of pkg/origslot/newslot from mylist
        # is missing from this extract.
        if not isvalidatom(pkg):
            raise portage_exception.InvalidAtom(pkg)
        origmatches=self.match(pkg,use_cache=0)
        # NOTE(review): the empty-match early return is missing.
        for mycpv in origmatches:
            origpath=self.root+VDB_PATH+"/"+mycpv
            # NOTE(review): the continue for a missing origpath is absent.
            if not os.path.exists(origpath):
            slot=grabfile(origpath+"/SLOT");
            # NOTE(review): the empty-slot guard is missing from this
            # extract.
            if (slot[0]!=origslot):
            # NOTE(review): continue/else lines are missing here.
            writemsg_stdout("s")
            write_atomic(os.path.join(origpath, "SLOT"), newslot+"\n")

    def cp_list(self,mycp,use_cache=1):
        """Return installed cpvs for cat/pkg mycp, using a per-category
        mtime-validated cache."""
        mysplit=mycp.split("/")
        if mysplit[0] == '*':
            mysplit[0] = mysplit[0][1:]
        # NOTE(review): the try: header and OSError fallback around the
        # stat call are missing from this extract.
        mystat=os.stat(self.root+VDB_PATH+"/"+mysplit[0])[stat.ST_MTIME]
        if use_cache and self.cpcache.has_key(mycp):
            cpc=self.cpcache[mycp]
            # NOTE(review): the mtime comparison and cached return are
            # missing from this extract.
        list=listdir(self.root+VDB_PATH+"/"+mysplit[0],EmptyOnError=1)
        # NOTE(review): the None-check, returnme initialization and the
        # loop header over list are missing from this extract.
        if x.startswith("."):
        #writemsg(red("INCOMPLETE MERGE:")+str(x[len("-MERGING-"):])+"\n")
        # Report entries that don't split as a valid package name.
        self.invalidentry(self.root+VDB_PATH+"/"+mysplit[0]+"/"+x)
        if len(mysplit) > 1:
            if ps[0]==mysplit[1]:
                returnme.append(mysplit[0]+"/"+x)
        # NOTE(review): the use_cache guard for the store below is missing.
        self.cpcache[mycp]=[mystat,returnme]
        elif self.cpcache.has_key(mycp):
            del self.cpcache[mycp]
        # NOTE(review): the final return is missing from this extract.

    def cpv_all(self,use_cache=1):
        """Walk the vdb and return every installed cpv."""
        # NOTE(review): returnme initialization is missing from this
        # extract.
        basepath = self.root+VDB_PATH+"/"
        for x in self.categories:
            for y in listdir(basepath+x,EmptyOnError=1):
                if y.startswith("."):
                # NOTE(review): the continue and the subpath assignment
                # are missing from this extract.
                # -MERGING- should never be a cpv, nor should files.
                if os.path.isdir(basepath+subpath) and (pkgsplit(y) is not None):
                    returnme += [subpath]
        # NOTE(review): the final return is missing from this extract.

    def cp_all(self,use_cache=1):
        """Return the distinct cat/pkg keys of all installed packages."""
        mylist = self.cpv_all(use_cache=use_cache)
        # NOTE(review): the dict initialization and loop header are
        # missing from this extract.
        mysplit=catpkgsplit(y)
        # Report unparseable entries rather than crashing.
        self.invalidentry(self.root+VDB_PATH+"/"+y)
        d[mysplit[0]+"/"+mysplit[1]] = None
        # NOTE(review): the final return (d.keys()) is missing.

    def checkblockers(self,origdep):
        # NOTE(review): the body is missing from this extract.

    def match(self,origdep,use_cache=1):
        "caching match function"
        # NOTE(review): the dep_expand call's assignment line is split;
        # its first line is missing from this extract.
            origdep, mydb=self, use_cache=use_cache, settings=self.settings)
        mykey=dep_getkey(mydep)
        mycat=mykey.split("/")[0]
        # Uncached path: drop any stale cache entries for this category.
        if self.matchcache.has_key(mycat):
            del self.mtdircache[mycat]
            del self.matchcache[mycat]
        mymatch = match_from_list(mydep,
            self.cp_list(mykey, use_cache=use_cache))
        myslot = portage_dep.dep_getslot(mydep)
        if myslot is not None:
            mymatch = [cpv for cpv in mymatch \
                if self.aux_get(cpv, ["SLOT"])[0] == myslot]
        # NOTE(review): the uncached return and the try: header below are
        # missing from this extract.
        curmtime=os.stat(self.root+VDB_PATH+"/"+mycat)[stat.ST_MTIME]
        except (IOError, OSError):
        # Invalidate the per-category cache when its directory changed.
        if not self.matchcache.has_key(mycat) or not self.mtdircache[mycat]==curmtime:
            self.mtdircache[mycat]=curmtime
            self.matchcache[mycat]={}
        if not self.matchcache[mycat].has_key(mydep):
            mymatch=match_from_list(mydep,self.cp_list(mykey,use_cache=use_cache))
            myslot = portage_dep.dep_getslot(mydep)
            if myslot is not None:
                mymatch = [cpv for cpv in mymatch \
                    if self.aux_get(cpv, ["SLOT"])[0] == myslot]
            self.matchcache[mycat][mydep]=mymatch
        # Return a copy so callers can't mutate the cache.
        return self.matchcache[mycat][mydep][:]

    def findname(self, mycpv):
        # Path of the ebuild preserved inside the vdb entry.
        return self.root+VDB_PATH+"/"+str(mycpv)+"/"+mycpv.split("/")[1]+".ebuild"

    def flush_cache(self):
        """If the current user has permission and the internal aux_get cache has
        been updated, save it to disk and mark it unmodified. This is called
        by emerge after it has loaded the full vdb for use in dependency
        calculations. Currently, the cache is only written if the user has
        superuser privileges (since that's required to obtain a lock), but all
        users have read access and benefit from faster metadata lookups (as
        long as at least part of the cache is still valid)."""
        # NOTE(review): the secpass condition terminating this guard is
        # missing from this extract.
        if self._aux_cache is not None and \
            self._aux_cache["modified"] and \
            # Prune cache entries for packages that are no longer installed.
            valid_nodes = set(self.cpv_all())
            for cpv in self._aux_cache["packages"].keys():
                if cpv not in valid_nodes:
                    del self._aux_cache["packages"][cpv]
            del self._aux_cache["modified"]
            # NOTE(review): the try: header around the write is missing.
            f = atomic_ofstream(self._aux_cache_filename)
            cPickle.dump(self._aux_cache, f, -1)
            # NOTE(review): the f.close() line is missing from this
            # extract.
            portage_util.apply_secpass_permissions(
                self._aux_cache_filename, gid=portage_gid, mode=0644)
            except (IOError, OSError), e:
            self._aux_cache["modified"] = False

    def aux_get(self, mycpv, wants):
        """This automatically caches selected keys that are frequently needed
        by emerge for dependency calculations. The cached metadata is
        considered valid if the mtime of the package directory has not changed
        since the data was cached. The cache is stored in a pickled dict
        object with the following format:

        {version:"1", "packages":{cpv1:(mtime,{k1,v1, k2,v2, ...}), cpv2...}}

        If an error occurs while loading the cache pickle or the version is
        unrecognized, the cache will simple be recreated from scratch (it is
        completely disposable).
        """
        # Bypass the cache entirely when none of the wanted keys are
        # cacheable.
        if not self._aux_cache_keys.intersection(wants):
            return self._aux_get(mycpv, wants)
        if self._aux_cache is None:
            # NOTE(review): the try: header is missing from this extract.
            f = open(self._aux_cache_filename)
            mypickle = cPickle.Unpickler(f)
            # Disable class instantiation from the pickle for safety.
            mypickle.find_global = None
            self._aux_cache = mypickle.load()
            # NOTE(review): the f.close()/del lines are missing here.
            except (IOError, OSError, EOFError, cPickle.UnpicklingError):
            # Recreate a disposable cache when the pickle is unusable.
            if not self._aux_cache or \
                not isinstance(self._aux_cache, dict) or \
                self._aux_cache.get("version") != self._aux_cache_version or \
                not self._aux_cache.get("packages"):
                self._aux_cache = {"version":self._aux_cache_version}
                self._aux_cache["packages"] = {}
            self._aux_cache["modified"] = False
        mydir = os.path.join(self.root, VDB_PATH, mycpv)
        # NOTE(review): the try: header is missing from this extract.
        mydir_stat = os.stat(mydir)
        if e.errno != errno.ENOENT:
        raise KeyError(mycpv)
        # Truncate to whole seconds to match the cached value.
        mydir_mtime = long(mydir_stat.st_mtime)
        pkg_data = self._aux_cache["packages"].get(mycpv)
        # NOTE(review): mydata initialization and the pkg_data guard are
        # missing from this extract.
        cache_mtime, metadata = pkg_data
        cache_valid = cache_mtime == mydir_mtime
        if cache_valid and set(metadata) != self._aux_cache_keys:
            # Allow self._aux_cache_keys to change without a cache version
            # bump.
            # NOTE(review): the cache-invalidation lines are missing here.
        mydata.update(metadata)
        pull_me = set(wants).difference(self._aux_cache_keys)
        # NOTE(review): the else branch header is missing; the line below
        # is the cache-miss path.
        pull_me = self._aux_cache_keys.union(wants)
        # pull any needed data and cache it
        aux_keys = list(pull_me)
        for k, v in izip(aux_keys, self._aux_get(mycpv, aux_keys)):
            # NOTE(review): the assignment into mydata and the
            # cache-store guard are missing from this extract.
        for aux_key in self._aux_cache_keys:
            cache_data[aux_key] = mydata[aux_key]
        self._aux_cache["packages"][mycpv] = (mydir_mtime, cache_data)
        self._aux_cache["modified"] = True
        return [mydata[x] for x in wants]

    def _aux_get(self, mycpv, wants):
        """Uncached metadata read: one file per key inside the package's
        vdb directory."""
        mydir = os.path.join(self.root, VDB_PATH, mycpv)
        # NOTE(review): the try: header is missing from this extract.
        if not stat.S_ISDIR(os.stat(mydir).st_mode):
            raise KeyError(mycpv)
        if e.errno == errno.ENOENT:
            raise KeyError(mycpv)
        # NOTE(review): results initialization, the loop over wants and
        # the missing-file fallback are absent from this extract.
        myf = open(os.path.join(mydir, x), "r")
        # Collapse all whitespace runs to single spaces.
        myd = " ".join(myd.split())
        # An empty EAPI file means EAPI 0.
        if x == "EAPI" and not myd:
        # NOTE(review): the final return is missing from this extract.

    def aux_update(self, cpv, values):
        """Write metadata values into an installed package's vdb entry."""
        cat, pkg = cpv.split("/")
        mylink = dblink(cat, pkg, self.root, self.settings,
            treetype="vartree", vartree=self.vartree)
        if not mylink.exists():
            # NOTE(review): the raise line is missing from this extract.
        for k, v in values.iteritems():
            mylink.setfile(k, v)

    def counter_tick(self,myroot,mycpv=None):
        # Increment and return the global package counter.
        return self.counter_tick_core(myroot,incrementing=1,mycpv=mycpv)

    def get_counter_tick_core(self,myroot,mycpv=None):
        # Peek at the next counter value without writing it.
        return self.counter_tick_core(myroot,incrementing=0,mycpv=mycpv)+1

    def counter_tick_core(self,myroot,incrementing=1,mycpv=None):
        "This method will grab the next COUNTER value and record it back to the global file. Returns new counter value."
        cpath=myroot+"var/cache/edb/counter"
        # NOTE(review): changed/min_counter initialization and the mycpv
        # guard are missing from this extract.
        mysplit = pkgsplit(mycpv)
        for x in self.match(mysplit[0],use_cache=0):
            # NOTE(review): the self-skip and try: header are missing.
            old_counter = long(self.aux_get(x,["COUNTER"])[0])
            writemsg("COUNTER '%d' '%s'\n" % (old_counter, x),1)
            except (ValueError, KeyError): # valueError from long(), KeyError from aux_get
            writemsg("!!! BAD COUNTER in '%s'\n" % (x), noiselevel=-1)
            if old_counter > min_counter:
                min_counter = old_counter
        # We write our new counter value to a new file that gets moved into
        # place to avoid filesystem corruption.
        find_counter = ("find '%s' -type f -name COUNTER | " + \
            "while read f; do echo $(<\"${f}\"); done | " + \
            "sort -n | tail -n1") % os.path.join(self.root, VDB_PATH)
        if os.path.exists(cpath):
            cfile=open(cpath, "r")
            # NOTE(review): the try: header is missing from this extract.
            counter=long(cfile.readline())
            except (ValueError,OverflowError):
            # Rebuild the counter from the per-package COUNTER files.
            counter = long(commands.getoutput(find_counter).strip())
            writemsg("!!! COUNTER was corrupted; resetting to value of %d\n" % counter,
            except (ValueError,OverflowError):
            writemsg("!!! COUNTER data is corrupt in pkg db. The values need to be\n",
            writemsg("!!! corrected/normalized so that portage can operate properly.\n",
            writemsg("!!! A simple solution is not yet available so try #gentoo on IRC.\n")
            # NOTE(review): the sys.exit and else branch header are
            # missing from this extract.
            counter = long(commands.getoutput(find_counter).strip())
            writemsg("!!! Global counter missing. Regenerated from counter files to: %s\n" % counter,
            except ValueError: # Value Error for long(), probably others for commands.getoutput
            writemsg("!!! Initializing global counter.\n", noiselevel=-1)
        # Never allow the global counter to fall behind the per-package
        # maximum.
        if counter < min_counter:
            counter = min_counter+1000
        # NOTE(review): the changed flag assignment and increment lines
        # are missing from this extract.
        if incrementing or changed:
            # update new global counter file
            write_atomic(cpath, str(counter))
        # NOTE(review): the final return is missing from this extract.
# NOTE(review): original line numbers are embedded and many lines are elided
# (clone-branch `if`s, `try:` openers, returns) — structure shown is partial.
5427 class vartree(object):
5428 "this tree will scan a var/db/pkg database located at root (passed to init)"
# Construct either as a clone of an existing vartree or fresh from `root`;
# the backing database handle is a vardbapi instance.
5429 def __init__(self, root="/", virtual=None, clone=None, categories=None,
5432 self.root = clone.root[:]
5433 self.dbapi = copy.deepcopy(clone.dbapi)
5435 self.settings = config(clone=clone.settings)
5438 if settings is None:
# Fall back to the module-global `settings` object.
5439 settings = globals()["settings"]
5440 self.settings = settings # for key_expand calls
5441 if categories is None:
5442 categories = settings.categories
5443 self.dbapi = vardbapi(self.root, categories=categories,
5444 settings=settings, vartree=self)
# Thin wrappers around the dbapi (bodies elided in this listing).
5447 def zap(self,mycpv):
5450 def inject(self,mycpv):
# Return the expanded PROVIDE entries of an installed package, evaluated
# against its recorded USE flags; parse errors are reported, not raised.
5453 def get_provide(self,mycpv):
5457 mylines, myuse = self.dbapi.aux_get(mycpv, ["PROVIDE","USE"])
5459 myuse = myuse.split()
5460 mylines = flatten(portage_dep.use_reduce(portage_dep.paren_reduce(mylines), uselist=myuse))
5461 for myprovide in mylines:
5462 mys = catpkgsplit(myprovide)
5464 mys = myprovide.split("/")
5465 myprovides += [mys[0] + "/" + mys[1]]
5467 except SystemExit, e:
5469 except Exception, e:
5470 mydir = os.path.join(self.root, VDB_PATH, mycpv)
5471 writemsg("\nParse Error reading PROVIDE and USE in '%s'\n" % mydir,
5474 writemsg("Possibly Invalid: '%s'\n" % str(mylines),
5476 writemsg("Exception: %s\n\n" % str(e), noiselevel=-1)
# Map each PROVIDE key to the list of installed packages providing it.
5479 def get_all_provides(self):
5481 for node in self.getallcpv():
5482 for mykey in self.get_provide(node):
5483 if myprovides.has_key(mykey):
5484 myprovides[mykey] += [node]
5486 myprovides[mykey] = [node]
5489 def dep_bestmatch(self,mydep,use_cache=1):
5490 "compatibility method -- all matches, not just visible ones"
5491 #mymatch=best(match(dep_expand(mydep,self.dbapi),self.dbapi))
5492 mymatch = best(self.dbapi.match(
5493 dep_expand(mydep, mydb=self.dbapi, settings=self.settings),
5494 use_cache=use_cache))
5500 def dep_match(self,mydep,use_cache=1):
5501 "compatibility method -- we want to see all matches, not just visible ones"
5502 #mymatch=match(mydep,self.dbapi)
5503 mymatch=self.dbapi.match(mydep,use_cache=use_cache)
5509 def exists_specific(self,cpv):
5510 return self.dbapi.cpv_exists(cpv)
5512 def getallcpv(self):
5513 """temporary function, probably to be renamed --- Gets a list of all
5514 category/package-versions installed on the system."""
5515 return self.dbapi.cpv_all()
5517 def getallnodes(self):
5518 """new behavior: these are all *unmasked* nodes. There may or may not be available
5519 masked package for nodes in this nodes list."""
5520 return self.dbapi.cp_all()
# True if an installed entry in the vdb matches the (expanded) cpv;
# invalid vdb entries encountered during the scan are reported.
5522 def exists_specific_cat(self,cpv,use_cache=1):
5523 cpv = key_expand(cpv, mydb=self.dbapi, use_cache=use_cache,
5524 settings=self.settings)
5528 mylist=listdir(self.root+VDB_PATH+"/"+a[0],EmptyOnError=1)
5532 self.dbapi.invalidentry(self.root+VDB_PATH+"/"+a[0]+"/"+x)
5538 def getebuildpath(self,fullpackage):
5539 cat,package=fullpackage.split("/")
# Path of the ebuild copy recorded inside the vdb entry.
5540 return self.root+VDB_PATH+"/"+fullpackage+"/"+package+".ebuild"
# Return [cpv, [cat, pn, ver, rev]] entries for installed versions of mykey.
5542 def getnode(self,mykey,use_cache=1):
5543 mykey = key_expand(mykey, mydb=self.dbapi, use_cache=use_cache,
5544 settings=self.settings)
5547 mysplit=mykey.split("/")
5548 mydirlist=listdir(self.root+VDB_PATH+"/"+mysplit[0],EmptyOnError=1)
5551 mypsplit=pkgsplit(x)
5553 self.dbapi.invalidentry(self.root+VDB_PATH+"/"+mysplit[0]+"/"+x)
5555 if mypsplit[0]==mysplit[1]:
5556 appendme=[mysplit[0]+"/"+x,[mysplit[0],mypsplit[0],mypsplit[1],mypsplit[2]]]
5557 returnme.append(appendme)
5561 def getslot(self,mycatpkg):
5562 "Get a slot for a catpkg; assume it exists."
5564 return self.dbapi.aux_get(mycatpkg, ["SLOT"])[0]
5568 def hasnode(self,mykey,use_cache):
5569 """Does the particular node (cat/pkg key) exist?"""
5570 mykey = key_expand(mykey, mydb=self.dbapi, use_cache=use_cache,
5571 settings=self.settings)
5572 mysplit=mykey.split("/")
5573 mydirlist=listdir(self.root+VDB_PATH+"/"+mysplit[0],EmptyOnError=1)
5575 mypsplit=pkgsplit(x)
5577 self.dbapi.invalidentry(self.root+VDB_PATH+"/"+mysplit[0]+"/"+x)
5579 if mypsplit[0]==mysplit[1]:
# NOTE(review): tail of the module-level `auxdbkeys` tuple — the opening
# `auxdbkeys=(` line is elided in this listing. These are the ordered
# metadata cache keys; UNUSED_* slots are placeholders kept for layout.
'DEPEND', 'RDEPEND', 'SLOT', 'SRC_URI',
'RESTRICT', 'HOMEPAGE', 'LICENSE', 'DESCRIPTION',
'KEYWORDS', 'INHERITED', 'IUSE', 'CDEPEND',
'PDEPEND', 'PROVIDE', 'EAPI',
'UNUSED_01', 'UNUSED_02', 'UNUSED_03', 'UNUSED_04',
'UNUSED_05', 'UNUSED_06', 'UNUSED_07',
# Cached length of the key tuple.
5594 auxdbkeylen=len(auxdbkeys)
# Module-level helper: iterate every live portdbapi instance so its caches
# can be closed. NOTE(review): the per-instance close call line is elided
# in this listing — presumably i.close_caches(); confirm against full source.
5596 def close_portdbapi_caches():
5597 for i in portdbapi.portdbapi_instances:
# NOTE(review): original line numbers embedded; many lines elided (try/except
# openers, returns, locals). Comments below describe only what is visible.
5601 class portdbapi(dbapi):
5602 """this tree will scan a portage directory located at root (passed to init)"""
# Registry of all instances, used by close_portdbapi_caches().
5603 portdbapi_instances = []
5605 def __init__(self,porttree_root,mysettings=None):
5606 portdbapi.portdbapi_instances.append(self)
5609 self.mysettings = mysettings
5612 self.mysettings = config(clone=settings)
5614 # This is strictly for use in aux_get() doebuild calls when metadata
5615 # is generated by the depend phase. It's safest to use a clone for
5616 # this purpose because doebuild makes many changes to the config
5617 # instance that is passed in.
5618 self.doebuild_settings = config(clone=self.mysettings)
# GPG Manifest verification state; level/verifier stay None unless the
# "gpg" feature is enabled below.
5620 self.manifestVerifyLevel = None
5621 self.manifestVerifier = None
5622 self.manifestCache = {} # {location: [stat, md5]}
5623 self.manifestMissingCache = []
5625 if "gpg" in self.mysettings.features:
5626 self.manifestVerifyLevel = portage_gpg.EXISTS
5627 if "strict" in self.mysettings.features:
5628 self.manifestVerifyLevel = portage_gpg.MARGINAL
5629 self.manifestVerifier = portage_gpg.FileChecker(self.mysettings["PORTAGE_GPG_DIR"], "gentoo.gpg", minimumTrust=self.manifestVerifyLevel)
5630 elif "severe" in self.mysettings.features:
5631 self.manifestVerifyLevel = portage_gpg.TRUSTED
5632 self.manifestVerifier = portage_gpg.FileChecker(self.mysettings["PORTAGE_GPG_DIR"], "gentoo.gpg", requireSignedRing=True, minimumTrust=self.manifestVerifyLevel)
5634 self.manifestVerifier = portage_gpg.FileChecker(self.mysettings["PORTAGE_GPG_DIR"], "gentoo.gpg", minimumTrust=self.manifestVerifyLevel)
5636 #self.root=settings["PORTDIR"]
5637 self.porttree_root = os.path.realpath(porttree_root)
5639 self.depcachedir = self.mysettings.depcachedir[:]
# Optional tmpfs path; checks below validate existence and rw access.
5641 self.tmpfs = self.mysettings["PORTAGE_TMPFS"]
5642 if self.tmpfs and not os.path.exists(self.tmpfs):
5644 if self.tmpfs and not os.access(self.tmpfs, os.W_OK):
5646 if self.tmpfs and not os.access(self.tmpfs, os.R_OK):
5649 self.eclassdb = eclass_cache.cache(self.porttree_root,
5650 overlays=self.mysettings["PORTDIR_OVERLAY"].split())
5652 self.metadbmodule = self.mysettings.load_best_module("portdbapi.metadbmodule")
5654 #if the portdbapi is "frozen", then we assume that we can cache everything (that no updates to it are happening)
# PORTDIR first, then each overlay (realpath'd) — search order matters.
5658 self.porttrees = [self.porttree_root] + \
5659 [os.path.realpath(t) for t in self.mysettings["PORTDIR_OVERLAY"].split()]
5660 self.auxdbmodule = self.mysettings.load_best_module("portdbapi.auxdbmodule")
5662 self._init_cache_dirs()
5663 # XXX: REMOVE THIS ONCE UNUSED_0 IS YANKED FROM auxdbkeys
5665 filtered_auxdbkeys = filter(lambda x: not x.startswith("UNUSED_0"), auxdbkeys)
# One metadata cache per tree; the metadata_overlay branch layers a
# read-only db under a volatile read-write db.
5667 from cache import metadata_overlay, volatile
5668 for x in self.porttrees:
5669 db_ro = self.auxdbmodule(self.depcachedir, x,
5670 filtered_auxdbkeys, gid=portage_gid, readonly=True)
5671 self.auxdb[x] = metadata_overlay.database(
5672 self.depcachedir, x, filtered_auxdbkeys,
5673 gid=portage_gid, db_rw=volatile.database,
5676 for x in self.porttrees:
5677 # location, label, auxdbkeys
5678 self.auxdb[x] = self.auxdbmodule(
5679 self.depcachedir, x, filtered_auxdbkeys, gid=portage_gid)
5680 # Selectively cache metadata in order to optimize dep matching.
5681 self._aux_cache_keys = set(["EAPI", "KEYWORDS", "SLOT"])
5682 self._aux_cache = {}
5684 def _init_cache_dirs(self):
5685 """Create /var/cache/edb/dep and adjust permissions for the portage
5693 for mydir in (self.depcachedir,):
5694 if portage_util.ensure_dirs(mydir, gid=portage_gid, mode=dirmode, mask=modemask):
5695 writemsg("Adjusting permissions recursively: '%s'\n" % mydir,
5698 raise # bail out on the first error that occurs during recursion
5699 if not apply_recursive_permissions(mydir,
5700 gid=portage_gid, dirmode=dirmode, dirmask=modemask,
5701 filemode=filemode, filemask=modemask, onerror=onerror):
5702 raise portage_exception.OperationNotPermitted(
5703 "Failed to apply recursive permissions for the portage group.")
5704 except portage_exception.PortageException, e:
# Sync every per-tree metadata cache to disk.
5707 def close_caches(self):
5708 for x in self.auxdb.keys():
5709 self.auxdb[x].sync()
5712 def flush_cache(self):
5713 for x in self.auxdb.values():
# Path of the old-style digest file for mycpv's package directory.
5716 def finddigest(self,mycpv):
5718 mydig = self.findname2(mycpv)[0]
5721 mydigs = mydig.split("/")[:-1]
5722 mydig = "/".join(mydigs)
5723 mysplit = mycpv.split("/")
5726 return mydig+"/files/digest-"+mysplit[-1]
5728 def findname(self,mycpv):
5729 return self.findname2(mycpv)[0]
5731 def findname2(self, mycpv, mytree=None):
5733 Returns the location of the CPV, and what overlay it was in.
5734 Searches overlays first, then PORTDIR; this allows us to return the first
5735 matching file. As opposed to starting in portdir and then doing overlays
5736 second, we would have to exhaustively search the overlays until we found
5741 mysplit=mycpv.split("/")
5742 psplit=pkgsplit(mysplit[1])
5747 mytrees = self.porttrees[:]
5751 file=x+"/"+mysplit[0]+"/"+psplit[0]+"/"+mysplit[1]+".ebuild"
5752 if os.access(file, os.R_OK):
5756 def aux_get(self, mycpv, mylist, mytree=None):
5757 "stub code for returning auxilliary db information, such as SLOT, DEPEND, etc."
5758 'input: "sys-apps/foo-1.0",["SLOT","DEPEND","HOMEPAGE"]'
5759 'return: ["0",">=sys-libs/bar-1.0","http://www.foo.com"] or raise KeyError if error'
# Fast path: serve entirely from the in-memory cache when every requested
# key is one of _aux_cache_keys and no specific tree was requested.
5761 if not mytree and not set(mylist).difference(self._aux_cache_keys):
5762 aux_cache = self._aux_cache.get(mycpv)
5763 if aux_cache is not None:
5764 return [aux_cache[x] for x in mylist]
5766 global auxdbkeys,auxdbkeylen
5767 cat,pkg = mycpv.split("/", 1)
5769 myebuild, mylocation = self.findname2(mycpv, mytree)
5772 writemsg("!!! aux_get(): ebuild path for '%(cpv)s' not specified:\n" % {"cpv":mycpv},
5774 writemsg("!!! %s\n" % myebuild, noiselevel=1)
5775 raise KeyError(mycpv)
# Optional GPG verification of the package directory's Manifest.
5777 myManifestPath = "/".join(myebuild.split("/")[:-1])+"/Manifest"
5778 if "gpg" in self.mysettings.features:
5780 mys = portage_gpg.fileStats(myManifestPath)
5781 if (myManifestPath in self.manifestCache) and \
5782 (self.manifestCache[myManifestPath] == mys):
5784 elif self.manifestVerifier:
5785 if not self.manifestVerifier.verify(myManifestPath):
5786 # Verification failed the desired level.
5787 raise portage_exception.UntrustedSignature, "Untrusted Manifest: %(manifest)s" % {"manifest":myManifestPath}
5789 if ("severe" in self.mysettings.features) and \
5790 (mys != portage_gpg.fileStats(myManifestPath)):
5791 raise portage_exception.SecurityViolation, "Manifest changed: %(manifest)s" % {"manifest":myManifestPath}
5793 except portage_exception.InvalidSignature, e:
5794 if ("strict" in self.mysettings.features) or \
5795 ("severe" in self.mysettings.features):
5797 writemsg("!!! INVALID MANIFEST SIGNATURE DETECTED: %(manifest)s\n" % {"manifest":myManifestPath})
5798 except portage_exception.MissingSignature, e:
5799 if ("severe" in self.mysettings.features):
5801 if ("strict" in self.mysettings.features):
5802 if myManifestPath not in self.manifestMissingCache:
5803 writemsg("!!! WARNING: Missing signature in: %(manifest)s\n" % {"manifest":myManifestPath})
5804 self.manifestMissingCache.insert(0,myManifestPath)
5805 except (OSError,portage_exception.FileNotFound), e:
5806 if ("strict" in self.mysettings.features) or \
5807 ("severe" in self.mysettings.features):
5808 raise portage_exception.SecurityViolation, "Error in verification of signatures: %(errormsg)s" % {"errormsg":str(e)}
5809 writemsg("!!! Manifest is missing or inaccessable: %(manifest)s\n" % {"manifest":myManifestPath},
5813 if os.access(myebuild, os.R_OK):
5814 emtime=os.stat(myebuild)[stat.ST_MTIME]
5816 writemsg("!!! aux_get(): ebuild for '%(cpv)s' does not exist at:\n" % {"cpv":mycpv},
5818 writemsg("!!! %s\n" % myebuild,
# Cache validity: stale if the ebuild mtime changed or any recorded
# eclass data no longer matches the eclass db.
5823 mydata = self.auxdb[mylocation][mycpv]
5824 if emtime != long(mydata.get("_mtime_", 0)):
5826 elif len(mydata.get("_eclasses_", [])) > 0:
5827 doregen = not self.eclassdb.is_eclass_data_valid(mydata["_eclasses_"])
5835 try: del self.auxdb[mylocation][mycpv]
5836 except KeyError: pass
5838 writemsg("auxdb is valid: "+str(not doregen)+" "+str(pkg)+"\n", 2)
# Regenerate metadata by running the ebuild's depend phase.
5841 writemsg("doregen: %s %s\n" % (doregen,mycpv), 2)
5842 writemsg("Generating cache entry(0) for: "+str(myebuild)+"\n",1)
5844 self.doebuild_settings.reset()
5846 myret = doebuild(myebuild, "depend",
5847 self.doebuild_settings["ROOT"], self.doebuild_settings,
5848 dbkey=mydata, tree="porttree", mydbapi=self)
5849 if myret != os.EX_OK:
5850 raise KeyError(mycpv)
5852 if "EAPI" not in mydata or not mydata["EAPI"].strip():
5853 mydata["EAPI"] = "0"
5855 if not eapi_is_supported(mydata["EAPI"]):
5856 # if newer version, wipe everything and negate eapi
5857 eapi = mydata["EAPI"]
5859 map(lambda x:mydata.setdefault(x, ""), auxdbkeys)
# Leading "-" marks the EAPI as unsupported in the cache entry.
5860 mydata["EAPI"] = "-"+eapi
5862 if mydata.get("INHERITED", False):
5863 mydata["_eclasses_"] = self.eclassdb.get_eclass_data(mydata["INHERITED"].split())
5865 mydata["_eclasses_"] = {}
5867 del mydata["INHERITED"]
5869 mydata["_mtime_"] = emtime
5871 self.auxdb[mylocation][mycpv] = mydata
5873 if not mydata.setdefault("EAPI", "0"):
5874 mydata["EAPI"] = "0"
5876 #finally, we look at our internal cache entry and return the requested data.
# INHERITED is reconstructed from the _eclasses_ mapping's keys.
5879 if x == "INHERITED":
5880 returnme.append(' '.join(mydata.get("_eclasses_", {}).keys()))
5882 returnme.append(mydata.get(x,""))
# Refresh the selective in-memory cache used by the fast path above.
5886 for x in self._aux_cache_keys:
5887 aux_cache[x] = mydata.get(x, "")
5888 self._aux_cache[mycpv] = aux_cache
# Return [uris, files]: SRC_URI reduced by USE flags (or all when all=1).
5892 def getfetchlist(self, mypkg, useflags=None, mysettings=None, all=0, mytree=None):
5893 if mysettings is None:
5894 mysettings = self.mysettings
5896 myuris = self.aux_get(mypkg, ["SRC_URI"], mytree=mytree)[0]
5898 print red("getfetchlist():")+" aux_get() error reading "+mypkg+"; aborting."
5901 if useflags is None:
5902 useflags = mysettings["USE"].split()
5904 myurilist = portage_dep.paren_reduce(myuris)
5905 myurilist = portage_dep.use_reduce(myurilist,uselist=useflags,matchall=all)
5906 newuris = flatten(myurilist)
5910 mya = os.path.basename(x)
5911 if not mya in myfiles:
5913 return [newuris, myfiles]
5915 def getfetchsizes(self,mypkg,useflags=None,debug=0):
5916 # returns a filename:size dictionnary of remaining downloads
5917 myebuild = self.findname(mypkg)
5918 pkgdir = os.path.dirname(myebuild)
5919 mf = Manifest(pkgdir, self.mysettings["DISTDIR"])
5920 checksums = mf.getDigests()
5922 if debug: print "[empty/missing/bad digest]: "+mypkg
5925 if useflags is None:
5926 myuris, myfiles = self.getfetchlist(mypkg,all=1)
5928 myuris, myfiles = self.getfetchlist(mypkg,useflags=useflags)
5929 #XXX: maybe this should be improved: take partial downloads
5930 # into account? check checksums?
5931 for myfile in myfiles:
5932 if myfile not in checksums:
5934 writemsg("[bad digest]: missing %s for %s\n" % (myfile, mypkg))
5936 file_path = os.path.join(self.mysettings["DISTDIR"], myfile)
5939 mystat = os.stat(file_path)
5945 existing_size = mystat.st_size
5946 remaining_size = int(checksums[myfile]["size"]) - existing_size
5947 if remaining_size > 0:
5948 # Assume the download is resumable.
5949 filesdict[myfile] = remaining_size
5950 elif remaining_size < 0:
5951 # The existing file is too large and therefore corrupt.
5952 filesdict[myfile] = int(checksums[myfile]["size"])
# Verify every distfile of mypkg against the Manifest digests; collects
# per-file failure reasons (lines that report them are elided here).
5955 def fetch_check(self, mypkg, useflags=None, mysettings=None, all=False):
5958 useflags = mysettings["USE"].split()
5959 myuri, myfiles = self.getfetchlist(mypkg, useflags=useflags, mysettings=mysettings, all=all)
5960 myebuild = self.findname(mypkg)
5961 pkgdir = os.path.dirname(myebuild)
5962 mf = Manifest(pkgdir, self.mysettings["DISTDIR"])
5963 mysums = mf.getDigests()
5967 if not mysums or x not in mysums:
5969 reason = "digest missing"
5972 ok, reason = portage_checksum.verify_all(
5973 os.path.join(self.mysettings["DISTDIR"], x), mysums[x])
5974 except portage_exception.FileNotFound, e:
5976 reason = "File Not Found: '%s'" % str(e)
5978 failures[x] = reason
5983 def getsize(self,mypkg,useflags=None,debug=0):
5984 # returns the total size of remaining downloads
5986 # we use getfetchsizes() now, so this function would be obsoleted
5988 filesdict=self.getfetchsizes(mypkg,useflags=useflags,debug=debug)
5989 if filesdict is None:
5990 return "[empty/missing/bad digest]"
5992 for myfile in filesdict.keys():
5993 mysum+=filesdict[myfile]
5996 def cpv_exists(self,mykey):
5997 "Tells us whether an actual ebuild exists on disk (no masking)"
5998 cps2=mykey.split("/")
5999 cps=catpkgsplit(mykey,silent=0)
6003 if self.findname(cps[0]+"/"+cps2[1]):
# NOTE(review): the `def cp_all(self):` line is elided from this listing.
6009 "returns a list of all keys in our tree"
6011 for x in self.mysettings.categories:
6012 for oroot in self.porttrees:
6013 for y in listdir(oroot+"/"+x,EmptyOnError=1,ignorecvs=1,dirsonly=1):
# List ebuild files found for category/package mycp across all trees.
6019 def p_list(self,mycp):
6021 for oroot in self.porttrees:
6022 for x in listdir(oroot+"/"+mycp,EmptyOnError=1,ignorecvs=1):
6023 if x[-7:]==".ebuild":
6027 def cp_list(self, mycp, use_cache=1, mytree=None):
6028 mysplit=mycp.split("/")
6033 mytrees = self.porttrees
6034 for oroot in mytrees:
6035 for x in listdir(oroot+"/"+mycp,EmptyOnError=1,ignorecvs=1):
6036 if x.endswith(".ebuild"):
6040 writemsg("\nInvalid ebuild name: %s\n" % \
6041 os.path.join(oroot, mycp, x), noiselevel=-1)
# Dict used for de-duplication of cpvs across trees.
6043 d[mysplit[0]+"/"+pf] = None
# NOTE(review): fragment of freeze() — its def line is elided; initializes
# one xcache bucket per supported xmatch level.
6047 for x in ["list-visible","bestmatch-visible","match-visible","match-all"]:
6055 def xmatch(self,level,origdep,mydep=None,mykey=None,mylist=None):
6056 "caching match function; very trick stuff"
6057 #if no updates are being made to the tree, we can consult our xcache...
# Return a copy so callers cannot mutate the cached list.
6060 return self.xcache[level][origdep][:]
6065 #this stuff only runs on first call of xmatch()
6066 #create mydep, mykey from origdep
6067 mydep = dep_expand(origdep, mydb=self, settings=self.mysettings)
6068 mykey=dep_getkey(mydep)
6070 if level=="list-visible":
6071 #a list of all visible packages, not called directly (just by xmatch())
6072 #myval=self.visible(self.cp_list(mykey))
6073 myval=self.gvisible(self.visible(self.cp_list(mykey)))
6074 elif level=="bestmatch-visible":
6075 #dep match -- best match of all visible packages
6076 myval=best(self.xmatch("match-visible",None,mydep=mydep,mykey=mykey))
6077 #get all visible matches (from xmatch()), then choose the best one
6078 elif level=="bestmatch-list":
6079 #dep match -- find best match but restrict search to sublist
6080 myval=best(match_from_list(mydep,mylist))
6081 #no point is calling xmatch again since we're not caching list deps
6082 elif level=="match-list":
6083 #dep match -- find all matches but restrict search to sublist (used in 2nd half of visible())
6084 myval=match_from_list(mydep,mylist)
6085 elif level=="match-visible":
6086 #dep match -- find all visible matches
6087 myval = match_from_list(mydep,
6088 self.xmatch("list-visible", mykey, mydep=mykey, mykey=mykey))
6089 #get all visible packages, then get the matching ones
6090 elif level=="match-all":
6091 #match *all* visible *and* masked packages
6092 myval=match_from_list(mydep,self.cp_list(mykey))
6094 print "ERROR: xmatch doesn't handle",level,"query!"
# If the dep carries a :slot, filter matches down to that SLOT.
6096 myslot = portage_dep.dep_getslot(mydep)
6097 if myslot is not None:
6101 if self.aux_get(cpv, ["SLOT"])[0] == myslot:
6102 slotmatches.append(cpv)
6104 pass # ebuild masked by corruption
# Only list-restricted levels are uncacheable; store under both keys.
6106 if self.frozen and (level not in ["match-list","bestmatch-list"]):
6107 self.xcache[level][mydep]=myval
6108 if origdep and origdep != mydep:
6109 self.xcache[level][origdep] = myval
6112 def match(self,mydep,use_cache=1):
6113 return self.xmatch("match-visible",mydep)
6115 def visible(self,mylist):
6116 """two functions in one. Accepts a list of cpv values and uses the package.mask *and*
6117 packages file to remove invisible entries, returning remaining items. This function assumes
6118 that all entries in mylist have the same category and package name."""
6119 if (mylist is None) or (len(mylist)==0):
6122 #first, we mask out packages in the package.mask file
6124 cpv=catpkgsplit(mykey)
6127 print "visible(): invalid cat/pkg-v:",mykey
6129 mycp=cpv[0]+"/"+cpv[1]
6130 maskdict=self.mysettings.pmaskdict
6131 unmaskdict=self.mysettings.punmaskdict
6132 if maskdict.has_key(mycp):
6133 for x in maskdict[mycp]:
6134 mymatches=self.xmatch("match-all",x)
6135 if mymatches is None:
6136 #error in package.mask file; print warning and continue:
6137 print "visible(): package.mask entry \""+x+"\" is invalid, ignoring..."
# package.unmask entries override package.mask for matching cpvs.
6141 if unmaskdict.has_key(mycp):
6142 for z in unmaskdict[mycp]:
6143 mymatches_unmask=self.xmatch("match-all",z)
6144 if y in mymatches_unmask:
# Profile "packages" file restricts system packages to matching versions.
6153 revmaskdict=self.mysettings.prevmaskdict
6154 if revmaskdict.has_key(mycp):
6155 for x in revmaskdict[mycp]:
6156 #important: only match against the still-unmasked entries...
6157 #notice how we pass "newlist" to the xmatch() call below....
6158 #Without this, ~ deps in the packages files are broken.
6159 mymatches=self.xmatch("match-list",x,mylist=newlist)
6160 if mymatches is None:
6161 #error in packages file; print warning and continue:
6162 print "emerge: visible(): profile packages entry \""+x+"\" is invalid, ignoring..."
6165 while pos<len(newlist):
6166 if newlist[pos] not in mymatches:
6172 def gvisible(self,mylist):
6173 "strip out group-masked (not in current group) entries"
6179 accept_keywords = self.mysettings["ACCEPT_KEYWORDS"].split()
6180 pkgdict = self.mysettings.pkeywordsdict
6181 for mycpv in mylist:
6183 keys, eapi = self.aux_get(mycpv, ["KEYWORDS", "EAPI"])
6186 except portage_exception.PortageException, e:
6187 writemsg("!!! Error: aux_get('%s', ['KEYWORDS', 'EAPI'])\n" % \
6188 mycpv, noiselevel=-1)
6189 writemsg("!!! %s\n" % str(e), noiselevel=-1)
6192 mygroups=keys.split()
6193 # Repoman may modify this attribute as necessary.
6194 pgroups = accept_keywords[:]
# package.keywords entries matching this cpv extend the accepted groups.
6196 cp = dep_getkey(mycpv)
6197 if pkgdict.has_key(cp):
6198 matches = match_to_list(mycpv, pkgdict[cp].keys())
6199 for atom in matches:
6200 pgroups.extend(pkgdict[cp][atom])
6204 # The -* special case should be removed once the tree
6205 # is clean of KEYWORDS=-* crap
6206 if x != "-*" and x.startswith("-"):
6208 inc_pgroups.remove(x[1:])
6211 if x not in inc_pgroups:
6212 inc_pgroups.append(x)
6213 pgroups = inc_pgroups
6219 writemsg("--- WARNING: Package '%s' uses '*' keyword.\n" % mycpv,
# ~* accepts any testing keyword, * any stable, ** anything at all.
6230 if not match and ((hastesting and "~*" in pgroups) or (hasstable and "*" in pgroups) or "**" in pgroups):
6232 if match and eapi_is_supported(eapi):
6233 newlist.append(mycpv)
# NOTE(review): original line numbers embedded; the clone/else branch lines
# are partially elided. Class continues past this listing.
6236 class binarytree(object):
6237 "this tree scans for a list of all packages available in PKGDIR"
6238 def __init__(self, root, pkgdir, virtual=None, settings=None, clone=None):
6240 # XXX This isn't cloning. It's an instance of the same thing.
6241 self.root=clone.root
6242 self.pkgdir=clone.pkgdir
6243 self.dbapi=clone.dbapi
6244 self.populated=clone.populated
6245 self.tree=clone.tree
6246 self.remotepkgs=clone.remotepkgs
6247 self.invalids=clone.invalids
6248 self.settings = clone.settings
6251 #self.pkgdir=settings["PKGDIR"]
6252 self.pkgdir = normalize_path(pkgdir)
6253 self.dbapi = bindbapi(self, settings=settings)
6258 self.settings = settings
# Maps cpv -> relative tbz2 path inside PKGDIR; kept in sync by
# getname()/prevent_collision()/move helpers.
6259 self._pkg_paths = {}
# Apply a package-move update (cat/pkg rename) to matching binary packages:
# rewrites metadata inside each tbz2 and relocates/renames the file.
# NOTE(review): several lines (try:/except openers, locals) are elided.
6261 def move_ent(self,mylist):
6262 if not self.populated:
6267 for cp in [origcp,newcp]:
6268 if not (isvalidatom(cp) and isjustname(cp)):
6269 raise portage_exception.InvalidPackageName(cp)
6270 origcat = origcp.split("/")[0]
6271 mynewcat=newcp.split("/")[0]
6272 origmatches=self.dbapi.cp_list(origcp)
6275 for mycpv in origmatches:
6277 mycpsplit=catpkgsplit(mycpv)
6278 mynewcpv=newcp+"-"+mycpsplit[2]
# Append revision only when it is not the implicit -r0.
6279 if mycpsplit[3]!="r0":
6280 mynewcpv += "-"+mycpsplit[3]
6281 myoldpkg=mycpv.split("/")[1]
6282 mynewpkg=mynewcpv.split("/")[1]
6284 if (mynewpkg != myoldpkg) and os.path.exists(self.getname(mynewcpv)):
6285 writemsg("!!! Cannot update binary: Destination exists.\n",
6287 writemsg("!!! "+mycpv+" -> "+mynewcpv+"\n", noiselevel=-1)
6290 tbz2path=self.getname(mycpv)
6291 if os.path.exists(tbz2path) and not os.access(tbz2path,os.W_OK):
6292 writemsg("!!! Cannot update readonly binary: "+mycpv+"\n",
6296 #print ">>> Updating data in:",mycpv
6297 writemsg_stdout("%")
# Rewrite the xpak metadata embedded in the tbz2 in place.
6298 mytbz2 = xpak.tbz2(tbz2path)
6299 mydata = mytbz2.get_data()
6300 updated_items = update_dbentries([mylist], mydata)
6301 mydata.update(updated_items)
6302 mydata["CATEGORY"] = mynewcat+"\n"
6303 if mynewpkg != myoldpkg:
6304 mydata[mynewpkg+".ebuild"] = mydata[myoldpkg+".ebuild"]
6305 del mydata[myoldpkg+".ebuild"]
6306 mydata["PF"] = mynewpkg + "\n"
6307 mytbz2.recompose_mem(xpak.xpak_mem(mydata))
# Re-register under the new cpv and move the file on disk.
6309 self.dbapi.cpv_remove(mycpv)
6310 del self._pkg_paths[mycpv]
6311 new_path = self.getname(mynewcpv)
6312 self._pkg_paths[mynewcpv] = os.path.join(
6313 *new_path.split(os.path.sep)[-2:])
6314 if new_path != mytbz2:
6316 os.makedirs(os.path.dirname(new_path))
6318 if e.errno != errno.EEXIST:
6321 os.rename(tbz2path, new_path)
6322 self._remove_symlink(mycpv)
6323 if new_path.split(os.path.sep)[-2] == "All":
6324 self._create_symlink(mynewcpv)
6325 self.dbapi.cpv_inject(mynewcpv)
# NOTE(review): the unlink call lines are elided from this listing.
6330 def _remove_symlink(self, cpv):
6331 """Remove a ${PKGDIR}/${CATEGORY}/${PF}.tbz2 symlink and also remove
6332 the ${PKGDIR}/${CATEGORY} directory if empty. The file will not be
6333 removed if os.path.islink() returns False."""
6334 mycat, mypkg = catsplit(cpv)
6335 mylink = os.path.join(self.pkgdir, mycat, mypkg + ".tbz2")
6336 if os.path.islink(mylink):
6337 """Only remove it if it's really a link so that this method never
6338 removes a real package that was placed here to avoid a collision."""
# Best-effort cleanup: rmdir fails harmlessly when the dir is non-empty.
6340 os.rmdir(os.path.join(self.pkgdir, mycat))
6342 if e.errno not in (errno.ENOENT,
6343 errno.ENOTEMPTY, errno.EEXIST):
# NOTE(review): try/except opener lines are elided from this listing.
6348 def _create_symlink(self, cpv):
6349 """Create a ${PKGDIR}/${CATEGORY}/${PF}.tbz2 symlink (and
6350 ${PKGDIR}/${CATEGORY} directory, if necessary). Any file that may
6351 exist in the location of the symlink will first be removed."""
6352 mycat, mypkg = catsplit(cpv)
6353 full_path = os.path.join(self.pkgdir, mycat, mypkg + ".tbz2")
6354 os.makedirs(os.path.dirname(full_path))
6356 if e.errno != errno.EEXIST:
# Clear any stale entry at the link location before creating it.
6360 os.unlink(full_path)
6362 if e.errno != errno.ENOENT:
# Relative link into the shared All/ directory.
6365 os.symlink(os.path.join("..", "All", mypkg + ".tbz2"), full_path)
# Apply a slot-move update to matching binary packages: rewrites the SLOT
# value inside each writable tbz2. NOTE(review): lines unpacking mylist
# into pkg/origslot/newslot are elided from this listing.
6367 def move_slot_ent(self, mylist):
6368 if not self.populated:
6374 if not isvalidatom(pkg):
6375 raise portage_exception.InvalidAtom(pkg)
6377 origmatches=self.dbapi.match(pkg)
6380 for mycpv in origmatches:
6381 mycpsplit=catpkgsplit(mycpv)
6382 myoldpkg=mycpv.split("/")[1]
6383 tbz2path=self.getname(mycpv)
6384 if os.path.exists(tbz2path) and not os.access(tbz2path,os.W_OK):
6385 writemsg("!!! Cannot update readonly binary: "+mycpv+"\n",
6389 #print ">>> Updating data in:",mycpv
6390 mytbz2 = xpak.tbz2(tbz2path)
6391 mydata = mytbz2.get_data()
6393 slot = mydata["SLOT"]
# Only rewrite when the package's current slot matches origslot.
6397 if (slot[0]!=origslot):
6400 writemsg_stdout("S")
6401 mydata["SLOT"] = newslot+"\n"
6402 mytbz2.recompose_mem(xpak.xpak_mem(mydata))
# Apply a batch of db-entry updates to every binary package in PKGDIR,
# rewriting each tbz2's metadata only when something actually changed.
6405 def update_ents(self, update_iter):
6406 if len(update_iter) == 0:
6408 if not self.populated:
6411 for mycpv in self.dbapi.cp_all():
6412 tbz2path=self.getname(mycpv)
6413 if os.path.exists(tbz2path) and not os.access(tbz2path,os.W_OK):
6414 writemsg("!!! Cannot update readonly binary: "+mycpv+"\n",
6417 #print ">>> Updating binary data:",mycpv
6418 writemsg_stdout("*")
6419 mytbz2 = xpak.tbz2(tbz2path)
6420 mydata = mytbz2.get_data()
6421 updated_items = update_dbentries(update_iter, mydata)
6422 if len(updated_items) > 0:
6423 mydata.update(updated_items)
6424 mytbz2.recompose_mem(xpak.xpak_mem(mydata))
6427 def prevent_collision(self, cpv):
6428 """Make sure that the file location ${PKGDIR}/All/${PF}.tbz2 is safe to
6429 use for a given cpv. If a collision will occur with an existing
6430 package from another category, the existing package will be bumped to
6431 ${PKGDIR}/${CATEGORY}/${PF}.tbz2 so that both can coexist."""
6432 full_path = self.getname(cpv)
# Already assigned to All/ — nothing to do (early-return line elided).
6433 if "All" == full_path.split(os.path.sep)[-2]:
6435 """Move a colliding package if it exists. Code below this point only
6436 executes in rare cases."""
6437 mycat, mypkg = catsplit(cpv)
6438 myfile = mypkg + ".tbz2"
6439 mypath = os.path.join("All", myfile)
6440 dest_path = os.path.join(self.pkgdir, mypath)
6441 if os.path.exists(dest_path):
6442 # For invalid packages, other_cat could be None.
6443 other_cat = xpak.tbz2(dest_path).getfile("CATEGORY")
6445 other_cat = other_cat.strip()
# Evict the other category's package from All/ back to its own dir.
6446 self._move_from_all(other_cat + "/" + mypkg)
6447 """The file may or may not exist. Move it if necessary and update
6448 internal state for future calls to getname()."""
6449 self._move_to_all(cpv)
# NOTE(review): try/except opener lines around os.lstat/makedirs are elided.
6451 def _move_to_all(self, cpv):
6452 """If the file exists, move it. Whether or not it exists, update state
6453 for future getname() calls."""
6454 mycat , mypkg = catsplit(cpv)
6455 myfile = mypkg + ".tbz2"
6456 src_path = os.path.join(self.pkgdir, mycat, myfile)
6458 mystat = os.lstat(src_path)
# Move only regular files — never follow/relocate a symlink.
6461 if mystat and stat.S_ISREG(mystat.st_mode):
6463 os.makedirs(os.path.join(self.pkgdir, "All"))
6465 if e.errno != errno.EEXIST:
6468 os.rename(src_path, os.path.join(self.pkgdir, "All", myfile))
6469 self._create_symlink(cpv)
# Record the canonical All/ location for future getname() lookups.
6470 self._pkg_paths[cpv] = os.path.join("All", myfile)
# NOTE(review): try/except opener lines around makedirs are elided.
6472 def _move_from_all(self, cpv):
6473 """Move a package from ${PKGDIR}/All/${PF}.tbz2 to
6474 ${PKGDIR}/${CATEGORY}/${PF}.tbz2 and update state from getname calls."""
6475 self._remove_symlink(cpv)
6476 mycat , mypkg = catsplit(cpv)
6477 myfile = mypkg + ".tbz2"
6478 mypath = os.path.join(mycat, myfile)
6479 dest_path = os.path.join(self.pkgdir, mypath)
6481 os.makedirs(os.path.dirname(dest_path))
6483 if e.errno != errno.EEXIST:
6486 os.rename(os.path.join(self.pkgdir, "All", myfile), dest_path)
# Record the per-category location for future getname() lookups.
6487 self._pkg_paths[cpv] = mypath
# NOTE(review): the embedded original line numbers below are non-contiguous,
# so statements (loop headers, continue/return lines, try blocks) are missing
# from this excerpt. Code left byte-identical; comments only.
#
# populate(): scans ${PKGDIR} (and optionally a remote PORTAGE_BINHOST) and
# injects every discovered .tbz2 package into self.dbapi, recording each
# package's on-disk path in self._pkg_paths keyed by CPV.
6489 def populate(self, getbinpkgs=0,getbinpkgsonly=0):
6490 "populates the binarytree"
6491 if (not os.path.isdir(self.pkgdir) and not getbinpkgs):
6493 if (not os.path.isdir(self.pkgdir+"/All") and not getbinpkgs):
6496 if not getbinpkgsonly:
# Local scan: walk every subdirectory of pkgdir; "All" is scanned first so
# its entries win on CPV collisions (see comment at 6532).
6498 dirs = listdir(self.pkgdir, dirsonly=True, EmptyOnError=True)
6502 dirs.insert(0, "All")
6504 for myfile in listdir(os.path.join(self.pkgdir, mydir)):
6505 if not myfile.endswith(".tbz2"):
6507 mypath = os.path.join(mydir, myfile)
6508 full_path = os.path.join(self.pkgdir, mypath)
6509 if os.path.islink(full_path):
# CATEGORY/PF are read from the xpak metadata embedded in the tbz2.
6511 mytbz2 = xpak.tbz2(full_path)
6512 # For invalid packages, mycat could be None.
6513 mycat = mytbz2.getfile("CATEGORY")
6514 mypf = mytbz2.getfile("PF")
6516 if not mycat or not mypf:
6517 #old-style or corrupt package
6518 writemsg("!!! Invalid binary package: '%s'\n" % full_path,
6520 writemsg("!!! This binary package is not " + \
6521 "recoverable and should be deleted.\n",
6523 self.invalids.append(mypkg)
6525 mycat = mycat.strip()
6526 if mycat != mydir and mydir != "All":
6528 if mypkg != mypf.strip():
6530 mycpv = mycat + "/" + mypkg
6531 if mycpv in pkg_paths:
6532 # All is first, so it's preferred.
6534 pkg_paths[mycpv] = mypath
6535 self.dbapi.cpv_inject(mycpv)
6536 self._pkg_paths = pkg_paths
# Remote scan: fetch metadata from PORTAGE_BINHOST once (skipped if
# self.remotepkgs is already populated).
6538 if getbinpkgs and not self.settings["PORTAGE_BINHOST"]:
6539 writemsg(red("!!! PORTAGE_BINHOST unset, but use is requested.\n"),
6543 self.settings["PORTAGE_BINHOST"] and not self.remotepkgs:
# PORTAGE_BINHOST_CHUNKSIZE is optional; ValueError/KeyError fall back
# (fallback value not visible in this excerpt).
6545 chunk_size = long(self.settings["PORTAGE_BINHOST_CHUNKSIZE"])
6548 except (ValueError, KeyError):
6551 writemsg(green("Fetching binary packages info...\n"))
6552 self.remotepkgs = getbinpkg.dir_get_metadata(
6553 self.settings["PORTAGE_BINHOST"], chunk_size=chunk_size)
6554 writemsg(green(" -- DONE!\n\n"))
6556 for mypkg in self.remotepkgs.keys():
6557 if not self.remotepkgs[mypkg].has_key("CATEGORY"):
6558 #old-style or corrupt package
6559 writemsg("!!! Invalid remote binary package: "+mypkg+"\n",
6561 del self.remotepkgs[mypkg]
# mypkg[:-5] strips the ".tbz2" suffix to build CATEGORY/PF.
6563 mycat=self.remotepkgs[mypkg]["CATEGORY"].strip()
6564 fullpkg=mycat+"/"+mypkg[:-5]
6565 mykey=dep_getkey(fullpkg)
6567 # invalid tbz2's can hurt things.
6568 #print "cpv_inject("+str(fullpkg)+")"
6569 self.dbapi.cpv_inject(fullpkg)
6570 #print " -- Injected"
6571 except SystemExit, e:
6574 writemsg("!!! Failed to inject remote binary package:"+str(fullpkg)+"\n",
6576 del self.remotepkgs[mypkg]
# inject(): register a CPV with the underlying binary-package dbapi and
# return its result. Thin delegation; no filesystem work happens here.
6580 def inject(self,cpv):
6581 return self.dbapi.cpv_inject(cpv)
# exists_specific(): truthy iff an exact version match for `cpv` exists in
# this tree (matches "=cpv" after dep_expand). NOTE(review): the statement
# between the populated-check and the return (presumably self.populate())
# is missing from this excerpt.
6583 def exists_specific(self,cpv):
6584 if not self.populated:
6586 return self.dbapi.match(
6587 dep_expand("="+cpv, mydb=self.dbapi, settings=self.settings))
# dep_bestmatch(): expand a dependency atom against this tree and return the
# best (highest) matching version from cp_list, ignoring visibility.
# Diagnostic writemsg calls use noiselevel 1 (debug). NOTE(review):
# intermediate lines (incl. the return statement) are missing from this
# excerpt; mymatch is computed but the return is not visible.
6589 def dep_bestmatch(self,mydep):
6590 "compatibility method -- all matches, not just visible ones"
6591 if not self.populated:
6594 writemsg("mydep: %s\n" % mydep, 1)
6595 mydep = dep_expand(mydep, mydb=self.dbapi, settings=self.settings)
6596 writemsg("mydep: %s\n" % mydep, 1)
6597 mykey=dep_getkey(mydep)
6598 writemsg("mykey: %s\n" % mykey, 1)
6599 mymatch=best(match_from_list(mydep,self.dbapi.cp_list(mykey)))
6600 writemsg("mymatch: %s\n" % mymatch, 1)
# getname(): map a package name to a tbz2 file path under pkgdir.
# Cached paths in self._pkg_paths win; otherwise prefer All/${PF}.tbz2 and
# fall back to ${CATEGORY}/${PF}.tbz2 when the All/ name is already taken.
# NOTE(review): the line deriving mycpv from pkgname is missing from this
# excerpt.
6605 def getname(self,pkgname):
6606 """Returns a file location for this package. The default location is
6607 ${PKGDIR}/All/${PF}.tbz2, but will be ${PKGDIR}/${CATEGORY}/${PF}.tbz2
6608 in the rare event of a collision. The prevent_collision() method can
6609 be called to ensure that ${PKGDIR}/All/${PF}.tbz2 is available for a
6611 if not self.populated:
6614 mypath = self._pkg_paths.get(mycpv, None)
6616 return os.path.join(self.pkgdir, mypath)
6617 mycat, mypkg = catsplit(mycpv)
6618 mypath = os.path.join("All", mypkg + ".tbz2")
6619 if mypath in self._pkg_paths.values():
6620 mypath = os.path.join(mycat, mypkg + ".tbz2")
6621 self._pkg_paths[mycpv] = mypath # cache for future lookups
6622 return os.path.join(self.pkgdir, mypath)
# isremote(): a package counts as remote when it is absent locally but its
# "${PV}.tbz2" key exists in self.remotepkgs. The computed `remote` flag's
# return is not visible in this excerpt.
6624 def isremote(self,pkgname):
6625 "Returns true if the package is kept remotely."
6626 mysplit=pkgname.split("/")
6627 remote = (not os.path.exists(self.getname(pkgname))) and self.remotepkgs.has_key(mysplit[1]+".tbz2")
# get_use(): return the package's USE flags as a list of strings, read from
# remote metadata when the package is remote, otherwise from the local
# tbz2's xpak "USE" entry.
6630 def get_use(self,pkgname):
6631 mysplit=pkgname.split("/")
6632 if self.isremote(pkgname):
6633 return self.remotepkgs[mysplit[1]+".tbz2"]["USE"][:].split()
6634 tbz2=xpak.tbz2(self.getname(pkgname))
6635 return tbz2.getfile("USE").split()
# gettbz2(): download a remote tbz2 into ${PKGDIR}/All/ via getbinpkg,
# honoring RESUMECOMMAND. Local (non-remote) packages short-circuit; the
# return for that path is not visible in this excerpt. makedirs failures
# are ignored (directory may already exist).
6637 def gettbz2(self,pkgname):
6638 "fetches the package from a remote site, if necessary."
6639 print "Fetching '"+str(pkgname)+"'"
6640 mysplit = pkgname.split("/")
6641 tbz2name = mysplit[1]+".tbz2"
6642 if not self.isremote(pkgname):
6643 if (tbz2name not in self.invalids):
6646 writemsg("Resuming download of this tbz2, but it is possible that it is corrupt.\n",
6648 mydest = self.pkgdir+"/All/"
6650 os.makedirs(mydest, 0775)
6651 except (OSError, IOError):
6653 return getbinpkg.file_get(
6654 self.settings["PORTAGE_BINHOST"] + "/" + tbz2name,
6655 mydest, fcmd=self.settings["RESUMECOMMAND"])
# getslot(): look up SLOT for a catpkg via dbapi.aux_get. SystemExit is
# re-raised separately from other exceptions (handler bodies are missing
# from this excerpt).
6657 def getslot(self,mycatpkg):
6658 "Get a slot for a catpkg; assume it exists."
6661 myslot=self.dbapi.aux_get(mycatpkg,["SLOT"])[0]
6662 except SystemExit, e:
6664 except Exception, e:
6670 This class provides an interface to the installed package database
6671 At present this is implemented as a text backend in /var/db/pkg.
# dblink.__init__(): build the VDB paths for one installed-package entry
# (dbroot = ${ROOT}/var/db/pkg, dbpkgdir = <dbroot>/<cat>/<pkg>, dbtmpdir is
# the "-MERGING-" staging dir) and set up ConfigProtect state. NOTE(review):
# assignments for self.cat/self.pkg/self.myroot and the vartree-default
# branch header are missing from this excerpt.
6673 def __init__(self, cat, pkg, myroot, mysettings, treetype=None,
6676 Creates a DBlink object for a given CPV.
6677 The given CPV may not be present in the database already.
6679 @param cat: Category
6681 @param pkg: Package (PV)
6683 @param myroot: Typically ${ROOT}
6684 @type myroot: String (Path)
6685 @param mysettings: Typically portage.config
6686 @type mysettings: An instance of portage.config
6687 @param treetype: one of ['porttree','bintree','vartree']
6688 @type treetype: String
6689 @param vartree: an instance of vartree corresponding to myroot.
6690 @type vartree: vartree
6695 self.mycpv = self.cat+"/"+self.pkg
6696 self.mysplit = pkgsplit(self.mycpv)
6697 self.treetype = treetype
# When no vartree is passed, fall back to the global db[myroot] registry.
6700 vartree = db[myroot]["vartree"]
6701 self.vartree = vartree
6703 self.dbroot = normalize_path(os.path.join(myroot, VDB_PATH))
6704 self.dbcatdir = self.dbroot+"/"+cat
6705 self.dbpkgdir = self.dbcatdir+"/"+pkg
6706 self.dbtmpdir = self.dbcatdir+"/-MERGING-"+pkg
# self.dbdir points at the "real" entry by default; treewalk()/unmerge()
# retarget it to dbtmpdir during merges.
6707 self.dbdir = self.dbpkgdir
6709 self._lock_vdb = None
6711 self.settings = mysettings
# Legacy sentinel: settings==1 marks "no settings supplied" (the handling
# branch is missing from this excerpt).
6712 self.settings==1
6716 protect_obj = portage_util.ConfigProtect(myroot,
6717 mysettings.get("CONFIG_PROTECT","").split(),
6718 mysettings.get("CONFIG_PROTECT_MASK","").split())
6719 self.updateprotect = protect_obj.updateprotect
6720 self._config_protect = protect_obj
6721 self._installed_instance = None
6722 self.contentscache=[]
6723 self._contents_inodes = None
# NOTE(review): the `def lockdb(self):` / `def unlockdb(self):` headers are
# missing from this excerpt; the two fragments below are their bodies.
# lockdb(): refuse re-entrant locking, ensure dbroot exists, then take the
# VDB directory lock.
6727 raise AssertionError("Lock already held.")
6728 # At least the parent needs to exist for the lock file.
6729 portage_util.ensure_dirs(self.dbroot)
6730 self._lock_vdb = portage_locks.lockdir(self.dbroot)
# unlockdb(): release and clear the held VDB lock.
6734 portage_locks.unlockdir(self._lock_vdb)
6735 self._lock_vdb = None
# NOTE(review): this span contains bodies of several small dblink methods
# (getpath, exists, create, delete — the `def` headers are missing from this
# excerpt).
# getpath(): informational path accessor.
6738 "return path to location of db information (for >>> informational display)"
# exists(): does this package's VDB directory exist?
6742 "does the db entry exist? boolean."
6743 return os.path.exists(self.dbdir)
# create(): dead code — deliberately raises before the mkdir below.
6746 "create the skeleton db directory structure. No contents, virtuals, provides or anything. Also will create /var/db/pkg if necessary."
6748 This function should never get called (there is no reason to use it).
6750 # XXXXX Delete this eventually
6751 raise Exception, "This is bad. Don't use it."
6752 if not os.path.exists(self.dbdir):
6753 os.makedirs(self.dbdir)
# delete(): unlink every file in dbdir then rmdir it; the print block below
# appears to be the failure branch (its `except`/`try` headers are missing
# from this excerpt).
6757 Remove this entry from the database
6759 if not os.path.exists(self.dbdir):
6762 for x in listdir(self.dbdir):
6763 os.unlink(self.dbdir+"/"+x)
6764 os.rmdir(self.dbdir)
6766 print "!!! Unable to remove db entry for this package."
6767 print "!!! It is possible that a directory is in this one. Portage will still"
6768 print "!!! register this package as installed as long as this directory exists."
6769 print "!!! You may delete this directory with 'rm -Rf "+self.dbdir+"'"
# clearcontents(): delete this entry's CONTENTS file if present. Does not
# reset self.contentscache (at least not in the lines visible here).
6774 def clearcontents(self):
6776 For a given db entry (self), erase the CONTENTS values.
6778 if os.path.exists(self.dbdir+"/CONTENTS"):
6779 os.unlink(self.dbdir+"/CONTENTS")
# getcontents(): parse this entry's CONTENTS file into a dict
#   { installed-path: [type, ...type-specific fields...] }
# and memoize it in self.contentscache. Entry formats handled: obj
# (type, md5, mtime reordered as [type, mtime, md5] per comment at 6811),
# dir, sym (with repair of old broken symlink lines), dev, fif. Paths are
# re-rooted onto self.myroot so entries can be removed from non-/ roots.
# NOTE(review): several lines (pos counter init, null_byte definition, the
# `if mydat[0]=="obj"` header, splitter computation, except-handler entry)
# are missing from this excerpt.
6781 def getcontents(self):
6783 Get the installed files of a given package (aka what that package installed)
6785 if not os.path.exists(self.dbdir+"/CONTENTS"):
6787 if self.contentscache != []:
6788 return self.contentscache
6790 myc=open(self.dbdir+"/CONTENTS","r")
6791 mylines=myc.readlines()
6794 contents_file = os.path.join(self.dbdir, "CONTENTS")
6796 for line in mylines:
6798 if null_byte in line:
6799 # Null bytes are a common indication of corruption.
6800 writemsg("!!! Null byte found in contents " + \
6801 "file, line %d: '%s'\n" % (pos, contents_file),
6804 mydat = line.split()
6805 # we do this so we can remove from non-root filesystems
6806 # (use the ROOT var to allow maintenance on other partitions)
6808 mydat[1] = normalize_path(os.path.join(
6809 self.myroot, mydat[1].lstrip(os.path.sep)))
6811 #format: type, mtime, md5sum
6812 pkgfiles[" ".join(mydat[1:-2])]=[mydat[0], mydat[-1], mydat[-2]]
6813 elif mydat[0]=="dir":
6815 pkgfiles[" ".join(mydat[1:])]=[mydat[0] ]
6816 elif mydat[0]=="sym":
6817 #format: type, mtime, dest
6819 if (x >= 13) and (mydat[-1][-1]==')'): # Old/Broken symlink entry
6820 mydat = mydat[:-10]+[mydat[-10:][stat.ST_MTIME][:-1]]
6821 writemsg("FIXED SYMLINK LINE: %s\n" % mydat, 1)
6831 pkgfiles[" ".join(mydat[1:splitter])]=[mydat[0], mydat[-1], " ".join(mydat[(splitter+1):-1])]
6832 elif mydat[0]=="dev":
6834 pkgfiles[" ".join(mydat[1:])]=[mydat[0] ]
6835 elif mydat[0]=="fif":
6837 pkgfiles[" ".join(mydat[1:])]=[mydat[0]]
# Malformed lines are reported but do not abort the whole parse.
6840 except (KeyError,IndexError):
6841 print "portage: CONTENTS line",pos,"corrupt!"
6842 self.contentscache=pkgfiles
# unmerge(): run prerm, remove the package's files from the live FS
# (_unmerge_pkgfiles), run postrm and cleanrm, process elog, then refresh
# env/ldconfig state. Caller must hold the VDB lock (see docstring).
# NOTE(review): many intermediate lines (try/finally headers, lock-init
# lines, several branches) are missing from this excerpt.
6845 def unmerge(self, pkgfiles=None, trimworld=1, cleanup=1,
6846 ldpath_mtimes=None):
6849 Unmerges a given package (CPV)
6854 @param pkgfiles: files to unmerge (generally self.getcontents() )
6855 @type pkgfiles: Dictionary
6856 @param trimworld: Remove CPV from world file if True, not if False
6857 @type trimworld: Boolean
6858 @param cleanup: cleanup to pass to doebuild (see doebuild)
6859 @type cleanup: Boolean
6860 @param ldpath_mtimes: mtimes to pass to env_update (see env_update)
6861 @type ldpath_mtimes: Dictionary
6864 1. os.EX_OK if everything went well.
6865 2. return code of the failed phase (for prerm, postrm, cleanrm)
6868 The caller must ensure that lockdb() and unlockdb() are called
6869 before and after this method.
6872 contents = self.getcontents()
6873 # Now, don't assume that the name of the ebuild is the same as the
6874 # name of the dir; the package may have been moved.
6876 mystuff = listdir(self.dbdir, EmptyOnError=1)
6878 if x.endswith(".ebuild"):
6879 myebuildpath = os.path.join(self.dbdir, self.pkg + ".ebuild")
6880 if x[:-7] != self.pkg:
6881 # Clean up after vardbapi.move_ent() breakage in
6882 # portage versions before 2.1.2
6883 os.rename(os.path.join(self.dbdir, x), myebuildpath)
6884 write_atomic(os.path.join(self.dbdir, "PF"), self.pkg+"\n")
6887 self.settings.load_infodir(self.dbdir)
# Prepare the prerm environment; a corrupt EAPI file surfaces as
# UnsupportedAPIException and is reported (continuation not visible here).
6890 doebuild_environment(myebuildpath, "prerm", self.myroot,
6891 self.settings, 0, 0, self.vartree.dbapi)
6892 except portage_exception.UnsupportedAPIException, e:
6893 # Sometimes this happens due to corruption of the EAPI file.
6894 writemsg("!!! FAILED prerm: %s\n" % \
6895 os.path.join(self.dbdir, "EAPI"), noiselevel=-1)
6896 writemsg("%s\n" % str(e), noiselevel=-1)
# Take category-dir and builddir locks before running ebuild phases.
6898 catdir = os.path.dirname(self.settings["PORTAGE_BUILDDIR"])
6899 portage_util.ensure_dirs(os.path.dirname(catdir),
6900 uid=portage_uid, gid=portage_gid, mode=070, mask=0)
6901 builddir_lock = None
6905 catdir_lock = portage_locks.lockdir(catdir)
6906 portage_util.ensure_dirs(catdir,
6907 uid=portage_uid, gid=portage_gid,
6909 builddir_lock = portage_locks.lockdir(
6910 self.settings["PORTAGE_BUILDDIR"])
6912 portage_locks.unlockdir(catdir_lock)
6915 # Eventually, we'd like to pass in the saved ebuild env here...
6916 retval = doebuild(myebuildpath, "prerm", self.myroot,
6917 self.settings, cleanup=cleanup, use_cache=0,
6918 mydbapi=self.vartree.dbapi, tree="vartree",
6919 vartree=self.vartree)
6920 # XXX: Decide how to handle failures here.
6921 if retval != os.EX_OK:
6922 writemsg("!!! FAILED prerm: %s\n" % retval, noiselevel=-1)
6925 self._unmerge_pkgfiles(pkgfiles)
6928 retval = doebuild(myebuildpath, "postrm", self.myroot,
6929 self.settings, use_cache=0, tree="vartree",
6930 mydbapi=self.vartree.dbapi, vartree=self.vartree)
6932 # process logs created during pre/postrm
6933 elog_process(self.mycpv, self.settings)
6935 # XXX: Decide how to handle failures here.
6936 if retval != os.EX_OK:
6937 writemsg("!!! FAILED postrm: %s\n" % retval, noiselevel=-1)
6939 doebuild(myebuildpath, "cleanrm", self.myroot, self.settings,
6940 tree="vartree", mydbapi=self.vartree.dbapi,
6941 vartree=self.vartree)
6945 portage_locks.unlockdir(builddir_lock)
6947 if myebuildpath and not catdir_lock:
6948 # Lock catdir for removal if empty.
6949 catdir_lock = portage_locks.lockdir(catdir)
# Removal of an empty catdir tolerates ENOENT/ENOTEMPTY/EEXIST; anything
# else propagates (the raise/rmdir lines are missing from this excerpt).
6955 if e.errno not in (errno.ENOENT,
6956 errno.ENOTEMPTY, errno.EEXIST):
6959 portage_locks.unlockdir(catdir_lock)
6960 env_update(target_root=self.myroot, prev_mtimes=ldpath_mtimes,
# _unmerge_pkgfiles(): remove each recorded file from the live FS, honoring
# type- and mtime-checks, config protection for /lib/modules, and symlink
# safety (lstat vs stat), then zap self.mycpv from the vartree. Objects are
# only removed when their current md5 matches the recorded one. Processing
# order per the comment at 6986: symlinks second-to-last, directories last.
# NOTE(review): sort/continue/unlink lines between the visible branches are
# missing from this excerpt.
6964 def _unmerge_pkgfiles(self, pkgfiles):
6967 Unmerges the contents of a package from the liveFS
6968 Removes the VDB entry for self
6970 @param pkgfiles: typically self.getcontents()
6971 @type pkgfiles: Dictionary { filename: [ 'type', '?', 'md5sum' ] }
6978 writemsg_stdout("No package files given... Grabbing a set.\n")
6979 pkgfiles=self.getcontents()
6982 mykeys=pkgfiles.keys()
6986 #process symlinks second-to-last, directories last.
6988 modprotect="/lib/modules/"
6989 for objkey in mykeys:
6990 obj = normalize_path(objkey)
# stat follows symlinks; lstat does not. Both tolerate failure (file gone
# or path component invalid) — the surrounding try headers are missing
# from this excerpt.
6995 statobj = os.stat(obj)
7000 lstatobj = os.lstat(obj)
7001 except (OSError, AttributeError):
7003 islink = lstatobj is not None and stat.S_ISLNK(lstatobj.st_mode)
7006 #we skip this if we're dealing with a symlink
7007 #because os.stat() will operate on the
7008 #link target rather than the link itself.
7009 writemsg_stdout("--- !found "+str(pkgfiles[objkey][0])+ " %s\n" % obj)
7011 # next line includes a tweak to protect modules from being unmerged,
7012 # but we don't protect modules from being overwritten if they are
7013 # upgraded. We effectively only want one half of the config protection
7014 # functionality for /lib/modules. For portage-ng both capabilities
7015 # should be able to be independently specified.
7016 if obj.startswith(modprotect):
7017 writemsg_stdout("--- cfgpro %s %s\n" % (pkgfiles[objkey][0], obj))
# Skip files whose on-disk mtime differs from the recorded one (except
# dir/fif/dev entries, which carry no meaningful mtime).
7020 lmtime=str(lstatobj[stat.ST_MTIME])
7021 if (pkgfiles[objkey][0] not in ("dir","fif","dev")) and (lmtime != pkgfiles[objkey][1]):
7022 writemsg_stdout("--- !mtime %s %s\n" % (pkgfiles[objkey][0], obj))
7025 if pkgfiles[objkey][0]=="dir":
7026 if statobj is None or not stat.S_ISDIR(statobj.st_mode):
7027 writemsg_stdout("--- !dir %s %s\n" % ("dir", obj))
7030 elif pkgfiles[objkey][0]=="sym":
7032 writemsg_stdout("--- !sym %s %s\n" % ("sym", obj))
7036 writemsg_stdout("<<< %s %s\n" % ("sym",obj))
7037 except (OSError,IOError),e:
7038 writemsg_stdout("!!! %s %s\n" % ("sym",obj))
7039 elif pkgfiles[objkey][0]=="obj":
7040 if statobj is None or not stat.S_ISREG(statobj.st_mode):
7041 writemsg_stdout("--- !obj %s %s\n" % ("obj", obj))
# prelink-aware md5: calc_prelink=1 undoes prelink modifications before
# hashing so the comparison against the recorded md5 is stable.
7045 mymd5 = portage_checksum.perform_md5(obj, calc_prelink=1)
7046 except portage_exception.FileNotFound, e:
7047 # the file has disappeared between now and our stat call
7048 writemsg_stdout("--- !obj %s %s\n" % ("obj", obj))
7051 # string.lower is needed because db entries used to be in upper-case. The
7052 # string.lower allows for backwards compatibility.
7053 if mymd5 != pkgfiles[objkey][2].lower():
7054 writemsg_stdout("--- !md5 %s %s\n" % ("obj", obj))
7058 except (OSError,IOError),e:
7060 writemsg_stdout("<<< %s %s\n" % ("obj",obj))
7061 elif pkgfiles[objkey][0]=="fif":
7062 if not stat.S_ISFIFO(lstatobj[stat.ST_MODE]):
7063 writemsg_stdout("--- !fif %s %s\n" % ("fif", obj))
7065 writemsg_stdout("--- %s %s\n" % ("fif",obj))
# dev entries are reported but never removed here.
7066 elif pkgfiles[objkey][0]=="dev":
7067 writemsg_stdout("--- %s %s\n" % ("dev",obj))
7075 writemsg_stdout("<<< %s %s\n" % ("dir",obj))
7076 except (OSError, IOError):
7077 writemsg_stdout("--- !empty dir %s\n" % obj)
7079 #remove self from vartree database so that our own virtual gets zapped if we're the last node
7080 self.vartree.zap(self.mycpv)
# isowner(): True when this dblink's CONTENTS claims `filename` (path match
# first, then an inode-level (st_dev, st_ino) match to defeat symlinked-path
# aliasing). A missing destfile (lstat fails) means "not owned here".
# NOTE(review): return statements and the inode-population loop header are
# missing from this excerpt; self._contents_inodes is built lazily on first
# use.
7082 def isowner(self,filename,destroot):
7084 Check if filename is a new file or belongs to this package
7085 (for this or a previous version)
7093 1. True if this package owns the file.
7094 2. False if this package does not own the file.
7096 destfile = normalize_path(
7097 os.path.join(destroot, filename.lstrip(os.path.sep)))
7099 mylstat = os.lstat(destfile)
7100 except (OSError, IOError):
7103 pkgfiles = self.getcontents()
7104 if pkgfiles and filename in pkgfiles:
7107 if self._contents_inodes is None:
7108 self._contents_inodes = set()
7112 self._contents_inodes.add((lstat.st_dev, lstat.st_ino))
7115 if (mylstat.st_dev, mylstat.st_ino) in self._contents_inodes:
# isprotected(): CONFIG_PROTECT check with a side effect — when an installed
# instance in the same slot owns the protected file, bump its mtime
# (os.utime) so the subsequent unmerge's mtime comparison will not remove
# it. ENOENT from utime means the file vanished and is treated as
# unprotected; other errnos propagate per the check at 7138 (the raise line
# is missing from this excerpt).
7120 def isprotected(self, filename):
7121 """In cases where an installed package in the same slot owns a
7122 protected file that will be merged, bump the mtime on the installed
7123 file in order to ensure that it isn't unmerged."""
7124 if not self._config_protect.isprotected(filename):
7126 if self._installed_instance is None:
7128 mydata = self._installed_instance.getcontents().get(filename, None)
7132 # Bump the mtime in order to ensure that the old config file doesn't
7133 # get unmerged. The user will have an opportunity to merge the new
7134 # config with the old one.
7136 os.utime(filename, None)
7138 if e.errno != errno.ENOENT:
7141 # The file has disappeared, so it's not protected.
# treewalk(): top-level merge driver — collision-protect check, pkg_preinst,
# mergeme() of ${D} into ${ROOT} (with a secondhand/thirdhand retry queue
# for symlinks whose targets are not merged yet), unmerge of the previously
# installed instance, promotion of dbtmpdir to dbpkgdir, confmem update,
# pkg_postinst, env_update and clean. NOTE(review): many intermediate lines
# (returns, try/except headers, loop headers, counters) are missing from
# this excerpt; comments below describe only what the visible lines show.
7145 def treewalk(self, srcroot, destroot, inforoot, myebuild, cleanup=0,
7146 mydbapi=None, prev_mtimes=None):
7149 This function does the following:
7151 Collision Protection.
7152 calls doebuild(mydo=pkg_preinst)
7153 Merges the package to the livefs
7154 unmerges old version (if required)
7155 calls doebuild(mydo=pkg_postinst)
7158 @param srcroot: Typically this is ${D}
7159 @type srcroot: String (Path)
7160 @param destroot: Path to merge to (usually ${ROOT})
7161 @type destroot: String (Path)
7162 @param inforoot: root of the vardb entry ?
7163 @type inforoot: String (Path)
7164 @param myebuild: path to the ebuild that we are processing
7165 @type myebuild: String (Path)
7166 @param mydbapi: dbapi which is handed to doebuild.
7167 @type mydbapi: portdbapi instance
7168 @param prev_mtimes: { Filename:mtime } mapping for env_update
7169 @type prev_mtimes: Dictionary
7175 secondhand is a list of symlinks that have been skipped due to their target
7176 not existing; we will merge these symlinks at a later time.
7178 if not os.path.isdir(srcroot):
7179 writemsg("!!! Directory Not Found: D='%s'\n" % srcroot,
7183 if not os.path.exists(self.dbcatdir):
7184 os.makedirs(self.dbcatdir)
# Collect all other installed versions of this cp, and remember the
# same-slot installed instance for isprotected().
7187 for v in self.vartree.dbapi.cp_list(self.mysplit[0]):
7188 otherversions.append(v.split("/")[1])
7190 slot_matches = self.vartree.dbapi.match(
7191 "%s:%s" % (self.mysplit[0], self.settings["SLOT"]))
7193 # Used by self.isprotected().
7194 self._installed_instance = dblink(self.cat,
7195 catsplit(slot_matches[0])[1], destroot, self.settings,
7196 vartree=self.vartree)
7198 # check for package collisions
7199 if "collision-protect" in self.settings.features:
7200 collision_ignore = set([normalize_path(myignore) for myignore in \
7201 self.settings.get("COLLISION_IGNORE", "").split()])
7202 myfilelist = listdir(srcroot, recursive=1, filesonly=1, followSymlinks=False)
7204 # the linkcheck only works if we are in srcroot
7207 mysymlinks = filter(os.path.islink, listdir(srcroot, recursive=1, filesonly=0, followSymlinks=False))
7208 myfilelist.extend(mysymlinks)
7209 mysymlinked_directories = [s + os.path.sep for s in mysymlinks]
7214 starttime=time.time()
7220 if self.pkg in otherversions:
7221 otherversions.remove(self.pkg) # we already checked this package
7223 myslot = self.settings["SLOT"]
7224 for v in otherversions:
7225 # only allow versions with same slot to overwrite files
7226 if myslot == self.vartree.dbapi.aux_get("/".join((self.cat, v)), ["SLOT"])[0]:
7228 dblink(self.cat, v, destroot, self.settings,
7229 vartree=self.vartree))
7233 print green("*")+" checking "+str(len(myfilelist))+" files for package collisions"
7234 for f in myfilelist:
7236 # listdir isn't intelligent enough to exclude symlinked dirs,
7237 # so we have to do it ourself
7238 for s in mysymlinked_directories:
7246 print str(i)+" files checked ..."
# A file is only a collision when no same-slot version (or self) owns or
# protects it and it is not covered by COLLISION_IGNORE.
7250 for ver in [self]+mypkglist:
7251 if (ver.isowner(f, destroot) or ver.isprotected(f)):
7255 collisions.append(f)
7256 print "existing file "+f+" is not owned by this package"
7258 if collision_ignore:
7259 if f in collision_ignore:
7262 for myignore in collision_ignore:
7263 if f.startswith(myignore + os.path.sep):
7266 #print green("*")+" spent "+str(time.time()-starttime)+" seconds checking for file collisions"
7268 print red("*")+" This package is blocked because it wants to overwrite"
7269 print red("*")+" files belonging to other packages (see messages above)."
7270 print red("*")+" If you have no clue what this is all about report it "
7271 print red("*")+" as a bug for this package on http://bugs.gentoo.org"
7273 print red("package "+self.cat+"/"+self.pkg+" NOT merged")
7276 print "Searching all installed packages for file collisions..."
7277 print "Press Ctrl-C to Stop"
7279 """ Note: The isowner calls result in a stat call for *every*
7280 single installed file, since the inode numbers are used to work
7281 around the problem of ambiguous paths caused by symlinked files
7282 and/or directories. Though it is slow, it is as accurate as
7285 for cpv in self.vartree.dbapi.cpv_all():
7286 cat, pkg = catsplit(cpv)
7287 mylink = dblink(cat, pkg, destroot, self.settings,
7288 vartree=self.vartree)
7290 for f in collisions:
7291 if mylink.isowner(f, destroot):
7292 mycollisions.append(f)
7295 print " * %s:" % cpv
7297 for f in mycollisions:
7299 os.path.join(destroot, f.lstrip(os.path.sep))
7302 print "None of the installed packages claim the above file(s)."
# Same-device merges may move files out of ${D}; drop the sibling
# ".installed" flag so stale state is not trusted.
7310 if os.stat(srcroot).st_dev == os.stat(destroot).st_dev:
7311 """ The merge process may move files out of the image directory,
7312 which causes invalidation of the .installed flag."""
7314 os.unlink(os.path.join(
7315 os.path.dirname(normalize_path(srcroot)), ".installed"))
7317 if e.errno != errno.ENOENT:
7321 # get old contents info for later unmerging
7322 oldcontents = self.getcontents()
# Stage the new VDB entry in the -MERGING- dir until the merge succeeds.
7324 self.dbdir = self.dbtmpdir
7326 if not os.path.exists(self.dbtmpdir):
7327 os.makedirs(self.dbtmpdir)
7329 writemsg_stdout(">>> Merging %s %s %s\n" % (self.mycpv,"to",destroot))
7331 # run preinst script
7332 if myebuild is None:
7333 myebuild = os.path.join(inforoot, self.pkg + ".ebuild")
7334 a = doebuild(myebuild, "preinst", destroot, self.settings, cleanup=cleanup,
7335 use_cache=0, tree=self.treetype, mydbapi=mydbapi,
7336 vartree=self.vartree)
7338 # XXX: Decide how to handle failures here.
7340 writemsg("!!! FAILED preinst: "+str(a)+"\n", noiselevel=-1)
7343 # copy "info" files (like SLOT, CFLAGS, etc.) into the database
7344 for x in listdir(inforoot):
7345 self.copyfile(inforoot+"/"+x)
7347 # get current counter value (counter_tick also takes care of incrementing it)
7348 # XXX Need to make this destroot, but it needs to be initialized first. XXX
7349 # XXX bis: leads to some invalidentry() call through cp_all().
7350 counter = self.vartree.dbapi.counter_tick(self.myroot, mycpv=self.mycpv)
7351 # write local package counter for recording
7352 lcfile = open(self.dbtmpdir+"/COUNTER","w")
7353 lcfile.write(str(counter))
7356 # open CONTENTS file (possibly overwriting old one) for recording
7357 outfile=open(self.dbtmpdir+"/CONTENTS","w")
7359 self.updateprotect()
7361 #if we have a file containing previously-merged config file md5sums, grab it.
7362 conf_mem_file = os.path.join(destroot, CONFIG_MEMORY_FILE)
7363 cfgfiledict = grabdict(conf_mem_file)
7364 if self.settings.has_key("NOCONFMEM"):
7365 cfgfiledict["IGNORE"]=1
7367 cfgfiledict["IGNORE"]=0
7369 # set umask to 0 for merging; back up umask, save old one in prevmask (since this is a global change)
7370 mymtime = long(time.time())
7371 prevmask = os.umask(0)
7374 # we do a first merge; this will recurse through all files in our srcroot but also build up a
7375 # "second hand" of symlinks to merge later
7376 if self.mergeme(srcroot,destroot,outfile,secondhand,"",cfgfiledict,mymtime):
7379 # now, it's time for dealing our second hand; we'll loop until we can't merge anymore. The rest are
7380 # broken symlinks. We'll merge them too.
7382 while len(secondhand) and len(secondhand)!=lastlen:
7383 # clear the thirdhand. Anything from our second hand that
7384 # couldn't get merged will be added to thirdhand.
7387 self.mergeme(srcroot,destroot,outfile,thirdhand,secondhand,cfgfiledict,mymtime)
7390 lastlen=len(secondhand)
7392 # our thirdhand now becomes our secondhand. It's ok to throw
7393 # away secondhand since thirdhand contains all the stuff that
7394 # couldn't be merged.
7395 secondhand = thirdhand
7398 # force merge of remaining symlinks (broken or circular; oh well)
7399 self.mergeme(srcroot,destroot,outfile,None,secondhand,cfgfiledict,mymtime)
7404 #if we opened it, close it
# Safely unmerge the previously installed instance, then promote the
# staged -MERGING- dir to the final dbpkgdir.
7408 if os.path.exists(self.dbpkgdir):
7409 writemsg_stdout(">>> Safely unmerging already-installed instance...\n")
7410 self.dbdir = self.dbpkgdir
7411 self.unmerge(oldcontents, trimworld=0, ldpath_mtimes=prev_mtimes)
7412 self.dbdir = self.dbtmpdir
7413 writemsg_stdout(">>> Original instance of package unmerged safely.\n")
7415 # We hold both directory locks.
7416 self.dbdir = self.dbpkgdir
7418 movefile(self.dbtmpdir, self.dbpkgdir, mysettings=self.settings)
7419 contents = self.getcontents()
7421 #write out our collection of md5sums
7422 if cfgfiledict.has_key("IGNORE"):
7423 del cfgfiledict["IGNORE"]
7425 my_private_path = os.path.join(destroot, PRIVATE_PATH)
7426 if not os.path.exists(my_private_path):
7427 os.makedirs(my_private_path)
7428 os.chown(my_private_path, os.getuid(), portage_gid)
7429 os.chmod(my_private_path, 02770)
7431 writedict(cfgfiledict, conf_mem_file)
7435 a = doebuild(myebuild, "postinst", destroot, self.settings, use_cache=0,
7436 tree=self.treetype, mydbapi=mydbapi, vartree=self.vartree)
7438 # XXX: Decide how to handle failures here.
7440 writemsg("!!! FAILED postinst: "+str(a)+"\n", noiselevel=-1)
# Detect a downgrade (any other installed version compares newer) so
# env_update can skip remaking symlinks.
7444 for v in otherversions:
7445 if pkgcmp(catpkgsplit(self.pkg)[1:], catpkgsplit(v)[1:]) < 0:
7448 #update environment settings, library paths. DO NOT change symlinks.
7449 env_update(makelinks=(not downgrade),
7450 target_root=self.settings["ROOT"], prev_mtimes=prev_mtimes,
7452 #dircache may break autoclean because it remembers the -MERGING-pkg file
7454 if dircache.has_key(self.dbcatdir):
7455 del dircache[self.dbcatdir]
7456 writemsg_stdout(">>> %s %s\n" % (self.mycpv,"merged."))
7458 # Process ebuild logfiles
7459 elog_process(self.mycpv, self.settings)
7460 if "noclean" not in self.settings.features:
7461 doebuild(myebuild, "clean", destroot, self.settings,
7462 tree=self.treetype, mydbapi=mydbapi, vartree=self.vartree)
7465 def mergeme(self,srcroot,destroot,outfile,secondhand,stufftomerge,cfgfiledict,thismtime):
7468 This function handles actual merging of the package contents to the livefs.
7469 It also handles config protection.
7471 @param srcroot: Where are we copying files from (usually ${D})
7472 @type srcroot: String (Path)
7473 @param destroot: Typically ${ROOT}
7474 @type destroot: String (Path)
7475 @param outfile: File to log operations to
7476 @type outfile: File Object
7477 @param secondhand: A set of items to merge in pass two (usually
7478 or symlinks that point to non-existing files that may get merged later)
7479 @type secondhand: List
7480 @param stufftomerge: Either a diretory to merge, or a list of items.
7481 @type stufftomerge: String or List
7482 @param cfgfiledict: { File:mtime } mapping for config_protected files
7483 @type cfgfiledict: Dictionary
7484 @param thismtime: The current time (typically long(time.time())
7485 @type thismtime: Long
7486 @rtype: None or Boolean
7492 from os.path import sep, join
7493 srcroot = normalize_path(srcroot).rstrip(sep) + sep
7494 destroot = normalize_path(destroot).rstrip(sep) + sep
7495 # this is supposed to merge a list of files. There will be 2 forms of argument passing.
7496 if type(stufftomerge)==types.StringType:
7497 #A directory is specified. Figure out protection paths, listdir() it and process it.
7498 mergelist = listdir(join(srcroot, stufftomerge))
7501 mergelist=stufftomerge
7504 mysrc = join(srcroot, offset, x)
7505 mydest = join(destroot, offset, x)
7506 # myrealdest is mydest without the $ROOT prefix (makes a difference if ROOT!="/")
7507 myrealdest = join(sep, offset, x)
7508 # stat file once, test using S_* macros many times (faster that way)
7510 mystat=os.lstat(mysrc)
7513 writemsg(red("!!! ERROR: There appears to be ")+bold("FILE SYSTEM CORRUPTION.")+red(" A file that is listed\n"))
7514 writemsg(red("!!! as existing is not capable of being stat'd. If you are using an\n"))
7515 writemsg(red("!!! experimental kernel, please boot into a stable one, force an fsck,\n"))
7516 writemsg(red("!!! and ensure your filesystem is in a sane state. ")+bold("'shutdown -Fr now'\n"))
7517 writemsg(red("!!! File: ")+str(mysrc)+"\n", noiselevel=-1)
7518 writemsg(red("!!! Error: ")+str(e)+"\n", noiselevel=-1)
7520 except Exception, e:
7522 writemsg(red("!!! ERROR: An unknown error has occurred during the merge process.\n"))
7523 writemsg(red("!!! A stat call returned the following error for the following file:"))
7524 writemsg( "!!! Please ensure that your filesystem is intact, otherwise report\n")
7525 writemsg( "!!! this as a portage bug at bugs.gentoo.org. Append 'emerge info'.\n")
7526 writemsg( "!!! File: "+str(mysrc)+"\n", noiselevel=-1)
7527 writemsg( "!!! Error: "+str(e)+"\n", noiselevel=-1)
7531 mymode=mystat[stat.ST_MODE]
7532 # handy variables; mydest is the target object on the live filesystems;
7533 # mysrc is the source object in the temporary install dir
7535 mydmode = os.lstat(mydest).st_mode
7537 if e.errno != errno.ENOENT:
7540 #dest file doesn't exist
7543 if stat.S_ISLNK(mymode):
7544 # we are merging a symbolic link
7545 myabsto=abssymlink(mysrc)
7546 if myabsto.startswith(srcroot):
7547 myabsto=myabsto[len(srcroot):]
7548 myabsto = myabsto.lstrip(sep)
7549 myto=os.readlink(mysrc)
7550 if self.settings and self.settings["D"]:
7551 if myto.startswith(self.settings["D"]):
7552 myto=myto[len(self.settings["D"]):]
7553 # myrealto contains the path of the real file to which this symlink points.
7554 # we can simply test for existence of this file to see if the target has been merged yet
7555 myrealto = normalize_path(os.path.join(destroot, myabsto))
7558 if not stat.S_ISLNK(mydmode):
7559 if stat.S_ISDIR(mydmode):
7560 # directory in the way: we can't merge a symlink over a directory
7561 # we won't merge this, continue with next file...
7564 if os.path.exists(mysrc) and stat.S_ISDIR(os.stat(mysrc)[stat.ST_MODE]):
7565 # Kill file blocking installation of symlink to dir #71787
7567 elif self.isprotected(mydest):
7568 # Use md5 of the target in ${D} if it exists...
7570 newmd5 = portage_checksum.perform_md5(
7571 join(srcroot, myabsto))
7572 except portage_exception.FileNotFound:
7573 # Maybe the target is merged already.
7575 newmd5 = portage_checksum.perform_md5(
7577 except portage_exception.FileNotFound:
7579 mydest = new_protect_filename(mydest,newmd5=newmd5)
7581 # if secondhand is None it means we're operating in "force" mode and should not create a second hand.
7582 if (secondhand!=None) and (not os.path.exists(myrealto)):
7583 # either the target directory doesn't exist yet or the target file doesn't exist -- or
7584 # the target is a broken symlink. We will add this file to our "second hand" and merge
7586 secondhand.append(mysrc[len(srcroot):])
7588 # unlinking no longer necessary; "movefile" will overwrite symlinks atomically and correctly
7589 mymtime=movefile(mysrc,mydest,newmtime=thismtime,sstat=mystat, mysettings=self.settings)
7591 writemsg_stdout(">>> %s -> %s\n" % (mydest, myto))
7592 outfile.write("sym "+myrealdest+" -> "+myto+" "+str(mymtime)+"\n")
7594 print "!!! Failed to move file."
7595 print "!!!",mydest,"->",myto
7597 elif stat.S_ISDIR(mymode):
7598 # we are merging a directory
7600 # destination exists
7603 # Save then clear flags on dest.
7604 dflags=bsd_chflags.lgetflags(mydest)
7605 if dflags != 0 and bsd_chflags.lchflags(mydest, 0) < 0:
7606 writemsg("!!! Couldn't clear flags on '"+mydest+"'.\n",
7609 if not os.access(mydest, os.W_OK):
7610 pkgstuff = pkgsplit(self.pkg)
7611 writemsg("\n!!! Cannot write to '"+mydest+"'.\n", noiselevel=-1)
7612 writemsg("!!! Please check permissions and directories for broken symlinks.\n")
7613 writemsg("!!! You may start the merge process again by using ebuild:\n")
7614 writemsg("!!! ebuild "+self.settings["PORTDIR"]+"/"+self.cat+"/"+pkgstuff[0]+"/"+self.pkg+".ebuild merge\n")
7615 writemsg("!!! And finish by running this: env-update\n\n")
7618 if stat.S_ISLNK(mydmode) or stat.S_ISDIR(mydmode):
7619 # a symlink to an existing directory will work for us; keep it:
7620 writemsg_stdout("--- %s/\n" % mydest)
7622 bsd_chflags.lchflags(mydest, dflags)
7624 # a non-directory and non-symlink-to-directory. Won't work for us. Move out of the way.
7625 if movefile(mydest,mydest+".backup", mysettings=self.settings) is None:
7627 print "bak",mydest,mydest+".backup"
7628 #now create our directory
7629 if self.settings.selinux_enabled():
7630 sid = selinux.get_sid(mysrc)
7631 selinux.secure_mkdir(mydest,sid)
7635 bsd_chflags.lchflags(mydest, dflags)
7636 os.chmod(mydest,mystat[0])
7637 os.chown(mydest,mystat[4],mystat[5])
7638 writemsg_stdout(">>> %s/\n" % mydest)
7640 #destination doesn't exist
7641 if self.settings.selinux_enabled():
7642 sid = selinux.get_sid(mysrc)
7643 selinux.secure_mkdir(mydest,sid)
7646 os.chmod(mydest,mystat[0])
7647 os.chown(mydest,mystat[4],mystat[5])
7648 writemsg_stdout(">>> %s/\n" % mydest)
7649 outfile.write("dir "+myrealdest+"\n")
7650 # recurse and merge this directory
7651 if self.mergeme(srcroot, destroot, outfile, secondhand,
7652 join(offset, x), cfgfiledict, thismtime):
7654 elif stat.S_ISREG(mymode):
7655 # we are merging a regular file
7656 mymd5=portage_checksum.perform_md5(mysrc,calc_prelink=1)
7657 # calculate config file protection stuff
7658 mydestdir=os.path.dirname(mydest)
7662 # destination file exists
7663 if stat.S_ISDIR(mydmode):
7664 # install of destination is blocked by an existing directory with the same name
7666 writemsg_stdout("!!! %s\n" % mydest)
7667 elif stat.S_ISREG(mydmode) or (stat.S_ISLNK(mydmode) and os.path.exists(mydest) and stat.S_ISREG(os.stat(mydest)[stat.ST_MODE])):
7669 # install of destination is blocked by an existing regular file,
7670 # or by a symlink to an existing regular file;
7671 # now, config file management may come into play.
7672 # we only need to tweak mydest if cfg file management is in play.
7673 if self.isprotected(mydest):
7674 # we have a protection path; enable config file management.
7675 destmd5=portage_checksum.perform_md5(mydest,calc_prelink=1)
7677 #file already in place; simply update mtimes of destination
7678 os.utime(mydest,(thismtime,thismtime))
7682 if mymd5 == cfgfiledict.get(myrealdest, [None])[0]:
7683 """ An identical update has previously been
7684 merged. Skip it unless the user has chosen
7687 moveme = cfgfiledict["IGNORE"]
7688 cfgprot = cfgfiledict["IGNORE"]
7693 # Merging a new file, so update confmem.
7694 cfgfiledict[myrealdest] = [mymd5]
7695 elif destmd5 == cfgfiledict.get(myrealdest, [None])[0]:
7696 """A previously remembered update has been
7697 accepted, so it is removed from confmem."""
7698 del cfgfiledict[myrealdest]
7700 mydest = new_protect_filename(mydest, newmd5=mymd5)
7702 # whether config protection or not, we merge the new file the
7703 # same way. Unless moveme=0 (blocking directory)
7705 mymtime=movefile(mysrc,mydest,newmtime=thismtime,sstat=mystat, mysettings=self.settings)
7711 # We need to touch the destination so that on --update the
7712 # old package won't yank the file with it. (non-cfgprot related)
7713 os.utime(mydest,(thismtime,thismtime))
7715 if self.settings["USERLAND"] == "Darwin" and myrealdest[-2:] == ".a":
7717 # XXX kludge, can be killed when portage stops relying on
7718 # md5+mtime, and uses refcounts
7719 # alright, we've fooled w/ mtime on the file; this pisses off static archives
7720 # basically internal mtime != file's mtime, so the linker (falsely) thinks
7721 # the archive is stale, and needs to have it's toc rebuilt.
7723 myf = open(mydest, "r+")
7725 # ar mtime field is digits padded with spaces, 12 bytes.
7726 lms=str(thismtime+5).ljust(12)
7729 if magic != "!<arch>\n":
7730 # not an archive (dolib.a from portage.py makes it here fex)
7733 st = os.stat(mydest)
7734 while myf.tell() < st.st_size - 12:
7741 # skip uid/gid/mperm
7744 # read the archive member's size
7745 x=long(myf.read(10))
7747 # skip the trailing newlines, and add the potential
7748 # extra padding byte if it's not an even size
7749 myf.seek(x + 2 + (x % 2),1)
7751 # and now we're at the end. yay.
7753 mymd5 = portage_checksum.perform_md5(mydest, calc_prelink=1)
7754 os.utime(mydest,(thismtime,thismtime))
7758 outfile.write("obj "+myrealdest+" "+mymd5+" "+str(mymtime)+"\n")
7759 writemsg_stdout("%s %s\n" % (zing,mydest))
7761 # we are merging a fifo or device node
7764 # destination doesn't exist
7765 if movefile(mysrc,mydest,newmtime=thismtime,sstat=mystat, mysettings=self.settings)!=None:
7769 if stat.S_ISFIFO(mymode):
7770 outfile.write("fif %s\n" % myrealdest)
7772 outfile.write("dev %s\n" % myrealdest)
7773 writemsg_stdout(zing+" "+mydest+"\n")
# Merge the built image at mergeroot into the live filesystem at myroot.
# Thin wrapper: all real work (collision checks, file moves, vdb entry
# creation) is delegated to self.treewalk().  inforoot is the build-info
# directory; myebuild, when given, is used for ebuild phase execution.
# NOTE(review): original lines 7777-7778 are missing from this dump —
# confirm against upstream before relying on exact behavior.
7775 def merge(self, mergeroot, inforoot, myroot, myebuild=None, cleanup=0,
7776 mydbapi=None, prev_mtimes=None):
7779 return self.treewalk(mergeroot, myroot, inforoot, myebuild,
7780 cleanup=cleanup, mydbapi=mydbapi, prev_mtimes=prev_mtimes)
def getstring(self,name):
	"""Return the contents of db file *name* with all whitespace
	(including newlines) collapsed to single spaces.  Returns ""
	when the file does not exist.

	The dump of this function had lost the missing-file fallback and
	the close() call; both are restored here, with close() guaranteed
	via try/finally even if read() raises."""
	myfilename = self.dbdir+"/"+name
	if not os.path.exists(myfilename):
		return ""
	myfile = open(myfilename, "r")
	try:
		# split() with no argument collapses any run of whitespace.
		mydata = myfile.read().split()
	finally:
		myfile.close()
	return " ".join(mydata)
def copyfile(self,fname):
	# Copy an arbitrary file into this package's db directory,
	# keeping only its basename as the entry name.
	destination = self.dbdir + "/" + os.path.basename(fname)
	shutil.copyfile(fname, destination)
def getfile(self,fname):
	"""Return the raw contents of db file *fname*, or "" when the
	file does not exist.

	The dump of this function had lost the missing-file fallback, the
	close() call, and the final return; all are restored, with close()
	guaranteed via try/finally."""
	myfilename = self.dbdir+"/"+fname
	if not os.path.exists(myfilename):
		return ""
	myfile = open(myfilename, "r")
	try:
		mydata = myfile.read()
	finally:
		myfile.close()
	return mydata
def setfile(self,fname,data):
	# Replace db entry *fname* with *data* atomically; write_atomic
	# writes to a temporary file and renames it into place so readers
	# never observe a partially written entry.
	target_path = os.path.join(self.dbdir, fname)
	write_atomic(target_path, data)
def getelements(self,ename):
	"""Return the whitespace-separated tokens of db file *ename* as a
	flat list, or [] when the file does not exist.

	The dump of this function had lost the guard return, the close()
	call, the accumulator initialization and the final return; all are
	restored.  The original x[:-1] newline strip is kept verbatim."""
	myfilename = self.dbdir+"/"+ename
	if not os.path.exists(myfilename):
		return []
	myelement = open(myfilename, "r")
	try:
		mylines = myelement.readlines()
	finally:
		myelement.close()
	mylist = []
	for x in mylines:
		# x[:-1] drops the trailing newline before tokenizing.
		for y in x[:-1].split():
			mylist.append(y)
	return mylist
def setelements(self,mylist,ename):
	"""Write each element of *mylist* to db file *ename*, one element
	per line (the inverse of getelements for single-token lines).

	The dump of this function had lost the loop header and the close()
	call; both are restored, with close() guaranteed via try/finally."""
	myelement = open(self.dbdir+"/"+ename, "w")
	try:
		for x in mylist:
			myelement.write(x+"\n")
	finally:
		myelement.close()
def isregular(self):
	"""True when this db entry has a CATEGORY file, i.e. it represents
	a regular installed package (a dblink can be virtual *and* regular)."""
	category_file = os.path.join(self.dbdir, "CATEGORY")
	return os.path.exists(category_file)
class FetchlistDict(UserDict.DictMixin):
	"""This provide a mapping interface to retrieve fetch lists. It's used
	to allow portage_manifest.Manifest to access fetch lists via a standard
	mapping interface rather than use the dbapi directly.

	Fix: the dump had lost the `def keys(self):` header, leaving that
	method's docstring and body orphaned and breaking has_key(); the
	header is restored here."""
	def __init__(self, pkgdir, settings, mydbapi):
		"""pkgdir is a directory containing ebuilds and settings is passed into
		portdbapi.getfetchlist for __getitem__ calls."""
		self.pkgdir = pkgdir
		# cp is "category/package" -- the last two path components of pkgdir.
		self.cp = os.sep.join(pkgdir.split(os.sep)[-2:])
		self.settings = settings
		# mytree is the repository root, two levels above pkgdir.
		self.mytree = os.path.realpath(os.path.dirname(os.path.dirname(pkgdir)))
		self.portdb = mydbapi
	def __getitem__(self, pkg_key):
		"""Returns the complete fetch list for a given package."""
		return self.portdb.getfetchlist(pkg_key, mysettings=self.settings,
			all=True, mytree=self.mytree)[1]
	def has_key(self, pkg_key):
		"""Returns true if the given package exists within pkgdir."""
		return pkg_key in self.keys()
	def keys(self):
		"""Returns keys for all packages within pkgdir"""
		return self.portdb.cp_list(self.cp, mytree=self.mytree)
# Merge a binary package (.tbz2) into the live filesystem.
# NOTE(review): many original lines (try/except/finally skeleton, returns)
# are missing from this dump; the surviving lines are annotated as-is.
7852 def pkgmerge(mytbz2, myroot, mysettings, mydbapi=None, vartree=None, prev_mtimes=None):
7853 """will merge a .tbz2 file, returning a list of runtime dependencies
7854 that must be satisfied, or None if there was a merge error. This
7855 code assumes the package exists."""
# Default the binary dbapi and vartree from the legacy global `db` when
# the caller did not supply them.
7858 mydbapi = db[myroot]["bintree"].dbapi
7860 vartree = db[myroot]["vartree"]
7861 if mytbz2[-5:]!=".tbz2":
7862 print "!!! Not a .tbz2 file"
7866 builddir_lock = None
7869 """ Don't lock the tbz2 file because the filesytem could be readonly or
7870 shared by a cluster."""
7871 #tbz2_lock = portage_locks.lockfile(mytbz2, wantnewlockfile=1)
# Package name = tbz2 basename minus the ".tbz2" suffix; category comes
# from the xpak metadata embedded in the archive.
7873 mypkg = os.path.basename(mytbz2)[:-5]
7874 xptbz2 = xpak.tbz2(mytbz2)
7875 mycat = xptbz2.getfile("CATEGORY")
7877 writemsg("!!! CATEGORY info missing from info chunk, aborting...\n",
7880 mycat = mycat.strip()
7882 # These are the same directories that would be used at build time.
7883 builddir = os.path.join(
7884 mysettings["PORTAGE_TMPDIR"], "portage", mycat, mypkg)
7885 catdir = os.path.dirname(builddir)
7886 pkgloc = os.path.join(builddir, "image")
7887 infloc = os.path.join(builddir, "build-info")
7888 myebuild = os.path.join(
7889 infloc, os.path.basename(mytbz2)[:-4] + "ebuild")
# Lock the category dir while creating the build dir so concurrent
# merges in the same category do not race on directory creation.
7890 portage_util.ensure_dirs(os.path.dirname(catdir),
7891 uid=portage_uid, gid=portage_gid, mode=070, mask=0)
7892 catdir_lock = portage_locks.lockdir(catdir)
7893 portage_util.ensure_dirs(catdir,
7894 uid=portage_uid, gid=portage_gid, mode=070, mask=0)
7895 builddir_lock = portage_locks.lockdir(builddir)
7897 portage_locks.unlockdir(catdir_lock)
# Clear any stale build dir; ENOENT (already gone) is tolerated.
7901 shutil.rmtree(builddir)
7902 except (IOError, OSError), e:
7903 if e.errno != errno.ENOENT:
7906 for mydir in (builddir, pkgloc, infloc):
7907 portage_util.ensure_dirs(mydir, uid=portage_uid,
7908 gid=portage_gid, mode=0755)
7909 writemsg_stdout(">>> Extracting info\n")
7910 xptbz2.unpackinfo(infloc)
7911 mysettings.load_infodir(infloc)
7912 # Store the md5sum in the vdb.
7913 fp = open(os.path.join(infloc, "BINPKGMD5"), "w")
7914 fp.write(str(portage_checksum.perform_md5(mytbz2))+"\n")
7917 debug = mysettings.get("PORTAGE_DEBUG", "") == "1"
7919 # Eventually we'd like to pass in the saved ebuild env here.
7920 retval = doebuild(myebuild, "setup", myroot, mysettings, debug=debug,
7921 tree="bintree", mydbapi=mydbapi, vartree=vartree)
7922 if retval != os.EX_OK:
7923 writemsg("!!! Setup failed: %s\n" % retval, noiselevel=-1)
# Extract the package image with an external bzip2|tar pipeline.
7926 writemsg_stdout(">>> Extracting %s\n" % mypkg)
7927 retval = portage_exec.spawn_bash(
7928 "bzip2 -dqc -- '%s' | tar -xp -C '%s' -f -" % (mytbz2, pkgloc),
7929 env=mysettings.environ())
7930 if retval != os.EX_OK:
7931 writemsg("!!! Error Extracting '%s'\n" % mytbz2, noiselevel=-1)
7933 #portage_locks.unlockfile(tbz2_lock)
# Hand the extracted image to dblink.merge(), which performs the real
# filesystem merge and vdb bookkeeping.
7936 mylink = dblink(mycat, mypkg, myroot, mysettings, vartree=vartree,
7938 retval = mylink.merge(pkgloc, infloc, myroot, myebuild, cleanup=0,
7939 mydbapi=mydbapi, prev_mtimes=prev_mtimes)
7943 portage_locks.unlockfile(tbz2_lock)
# Cleanup: remove the build dir and release locks; the category dir is
# re-locked for removal in case it is now empty.
7946 shutil.rmtree(builddir)
7947 except (IOError, OSError), e:
7948 if e.errno != errno.ENOENT:
7951 portage_locks.unlockdir(builddir_lock)
7954 # Lock catdir for removal if empty.
7955 catdir_lock = portage_locks.lockdir(catdir)
7961 if e.errno not in (errno.ENOENT,
7962 errno.ENOTEMPTY, errno.EEXIST):
7965 portage_locks.unlockdir(catdir_lock)
# Warn the user when the selected profile is marked deprecated.  The
# marker file's first line names the replacement profile; any further
# lines are free-form upgrade instructions, echoed verbatim.
# NOTE(review): a few original lines (e.g. the early return and some
# noiselevel continuation lines) are missing from this dump.
7967 def deprecated_profile_check():
7968 if not os.access(DEPRECATED_PROFILE_FILE, os.R_OK):
7970 deprecatedfile = open(DEPRECATED_PROFILE_FILE, "r")
7971 dcontent = deprecatedfile.readlines()
7972 deprecatedfile.close()
7973 newprofile = dcontent[0]
7974 writemsg(red("\n!!! Your current profile is deprecated and not supported anymore.\n"),
7976 writemsg(red("!!! Please upgrade to the following profile if possible:\n"),
7978 writemsg(8*" "+green(newprofile)+"\n", noiselevel=-1)
7979 if len(dcontent) > 1:
7980 writemsg("To upgrade do the following steps:\n", noiselevel=-1)
7981 for myline in dcontent[1:]:
7982 writemsg(myline, noiselevel=-1)
7983 writemsg("\n\n", noiselevel=-1)
7986 # gets virtual package settings
# Deprecated module-level shim: warns, then delegates to the legacy
# global `settings` object's getvirtuals().
7987 def getvirtuals(myroot):
7989 writemsg("--- DEPRECATED call to getvirtual\n")
7990 return settings.getvirtuals(myroot)
# Persist the in-memory mtime database to disk.  Defaults to the global
# mtimedb/mtimedbfile when no dict/filename is given; the write goes
# through atomic_ofstream so a crash cannot leave a truncated pickle.
# NOTE(review): several original lines (early return, the copy into `d`,
# the try, and the except body) are missing from this dump.
7992 def commit_mtimedb(mydict=None, filename=None):
7995 if "mtimedb" not in globals() or mtimedb is None:
7999 if filename is None:
8001 filename = mtimedbfile
8002 mydict["version"] = VERSION
8003 d = {} # for full backward compat, pickle it as a plain dict object.
8006 f = atomic_ofstream(filename)
# Protocol -1 = highest available pickle protocol.
8007 cPickle.dump(d, f, -1)
8009 portage_util.apply_secpass_permissions(filename, uid=uid, gid=portage_gid, mode=0664)
8010 except (IOError, OSError), e:
# Interpreter-exit hook: flush portdbapi caches when running with
# sufficient privileges and outside the sandbox.
# NOTE(review): the `def portageexit():` header (original line 8013)
# is missing from this dump; the body below belongs to it.
8014 global uid,portage_gid,portdb,db
8015 if secpass and not os.environ.has_key("SANDBOX_ACTIVE"):
8016 close_portdbapi_caches()
# Register the hook so caches are flushed on normal interpreter exit.
8019 atexit_register(portageexit)
# Apply pending package move/slotmove directives from
# $PORTDIR/profiles/updates/ to the vdb, binary packages, and config files.
# NOTE(review): numerous original lines (docstring delimiters, try/except
# skeleton, returns, the `myupd`/`timestamps` initializations) are missing
# from this dump; surviving lines are annotated in place.
8021 def global_updates(mysettings, trees, prev_mtimes):
8023 Perform new global updates if they exist in $PORTDIR/profiles/updates/.
8025 @param mysettings: A config instance for ROOT="/".
8026 @type mysettings: config
8027 @param trees: A dictionary containing portage trees.
8029 @param prev_mtimes: A dictionary containing mtimes of files located in
8030 $PORTDIR/profiles/updates/.
8031 @type prev_mtimes: dict
8032 @rtype: None or List
8033 @return: None if no were no updates, otherwise a list of update commands
8034 that have been performed.
8036 # only do this if we're root and not running repoman/ebuild digest
8038 if secpass < 2 or "SANDBOX_ACTIVE" in os.environ:
8040 updpath = os.path.join(mysettings["PORTDIR"], "profiles", "updates")
# fixpackages ignores previously-seen mtimes and reprocesses everything.
8043 if mysettings["PORTAGE_CALLER"] == "fixpackages":
8044 update_data = grab_updates(updpath)
8046 update_data = grab_updates(updpath, prev_mtimes)
8047 except portage_exception.DirectoryNotFound:
8048 writemsg("--- 'profiles/updates' is empty or not available. Empty portage tree?\n")
8051 if len(update_data) > 0:
8052 do_upgrade_packagesmessage = 0
8055 for mykey, mystat, mycontent in update_data:
8056 writemsg_stdout("\n\n")
8057 writemsg_stdout(green("Performing Global Updates: ")+bold(mykey)+"\n")
8058 writemsg_stdout("(Could take a couple of minutes if you have a lot of binary packages.)\n")
8059 writemsg_stdout(" "+bold(".")+"='update pass' "+bold("*")+"='binary update' "+bold("@")+"='/var/db move'\n"+" "+bold("s")+"='/var/db SLOT move' "+bold("S")+"='binary SLOT move' "+bold("p")+"='update /etc/portage/package.*'\n")
8060 valid_updates, errors = parse_updates(mycontent)
8061 myupd.extend(valid_updates)
# One progress dot per accepted directive.
8062 writemsg_stdout(len(valid_updates) * "." + "\n")
8063 if len(errors) == 0:
8064 # Update our internal mtime since we
8065 # processed all of our directives.
8066 timestamps[mykey] = long(mystat.st_mtime)
8069 writemsg("%s\n" % msg, noiselevel=-1)
8071 update_config_files("/",
8072 mysettings.get("CONFIG_PROTECT","").split(),
8073 mysettings.get("CONFIG_PROTECT_MASK","").split(),
8076 trees["/"]["bintree"] = binarytree("/", mysettings["PKGDIR"],
8077 settings=mysettings)
# Replay each parsed directive against both the vdb and binary trees.
8078 for update_cmd in myupd:
8079 if update_cmd[0] == "move":
8080 trees["/"]["vartree"].dbapi.move_ent(update_cmd)
8081 trees["/"]["bintree"].move_ent(update_cmd)
8082 elif update_cmd[0] == "slotmove":
8083 trees["/"]["vartree"].dbapi.move_slot_ent(update_cmd)
8084 trees["/"]["bintree"].move_slot_ent(update_cmd)
8086 # The above global updates proceed quickly, so they
8087 # are considered a single mtimedb transaction.
8088 if len(timestamps) > 0:
8089 # We do not update the mtime in the mtimedb
8090 # until after _all_ of the above updates have
8091 # been processed because the mtimedb will
8092 # automatically commit when killed by ctrl C.
8093 for mykey, mtime in timestamps.iteritems():
8094 prev_mtimes[mykey] = mtime
8096 # We gotta do the brute force updates for these now.
8097 if mysettings["PORTAGE_CALLER"] == "fixpackages" or \
8098 "fixpackages" in mysettings.features:
8099 trees["/"]["bintree"].update_ents(myupd)
8101 do_upgrade_packagesmessage = 1
8103 # Update progress above is indicated by characters written to stdout so
8104 # we print a couple new lines here to separate the progress output from
8109 if do_upgrade_packagesmessage and \
8110 listdir(os.path.join(mysettings["PKGDIR"], "All"), EmptyOnError=1):
8111 writemsg_stdout(" ** Skipping packages. Run 'fixpackages' or set it in FEATURES to fix the")
8112 writemsg_stdout("\n tbz2's in the packages directory. "+bold("Note: This can take a very long time."))
8113 writemsg_stdout("\n")
8117 #continue setting up other trees
# dict subclass backing the on-disk mtime database.  Loads the pickle at
# construction, normalizes legacy key layouts, prunes unknown keys, and
# commits back to disk only when contents have actually changed.
# NOTE(review): several original lines (file open/close, try skeleton,
# the commit() def header, and some fallbacks) are missing from this dump.
8119 class MtimeDB(dict):
8120 def __init__(self, filename):
8122 self.filename = filename
8123 self._load(filename)
8125 def _load(self, filename):
8128 mypickle = cPickle.Unpickler(f)
# Disabling find_global prevents the unpickler from instantiating
# arbitrary classes from untrusted data.
8129 mypickle.find_global = None
8133 except (IOError, OSError, EOFError, cPickle.UnpicklingError):
# Legacy layout stored updates under "old"; migrate it forward.
8137 d["updates"] = d["old"]
8142 d.setdefault("starttime", 0)
8143 d.setdefault("version", "")
8144 for k in ("info", "ldpath", "updates"):
8147 mtimedbkeys = set(("info", "ldpath", "resume", "resume_backup",
8148 "starttime", "updates", "version"))
8151 if k not in mtimedbkeys:
8152 writemsg("Deleting invalid mtimedb key: %s\n" % str(k))
# Snapshot of the loaded state, used by commit() for change detection.
8155 self._clean_data = copy.deepcopy(d)
8158 if not self.filename:
8162 # Only commit if the internal state has changed.
8163 if d != self._clean_data:
8164 commit_mtimedb(mydict=d, filename=self.filename)
8165 self._clean_data = copy.deepcopy(d)
# Build the {root: {virtuals, vartree, porttree, bintree}} mapping used
# throughout portage.  Trees are registered as lazy singletons so nothing
# is constructed until first access.  When ROOT != "/", a second config
# for "/" is also set up (cross-root operation).
# NOTE(review): several original lines (trees initialization, return,
# and parts of the ROOT!="/" branch) are missing from this dump.
8167 def create_trees(config_root=None, target_root=None, trees=None):
8171 # clean up any existing portdbapi instances
8172 for myroot in trees:
8173 portdb = trees[myroot]["porttree"].dbapi
8174 portdb.close_caches()
8175 portdbapi.portdbapi_instances.remove(portdb)
8176 del trees[myroot]["porttree"], myroot, portdb
8178 settings = config(config_root=config_root, target_root=target_root,
8179 config_incrementals=portage_const.INCREMENTALS)
8183 myroots = [(settings["ROOT"], settings)]
8184 if settings["ROOT"] != "/":
8185 settings = config(config_root=None, target_root=None,
8186 config_incrementals=portage_const.INCREMENTALS)
8189 myroots.append((settings["ROOT"], settings))
8191 for myroot, mysettings in myroots:
8192 trees[myroot] = portage_util.LazyItemsDict(trees.get(myroot, None))
8193 trees[myroot].addLazySingleton("virtuals", mysettings.getvirtuals, myroot)
8194 trees[myroot].addLazySingleton(
8195 "vartree", vartree, myroot, categories=mysettings.categories,
8196 settings=mysettings)
8197 trees[myroot].addLazySingleton("porttree",
8198 portagetree, myroot, settings=mysettings)
8199 trees[myroot].addLazySingleton("bintree",
8200 binarytree, myroot, mysettings["PKGDIR"], settings=mysettings)
8203 # Initialization of legacy globals. No functions/classes below this point
8204 # please! When the above functions and classes become independent of the
8205 # below global variables, it will be possible to make the below code
8206 # conditional on a backward compatibility flag (backward compatibility could
8207 # be disabled via an environment variable, for example). This will enable new
8208 # code that is aware of this flag to import portage without the unnecessary
8209 # overhead (and other issues!) of initializing the legacy globals.
# Populate the module-level legacy globals (db, settings, portdb, mtimedb,
# archlist, features, ...) that pre-API consumers import directly from
# this module.  Kept separate so it can be skipped via the
# PORTAGE_LEGACY_GLOBALS environment variable (see module bottom).
# NOTE(review): several original lines (umask call, kwargs init, the
# ROOT-selection conditional) are missing from this dump.
8211 def init_legacy_globals():
8212 global db, settings, root, portdb, selinux_enabled, mtimedbfile, mtimedb, \
8213 archlist, features, groups, pkglines, thirdpartymirrors, usedefaults, \
8214 profiledir, flushmtimedb
8216 # Portage needs to ensure a sane umask for the files it creates.
# Build create_trees() kwargs from the environment, defaulting to "/".
8220 for k, envvar in (("config_root", "PORTAGE_CONFIGROOT"), ("target_root", "ROOT")):
8221 kwargs[k] = os.environ.get(envvar, "/")
8223 db = create_trees(**kwargs)
8225 settings = db["/"]["vartree"].settings
8226 portdb = db["/"]["porttree"].dbapi
8230 settings = db[myroot]["vartree"].settings
8231 portdb = db[myroot]["porttree"].dbapi
8234 root = settings["ROOT"]
8236 mtimedbfile = os.path.join("/", CACHE_PATH.lstrip(os.path.sep), "mtimedb")
8237 mtimedb = MtimeDB(mtimedbfile)
8239 # ========================================================================
8241 # These attributes should not be used
8242 # within Portage under any circumstances.
8243 # ========================================================================
# Legacy convenience globals mirroring the active config object.
8244 archlist = settings.archlist()
8245 features = settings.features
8246 groups = settings["ACCEPT_KEYWORDS"].split()
8247 pkglines = settings.packages
8248 selinux_enabled = settings.selinux_enabled()
8249 thirdpartymirrors = settings.thirdpartymirrors()
8250 usedefaults = settings.use_defs
8252 if os.path.isdir(PROFILE_PATH):
8253 profiledir = PROFILE_PATH
# Deprecated no-op shim retained for external callers.
8254 def flushmtimedb(record):
8255 writemsg("portage.flushmtimedb() is DEPRECATED\n")
8256 # ========================================================================
8258 # These attributes should not be used
8259 # within Portage under any circumstances.
8260 # ========================================================================
8263 # The PORTAGE_LEGACY_GLOBALS environment variable is reserved for internal
8264 # use within Portage. External use of this variable is unsupported because
8265 # it is experimental and it's behavior is likely to change.
# Module import side effect: initialize the legacy globals unless the
# (internal, experimental) PORTAGE_LEGACY_GLOBALS opt-out variable is set.
8266 if "PORTAGE_LEGACY_GLOBALS" not in os.environ:
8267 init_legacy_globals()
8272 # ============================================================================
8273 # ============================================================================