1 # portage.py -- core Portage functionality
2 # Copyright 1998-2009 Gentoo Foundation
3 # Distributed under the terms of the GNU General Public License v2
6 from __future__ import print_function
8 VERSION="$Rev$"[6:-2] + "-svn"
10 # ===========================================================================
11 # START OF IMPORTS -- START OF IMPORTS -- START OF IMPORTS -- START OF IMPORT
12 # ===========================================================================
19 if not hasattr(errno, 'ESTALE'):
20 # ESTALE may not be defined on some systems, such as interix.
27 import cPickle as pickle
33 from subprocess import getstatusoutput as subprocess_getstatusoutput
35 from commands import getstatusoutput as subprocess_getstatusoutput
36 from time import sleep
37 from random import shuffle
38 from itertools import chain
42 # Temporarily delete these imports, to ensure that only the
43 # wrapped versions are imported by portage internals.
49 except ImportError as e:
50 sys.stderr.write("\n\n")
51 sys.stderr.write("!!! Failed to complete python imports. These are internal modules for\n")
52 sys.stderr.write("!!! python and failure here indicates that you have a problem with python\n")
53 sys.stderr.write("!!! itself and thus portage is not able to continue processing.\n\n")
55 sys.stderr.write("!!! You might consider starting python with verbose flags to see what has\n")
56 sys.stderr.write("!!! gone wrong. Here is the information we got for this exception:\n")
57 sys.stderr.write(" "+str(e)+"\n\n");
61 from portage.cache.cache_errors import CacheError
62 import portage.proxy.lazyimport
63 import portage.proxy as proxy
64 proxy.lazyimport.lazyimport(globals(),
66 'portage.checksum:perform_checksum,perform_md5,prelink_capable',
69 'portage.data:lchown,ostype,portage_gid,portage_uid,secpass,' + \
70 'uid,userland,userpriv_groups,wheelgid',
72 'portage.dep:best_match_to_list,dep_getcpv,dep_getkey,' + \
73 'get_operator,isjustname,isspecific,isvalidatom,' + \
74 'match_from_list,match_to_list',
75 'portage.eclass_cache',
76 'portage.env.loaders',
80 'portage.locks:lockdir,lockfile,unlockdir,unlockfile',
83 'portage.output:bold,colorize',
85 'portage.process:atexit_register,run_exitfuncs',
86 'portage.update:dep_transform,fixdbentries,grab_updates,' + \
87 'parse_updates,update_config_files,update_dbentries,' + \
90 'portage.util:atomic_ofstream,apply_secpass_permissions,' + \
91 'apply_recursive_permissions,dump_traceback,getconfig,' + \
92 'grabdict,grabdict_package,grabfile,grabfile_package,' + \
93 'map_dictlist_vals,new_protect_filename,normalize_path,' + \
94 'pickle_read,pickle_write,stack_dictlist,stack_dicts,' + \
95 'stack_lists,unique_array,varexpand,writedict,writemsg,' + \
96 'writemsg_stdout,write_atomic',
98 'portage.versions:best,catpkgsplit,catsplit,endversion_keys,' + \
99 'suffix_value@endversion,pkgcmp,pkgsplit,vercmp,ververify',
104 from portage.const import VDB_PATH, PRIVATE_PATH, CACHE_PATH, DEPCACHE_PATH, \
105 USER_CONFIG_PATH, MODULES_FILE_PATH, CUSTOM_PROFILE_PATH, PORTAGE_BASE_PATH, \
106 PORTAGE_BIN_PATH, PORTAGE_PYM_PATH, PROFILE_PATH, LOCALE_DATA_PATH, \
107 EBUILD_SH_BINARY, SANDBOX_BINARY, BASH_BINARY, \
108 MOVE_BINARY, PRELINK_BINARY, WORLD_FILE, MAKE_CONF_FILE, MAKE_DEFAULTS_FILE, \
109 DEPRECATED_PROFILE_FILE, USER_VIRTUALS_FILE, EBUILD_SH_ENV_FILE, \
110 INVALID_ENV_FILE, CUSTOM_MIRRORS_FILE, CONFIG_MEMORY_FILE,\
111 INCREMENTALS, EAPI, MISC_SH_BINARY, REPO_NAME_LOC, REPO_NAME_FILE
113 from portage.localization import _
115 except ImportError as e:
116 sys.stderr.write("\n\n")
117 sys.stderr.write("!!! Failed to complete portage imports. There are internal modules for\n")
118 sys.stderr.write("!!! portage and failure here indicates that you have a problem with your\n")
119 sys.stderr.write("!!! installation of portage. Please try a rescue portage located in the\n")
120 sys.stderr.write("!!! portage tree under '/usr/portage/sys-apps/portage/files/' (default).\n")
121 sys.stderr.write("!!! There is a README.RESCUE file that details the steps required to perform\n")
122 sys.stderr.write("!!! a recovery of portage.\n")
123 sys.stderr.write(" "+str(e)+"\n\n")
126 if sys.hexversion >= 0x3000000:
130 # Assume utf_8 fs encoding everywhere except in merge code, where the
131 # user's locale is respected.
135 'merge' : sys.getfilesystemencoding(),
136 'repo.content' : 'utf_8',
140 # This can happen if python is built with USE=build (stage 1).
141 if _encodings['merge'] is None:
142 _encodings['merge'] = 'ascii'
144 if sys.hexversion >= 0x3000000:
# Py3 variant: encode a str to bytes with the given codec; non-str values
# pass through unchanged. NOTE(review): the trailing "return s" line is
# missing from this elided copy (interior lines were dropped).
145 def _unicode_encode(s, encoding=_encodings['content'], errors='backslashreplace'):
146 if isinstance(s, str):
147 s = s.encode(encoding, errors)
# Py3 variant: decode bytes to str with the given codec; non-bytes values
# pass through unchanged. NOTE(review): the trailing "return s" line is
# missing from this elided copy.
150 def _unicode_decode(s, encoding=_encodings['content'], errors='replace'):
151 if isinstance(s, bytes):
152 s = str(s, encoding=encoding, errors=errors)
# Py2 variant (the "else" branch of the hexversion check above): encode a
# `unicode` object to a byte string. NOTE(review): the trailing "return s"
# line is missing from this elided copy.
155 def _unicode_encode(s, encoding=_encodings['content'], errors='backslashreplace'):
156 if isinstance(s, unicode):
157 s = s.encode(encoding, errors)
# Py2 variant: decode a byte string to `unicode`. NOTE(review): the
# trailing "return s" line is missing from this elided copy.
160 def _unicode_decode(s, encoding=_encodings['content'], errors='replace'):
161 if isinstance(s, bytes):
162 s = unicode(s, encoding=encoding, errors=errors)
# Adapter that lets unicode-oriented portage code call byte-oriented OS
# functions: arguments are encoded to bytes before the call, and results
# are decoded back to unicode afterwards.
# NOTE(review): this elided copy is missing interior lines (docstring
# delimiters, the "self._func = func" assignment, the positional-arg loop
# body, the list-rebuild scaffolding, and the final "return rval") —
# restore from upstream before relying on the code as written here.
165 class _unicode_func_wrapper(object):
167 Wraps a function, converts arguments from unicode to bytes,
168 and return values to unicode from bytes. Function calls
169 will raise UnicodeEncodeError if an argument fails to be
170 encoded with the required encoding. Return values that
171 are single strings are decoded with errors='replace'. Return
172 values that are lists of strings are decoded with errors='strict'
173 and elements that fail to be decoded are omitted from the returned
176 __slots__ = ('_func', '_encoding')
178 def __init__(self, func, encoding=_encodings['fs']):
180 self._encoding = encoding
182 def __call__(self, *args, **kwargs):
# Encode every positional and keyword argument strictly: an argument
# that cannot be represented in the target encoding raises rather
# than being silently mangled.
184 encoding = self._encoding
185 wrapped_args = [_unicode_encode(x, encoding=encoding, errors='strict')
188 wrapped_kwargs = dict(
189 (k, _unicode_encode(v, encoding=encoding, errors='strict'))
190 for k, v in kwargs.items())
194 rval = self._func(*wrapped_args, **wrapped_kwargs)
# Decode list/tuple results element-wise; elements that fail strict
# decoding are skipped (see class docstring above).
196 if isinstance(rval, (list, tuple)):
200 x = _unicode_decode(x, encoding=encoding, errors='strict')
201 except UnicodeDecodeError:
204 decoded_rval.append(x)
206 if isinstance(rval, tuple):
207 rval = tuple(decoded_rval)
# Single-string results use errors='replace' so a stray undecodable
# byte cannot make the whole call fail.
211 rval = _unicode_decode(rval, encoding=encoding, errors='replace')
# Proxy for a whole module (used below for os, shutil, and selinux):
# attribute access returns the underlying object wrapped so that callables
# get unicode<->bytes conversion and submodules get wrapped recursively.
# Uses object.__setattr__/__getattribute__ directly because __slots__ plus
# the __getattribute__ override would otherwise recurse.
# NOTE(review): elided copy — missing lines include the cache-dict setup in
# __init__, the cache-hit "return result", the override/type early returns,
# and the cache-store + final return at the end of __getattribute__.
215 class _unicode_module_wrapper(object):
217 Wraps a module and wraps all functions with _unicode_func_wrapper.
219 __slots__ = ('_mod', '_encoding', '_overrides', '_cache')
221 def __init__(self, mod, encoding=_encodings['fs'], overrides=None, cache=True):
222 object.__setattr__(self, '_mod', mod)
223 object.__setattr__(self, '_encoding', encoding)
224 object.__setattr__(self, '_overrides', overrides)
229 object.__setattr__(self, '_cache', cache)
231 def __getattribute__(self, attr):
232 cache = object.__getattribute__(self, '_cache')
233 if cache is not None:
234 result = cache.get(attr)
235 if result is not None:
237 result = getattr(object.__getattribute__(self, '_mod'), attr)
238 encoding = object.__getattribute__(self, '_encoding')
239 overrides = object.__getattribute__(self, '_overrides')
# Overrides are keyed by id() of the original object (see the
# _os_overrides dict below) so specific functions can bypass wrapping.
241 if overrides is not None:
242 override = overrides.get(id(result))
243 if override is not None:
245 elif isinstance(result, type):
247 elif type(result) is types.ModuleType:
248 result = _unicode_module_wrapper(result,
249 encoding=encoding, overrides=overrides)
250 elif hasattr(result, '__call__'):
251 result = _unicode_func_wrapper(result, encoding=encoding)
252 if cache is not None:
258 id(_os.fdopen) : _os.fdopen,
259 id(_os.popen) : _os.popen,
260 id(_os.read) : _os.read,
261 id(_os.statvfs) : _os.statvfs,
262 id(_os.system) : _os.system,
265 os = _unicode_module_wrapper(_os, overrides=_os_overrides,
266 encoding=_encodings['fs'])
267 _os_merge = _unicode_module_wrapper(_os,
268 encoding=_encodings['merge'], overrides=_os_overrides)
270 import shutil as _shutil
271 shutil = _unicode_module_wrapper(_shutil, encoding=_encodings['fs'])
273 # Imports below this point rely on the above unicode wrapper definitions.
276 _selinux_merge = None
278 import portage._selinux
279 selinux = _unicode_module_wrapper(_selinux,
280 encoding=_encodings['fs'])
281 _selinux_merge = _unicode_module_wrapper(_selinux,
282 encoding=_encodings['merge'])
284 sys.stderr.write("!!! SELinux not loaded: %s\n" % str(e))
289 from portage.manifest import Manifest
291 # ===========================================================================
292 # END OF IMPORTS -- END OF IMPORTS -- END OF IMPORTS -- END OF IMPORTS -- END
293 # ===========================================================================
# Build CodecInfo objects for codecs that the running python cannot look
# up (minimal stage-1/2 builds lack the encodings package). Returns a dict
# mapping codec aliases to CodecInfo; used by _ensure_default_encoding().
# NOTE(review): elided copy — the "encodings = {}" initializer, CodecInfo
# name= arguments, closing parens, and the final return line are missing.
295 def _gen_missing_encodings(missing_encodings):
299 if 'ascii' in missing_encodings:
301 class AsciiIncrementalEncoder(codecs.IncrementalEncoder):
302 def encode(self, input, final=False):
303 return codecs.ascii_encode(input, self.errors)[0]
305 class AsciiIncrementalDecoder(codecs.IncrementalDecoder):
306 def decode(self, input, final=False):
307 return codecs.ascii_decode(input, self.errors)[0]
309 class AsciiStreamWriter(codecs.StreamWriter):
310 encode = codecs.ascii_encode
312 class AsciiStreamReader(codecs.StreamReader):
313 decode = codecs.ascii_decode
315 codec_info = codecs.CodecInfo(
317 encode=codecs.ascii_encode,
318 decode=codecs.ascii_decode,
319 incrementalencoder=AsciiIncrementalEncoder,
320 incrementaldecoder=AsciiIncrementalDecoder,
321 streamwriter=AsciiStreamWriter,
322 streamreader=AsciiStreamReader,
# Register the same CodecInfo under every standard alias for ASCII.
325 for alias in ('ascii', '646', 'ansi_x3.4_1968', 'ansi_x3_4_1968',
326 'ansi_x3.4_1986', 'cp367', 'csascii', 'ibm367', 'iso646_us',
327 'iso_646.irv_1991', 'iso_ir_6', 'us', 'us_ascii'):
328 encodings[alias] = codec_info
330 if 'utf_8' in missing_encodings:
332 def utf8decode(input, errors='strict'):
333 return codecs.utf_8_decode(input, errors, True)
335 class Utf8IncrementalEncoder(codecs.IncrementalEncoder):
336 def encode(self, input, final=False):
337 return codecs.utf_8_encode(input, self.errors)[0]
339 class Utf8IncrementalDecoder(codecs.BufferedIncrementalDecoder):
340 _buffer_decode = codecs.utf_8_decode
342 class Utf8StreamWriter(codecs.StreamWriter):
343 encode = codecs.utf_8_encode
345 class Utf8StreamReader(codecs.StreamReader):
346 decode = codecs.utf_8_decode
348 codec_info = codecs.CodecInfo(
350 encode=codecs.utf_8_encode,
352 incrementalencoder=Utf8IncrementalEncoder,
353 incrementaldecoder=Utf8IncrementalDecoder,
354 streamreader=Utf8StreamReader,
355 streamwriter=Utf8StreamWriter,
358 for alias in ('utf_8', 'u8', 'utf', 'utf8', 'utf8_ucs2', 'utf8_ucs4'):
359 encodings[alias] = codec_info
# NOTE(review): elided copy — try/except scaffolding around codecs.lookup,
# the early "return", and several closing parens are missing below.
363 def _ensure_default_encoding():
365 The python that's inside stage 1 or 2 is built with a minimal
366 configuration which does not include the /usr/lib/pythonX.Y/encodings
367 directory. This results in error like the following:
369 LookupError: no codec search functions registered: can't find encoding
371 In order to solve this problem, detect it early and manually register
372 a search function for the ascii and utf_8 codecs. Starting with python-3.0
373 this problem is more noticeable because of stricter handling of encoding
374 and decoding between strings of characters and bytes.
377 default_fallback = 'utf_8'
378 default_encoding = sys.getdefaultencoding().lower().replace('-', '_')
379 filesystem_encoding = _encodings['merge'].lower().replace('-', '_')
380 required_encodings = set(['ascii', 'utf_8'])
381 required_encodings.add(default_encoding)
382 required_encodings.add(filesystem_encoding)
383 missing_encodings = set()
# Probe each required codec; any LookupError marks it as missing.
384 for codec_name in required_encodings:
386 codecs.lookup(codec_name)
388 missing_encodings.add(codec_name)
390 if not missing_encodings:
393 encodings = _gen_missing_encodings(missing_encodings)
395 if default_encoding in missing_encodings and \
396 default_encoding not in encodings:
397 # Make the fallback codec correspond to whatever name happens
398 # to be returned by sys.getdefaultencoding().
401 encodings[default_encoding] = codecs.lookup(default_fallback)
403 encodings[default_encoding] = encodings[default_fallback]
405 if filesystem_encoding in missing_encodings and \
406 filesystem_encoding not in encodings:
407 # Make the fallback codec correspond to whatever name happens
408 # to be returned by sys.getfilesystemencoding().
411 encodings[filesystem_encoding] = codecs.lookup(default_fallback)
413 encodings[filesystem_encoding] = encodings[default_fallback]
415 def search_function(name):
417 name = name.replace('-', '_')
418 codec_info = encodings.get(name)
419 if codec_info is not None:
420 return codecs.CodecInfo(
421 name=codec_info.name,
422 encode=codec_info.encode,
423 decode=codec_info.decode,
424 incrementalencoder=codec_info.incrementalencoder,
425 incrementaldecoder=codec_info.incrementaldecoder,
426 streamreader=codec_info.streamreader,
427 streamwriter=codec_info.streamwriter,
431 codecs.register(search_function)
# Clean the helper names out of module scope once registration is done.
433 del codec_name, default_encoding, default_fallback, \
434 filesystem_encoding, missing_encodings, \
435 required_encodings, search_function
437 # Do this ASAP since writemsg() might not work without it.
438 _ensure_default_encoding()
# NOTE(review): the `def` line for this block was lost in this elided
# copy. Judging by the docstring and the `_shell_quote(path)` call inside
# bsd_chflags below, this is presumably the body of _shell_quote(s) —
# confirm against upstream. The final 'return "\"%s\"" % s' line is also
# missing.
442 Quote a string in double-quotes and use backslashes to
443 escape any backslashes, double-quotes, dollar signs, or
444 backquotes in the string.
446 for letter in "\\\"$`":
448 s = s.replace(letter, "\\" + letter)
# FreeBSD-only helper: chflags(1) wrapper used by merge code to clear/set
# file flags. Only defined when platform.system() reports FreeBSD.
# NOTE(review): elided copy — the @classmethod decorators, the success
# "return", the lstat except-clause, and the final "raise e" are missing.
453 if platform.system() in ('FreeBSD',):
455 class bsd_chflags(object):
458 def chflags(cls, path, flags, opts=""):
# flags is formatted in octal (%o) for the chflags command line;
# the path is shell-quoted to survive the shell invocation.
459 cmd = 'chflags %s %o %s' % (opts, flags, _shell_quote(path))
460 status, output = subprocess_getstatusoutput(cmd)
461 if os.WIFEXITED(status) and os.WEXITSTATUS(status) == os.EX_OK:
463 # Try to generate an ENOENT error if appropriate.
465 _os_merge.lstat(path)
468 # Make sure the binary exists.
469 if not portage.process.find_binary('chflags'):
470 raise portage.exception.CommandNotFound('chflags')
471 # Now we're not sure exactly why it failed or what
472 # the real errno was, so just report EPERM.
473 e = OSError(errno.EPERM, output)
474 e.errno = errno.EPERM
480 def lchflags(cls, path, flags):
# -h applies the change to the symlink itself, not its target.
481 return cls.chflags(path, flags, opts='-h')
# NOTE(review): the `def` line for this block was lost in this elided
# copy; these lines import a module given a dotted "pkg.mod.attr" name and
# walk down its attributes — presumably the body of load_mod(name); the
# trailing "return mod" is also missing. Confirm against upstream.
484 modname = ".".join(name.split(".")[:-1])
485 mod = __import__(modname)
486 components = name.split('.')
487 for comp in components[1:]:
488 mod = getattr(mod, comp)
def best_from_dict(key, top_dict, key_order, EmptyOnError=1, FullCopy=1, AllowEmpty=1):
    """Search a two-level dict for `key` and return the first value found.

    The first-level keys of `top_dict` are tried in the order given by
    `key_order`; the first sub-dict that contains `key` wins.

    @param key: second-level key to look up
    @param top_dict: dict of dicts
    @param key_order: iterable of first-level keys, in priority order
    @param EmptyOnError: when true, return "" instead of raising KeyError
        when `key` is not found under any first-level key
    @param FullCopy: when true, deep-copy the value so callers cannot
        mutate top_dict through the returned object
    @param AllowEmpty: accepted for backward compatibility; unused
    @returns: the located value (deep-copied when FullCopy), or "" on a
        miss when EmptyOnError is true
    @raises KeyError: on a miss when EmptyOnError is false
    """
    # NOTE: this block was reconstructed from an elided copy; the visible
    # skeleton (membership test, deepcopy/plain-return pair, KeyError) fixes
    # the control flow unambiguously.
    for x in key_order:
        if x in top_dict and key in top_dict[x]:
            if FullCopy:
                return copy.deepcopy(top_dict[x][key])
            return top_dict[x][key]
    if EmptyOnError:
        return ""
    raise KeyError("Key not found in list; '%s'" % key)
504 "this fixes situations where the current directory doesn't exist"
507 except OSError: #dir doesn't exist
def abssymlink(symlink):
    """Read a symlink and return its target as a normalized absolute path.

    A relative target is resolved against the directory containing the
    symlink itself, matching how the kernel resolves relative symlinks.

    @param symlink: path to an existing symlink
    @type symlink: string
    @returns: absolute, os.path.normpath()-normalized target path
    @raises OSError: propagated from os.readlink (e.g. not a symlink)
    """
    target = os.readlink(symlink)
    # Relative target: anchor it at the symlink's own directory.
    # (Reconstructed guard — the elided copy dropped this line, but the
    # dirname/concat pair below only makes sense for relative targets.)
    if target[0] != '/':
        target = os.path.dirname(symlink) + "/" + target
    return os.path.normpath(target)
# mtime-validated directory listing cache. Returns (names, ftypes) where
# ftype codes are set by the S_ISREG/S_ISDIR/S_ISLNK branches below.
# Results are cached in the module-global `dircache` keyed by normalized
# path, and the global hit/miss/stale counters feed the stats line at the
# end. NOTE(review): elided copy — the cacheHit/cacheMiss increments, the
# EmptyOnError early-returns, the per-entry loop header, the ftype-append
# lines, and the noise-level guard around the stats writemsg are missing.
524 def cacheddir(my_original_path, ignorecvs, ignorelist, EmptyOnError, followSymlinks=True):
525 global cacheHit,cacheMiss,cacheStale
526 mypath = normalize_path(my_original_path)
527 if mypath in dircache:
529 cached_mtime, list, ftype = dircache[mypath]
532 cached_mtime, list, ftype = -1, [], []
534 pathstat = os.stat(mypath)
535 if stat.S_ISDIR(pathstat[stat.ST_MODE]):
536 mtime = pathstat.st_mtime
538 raise portage.exception.DirectoryNotFound(mypath)
539 except EnvironmentError as e:
540 if e.errno == portage.exception.PermissionDenied.errno:
541 raise portage.exception.PermissionDenied(mypath)
546 except portage.exception.PortageException:
550 # Python returns mtime in seconds, so if it was changed in the last few seconds, it could be invalid
551 if mtime != cached_mtime or time.time() - mtime < 4:
552 if mypath in dircache:
555 list = os.listdir(mypath)
556 except EnvironmentError as e:
557 if e.errno != errno.EACCES:
560 raise portage.exception.PermissionDenied(mypath)
# followSymlinks selects stat (follow) vs lstat (don't) per entry.
565 pathstat = os.stat(mypath+"/"+x)
567 pathstat = os.lstat(mypath+"/"+x)
569 if stat.S_ISREG(pathstat[stat.ST_MODE]):
571 elif stat.S_ISDIR(pathstat[stat.ST_MODE]):
573 elif stat.S_ISLNK(pathstat[stat.ST_MODE]):
577 except (IOError, OSError):
579 dircache[mypath] = mtime, list, ftype
# Filter the cached listing: drop ignorelist entries and ".#" lockfiles.
583 for x in range(0, len(list)):
584 if list[x] in ignorelist:
587 if list[x][:2] != ".#":
588 ret_list.append(list[x])
589 ret_ftype.append(ftype[x])
591 ret_list.append(list[x])
592 ret_ftype.append(ftype[x])
594 writemsg("cacheddirStats: H:%d/M:%d/S:%d\n" % (cacheHit, cacheMiss, cacheStale),10)
595 return ret_list, ret_ftype
597 _ignorecvs_dirs = ('CVS', 'SCCS', '.svn', '.git')
# NOTE(review): elided copy — the recursion depth bookkeeping, the
# early "return list" for the plain case, and the filesonly/dirsonly
# ftype tests inside the two filter loops are missing below.
599 def listdir(mypath, recursive=False, filesonly=False, ignorecvs=False, ignorelist=[], followSymlinks=True,
600 EmptyOnError=False, dirsonly=False):
602 Portage-specific implementation of os.listdir
604 @param mypath: Path whose contents you wish to list
606 @param recursive: Recursively scan directories contained within mypath
607 @type recursive: Boolean
608 @param filesonly: Only return files, not more directories
609 @type filesonly: Boolean
610 @param ignorecvs: Ignore CVS directories ('CVS','SCCS','.svn','.git')
611 @type ignorecvs: Boolean
612 @param ignorelist: List of filenames/directories to exclude
613 @type ignorelist: List
614 @param followSymlinks: Follow Symlink'd files and directories
615 @type followSymlinks: Boolean
616 @param EmptyOnError: Return [] if an error occurs.
617 @type EmptyOnError: Boolean
618 @param dirsonly: Only return directories.
619 @type dirsonly: Boolean
621 @returns: A list of files and directories (or just files or just directories) or an empty list.
# Delegates the actual stat work to the cacheddir() cache above.
624 list, ftype = cacheddir(mypath, ignorecvs, ignorelist, EmptyOnError, followSymlinks)
631 if not (filesonly or dirsonly or recursive):
# Recursive case: descend into subdirectories (ftype 1), skipping
# VCS dirs when ignorecvs is set, and prefix child names with the
# parent entry so returned paths stay relative to mypath.
637 if ftype[x] == 1 and not \
638 (ignorecvs and os.path.basename(list[x]) in _ignorecvs_dirs):
639 l,f = cacheddir(mypath+"/"+list[x], ignorecvs, ignorelist, EmptyOnError,
643 for y in range(0,len(l)):
644 l[y]=list[x]+"/"+l[y]
650 for x in range(0,len(ftype)):
652 rlist=rlist+[list[x]]
655 for x in range(0, len(ftype)):
657 rlist = rlist + [list[x]]
def flatten(mytokens):
    """Recursively flatten an arbitrarily nested list.

    Turns a [1,[2,3]] style list into a [1,2,3] list and returns it,
    preserving the left-to-right order of the leaf elements.

    @param mytokens: a (possibly nested) list
    @returns: a new flat list; the input list is not modified
    """
    newlist = []
    for x in mytokens:
        if isinstance(x, list):
            # Only `list` triggers recursion; tuples and strings are
            # treated as leaf elements, matching the isinstance test above.
            newlist.extend(flatten(x))
        else:
            newlist.append(x)
    return newlist
674 #beautiful directed graph object
# Directed graph used by the dependency resolver. Each node maps to a
# (children-dict, parents-dict, node) triple (see the structure comment at
# orig line 680); edge dicts map neighbor -> shared priority list, and
# `order` preserves insertion order for deterministic iteration.
# NOTE(review): elided copy — missing lines include __init__, several
# docstring delimiters, loop bodies (e.g. "raise KeyError(node)",
# "priorities.sort()", removed-list bookkeeping in difference_update),
# the all_nodes/copy/__iter__/empty method headers, and the delnode /
# hasallzeros bodies. Do not treat the code below as complete.
676 class digraph(object):
678 """Create an empty digraph"""
680 # { node : ( { child : priority } , { parent : priority } ) }
684 def add(self, node, parent, priority=0):
685 """Adds the specified node with the specified parent.
687 If the dep is a soft-dep and the node already has a hard
688 relationship to the parent, the relationship is left as hard."""
690 if node not in self.nodes:
691 self.nodes[node] = ({}, {}, node)
692 self.order.append(node)
697 if parent not in self.nodes:
698 self.nodes[parent] = ({}, {}, parent)
699 self.order.append(parent)
# The same priority list object is shared between the child-side and
# parent-side edge dicts, so one append updates both views.
701 priorities = self.nodes[node][1].get(parent)
702 if priorities is None:
704 self.nodes[node][1][parent] = priorities
705 self.nodes[parent][0][node] = priorities
706 priorities.append(priority)
709 def remove(self, node):
710 """Removes the specified node from the digraph, also removing
711 any ties to other nodes in the digraph. Raises KeyError if the
712 node doesn't exist."""
714 if node not in self.nodes:
717 for parent in self.nodes[node][1]:
718 del self.nodes[parent][0][node]
719 for child in self.nodes[node][0]:
720 del self.nodes[child][1][node]
723 self.order.remove(node)
725 def difference_update(self, t):
727 Remove all given nodes from node_set. This is more efficient
728 than multiple calls to the remove() method.
# Normalize t to something with O(1) membership tests before the scan.
730 if isinstance(t, (list, tuple)) or \
731 not hasattr(t, "__contains__"):
734 for node in self.order:
738 for parent in self.nodes[node][1]:
739 del self.nodes[parent][0][node]
740 for child in self.nodes[node][0]:
741 del self.nodes[child][1][node]
745 def remove_edge(self, child, parent):
747 Remove edge in the direction from child to parent. Note that it is
748 possible for a remaining edge to exist in the opposite direction.
749 Any endpoint vertices that become isolated will remain in the graph.
752 # Nothing should be modified when a KeyError is raised.
753 for k in parent, child:
754 if k not in self.nodes:
757 # Make sure the edge exists.
758 if child not in self.nodes[parent][0]:
759 raise KeyError(child)
760 if parent not in self.nodes[child][1]:
761 raise KeyError(parent)
764 del self.nodes[child][1][parent]
765 del self.nodes[parent][0][child]
768 return iter(self.order)
770 def contains(self, node):
771 """Checks if the digraph contains mynode"""
772 return node in self.nodes
774 def get(self, key, default=None):
# `self` doubles as the not-found sentinel so that None is a valid
# stored node_data value.
775 node_data = self.nodes.get(key, self)
776 if node_data is self:
781 """Return a list of all nodes in the graph"""
784 def child_nodes(self, node, ignore_priority=None):
785 """Return all children of the specified node"""
786 if ignore_priority is None:
787 return list(self.nodes[node][0])
# ignore_priority can be a callable filter or a threshold compared
# against the highest (last, sorted) priority of the edge.
789 if hasattr(ignore_priority, '__call__'):
790 for child, priorities in self.nodes[node][0].items():
791 for priority in priorities:
792 if not ignore_priority(priority):
793 children.append(child)
796 for child, priorities in self.nodes[node][0].items():
797 if ignore_priority < priorities[-1]:
798 children.append(child)
801 def parent_nodes(self, node, ignore_priority=None):
802 """Return all parents of the specified node"""
803 if ignore_priority is None:
804 return list(self.nodes[node][1])
806 if hasattr(ignore_priority, '__call__'):
807 for parent, priorities in self.nodes[node][1].items():
808 for priority in priorities:
809 if not ignore_priority(priority):
810 parents.append(parent)
813 for parent, priorities in self.nodes[node][1].items():
814 if ignore_priority < priorities[-1]:
815 parents.append(parent)
818 def leaf_nodes(self, ignore_priority=None):
819 """Return all nodes that have no children
821 If ignore_soft_deps is True, soft deps are not counted as
822 children in calculations."""
825 if ignore_priority is None:
826 for node in self.order:
827 if not self.nodes[node][0]:
828 leaf_nodes.append(node)
829 elif hasattr(ignore_priority, '__call__'):
830 for node in self.order:
832 for child, priorities in self.nodes[node][0].items():
833 for priority in priorities:
834 if not ignore_priority(priority):
840 leaf_nodes.append(node)
842 for node in self.order:
844 for child, priorities in self.nodes[node][0].items():
845 if ignore_priority < priorities[-1]:
849 leaf_nodes.append(node)
852 def root_nodes(self, ignore_priority=None):
853 """Return all nodes that have no parents.
855 If ignore_soft_deps is True, soft deps are not counted as
856 parents in calculations."""
859 if ignore_priority is None:
860 for node in self.order:
861 if not self.nodes[node][1]:
862 root_nodes.append(node)
863 elif hasattr(ignore_priority, '__call__'):
864 for node in self.order:
866 for parent, priorities in self.nodes[node][1].items():
867 for priority in priorities:
868 if not ignore_priority(priority):
874 root_nodes.append(node)
876 for node in self.order:
878 for parent, priorities in self.nodes[node][1].items():
879 if ignore_priority < priorities[-1]:
883 root_nodes.append(node)
887 """Checks if the digraph is empty"""
888 return len(self.nodes) == 0
# Deep-ish copy: edge priority lists are cloned once and shared between
# the child-side and parent-side dicts via the id()-keyed memo, mirroring
# the sharing established in add().
894 for children, parents, node in self.nodes.values():
896 for child, priorities in children.items():
897 priorities_clone = memo.get(id(priorities))
898 if priorities_clone is None:
899 priorities_clone = priorities[:]
900 memo[id(priorities)] = priorities_clone
901 children_clone[child] = priorities_clone
903 for parent, priorities in parents.items():
904 priorities_clone = memo.get(id(priorities))
905 if priorities_clone is None:
906 priorities_clone = priorities[:]
907 memo[id(priorities)] = priorities_clone
908 parents_clone[parent] = priorities_clone
909 clone.nodes[node] = (children_clone, parents_clone, node)
910 clone.order = self.order[:]
913 # Backward compatibility
916 allzeros = leaf_nodes
918 __contains__ = contains
922 def delnode(self, node):
929 leaf_nodes = self.leaf_nodes()
934 def hasallzeros(self, ignore_priority=None):
935 return len(self.leaf_nodes(ignore_priority=ignore_priority)) == \
938 def debug_print(self):
# `output` writes via writemsg at noiselevel -1 (always shown).
940 writemsg(s, noiselevel=-1)
941 for node in self.nodes:
942 output("%s " % (node,))
943 if self.nodes[node][0]:
944 output("depends on\n")
946 output("(no children)\n")
947 for child, priorities in self.nodes[node][0].items():
948 output(" %s (%s)\n" % (child, priorities[-1],))
950 #parse /etc/env.d and generate /etc/profile.env
# Parse /etc/env.d under target_root, regenerate /etc/profile.env,
# /etc/csh.env, /etc/ld.so.conf and (when applicable) prelink.conf, and
# run ldconfig when the library path set or library dirs changed.
# NOTE(review): elided copy — many interior lines are missing (the fns
# filter loop header and its sort, config_list/specials initializers,
# try/except headers around getconfig and codecs.open, several else/continue
# lines, the prelink_capable guard, and the final outfile.close() calls).
# Treat the lines below as a skeleton, not runnable code.
952 def env_update(makelinks=1, target_root=None, prev_mtimes=None, contents=None,
953 env=None, writemsg_level=None):
954 if writemsg_level is None:
955 writemsg_level = portage.util.writemsg_level
956 if target_root is None:
958 target_root = settings["ROOT"]
959 if prev_mtimes is None:
961 prev_mtimes = mtimedb["ldpath"]
964 envd_dir = os.path.join(target_root, "etc", "env.d")
965 portage.util.ensure_dirs(envd_dir, mode=0o755)
966 fns = listdir(envd_dir, EmptyOnError=1)
# env.d files must start with two digits; skip editor backups/hidden files.
972 if not x[0].isdigit() or not x[1].isdigit():
974 if x.startswith(".") or x.endswith("~") or x.endswith(".bak"):
980 space_separated = set(["CONFIG_PROTECT", "CONFIG_PROTECT_MASK"])
981 colon_separated = set(["ADA_INCLUDE_PATH", "ADA_OBJECTS_PATH",
982 "CLASSPATH", "INFODIR", "INFOPATH", "KDEDIRS", "LDPATH", "MANPATH",
983 "PATH", "PKG_CONFIG_PATH", "PRELINK_PATH", "PRELINK_PATH_MASK",
984 "PYTHONPATH", "ROOTPATH"])
989 file_path = os.path.join(envd_dir, x)
991 myconfig = getconfig(file_path, expand=False)
992 except portage.exception.ParseError as e:
993 writemsg("!!! '%s'\n" % str(e), noiselevel=-1)
997 # broken symlink or file removed by a concurrent process
998 writemsg("!!! File Not Found: '%s'\n" % file_path, noiselevel=-1)
1001 config_list.append(myconfig)
# env.d files may extend the cumulative-variable sets themselves.
1002 if "SPACE_SEPARATED" in myconfig:
1003 space_separated.update(myconfig["SPACE_SEPARATED"].split())
1004 del myconfig["SPACE_SEPARATED"]
1005 if "COLON_SEPARATED" in myconfig:
1006 colon_separated.update(myconfig["COLON_SEPARATED"].split())
1007 del myconfig["COLON_SEPARATED"]
# Merge cumulative variables across all env.d files, de-duplicating
# while preserving first-seen order.
1011 for var in space_separated:
1013 for myconfig in config_list:
1015 for item in myconfig[var].split():
1016 if item and not item in mylist:
1018 del myconfig[var] # prepare for env.update(myconfig)
1020 env[var] = " ".join(mylist)
1021 specials[var] = mylist
1023 for var in colon_separated:
1025 for myconfig in config_list:
1027 for item in myconfig[var].split(":"):
1028 if item and not item in mylist:
1030 del myconfig[var] # prepare for env.update(myconfig)
1032 env[var] = ":".join(mylist)
1033 specials[var] = mylist
1035 for myconfig in config_list:
1036 """Cumulative variables have already been deleted from myconfig so that
1037 they won't be overwritten by this dict.update call."""
1038 env.update(myconfig)
1040 ldsoconf_path = os.path.join(target_root, "etc", "ld.so.conf")
1042 myld = codecs.open(_unicode_encode(ldsoconf_path,
1043 encoding=_encodings['fs'], errors='strict'),
1044 mode='r', encoding=_encodings['content'], errors='replace')
1045 myldlines=myld.readlines()
1049 #each line has at least one char (a newline)
1052 oldld.append(x[:-1])
1053 except (IOError, OSError) as e:
1054 if e.errno != errno.ENOENT:
1058 ld_cache_update=False
1060 newld = specials["LDPATH"]
1062 #ld.so.conf needs updating and ldconfig needs to be run
1063 myfd = atomic_ofstream(ldsoconf_path)
1064 myfd.write("# ld.so.conf autogenerated by env-update; make all changes to\n")
1065 myfd.write("# contents of /etc/env.d directory\n")
1066 for x in specials["LDPATH"]:
1069 ld_cache_update=True
1071 # Update prelink.conf if we are prelink-enabled
1073 newprelink = atomic_ofstream(
1074 os.path.join(target_root, "etc", "prelink.conf"))
1075 newprelink.write("# prelink.conf autogenerated by env-update; make all changes to\n")
1076 newprelink.write("# contents of /etc/env.d directory\n")
1078 for x in ["/bin","/sbin","/usr/bin","/usr/sbin","/lib","/usr/lib"]:
1079 newprelink.write("-l "+x+"\n");
1080 for x in specials["LDPATH"]+specials["PATH"]+specials["PRELINK_PATH"]:
1086 for y in specials["PRELINK_PATH_MASK"]:
1095 newprelink.write("-h "+x+"\n")
1096 for x in specials["PRELINK_PATH_MASK"]:
1097 newprelink.write("-b "+x+"\n")
1100 # Portage stores mtimes with 1 second granularity but in >=python-2.5 finer
1101 # granularity is possible. In order to avoid the potential ambiguity of
1102 # mtimes that differ by less than 1 second, sleep here if any of the
1103 # directories have been modified during the current second.
1104 sleep_for_mtime_granularity = False
1105 current_time = long(time.time())
1106 mtime_changed = False
1108 for lib_dir in portage.util.unique_array(specials["LDPATH"]+['usr/lib','usr/lib64','usr/lib32','lib','lib64','lib32']):
1109 x = os.path.join(target_root, lib_dir.lstrip(os.sep))
1111 newldpathtime = long(os.stat(x).st_mtime)
1112 lib_dirs.add(normalize_path(x))
1113 except OSError as oe:
1114 if oe.errno == errno.ENOENT:
1119 # ignore this path because it doesn't exist
1122 if newldpathtime == current_time:
1123 sleep_for_mtime_granularity = True
1124 if x in prev_mtimes:
1125 if prev_mtimes[x] == newldpathtime:
1128 prev_mtimes[x] = newldpathtime
1129 mtime_changed = True
1131 prev_mtimes[x] = newldpathtime
1132 mtime_changed = True
1135 ld_cache_update = True
# When only a package's own contents are known, skip ldconfig unless the
# merge actually touched files inside a library directory.
1138 not ld_cache_update and \
1139 contents is not None:
1140 libdir_contents_changed = False
1141 for mypath, mydata in contents.items():
1142 if mydata[0] not in ("obj","sym"):
1144 head, tail = os.path.split(mypath)
1145 if head in lib_dirs:
1146 libdir_contents_changed = True
1148 if not libdir_contents_changed:
1151 ldconfig = "/sbin/ldconfig"
1152 if "CHOST" in env and "CBUILD" in env and \
1153 env["CHOST"] != env["CBUILD"]:
1154 from portage.process import find_binary
1155 ldconfig = find_binary("%s-ldconfig" % env["CHOST"])
1157 # Only run ldconfig as needed
1158 if (ld_cache_update or makelinks) and ldconfig:
1159 # ldconfig has very different behaviour between FreeBSD and Linux
1160 if ostype=="Linux" or ostype.lower().endswith("gnu"):
1161 # We can't update links if we haven't cleaned other versions first, as
1162 # an older package installed ON TOP of a newer version will cause ldconfig
1163 # to overwrite the symlinks we just made. -X means no links. After 'clean'
1164 # we can safely create links.
1165 writemsg_level(_(">>> Regenerating %setc/ld.so.cache...\n") % \
1168 os.system("cd / ; %s -r '%s'" % (ldconfig, target_root))
1170 os.system("cd / ; %s -X -r '%s'" % (ldconfig, target_root))
1171 elif ostype in ("FreeBSD","DragonFly"):
1172 writemsg_level(_(">>> Regenerating %svar/run/ld-elf.so.hints...\n") % \
1174 os.system(("cd / ; %s -elf -i " + \
1175 "-f '%svar/run/ld-elf.so.hints' '%setc/ld.so.conf'") % \
1176 (ldconfig, target_root, target_root))
1178 del specials["LDPATH"]
1180 penvnotice = "# THIS FILE IS AUTOMATICALLY GENERATED BY env-update.\n"
1181 penvnotice += "# DO NOT EDIT THIS FILE. CHANGES TO STARTUP PROFILES\n"
1182 cenvnotice = penvnotice[:]
1183 penvnotice += "# GO INTO /etc/profile NOT /etc/profile.env\n\n"
1184 cenvnotice += "# GO INTO /etc/csh.cshrc NOT /etc/csh.env\n\n"
1186 #create /etc/profile.env for bash support
1187 outfile = atomic_ofstream(os.path.join(target_root, "etc", "profile.env"))
1188 outfile.write(penvnotice)
1190 env_keys = [ x for x in env if x != "LDPATH" ]
# Values beginning with "$" (but not "${") are emitted as $'...' so bash
# interprets the escape rather than treating it literally.
1194 if v.startswith('$') and not v.startswith('${'):
1195 outfile.write("export %s=$'%s'\n" % (k, v[1:]))
1197 outfile.write("export %s='%s'\n" % (k, v))
1200 #create /etc/csh.env for (t)csh support
1201 outfile = atomic_ofstream(os.path.join(target_root, "etc", "csh.env"))
1202 outfile.write(cenvnotice)
1204 outfile.write("setenv %s '%s'\n" % (x, env[x]))
# Wait out the current second so later mtime comparisons are unambiguous
# (see the granularity comment at orig line 1100).
1207 if sleep_for_mtime_granularity:
1208 while current_time == long(time.time()):
# NOTE(review): elided copy — missing lines include the docstring
# delimiters, the "version = ''" initializer, the readline loop header,
# f.close(), the VERSION/PATCHLEVEL/SUBLEVEL/EXTRAVERSION concatenation
# lines, and the try/except around os.listdir(base_dir).
1211 def ExtractKernelVersion(base_dir):
1213 Try to figure out what kernel version we are running
1214 @param base_dir: Path to sources (usually /usr/src/linux)
1215 @type base_dir: string
1216 @rtype: tuple( version[string], error[string])
1218 1. tuple( version[string], error[string])
1219 Either version or error is populated (but never both)
1223 pathname = os.path.join(base_dir, 'Makefile')
1225 f = codecs.open(_unicode_encode(pathname,
1226 encoding=_encodings['fs'], errors='strict'), mode='r',
1227 encoding=_encodings['content'], errors='replace')
1228 except OSError as details:
1229 return (None, str(details))
1230 except IOError as details:
1231 return (None, str(details))
# Only the first few Makefile lines are read (the version variables
# appear at the top of a kernel Makefile).
1235 lines.append(f.readline())
1236 except OSError as details:
1237 return (None, str(details))
1238 except IOError as details:
1239 return (None, str(details))
1241 lines = [l.strip() for l in lines]
1245 #XXX: The following code relies on the ordering of vars within the Makefile
1247 # split on the '=' then remove annoying whitespace
1248 items = line.split("=")
1249 items = [i.strip() for i in items]
1250 if items[0] == 'VERSION' or \
1251 items[0] == 'PATCHLEVEL':
1254 elif items[0] == 'SUBLEVEL':
1256 elif items[0] == 'EXTRAVERSION' and \
1257 items[-1] != items[0]:
1260 # Grab a list of files named localversion* and sort them
1261 localversions = os.listdir(base_dir)
1262 for x in range(len(localversions)-1,-1,-1):
1263 if localversions[x][:12] != "localversion":
1264 del localversions[x]
1265 localversions.sort()
1267 # Append the contents of each to the version string, stripping ALL whitespace
1268 for lv in localversions:
1269 version += "".join( " ".join( grabfile( base_dir+ "/" + lv ) ).split() )
1271 # Check the .config for a CONFIG_LOCALVERSION and append that too, also stripping whitespace
1272 kernelconfig = getconfig(base_dir+"/.config")
1273 if kernelconfig and "CONFIG_LOCALVERSION" in kernelconfig:
1274 version += "".join(kernelconfig["CONFIG_LOCALVERSION"].split())
1276 return (version,None)
def autouse(myvartree, use_cache=1, mysettings=None):
	"""
	autouse returns a list of USE variables auto-enabled to packages being installed

	@param myvartree: Instance of the vartree class (from /var/db/pkg...)
	@type myvartree: vartree
	@param use_cache: read values from cache
	@type use_cache: Boolean
	@param mysettings: Instance of config
	@type mysettings: config
	@rtype: String
	@returns: A string containing a list of USE variables that are enabled via use.defaults
	"""
	if mysettings is None:
		# Fall back to the globally instantiated settings object.
		mysettings = settings
	if mysettings.profile_path is None:
	usedefaults = mysettings.use_defs
	# For each candidate flag from use.defaults, test its dependency
	# atoms against the installed-package tree; the flag is appended
	# to the result only when its deps are satisfied.
	for myuse in usedefaults:
		for mydep in usedefaults[myuse]:
			# NOTE(review): the literal True is passed here instead of
			# the use_cache parameter -- confirm this is intentional.
			if not myvartree.dep_match(mydep,use_cache=True):
		myusevars += " "+myuse
def check_config_instance(test):
	"""Validate that the given object is a config instance.

	Raises TypeError when *test* is anything else; returns None on
	success.
	"""
	if isinstance(test, config):
		return
	raise TypeError("Invalid type for config object: %s (should be %s)" % (test.__class__, config))
1312 def _lazy_iuse_regex(iuse_implicit):
1314 The PORTAGE_IUSE value is lazily evaluated since re.escape() is slow
1315 and the value is only used when an ebuild phase needs to be executed
1316 (it's used only to generate QA notices).
1318 # Escape anything except ".*" which is supposed to pass through from
1319 # _get_implicit_iuse().
1320 regex = sorted(re.escape(x) for x in iuse_implicit)
1321 regex = "^(%s)$" % "|".join(regex)
1322 regex = regex.replace("\\.\\*", ".*")
1325 class _local_repo_config(object):
1326 __slots__ = ('aliases', 'eclass_overrides', 'masters', 'name',)
1327 def __init__(self, name, repo_opts):
1330 aliases = repo_opts.get('aliases')
1331 if aliases is not None:
1332 aliases = tuple(aliases.split())
1333 self.aliases = aliases
1335 eclass_overrides = repo_opts.get('eclass-overrides')
1336 if eclass_overrides is not None:
1337 eclass_overrides = tuple(eclass_overrides.split())
1338 self.eclass_overrides = eclass_overrides
1340 masters = repo_opts.get('masters')
1341 if masters is not None:
1342 masters = tuple(masters.split())
1343 self.masters = masters
1345 class config(object):
1347 This class encompasses the main portage configuration. Data is pulled from
1348 ROOT/PORTDIR/profiles/, from ROOT/etc/make.profile incrementally through all
1349 parent profiles as well as from ROOT/PORTAGE_CONFIGROOT/* for user specified
1352 Generally if you need data like USE flags, FEATURES, environment variables,
1353 virtuals ...etc you look in here.
1356 # Don't include anything that could be extremely long here (like SRC_URI)
1357 # since that could cause execve() calls to fail with E2BIG errors. For
1358 # example, see bug #262647.
1359 _setcpv_aux_keys = ('SLOT', 'RESTRICT', 'LICENSE',
1360 'KEYWORDS', 'INHERITED', 'IUSE', 'PROVIDE', 'EAPI',
1361 'PROPERTIES', 'DEFINED_PHASES', 'repository')
1364 "A", "AA", "CATEGORY", "DEPEND", "DESCRIPTION", "EAPI",
1365 "EBUILD_PHASE", "EMERGE_FROM", "HOMEPAGE", "INHERITED", "IUSE",
1366 "KEYWORDS", "LICENSE", "PDEPEND", "PF", "PKGUSE",
1367 "PORTAGE_CONFIGROOT", "PORTAGE_IUSE",
1368 "PORTAGE_NONFATAL", "PORTAGE_REPO_NAME",
1369 "PORTAGE_USE", "PROPERTIES", "PROVIDE", "RDEPEND", "RESTRICT",
1370 "ROOT", "SLOT", "SRC_URI"
1373 _environ_whitelist = []
1375 # Whitelisted variables are always allowed to enter the ebuild
1376 # environment. Generally, this only includes special portage
1377 # variables. Ebuilds can unset variables that are not whitelisted
1378 # and rely on them remaining unset for future phases, without them
1379 # leaking back in from various locations (bug #189417). It's very
1380 # important to set our special BASH_ENV variable in the ebuild
1381 # environment in order to prevent sandbox from sourcing /etc/profile
1382 # in it's bashrc (causing major leakage).
1383 _environ_whitelist += [
1384 "ACCEPT_LICENSE", "BASH_ENV", "BUILD_PREFIX", "D",
1385 "DISTDIR", "DOC_SYMLINKS_DIR", "EBUILD",
1386 "EBUILD_EXIT_STATUS_FILE", "EBUILD_FORCE_TEST",
1387 "EBUILD_PHASE", "ECLASSDIR", "ECLASS_DEPTH", "EMERGE_FROM",
1388 "FEATURES", "FILESDIR", "HOME", "NOCOLOR", "PATH",
1390 "PKGUSE", "PKG_LOGDIR", "PKG_TMPDIR",
1391 "PORTAGE_ACTUAL_DISTDIR", "PORTAGE_ARCHLIST",
1393 "PORTAGE_BINPKG_FILE", "PORTAGE_BINPKG_TAR_OPTS",
1394 "PORTAGE_BINPKG_TMPFILE",
1396 "PORTAGE_BUILDDIR", "PORTAGE_COLORMAP",
1397 "PORTAGE_CONFIGROOT", "PORTAGE_DEBUG", "PORTAGE_DEPCACHEDIR",
1398 "PORTAGE_GID", "PORTAGE_INST_GID", "PORTAGE_INST_UID",
1400 "PORTAGE_LOG_FILE", "PORTAGE_MASTER_PID",
1401 "PORTAGE_PYM_PATH", "PORTAGE_QUIET",
1402 "PORTAGE_REPO_NAME", "PORTAGE_RESTRICT",
1403 "PORTAGE_TMPDIR", "PORTAGE_UPDATE_ENV",
1404 "PORTAGE_VERBOSE", "PORTAGE_WORKDIR_MODE",
1405 "PORTDIR", "PORTDIR_OVERLAY", "PREROOTPATH", "PROFILE_PATHS",
1406 "ROOT", "ROOTPATH", "STARTDIR", "T", "TMP", "TMPDIR",
1407 "USE_EXPAND", "USE_ORDER", "WORKDIR",
1411 # user config variables
1412 _environ_whitelist += [
1413 "DOC_SYMLINKS_DIR", "INSTALL_MASK", "PKG_INSTALL_MASK"
1416 _environ_whitelist += [
1417 "A", "AA", "CATEGORY", "P", "PF", "PN", "PR", "PV", "PVR"
1420 # misc variables inherited from the calling environment
1421 _environ_whitelist += [
1422 "COLORTERM", "DISPLAY", "EDITOR", "LESS",
1423 "LESSOPEN", "LOGNAME", "LS_COLORS", "PAGER",
1424 "TERM", "TERMCAP", "USER",
1427 # other variables inherited from the calling environment
1428 _environ_whitelist += [
1429 "CVS_RSH", "ECHANGELOG_USER",
1431 "SSH_AGENT_PID", "SSH_AUTH_SOCK",
1432 "STY", "WINDOW", "XAUTHORITY",
1435 _environ_whitelist = frozenset(_environ_whitelist)
1437 _environ_whitelist_re = re.compile(r'^(CCACHE_|DISTCC_).*')
1439 # Filter selected variables in the config.environ() method so that
1440 # they don't needlessly propagate down into the ebuild environment.
1441 _environ_filter = []
1443 # misc variables inherited from the calling environment
1444 _environ_filter += [
1445 "INFOPATH", "MANPATH",
1448 # variables that break bash
1449 _environ_filter += [
1450 "HISTFILE", "POSIXLY_CORRECT",
1453 # portage config variables and variables set directly by portage
1454 _environ_filter += [
1455 "ACCEPT_KEYWORDS", "ACCEPT_PROPERTIES", "AUTOCLEAN",
1456 "CLEAN_DELAY", "COLLISION_IGNORE", "CONFIG_PROTECT",
1457 "CONFIG_PROTECT_MASK", "EGENCACHE_DEFAULT_OPTS", "EMERGE_DEFAULT_OPTS",
1459 "EMERGE_WARNING_DELAY", "FETCHCOMMAND", "FETCHCOMMAND_FTP",
1460 "FETCHCOMMAND_HTTP", "FETCHCOMMAND_SFTP",
1461 "GENTOO_MIRRORS", "NOCONFMEM", "O",
1462 "PORTAGE_BACKGROUND",
1463 "PORTAGE_BINHOST_CHUNKSIZE", "PORTAGE_CALLER",
1464 "PORTAGE_ELOG_CLASSES",
1465 "PORTAGE_ELOG_MAILFROM", "PORTAGE_ELOG_MAILSUBJECT",
1466 "PORTAGE_ELOG_MAILURI", "PORTAGE_ELOG_SYSTEM",
1467 "PORTAGE_FETCH_CHECKSUM_TRY_MIRRORS", "PORTAGE_FETCH_RESUME_MIN_SIZE",
1469 "PORTAGE_GPG_KEY", "PORTAGE_IONICE_COMMAND",
1470 "PORTAGE_PACKAGE_EMPTY_ABORT",
1471 "PORTAGE_REPO_DUPLICATE_WARN",
1472 "PORTAGE_RO_DISTDIRS",
1473 "PORTAGE_RSYNC_EXTRA_OPTS", "PORTAGE_RSYNC_OPTS",
1474 "PORTAGE_RSYNC_RETRIES", "PORTAGE_USE", "PORT_LOGDIR",
1475 "QUICKPKG_DEFAULT_OPTS",
1476 "RESUMECOMMAND", "RESUMECOMMAND_HTTP", "RESUMECOMMAND_HTTP",
1477 "RESUMECOMMAND_SFTP", "SYNC", "USE_EXPAND_HIDDEN", "USE_ORDER",
1480 _environ_filter = frozenset(_environ_filter)
	def __init__(self, clone=None, mycpv=None, config_profile_path=None,
		config_incrementals=None, config_root=None, target_root=None,
		local_config=True, env=None):
		"""
		@param clone: If provided, init will use deepcopy to copy by value the instance.
		@type clone: Instance of config class.
		@param mycpv: CPV to load up (see setcpv), this is the same as calling init with mycpv=None
		and then calling instance.setcpv(mycpv).
		@type mycpv: String
		@param config_profile_path: Configurable path to the profile (usually PROFILE_PATH from portage.const)
		@type config_profile_path: String
		@param config_incrementals: List of incremental variables
			(defaults to portage.const.INCREMENTALS)
		@type config_incrementals: List
		@param config_root: path to read local config from (defaults to "/", see PORTAGE_CONFIGROOT)
		@type config_root: String
		@param target_root: __init__ override of $ROOT env variable.
		@type target_root: String
		@param local_config: Enables loading of local config (/etc/portage); used most by repoman to
		ignore local config (keywording and unmasking)
		@type local_config: Boolean
		@param env: The calling environment which is used to override settings.
			Defaults to os.environ if unspecified.
		@type env: dict
		"""

		# When initializing the global portage.settings instance, avoid
		# raising exceptions whenever possible since exceptions thrown
		# from 'import portage' or 'import portage.exceptions' statements
		# can practically render the api unusable for api consumers.
		tolerant = "_initializing_globals" in globals()

		self.already_in_regenerate = 0

		self._setcpv_args_hash = None

		self.modifiedkeys = []

		self._accept_chost_re = None
		self._accept_license = None
		self._accept_license_str = None
		self._license_groups = {}
		self._accept_properties = None

		self.dirVirtuals = None

		# Virtuals obtained from the vartree
		self.treeVirtuals = {}
		# Virtuals by user specification. Includes negatives.
		self.userVirtuals = {}
		# Virtual negatives from user specifications.
		self.negVirtuals = {}
		# Virtuals added by the depgraph via self.setinst().
		self._depgraphVirtuals = {}

		self.user_profile_dir = None
		self.local_config = local_config
		self._local_repo_configs = None
		self._local_repo_conf_path = None

			# Clone path: copy the entire state of the given config
			# instance by value so the copies are independent.
			self.incrementals = copy.deepcopy(clone.incrementals)
			self.profile_path = copy.deepcopy(clone.profile_path)
			self.user_profile_dir = copy.deepcopy(clone.user_profile_dir)
			self.local_config = copy.deepcopy(clone.local_config)
			self._local_repo_configs = \
				copy.deepcopy(clone._local_repo_configs)
			self._local_repo_conf_path = \
				copy.deepcopy(clone._local_repo_conf_path)

			self.module_priority = copy.deepcopy(clone.module_priority)
			self.modules = copy.deepcopy(clone.modules)

			self.depcachedir = copy.deepcopy(clone.depcachedir)

			self.packages = copy.deepcopy(clone.packages)
			self.virtuals = copy.deepcopy(clone.virtuals)

			self.dirVirtuals = copy.deepcopy(clone.dirVirtuals)
			self.treeVirtuals = copy.deepcopy(clone.treeVirtuals)
			self.userVirtuals = copy.deepcopy(clone.userVirtuals)
			self.negVirtuals = copy.deepcopy(clone.negVirtuals)
			self._depgraphVirtuals = copy.deepcopy(clone._depgraphVirtuals)

			self.use_defs = copy.deepcopy(clone.use_defs)
			self.usemask = copy.deepcopy(clone.usemask)
			self.usemask_list = copy.deepcopy(clone.usemask_list)
			self.pusemask_list = copy.deepcopy(clone.pusemask_list)
			self.useforce = copy.deepcopy(clone.useforce)
			self.useforce_list = copy.deepcopy(clone.useforce_list)
			self.puseforce_list = copy.deepcopy(clone.puseforce_list)
			self.puse = copy.deepcopy(clone.puse)
			self.make_defaults_use = copy.deepcopy(clone.make_defaults_use)
			self.pkgprofileuse = copy.deepcopy(clone.pkgprofileuse)
			self.mycpv = copy.deepcopy(clone.mycpv)
			self._setcpv_args_hash = copy.deepcopy(clone._setcpv_args_hash)

			self.configdict = copy.deepcopy(clone.configdict)
			# The configlist entries reference the same dict objects
			# stored in the cloned configdict.
				self.configdict['env.d'],
				self.configdict['pkginternal'],
				self.configdict['globals'],
				self.configdict['defaults'],
				self.configdict['conf'],
				self.configdict['pkg'],
				self.configdict['auto'],
				self.configdict['env'],
			self.lookuplist = self.configlist[:]
			self.lookuplist.reverse()
			self._use_expand_dict = copy.deepcopy(clone._use_expand_dict)
			self.profiles = copy.deepcopy(clone.profiles)
			self.backupenv = self.configdict["backupenv"]
			self.pusedict = copy.deepcopy(clone.pusedict)
			self.categories = copy.deepcopy(clone.categories)
			self.pkeywordsdict = copy.deepcopy(clone.pkeywordsdict)
			self._pkeywords_list = copy.deepcopy(clone._pkeywords_list)
			self.pmaskdict = copy.deepcopy(clone.pmaskdict)
			self.punmaskdict = copy.deepcopy(clone.punmaskdict)
			self.prevmaskdict = copy.deepcopy(clone.prevmaskdict)
			self.pprovideddict = copy.deepcopy(clone.pprovideddict)
			self.features = copy.deepcopy(clone.features)

			self._accept_license = copy.deepcopy(clone._accept_license)
			self._plicensedict = copy.deepcopy(clone._plicensedict)
			self._license_groups = copy.deepcopy(clone._license_groups)
			self._accept_properties = copy.deepcopy(clone._accept_properties)
			self._ppropertiesdict = copy.deepcopy(clone._ppropertiesdict)

			# Non-clone path: build the configuration stack from disk.
			def check_var_directory(varname, var):
				# Fail fast when a critical path variable does not
				# refer to an existing directory.
				if not os.path.isdir(var):
					writemsg(_("!!! Error: %s='%s' is not a directory. "
						"Please correct this.\n") % (varname, var),
					raise portage.exception.DirectoryNotFound(var)

			if config_root is None:
			# Canonicalize with exactly one trailing separator.
			config_root = normalize_path(os.path.abspath(
				config_root)).rstrip(os.path.sep) + os.path.sep

			check_var_directory("PORTAGE_CONFIGROOT", config_root)

			self.depcachedir = DEPCACHE_PATH

			if not config_profile_path:
				config_profile_path = \
					os.path.join(config_root, PROFILE_PATH)
				if os.path.isdir(config_profile_path):
					self.profile_path = config_profile_path
					self.profile_path = None
				self.profile_path = config_profile_path[:]

			if config_incrementals is None:
				self.incrementals = copy.deepcopy(portage.const.INCREMENTALS)
				self.incrementals = copy.deepcopy(config_incrementals)

			# "user" module settings override the "default" ones below.
			self.module_priority = ["user","default"]
			modules_loader = portage.env.loaders.KeyValuePairFileLoader(
				os.path.join(config_root, MODULES_FILE_PATH), None, None)
			modules_dict, modules_errors = modules_loader.load()
			self.modules["user"] = modules_dict
			if self.modules["user"] is None:
				self.modules["user"] = {}
			self.modules["default"] = {
				"portdbapi.metadbmodule": "portage.cache.metadata.database",
				"portdbapi.auxdbmodule": "portage.cache.flat_hash.database",

			# back up our incremental variables:
			self._use_expand_dict = {}
			# configlist will contain: [ env.d, globals, defaults, conf, pkg, auto, backupenv, env ]
			self.configlist.append({})
			self.configdict["env.d"] = self.configlist[-1]

			self.configlist.append({})
			self.configdict["pkginternal"] = self.configlist[-1]

			# The symlink might not exist or might not be a symlink.
			if self.profile_path is None:

			def addProfile(currentPath):
				# Recursively collect this profile and its parents into
				# self.profiles (parents first).
				parentsFile = os.path.join(currentPath, "parent")
				eapi_file = os.path.join(currentPath, "eapi")
					eapi = codecs.open(_unicode_encode(eapi_file,
						encoding=_encodings['fs'], errors='strict'),
						mode='r', encoding=_encodings['content'], errors='replace'
						).readline().strip()
					if not eapi_is_supported(eapi):
						raise portage.exception.ParseError(_(
							"Profile contains unsupported "
							"EAPI '%s': '%s'") % \
							(eapi, os.path.realpath(eapi_file),))
				if os.path.exists(parentsFile):
					parents = grabfile(parentsFile)
						raise portage.exception.ParseError(
							_("Empty parent file: '%s'") % parentsFile)
					for parentPath in parents:
						parentPath = normalize_path(os.path.join(
							currentPath, parentPath))
						if os.path.exists(parentPath):
							addProfile(parentPath)
							raise portage.exception.ParseError(
								_("Parent '%s' not found: '%s'") % \
								(parentPath, parentsFile))
				self.profiles.append(currentPath)

				addProfile(os.path.realpath(self.profile_path))
			except portage.exception.ParseError as e:
				writemsg(_("!!! Unable to parse profile: '%s'\n") % \
					self.profile_path, noiselevel=-1)
				writemsg("!!! ParseError: %s\n" % str(e), noiselevel=-1)

			if local_config and self.profiles:
				# A user-supplied custom profile extends the stack.
				custom_prof = os.path.join(
					config_root, CUSTOM_PROFILE_PATH)
				if os.path.exists(custom_prof):
					self.user_profile_dir = custom_prof
					self.profiles.append(custom_prof)

			self.packages_list = [grabfile_package(os.path.join(x, "packages")) for x in self.profiles]
			self.packages = stack_lists(self.packages_list, incremental=1)
			del self.packages_list
			#self.packages = grab_stacked("packages", self.profiles, grabfile, incremental_lines=1)

			# Index the system-set atoms by category/package name.
			self.prevmaskdict={}
			for x in self.packages:
				# Negative atoms are filtered by the above stack_lists() call.
				if not isinstance(x, dep.Atom):
					x = dep.Atom(x.lstrip('*'))
				self.prevmaskdict.setdefault(x.cp, []).append(x)

			self._pkeywords_list = []
			rawpkeywords = [grabdict_package(
				os.path.join(x, "package.keywords"), recursive=1) \
				for x in self.profiles]
			for pkeyworddict in rawpkeywords:
				# Index each profile's package.keywords by cp.
				for k, v in pkeyworddict.items():
					cpdict.setdefault(k.cp, {})[k] = v
				self._pkeywords_list.append(cpdict)

			# get profile-masked use flags -- INCREMENTAL Child over parent
			self.usemask_list = [grabfile(os.path.join(x, "use.mask"),
				recursive=1) for x in self.profiles]
			self.usemask = set(stack_lists(
				self.usemask_list, incremental=True))
			use_defs_lists = [grabdict(os.path.join(x, "use.defaults")) for x in self.profiles]
			self.use_defs = stack_dictlist(use_defs_lists, incremental=True)

			self.pusemask_list = []
			rawpusemask = [grabdict_package(os.path.join(x, "package.use.mask"),
				recursive=1) for x in self.profiles]
			for pusemaskdict in rawpusemask:
				for k, v in pusemaskdict.items():
					cpdict.setdefault(k.cp, {})[k] = v
				self.pusemask_list.append(cpdict)

			self.pkgprofileuse = []
			rawprofileuse = [grabdict_package(os.path.join(x, "package.use"),
				juststrings=True, recursive=1) for x in self.profiles]
			for rawpusedict in rawprofileuse:
				for k, v in rawpusedict.items():
					cpdict.setdefault(k.cp, {})[k] = v
				self.pkgprofileuse.append(cpdict)

			self.useforce_list = [grabfile(os.path.join(x, "use.force"),
				recursive=1) for x in self.profiles]
			self.useforce = set(stack_lists(
				self.useforce_list, incremental=True))

			self.puseforce_list = []
			rawpuseforce = [grabdict_package(
				os.path.join(x, "package.use.force"), recursive=1) \
				for x in self.profiles]
			for rawpusefdict in rawpuseforce:
				for k, v in rawpusefdict.items():
					cpdict.setdefault(k.cp, {})[k] = v
				self.puseforce_list.append(cpdict)

			make_conf = getconfig(
				os.path.join(config_root, MAKE_CONF_FILE),
				tolerant=tolerant, allow_sourcing=True)
			if make_conf is None:

			# Allow ROOT setting to come from make.conf if it's not overridden
			# by the constructor argument (from the calling environment).
			if target_root is None and "ROOT" in make_conf:
				target_root = make_conf["ROOT"]
				if not target_root.strip():
			if target_root is None:

			target_root = normalize_path(os.path.abspath(
				target_root)).rstrip(os.path.sep) + os.path.sep

			portage.util.ensure_dirs(target_root)
			check_var_directory("ROOT", target_root)

			# The expand_map is used for variable substitution
			# in getconfig() calls, and the getconfig() calls
			# update expand_map with the value of each variable
			# assignment that occurs. Variable substitution occurs
			# in the following order, which corresponds to the
			# order of appearance in self.lookuplist:
			# Notably absent is "env", since we want to avoid any
			# interaction with the calling environment that might
			# lead to unexpected results.

			env_d = getconfig(os.path.join(target_root, "etc", "profile.env"),
			# env_d will be None if profile.env doesn't exist.
				self.configdict["env.d"].update(env_d)
				expand_map.update(env_d)

			# backupenv is used for calculating incremental variables.

			# Avoid potential UnicodeDecodeError exceptions later.
			env_unicode = dict((_unicode_decode(k), _unicode_decode(v))
				for k, v in env.items())

			self.backupenv = env_unicode

				# Remove duplicate values so they don't override updated
				# profile.env values later (profile.env is reloaded in each
				# call to self.regenerate).
				for k, v in env_d.items():
						if self.backupenv[k] == v:
							del self.backupenv[k]

			self.configdict["env"] = util.LazyItemsDict(self.backupenv)

			# make.globals should not be relative to config_root
			# because it only contains constants.
			for x in (portage.const.GLOBAL_CONFIG_PATH, "/etc"):
				self.mygcfg = getconfig(os.path.join(x, "make.globals"),

			if self.mygcfg is None:

			self.configlist.append(self.mygcfg)
			self.configdict["globals"]=self.configlist[-1]

			self.make_defaults_use = []

				mygcfg_dlists = [getconfig(os.path.join(x, "make.defaults"),
					expand=expand_map) for x in self.profiles]

				# Record each profile's USE so incremental stacking can
				# be reproduced later.
				for cfg in mygcfg_dlists:
						self.make_defaults_use.append(cfg.get("USE", ""))
						self.make_defaults_use.append("")
				self.mygcfg = stack_dicts(mygcfg_dlists,
					incrementals=portage.const.INCREMENTALS)
			if self.mygcfg is None:
			self.configlist.append(self.mygcfg)
			self.configdict["defaults"]=self.configlist[-1]

			self.mygcfg = getconfig(
				os.path.join(config_root, MAKE_CONF_FILE),
				tolerant=tolerant, allow_sourcing=True, expand=expand_map)
			if self.mygcfg is None:

			# Don't allow the user to override certain variables in make.conf
			profile_only_variables = self.configdict["defaults"].get(
				"PROFILE_ONLY_VARIABLES", "").split()
			for k in profile_only_variables:
				self.mygcfg.pop(k, None)

			self.configlist.append(self.mygcfg)
			self.configdict["conf"]=self.configlist[-1]

			self.configlist.append(util.LazyItemsDict())
			self.configdict["pkg"]=self.configlist[-1]

			self.configlist.append({})
			self.configdict["auto"]=self.configlist[-1]

			self.configdict["backupenv"] = self.backupenv

			# Don't allow the user to override certain variables in the env
			for k in profile_only_variables:
				self.backupenv.pop(k, None)

			self.configlist.append(self.configdict["env"])

			# make lookuplist for loading package.*
			self.lookuplist=self.configlist[:]
			self.lookuplist.reverse()

			# Blacklist vars that could interfere with portage internals.
			for blacklisted in self._env_blacklist:
				for cfg in self.lookuplist:
					cfg.pop(blacklisted, None)
				self.backupenv.pop(blacklisted, None)
			del blacklisted, cfg

			self["PORTAGE_CONFIGROOT"] = config_root
			self.backup_changes("PORTAGE_CONFIGROOT")
			self["ROOT"] = target_root
			self.backup_changes("ROOT")

			self.pkeywordsdict = {}
			self._plicensedict = {}
			self._ppropertiesdict = {}
			self.punmaskdict = {}
			abs_user_config = os.path.join(config_root, USER_CONFIG_PATH)

			# locations for "categories" and "arch.list" files
			locations = [os.path.join(self["PORTDIR"], "profiles")]
			pmask_locations = [os.path.join(self["PORTDIR"], "profiles")]
			pmask_locations.extend(self.profiles)

			""" repoman controls PORTDIR_OVERLAY via the environment, so no
			special cases are needed here."""
			overlay_profiles = []
			for ov in self["PORTDIR_OVERLAY"].split():
				ov = normalize_path(ov)
				profiles_dir = os.path.join(ov, "profiles")
				if os.path.isdir(profiles_dir):
					overlay_profiles.append(profiles_dir)
			locations += overlay_profiles

			pmask_locations.extend(overlay_profiles)

				locations.append(abs_user_config)
				pmask_locations.append(abs_user_config)
				pusedict = grabdict_package(
					os.path.join(abs_user_config, "package.use"), recursive=1)
				for k, v in pusedict.items():
					self.pusedict.setdefault(k.cp, {})[k] = v

				pkgdict = grabdict_package(
					os.path.join(abs_user_config, "package.keywords"),
				for k, v in pkgdict.items():
					# default to ~arch if no specific keyword is given
						if self.configdict["defaults"] and \
							"ACCEPT_KEYWORDS" in self.configdict["defaults"]:
							groups = self.configdict["defaults"]["ACCEPT_KEYWORDS"].split()
						for keyword in groups:
							if not keyword[0] in "~-":
								mykeywordlist.append("~"+keyword)
					self.pkeywordsdict.setdefault(k.cp, {})[k] = v

				licdict = grabdict_package(os.path.join(
					abs_user_config, "package.license"), recursive=1)
				for k, v in licdict.items():
					cp_dict = self._plicensedict.get(cp)
						self._plicensedict[cp] = cp_dict
					cp_dict[k] = self.expandLicenseTokens(v)

				propdict = grabdict_package(os.path.join(
					abs_user_config, "package.properties"), recursive=1)
				for k, v in propdict.items():
					cp_dict = self._ppropertiesdict.get(cp)
						self._ppropertiesdict[cp] = cp_dict

				self._local_repo_configs = {}
				self._local_repo_conf_path = \
					os.path.join(abs_user_config, 'repos.conf')

					# Python 3 module name, with Python 2 fallback below.
					from configparser import SafeConfigParser, ParsingError
					from ConfigParser import SafeConfigParser, ParsingError
				repo_conf_parser = SafeConfigParser()
					repo_conf_parser.readfp(
							_unicode_encode(self._local_repo_conf_path,
							encoding=_encodings['fs'], errors='strict'),
							mode='r', encoding=_encodings['content'], errors='replace')
				except EnvironmentError as e:
					# A missing repos.conf is not an error.
					if e.errno != errno.ENOENT:
				except ParsingError as e:
					portage.util.writemsg_level(
						_("!!! Error parsing '%s': %s\n") % \
						(self._local_repo_conf_path, e),
						level=logging.ERROR, noiselevel=-1)

					repo_defaults = repo_conf_parser.defaults()
						self._local_repo_configs['DEFAULT'] = \
							_local_repo_config('DEFAULT', repo_defaults)
					# Each section overrides the [DEFAULT] options.
					for repo_name in repo_conf_parser.sections():
						repo_opts = repo_defaults.copy()
						for opt_name in repo_conf_parser.options(repo_name):
							repo_opts[opt_name] = \
								repo_conf_parser.get(repo_name, opt_name)
						self._local_repo_configs[repo_name] = \
							_local_repo_config(repo_name, repo_opts)

			#getting categories from an external file now
			categories = [grabfile(os.path.join(x, "categories")) for x in locations]
			self.categories = tuple(sorted(
				stack_lists(categories, incremental=1)))

			archlist = [grabfile(os.path.join(x, "arch.list")) for x in locations]
			archlist = stack_lists(archlist, incremental=1)
			self.configdict["conf"]["PORTAGE_ARCHLIST"] = " ".join(archlist)

			# package.mask and package.unmask
			for x in pmask_locations:
				pkgmasklines.append(grabfile_package(
					os.path.join(x, "package.mask"), recursive=1))
				pkgunmasklines.append(grabfile_package(
					os.path.join(x, "package.unmask"), recursive=1))
			pkgmasklines = stack_lists(pkgmasklines, incremental=1)
			pkgunmasklines = stack_lists(pkgunmasklines, incremental=1)

			for x in pkgmasklines:
				self.pmaskdict.setdefault(x.cp, []).append(x)

			for x in pkgunmasklines:
				self.punmaskdict.setdefault(x.cp, []).append(x)

			pkgprovidedlines = [grabfile(os.path.join(x, "package.provided"), recursive=1) for x in self.profiles]
			pkgprovidedlines = stack_lists(pkgprovidedlines, incremental=1)
			has_invalid_data = False
			# Validate package.provided entries in reverse order so
			# invalid ones can be deleted in place.
			for x in range(len(pkgprovidedlines)-1, -1, -1):
				myline = pkgprovidedlines[x]
				if not isvalidatom("=" + myline):
					writemsg(_("Invalid package name in package.provided: %s\n") % \
						myline, noiselevel=-1)
					has_invalid_data = True
					del pkgprovidedlines[x]
				cpvr = catpkgsplit(pkgprovidedlines[x])
				if not cpvr or cpvr[0] == "null":
					writemsg(_("Invalid package name in package.provided: ")+pkgprovidedlines[x]+"\n",
					has_invalid_data = True
					del pkgprovidedlines[x]
				if cpvr[0] == "virtual":
					writemsg(_("Virtual package in package.provided: %s\n") % \
						myline, noiselevel=-1)
					has_invalid_data = True
					del pkgprovidedlines[x]
			if has_invalid_data:
				writemsg(_("See portage(5) for correct package.provided usage.\n"),
			self.pprovideddict = {}
			for x in pkgprovidedlines:
				mycatpkg = cpv_getkey(x)
				if mycatpkg in self.pprovideddict:
					self.pprovideddict[mycatpkg].append(x)
					self.pprovideddict[mycatpkg]=[x]

			# parse licensegroups
				self._license_groups.update(
					grabdict(os.path.join(x, "license_groups")))

			# reasonable defaults; this is important as without USE_ORDER,
			# USE will always be "" (nothing set)!
			if "USE_ORDER" not in self:
				self.backupenv["USE_ORDER"] = "env:pkg:conf:defaults:pkginternal:env.d"

			self["PORTAGE_GID"] = str(portage_gid)
			self.backup_changes("PORTAGE_GID")

			if self.get("PORTAGE_DEPCACHEDIR", None):
				self.depcachedir = self["PORTAGE_DEPCACHEDIR"]
			self["PORTAGE_DEPCACHEDIR"] = self.depcachedir
			self.backup_changes("PORTAGE_DEPCACHEDIR")

			# Normalize PORTDIR_OVERLAY, dropping non-directories.
			overlays = self.get("PORTDIR_OVERLAY","").split()
				ov = normalize_path(ov)
				if os.path.isdir(ov):
					writemsg(_("!!! Invalid PORTDIR_OVERLAY"
						" (not a dir): '%s'\n") % ov, noiselevel=-1)
			self["PORTDIR_OVERLAY"] = " ".join(new_ov)
			self.backup_changes("PORTDIR_OVERLAY")

			if "CBUILD" not in self and "CHOST" in self:
				self["CBUILD"] = self["CHOST"]
				self.backup_changes("CBUILD")

			self["PORTAGE_BIN_PATH"] = PORTAGE_BIN_PATH
			self.backup_changes("PORTAGE_BIN_PATH")
			self["PORTAGE_PYM_PATH"] = PORTAGE_PYM_PATH
			self.backup_changes("PORTAGE_PYM_PATH")

			# Sanitize numeric uid/gid settings, falling back to "0"
			# on invalid values.
			for var in ("PORTAGE_INST_UID", "PORTAGE_INST_GID"):
					self[var] = str(int(self.get(var, "0")))
					writemsg(_("!!! %s='%s' is not a valid integer. "
						"Falling back to '0'.\n") % (var, self[var]),
				self.backup_changes(var)

			# initialize self.features
			if not portage.process.sandbox_capable and \
				("sandbox" in self.features or "usersandbox" in self.features):
				if self.profile_path is not None and \
					os.path.realpath(self.profile_path) == \
					os.path.realpath(os.path.join(config_root, PROFILE_PATH)):
					""" Don't show this warning when running repoman and the
					sandbox feature came from a profile that doesn't belong to
					the user."""
					writemsg(colorize("BAD", _("!!! Problem with sandbox"
						" binary. Disabling...\n\n")), noiselevel=-1)
				if "sandbox" in self.features:
					self.features.remove("sandbox")
				if "usersandbox" in self.features:
					self.features.remove("usersandbox")

				self.features.add('chflags')

			self["FEATURES"] = " ".join(sorted(self.features))
			self.backup_changes("FEATURES")

			# These FEATURES toggles control module-level EAPI cache
			# validation behavior.
			global _glep_55_enabled, _validate_cache_for_unsupported_eapis
			if 'parse-eapi-ebuild-head' in self.features:
				_validate_cache_for_unsupported_eapis = False
			if 'parse-eapi-glep-55' in self.features:
				_validate_cache_for_unsupported_eapis = False
				_glep_55_enabled = True
# Create a few directories critical to portage operation (tmp, var/tmp,
# PRIVATE_PATH, CACHE_PATH) under ROOT, each with a specific gid/mode/umask
# and a preserve_perms flag controlling whether existing dirs are left alone.
# NOTE(review): this listing is elided (embedded line numbers skip) -- the
# docstring delimiters, the early return when ROOT is not writable, the
# dir_mode_map assignment, the try: around ensure_dirs() and the noiselevel
# arguments of the writemsg() calls are not visible here.
2204 def _init_dirs(self):
2206 Create a few directories that are critical to portage operation
2208 if not os.access(self["ROOT"], os.W_OK):
2211 # gid, mode, mask, preserve_perms
2213 "tmp" : ( -1, 0o1777, 0, True),
2214 "var/tmp" : ( -1, 0o1777, 0, True),
2215 PRIVATE_PATH : (portage_gid, 0o2750, 0o2, False),
2216 CACHE_PATH : (portage_gid, 0o755, 0o2, False)
2219 for mypath, (gid, mode, modemask, preserve_perms) \
2220 in dir_mode_map.items():
2221 mydir = os.path.join(self["ROOT"], mypath)
2222 if preserve_perms and os.path.isdir(mydir):
2223 # Only adjust permissions on some directories if
2224 # they don't exist yet. This gives freedom to the
2225 # user to adjust permissions to suit their taste.
2228 portage.util.ensure_dirs(mydir, gid=gid, mode=mode, mask=modemask)
2229 except portage.exception.PortageException as e:
# Failure to create/chmod one directory is reported but does not abort
# processing of the remaining entries.
2230 writemsg(_("!!! Directory initialization failed: '%s'\n") % mydir,
2232 writemsg("!!! %s\n" % str(e),
def expandLicenseTokens(self, tokens):
	""" Take a token from ACCEPT_LICENSE or package.license and expand it
	if it's a group token (indicated by @) or just return it if it's not a
	group. If a group is negated then negate all group elements.

	@param tokens: iterable of license tokens (e.g. "GPL-2", "@FSF-APPROVED",
		"-@EULA")
	@return: flat list of expanded license tokens
	"""
	expanded_tokens = []
	# Expand each token independently; _expandLicenseToken() returns a
	# list (a group token may expand to many licenses), hence extend().
	# The second argument (traversed_groups) starts as None and is used
	# by the helper for cycle detection in nested group references.
	for x in tokens:
		expanded_tokens.extend(self._expandLicenseToken(x, None))
	return expanded_tokens
# Recursively expand one ACCEPT_LICENSE token. Plain license names are
# returned as-is; "@group" tokens are expanded via self._license_groups,
# with traversed_groups guarding against circular group references. A
# leading "-" negates every element of the expansion (final comprehension).
# NOTE(review): elided listing -- the rValue initialization, else:/continue
# lines, the undefined-group else branch and the final return are missing.
2244 def _expandLicenseToken(self, token, traversed_groups):
2247 if token.startswith("-"):
2249 license_name = token[1:]
2251 license_name = token
2252 if not license_name.startswith("@"):
# Not a group token: pass the original token (with its sign) through.
2253 rValue.append(token)
2255 group_name = license_name[1:]
2256 if not traversed_groups:
2257 traversed_groups = set()
2258 license_group = self._license_groups.get(group_name)
2259 if group_name in traversed_groups:
# Cycle detected: warn and emit the unexpanded group token instead of
# recursing forever.
2260 writemsg(_("Circular license group reference"
2261 " detected in '%s'\n") % group_name, noiselevel=-1)
2262 rValue.append("@"+group_name)
2264 traversed_groups.add(group_name)
2265 for l in license_group:
2266 if l.startswith("-"):
# Negated entries inside a group definition are invalid; skip them.
2267 writemsg(_("Skipping invalid element %s"
2268 " in license group '%s'\n") % (l, group_name),
2271 rValue.extend(self._expandLicenseToken(l, traversed_groups))
2273 writemsg(_("Undefined license group '%s'\n") % group_name,
2275 rValue.append("@"+group_name)
# A negated input token negates the whole expansion.
2277 rValue = ["-" + token for token in rValue]
# NOTE(review): the def line (original 2280, presumably "def validate(self):")
# is elided from this listing. Per the docstring, this method only emits
# warnings; it does not raise or mutate settings.
2281 """Validate miscellaneous settings and display warnings if necessary.
2282 (This code was previously in the global scope of portage.py)"""
2284 groups = self["ACCEPT_KEYWORDS"].split()
2285 archlist = self.archlist()
# Warn when the arch list is empty (likely an empty/absent portage tree).
2287 writemsg(_("--- 'profiles/arch.list' is empty or "
2288 "not available. Empty portage tree?\n"), noiselevel=1)
# Each ACCEPT_KEYWORDS entry must be a known arch, a negated known arch,
# or one of the special wildcard keywords.
2290 for group in groups:
2291 if group not in archlist and \
2292 not (group.startswith("-") and group[1:] in archlist) and \
2293 group not in ("*", "~*", "**"):
2294 writemsg(_("!!! INVALID ACCEPT_KEYWORDS: %s\n") % str(group),
# Warn if make.profile is neither a symlink nor a directory-style profile
# (no "parent" file) while PORTDIR/profiles exists.
2297 abs_profile_path = os.path.join(self["PORTAGE_CONFIGROOT"],
2299 if not self.profile_path or (not os.path.islink(abs_profile_path) and \
2300 not os.path.exists(os.path.join(abs_profile_path, "parent")) and \
2301 os.path.exists(os.path.join(self["PORTDIR"], "profiles"))):
2302 writemsg(_("\a\n\n!!! %s is not a symlink and will probably prevent most merges.\n") % abs_profile_path,
2304 writemsg(_("!!! It should point into a profile within %s/profiles/\n") % self["PORTDIR"])
2305 writemsg(_("!!! (You can safely ignore this message when syncing. It's harmless.)\n\n\n"))
# Deprecation notice for the old /etc/portage/virtuals location.
2307 abs_user_virtuals = os.path.join(self["PORTAGE_CONFIGROOT"],
2309 if os.path.exists(abs_user_virtuals):
2310 writemsg("\n!!! /etc/portage/virtuals is deprecated in favor of\n")
2311 writemsg("!!! /etc/portage/profile/virtuals. Please move it to\n")
2312 writemsg("!!! this new location.\n\n")
# FEATURES=fakeroot requires the fakeroot binary to be present.
2314 if "fakeroot" in self.features and \
2315 not portage.process.fakeroot_capable:
2316 writemsg(_("!!! FEATURES=fakeroot is enabled, but the "
2317 "fakeroot binary is not installed.\n"), noiselevel=-1)
def loadVirtuals(self,root):
	"""Deprecated entry point kept for API compatibility.

	Emits a deprecation notice on the message stream and then simply
	delegates to getvirtuals(); the return value is discarded.
	"""
	# Warn every call, then forward to the supported implementation.
	writemsg("DEPRECATED: portage.config.loadVirtuals\n")
	self.getvirtuals(root)
# Resolve the best module name for property_string from self.modules
# (honoring self.module_priority) and import it via load_mod(), retrying
# with a "portage." prefix for "cache.*" module names.
# NOTE(review): elided listing -- the try/except around the first
# load_mod() call and the final return of the loaded module are missing.
2324 def load_best_module(self,property_string):
2325 best_mod = best_from_dict(property_string,self.modules,self.module_priority)
2328 mod = load_mod(best_mod)
2330 if best_mod.startswith("cache."):
# Fallback: legacy "cache.*" names live under the portage package.
2331 best_mod = "portage." + best_mod
2333 mod = load_mod(best_mod)
def modifying(self):
	"""Guard called by mutating methods: raise if this config object has
	been locked against modification, otherwise do nothing.

	@raise Exception: when the configuration is locked.

	NOTE(review): the guard condition was elided in the source listing;
	restored as the conventional check of self.locked so that the raise
	is not unconditional -- confirm against upstream portage.py.
	"""
	if self.locked:
		raise Exception(_("Configuration is locked."))
def backup_changes(self,key=None):
	"""Persist the current environment value of *key* into self.backupenv
	(deep-copied) so it survives subsequent reset() calls.

	@param key: name of a key present in self.configdict["env"]
	@raise KeyError: if key is falsy or absent from the env configdict.

	NOTE(review): two lines were elided in the source listing. The else:
	before the raise is structurally required (otherwise every call would
	raise); the leading self.modifying() lock-check matches this class's
	convention for mutators -- confirm both against upstream portage.py.
	"""
	self.modifying()
	if key and key in self.configdict["env"]:
		# deepcopy so later env mutations don't leak into the backup.
		self.backupenv[key] = copy.deepcopy(self.configdict["env"][key])
	else:
		raise KeyError(_("No such key defined in environment: %s") % key)
# Restore the "env" configdict from self.backupenv and regenerate all
# derived state (USE, usemask, useforce, defaults).
# NOTE(review): elided listing -- the docstring delimiters, the
# self.modifying() call and the "if not keeping_pkg:" guard around the
# pkg/pkginternal clearing (implied by the keeping_pkg docstring) are
# not visible here.
2357 def reset(self,keeping_pkg=0,use_cache=1):
2359 Restore environment from self.backupenv, call self.regenerate()
2360 @param keeping_pkg: Should we keep the set_cpv() data or delete it.
2361 @type keeping_pkg: Boolean
2362 @param use_cache: Should self.regenerate use the cache or not
2363 @type use_cache: Boolean
2367 self.configdict["env"].clear()
2368 self.configdict["env"].update(self.backupenv)
2370 self.modifiedkeys = []
2374 self.configdict["pkg"].clear()
2375 self.configdict["pkginternal"].clear()
2376 self.configdict["defaults"]["USE"] = \
2377 " ".join(self.make_defaults_use)
# Recompute masked/forced USE from the profile stacks.
2378 self.usemask = set(stack_lists(
2379 self.usemask_list, incremental=True))
2380 self.useforce = set(stack_lists(
2381 self.useforce_list, incremental=True))
2382 self.regenerate(use_cache=use_cache)
# Deprecated no-op shim: only issues a DeprecationWarning.
# NOTE(review): the remaining arguments of warnings.warn() (warning
# category, stacklevel) and any further body lines are elided here.
2384 def load_infodir(self,infodir):
2385 warnings.warn("portage.config.load_infodir() is deprecated",
# Lazily computes ACCEPT_LICENSE / PORTAGE_RESTRICT for the current
# package: values are derived on first __getitem__ and cached in
# self.values. Pruning ACCEPT_LICENSE against the package's LICENSE keeps
# the exported variable small (avoids E2BIG / ARG_MAX, bug #262647 per
# the docstring below).
# NOTE(review): elided listing -- the "self.values = None" initialization
# in __init__, the values-dict creation and return in _init_values, the
# try:/else: lines around use_reduce, and several branch keywords in
# _accept_license/_restrict are not visible here.
2389 class _lazy_vars(object):
2391 __slots__ = ('built_use', 'settings', 'values')
2393 def __init__(self, built_use, settings):
2394 self.built_use = built_use
2395 self.settings = settings
# Memoizing item access: compute all values once, then serve from cache.
2398 def __getitem__(self, k):
2399 if self.values is None:
2400 self.values = self._init_values()
2401 return self.values[k]
2403 def _init_values(self):
2405 settings = self.settings
2406 use = self.built_use
# For non-built packages, fall back to the currently computed PORTAGE_USE.
2408 use = frozenset(settings['PORTAGE_USE'].split())
2409 values['ACCEPT_LICENSE'] = self._accept_license(use, settings)
2410 values['PORTAGE_RESTRICT'] = self._restrict(use, settings)
2413 def _accept_license(self, use, settings):
2415 Generate a pruned version of ACCEPT_LICENSE, by intersection with
2416 LICENSE. This is required since otherwise ACCEPT_LICENSE might be
2417 too big (bigger than ARG_MAX), causing execve() calls to fail with
2418 E2BIG errors as in bug #262647.
2421 licenses = set(flatten(
2422 dep.use_reduce(dep.paren_reduce(
2423 settings['LICENSE']),
2425 except exception.InvalidDependString:
2427 licenses.discard('||')
2428 if settings._accept_license:
# Incremental evaluation of accepted tokens: "*" accepts all of the
# package's licenses, "-*" clears, "-X" removes X, plain X adds X.
2429 acceptable_licenses = set()
2430 for x in settings._accept_license:
2432 acceptable_licenses.update(licenses)
2434 acceptable_licenses.clear()
2436 acceptable_licenses.discard(x[1:])
2438 acceptable_licenses.add(x)
2440 licenses = acceptable_licenses
2441 return ' '.join(sorted(licenses))
2443 def _restrict(self, use, settings):
2445 restrict = set(flatten(
2446 dep.use_reduce(dep.paren_reduce(
2447 settings['RESTRICT']),
2449 except exception.InvalidDependString:
2451 return ' '.join(sorted(restrict))
# Lazily computes the value of a USE_EXPAND variable (e.g. LINGUAS,
# VIDEO_CARDS) from the already-computed USE set, keeping the exported
# variable consistent with USE while preserving user-specified ordering.
# NOTE(review): elided listing -- the docstring delimiters, the
# "self._use = use" assignment in __init__, the has_iuse initialization,
# several if/else branch lines (wildcard handling, the empty-value export
# rules at original lines ~2516-2531) and the final return are missing.
2453 class _lazy_use_expand(object):
2455 Lazily evaluate USE_EXPAND variables since they are only needed when
2456 an ebuild shell is spawned. Variables values are made consistent with
2457 the previously calculated USE settings.
2460 def __init__(self, use, usemask, iuse_implicit,
2461 use_expand_split, use_expand_dict):
2463 self._usemask = usemask
2464 self._iuse_implicit = iuse_implicit
2465 self._use_expand_split = use_expand_split
2466 self._use_expand_dict = use_expand_dict
2468 def __getitem__(self, key):
# Flags belonging to this variable carry a "<var>_" prefix in USE;
# slice comparison is used instead of startswith() for speed.
2469 prefix = key.lower() + '_'
2470 prefix_len = len(prefix)
2471 expand_flags = set( x[prefix_len:] for x in self._use \
2472 if x[:prefix_len] == prefix )
2473 var_split = self._use_expand_dict.get(key, '').split()
2474 # Preserve the order of var_split because it can matter for things
2476 var_split = [ x for x in var_split if x in expand_flags ]
2477 var_split.extend(expand_flags.difference(var_split))
2478 has_wildcard = '*' in expand_flags
2480 var_split = [ x for x in var_split if x != "*" ]
# Collect this variable's members that are declared in (implicit) IUSE.
2482 for x in self._iuse_implicit:
2483 if x[:prefix_len] == prefix:
2484 has_iuse.add(x[prefix_len:])
2486 # * means to enable everything in IUSE that's not masked
2488 usemask = self._usemask
2489 for suffix in has_iuse:
2491 if x not in usemask:
2492 if suffix not in expand_flags:
2493 var_split.append(suffix)
2495 # If there is a wildcard and no matching flags in IUSE then
2496 # LINGUAS should be unset so that all .mo files are
2499 # Make the flags unique and filter them according to IUSE.
2500 # Also, continue to preserve order for things like LINGUAS
2501 # and filter any duplicates that variable may contain.
2502 filtered_var_split = []
2503 remaining = has_iuse.intersection(var_split)
2507 filtered_var_split.append(x)
2508 var_split = filtered_var_split
2511 value = ' '.join(var_split)
2513 # Don't export empty USE_EXPAND vars unless the user config
2514 # exports them as empty. This is required for vars such as
2515 # LINGUAS, where unset and empty have different meanings.
2517 # ebuild.sh will see this and unset the variable so
2518 # that things like LINGUAS work properly
2524 # It's not in IUSE, so just allow the variable content
2525 # to pass through if it is defined somewhere. This
2526 # allows packages that support LINGUAS but don't
2527 # declare it in IUSE to use the variable outside of the
2528 # USE_EXPAND context.
# Load one package's metadata into this config: populates the "pkg"
# configdict from mydb (or a Package-like mycpv), applies per-package
# profile/user USE settings, recomputes usemask/useforce, resets derived
# state, and finally computes PORTAGE_USE filtered against IUSE.
# NOTE(review): elided listing throughout -- early returns, several
# else:/try: branch lines, the pkginternaluse/defaults/has_changed
# initializations and the Package-instance handling around original
# lines 2552-2566 are not visible here.
2533 def setcpv(self, mycpv, use_cache=1, mydb=None):
2535 Load a particular CPV into the config, this lets us see the
2536 Default USE flags for a particular ebuild as well as the USE
2537 flags from package.use.
2539 @param mycpv: A cpv to load
2541 @param use_cache: Enables caching
2542 @type use_cache: Boolean
2543 @param mydb: a dbapi instance that supports aux_get with the IUSE key.
2544 @type mydb: dbapi or derivative.
# mycpv may be either a plain cpv string or a Package-like object with
# pre-built USE (pkg.use.enabled); the hash below memoizes repeat calls.
2552 if not isinstance(mycpv, basestring):
2556 args_hash = (mycpv, id(pkg))
2558 built_use = pkg.use.enabled
2560 args_hash = (mycpv, id(mydb))
2562 if args_hash == self._setcpv_args_hash:
2564 self._setcpv_args_hash = args_hash
2568 cat, pf = catsplit(mycpv)
2569 cp = dep_getkey(mycpv)
2570 cpv_slot = self.mycpv
2573 pkg_configdict = self.configdict["pkg"]
2574 previous_iuse = pkg_configdict.get("IUSE")
2576 aux_keys = self._setcpv_aux_keys
2578 # Discard any existing metadata from the previous package, but
2579 # preserve things like USE_EXPAND values and PORTAGE_USE which
2582 pkg_configdict.pop(k, None)
2584 pkg_configdict["CATEGORY"] = cat
2585 pkg_configdict["PF"] = pf
2587 if not hasattr(mydb, "aux_get"):
2590 # Make these lazy, since __getitem__ triggers
2591 # evaluation of USE conditionals which can't
2592 # occur until PORTAGE_USE is calculated below.
2593 pkg_configdict.addLazySingleton(k,
2594 mydb.__getitem__, k)
2596 for k, v in zip(aux_keys, mydb.aux_get(self.mycpv, aux_keys)):
2597 pkg_configdict[k] = v
2598 repository = pkg_configdict.pop("repository", None)
2599 if repository is not None:
2600 pkg_configdict["PORTAGE_REPO_NAME"] = repository
2601 slot = pkg_configdict["SLOT"]
2602 iuse = pkg_configdict["IUSE"]
2604 cpv_slot = "%s:%s" % (self.mycpv, slot)
# IUSE defaults: "+flag" enables, "-flag" disables, by way of the
# "pkginternal" configdict layer.
2608 for x in iuse.split():
2609 if x.startswith("+"):
2610 pkginternaluse.append(x[1:])
2611 elif x.startswith("-"):
2612 pkginternaluse.append(x)
2613 pkginternaluse = " ".join(pkginternaluse)
2614 if pkginternaluse != self.configdict["pkginternal"].get("USE", ""):
2615 self.configdict["pkginternal"]["USE"] = pkginternaluse
# Stack per-profile package.use entries (most specific atom wins; atoms
# are consumed from keys so each matches at most once).
2620 for i, pkgprofileuse_dict in enumerate(self.pkgprofileuse):
2621 cpdict = pkgprofileuse_dict.get(cp)
2625 bestmatch = best_match_to_list(cpv_slot, keys)
2627 keys.remove(bestmatch)
2628 defaults.insert(pos, cpdict[bestmatch])
2632 if self.make_defaults_use[i]:
2633 defaults.insert(pos, self.make_defaults_use[i])
2635 defaults = " ".join(defaults)
2636 if defaults != self.configdict["defaults"].get("USE",""):
2637 self.configdict["defaults"]["USE"] = defaults
2640 useforce = self._getUseForce(cpv_slot)
2641 if useforce != self.useforce:
2642 self.useforce = useforce
2645 usemask = self._getUseMask(cpv_slot)
2646 if usemask != self.usemask:
2647 self.usemask = usemask
# Per-package user USE (package.use) for this cp.
2651 cpdict = self.pusedict.get(cp)
2655 self.pusekey = best_match_to_list(cpv_slot, keys)
2657 keys.remove(self.pusekey)
2658 self.puse = (" ".join(cpdict[self.pusekey])) + " " + self.puse
2662 if oldpuse != self.puse:
2664 self.configdict["pkg"]["PKGUSE"] = self.puse[:] # For saving to PUSE file
2665 self.configdict["pkg"]["USE"] = self.puse[:] # this gets appended to USE
2668 self.reset(keeping_pkg=1,use_cache=use_cache)
2670 # Ensure that "pkg" values are always preferred over "env" values.
2671 # This must occur _after_ the above reset() call, since reset()
2672 # copies values from self.backupenv.
2673 env_configdict = self.configdict['env']
2674 for k in pkg_configdict:
2676 env_configdict.pop(k, None)
2678 lazy_vars = self._lazy_vars(built_use, self)
2679 env_configdict.addLazySingleton('ACCEPT_LICENSE',
2680 lazy_vars.__getitem__, 'ACCEPT_LICENSE')
2681 env_configdict.addLazySingleton('PORTAGE_RESTRICT',
2682 lazy_vars.__getitem__, 'PORTAGE_RESTRICT')
2684 # If reset() has not been called, it's safe to return
2685 # early if IUSE has not changed.
2686 if not has_changed and previous_iuse == iuse:
2689 # Filter out USE flags that aren't part of IUSE. This has to
2690 # be done for every setcpv() call since practically every
2691 # package has different IUSE.
2692 use = set(self["USE"].split())
2693 iuse_implicit = self._get_implicit_iuse()
2694 iuse_implicit.update(x.lstrip("+-") for x in iuse.split())
2696 # PORTAGE_IUSE is not always needed so it's lazily evaluated.
2697 self.configdict["pkg"].addLazySingleton(
2698 "PORTAGE_IUSE", _lazy_iuse_regex, iuse_implicit)
2700 ebuild_force_test = self.get("EBUILD_FORCE_TEST") == "1"
2701 if ebuild_force_test and \
2702 not hasattr(self, "_ebuild_force_test_msg_shown"):
2703 self._ebuild_force_test_msg_shown = True
2704 writemsg(_("Forcing test.\n"), noiselevel=-1)
2705 if "test" in self.features and "test" in iuse_implicit:
2706 if "test" in self.usemask and not ebuild_force_test:
2707 # "test" is in IUSE and USE=test is masked, so execution
2708 # of src_test() probably is not reliable. Therefore,
2709 # temporarily disable FEATURES=test just for this package.
2710 self["FEATURES"] = " ".join(x for x in self.features \
2715 if ebuild_force_test:
2716 self.usemask.discard("test")
2718 # Allow _* flags from USE_EXPAND wildcards to pass through here.
2719 use.difference_update([x for x in use \
2720 if x not in iuse_implicit and x[-2:] != '_*'])
2722 # Use the calculated USE flags to regenerate the USE_EXPAND flags so
2723 # that they are consistent. For optimal performance, use slice
2724 # comparison instead of startswith().
2725 use_expand_split = set(x.lower() for \
2726 x in self.get('USE_EXPAND', '').split())
2727 lazy_use_expand = self._lazy_use_expand(use, self.usemask,
2728 iuse_implicit, use_expand_split, self._use_expand_dict)
# Group IUSE flags by their owning USE_EXPAND variable prefix.
2730 use_expand_iuses = {}
2731 for x in iuse_implicit:
2732 x_split = x.split('_')
2733 if len(x_split) == 1:
2735 for i in range(len(x_split) - 1):
2736 k = '_'.join(x_split[:i+1])
2737 if k in use_expand_split:
2738 v = use_expand_iuses.get(k)
2741 use_expand_iuses[k] = v
2745 # If it's not in IUSE, variable content is allowed
2746 # to pass through if it is defined somewhere. This
2747 # allows packages that support LINGUAS but don't
2748 # declare it in IUSE to use the variable outside of the
2749 # USE_EXPAND context.
2750 for k, use_expand_iuse in use_expand_iuses.items():
2752 use.update( x for x in use_expand_iuse if x not in usemask )
2754 self.configdict['env'].addLazySingleton(k,
2755 lazy_use_expand.__getitem__, k)
2757 # Filtered for the ebuild environment. Store this in a separate
2758 # attribute since we still want to be able to see global USE
2759 # settings for things like emerge --info.
2761 self.configdict["pkg"]["PORTAGE_USE"] = \
2762 " ".join(sorted(x for x in use if x[-2:] != '_*'))
# Build the set of flags implicitly considered part of IUSE (arch flags,
# hidden USE_EXPAND patterns, masked/forced flags, build/bootstrap).
# NOTE(review): elided listing -- the docstring delimiters and the
# "if arch:" guard before adding it (original line 2777) are missing.
2764 def _get_implicit_iuse(self):
2766 Some flags are considered to
2767 be implicit members of IUSE:
2768 * Flags derived from ARCH
2769 * Flags derived from USE_EXPAND_HIDDEN variables
2770 * Masked flags, such as those from {,package}use.mask
2771 * Forced flags, such as those from {,package}use.force
2772 * build and bootstrap flags used by bootstrap.sh
2774 iuse_implicit = set()
2775 # Flags derived from ARCH.
2776 arch = self.configdict["defaults"].get("ARCH")
2778 iuse_implicit.add(arch)
2779 iuse_implicit.update(self.get("PORTAGE_ARCHLIST", "").split())
2781 # Flags derived from USE_EXPAND_HIDDEN variables
2782 # such as ELIBC, KERNEL, and USERLAND.
2783 use_expand_hidden = self.get("USE_EXPAND_HIDDEN", "").split()
2784 for x in use_expand_hidden:
# Stored as a regex fragment ("<var>_.*") matching any member flag.
2785 iuse_implicit.add(x.lower() + "_.*")
2787 # Flags that have been masked or forced.
2788 iuse_implicit.update(self.usemask)
2789 iuse_implicit.update(self.useforce)
2791 # build and bootstrap flags used by bootstrap.sh
2792 iuse_implicit.add("build")
2793 iuse_implicit.add("bootstrap")
2794 return iuse_implicit
# Compute the effective USE mask for pkg (cpv:slot string or Package) by
# stacking per-profile use.mask lists with package.use.mask overrides.
# NOTE(review): elided -- the usemask list initialization, the
# cpdict-is-None guard and the pos bookkeeping lines are missing.
2796 def _getUseMask(self, pkg):
2797 cp = getattr(pkg, "cp", None)
2799 cp = dep_getkey(pkg)
2802 for i, pusemask_dict in enumerate(self.pusemask_list):
2803 cpdict = pusemask_dict.get(cp)
# Most specific matching atom wins; each key is consumed at most once.
2807 best_match = best_match_to_list(pkg, keys)
2809 keys.remove(best_match)
2810 usemask.insert(pos, cpdict[best_match])
2814 if self.usemask_list[i]:
2815 usemask.insert(pos, self.usemask_list[i])
2817 return set(stack_lists(usemask, incremental=True))
# Compute the effective forced-USE set for pkg; mirrors _getUseMask but
# over the use.force / package.use.force stacks.
# NOTE(review): elided -- list initialization and guard lines missing,
# same shape as _getUseMask above.
2819 def _getUseForce(self, pkg):
2820 cp = getattr(pkg, "cp", None)
2822 cp = dep_getkey(pkg)
2825 for i, puseforce_dict in enumerate(self.puseforce_list):
2826 cpdict = puseforce_dict.get(cp)
2830 best_match = best_match_to_list(pkg, keys)
2832 keys.remove(best_match)
2833 useforce.insert(pos, cpdict[best_match])
2837 if self.useforce_list[i]:
2838 useforce.insert(pos, self.useforce_list[i])
2840 return set(stack_lists(useforce, incremental=True))
# Return the package.mask atom matching cpv (honoring package.unmask
# cancellation), or None.
# NOTE(review): elided -- the mask_atoms-is-None early return, the
# continue after a non-match, and the return statements are missing.
2842 def _getMaskAtom(self, cpv, metadata):
2844 Take a package and return a matching package.mask atom, or None if no
2845 such atom exists or it has been cancelled by package.unmask. PROVIDE
2846 is not checked, so atoms will not be found for old-style virtuals.
2848 @param cpv: The package name
2850 @param metadata: A dictionary of raw package metadata
2851 @type metadata: dict
2853 @return: An matching atom string or None if one is not found.
2856 cp = cpv_getkey(cpv)
2857 mask_atoms = self.pmaskdict.get(cp)
# Matching is done against "cpv:SLOT" so slot-specific atoms apply.
2859 pkg_list = ["%s:%s" % (cpv, metadata["SLOT"])]
2860 unmask_atoms = self.punmaskdict.get(cp)
2861 for x in mask_atoms:
2862 if not match_from_list(x, pkg_list):
# A matching unmask atom cancels this mask atom.
2865 for y in unmask_atoms:
2866 if match_from_list(y, pkg_list):
# Return the profile "packages"-file atom masking cpv, or None.
# NOTE(review): elided -- the profile_atoms-is-None early return and the
# return statements (matched atom / None) are missing from this listing.
2871 def _getProfileMaskAtom(self, cpv, metadata):
2873 Take a package and return a matching profile atom, or None if no
2874 such atom exists. Note that a profile atom may or may not have a "*"
2875 prefix. PROVIDE is not checked, so atoms will not be found for
2878 @param cpv: The package name
2880 @param metadata: A dictionary of raw package metadata
2881 @type metadata: dict
2883 @return: An matching profile atom string or None if one is not found.
2886 cp = cpv_getkey(cpv)
2887 profile_atoms = self.prevmaskdict.get(cp)
2889 pkg_list = ["%s:%s" % (cpv, metadata["SLOT"])]
2890 for x in profile_atoms:
2891 if match_from_list(x, pkg_list):
# Compute the effective KEYWORDS for cpv by stacking the ebuild's
# KEYWORDS (minus "-*") with profile-level package.keywords entries.
# NOTE(review): elided -- the cpdict guard and pos bookkeeping lines are
# missing; same best_match_to_list consumption pattern as _getUseMask.
2896 def _getKeywords(self, cpv, metadata):
2897 cp = cpv_getkey(cpv)
2898 pkg = "%s:%s" % (cpv, metadata["SLOT"])
2899 keywords = [[x for x in metadata["KEYWORDS"].split() if x != "-*"]]
2901 for pkeywords_dict in self._pkeywords_list:
2902 cpdict = pkeywords_dict.get(cp)
2906 best_match = best_match_to_list(pkg, keys)
2908 keys.remove(best_match)
2909 keywords.insert(pos, cpdict[best_match])
2913 return stack_lists(keywords, incremental=True)
# Return the list of KEYWORDS the user still needs to accept for cpv
# (empty list when the package is fully accepted; ["**"] when the ebuild
# declares no keywords at all and ** is not accepted).
# NOTE(review): elided listing -- docstring delimiters, the matches flag
# initialization, the incremental-stacking loop body around original
# lines 2951-2963 and the hasstable/hastesting classification and final
# returns are not fully visible here.
2915 def _getMissingKeywords(self, cpv, metadata):
2917 Take a package and return a list of any KEYWORDS that the user may
2918 may need to accept for the given package. If the KEYWORDS are empty
2919 and the the ** keyword has not been accepted, the returned list will
2920 contain ** alone (in order to distiguish from the case of "none
2923 @param cpv: The package name (for package.keywords support)
2925 @param metadata: A dictionary of raw package metadata
2926 @type metadata: dict
2928 @return: A list of KEYWORDS that have not been accepted.
2931 # Hack: Need to check the env directly here as otherwise stacking
2932 # doesn't work properly as negative values are lost in the config
2933 # object (bug #139600)
2934 egroups = self.configdict["backupenv"].get(
2935 "ACCEPT_KEYWORDS", "").split()
2936 mygroups = self._getKeywords(cpv, metadata)
2937 # Repoman may modify this attribute as necessary.
2938 pgroups = self["ACCEPT_KEYWORDS"].split()
2940 cp = cpv_getkey(cpv)
2941 pkgdict = self.pkeywordsdict.get(cp)
# User package.keywords entries matching this cpv:SLOT extend pgroups.
2944 cpv_slot_list = ["%s:%s" % (cpv, metadata["SLOT"])]
2945 for atom, pkgkeywords in pkgdict.items():
2946 if match_from_list(atom, cpv_slot_list):
2948 pgroups.extend(pkgkeywords)
2949 if matches or egroups:
2950 pgroups.extend(egroups)
# Incremental handling: "-x" removes a previously accepted keyword.
2953 if x.startswith("-"):
2957 inc_pgroups.discard(x[1:])
2960 pgroups = inc_pgroups
# Warn about use of the * / -* wildcard keywords by the package itself.
2965 if gp == "*" or (gp == "-*" and len(mygroups) == 1):
2966 writemsg(_("--- WARNING: Package '%(cpv)s' uses"
2967 " '%(keyword)s' keyword.\n") % {"cpv": cpv, "keyword": gp}, noiselevel=-1)
2974 elif gp.startswith("~"):
2976 elif not gp.startswith("-"):
# Package is acceptable if a testing/stable wildcard covers it.
2979 ((hastesting and "~*" in pgroups) or \
2980 (hasstable and "*" in pgroups) or "**" in pgroups):
2986 # If KEYWORDS is empty then we still have to return something
2987 # in order to distiguish from the case of "none missing".
2988 mygroups.append("**")
# Return the licenses of cpv not covered by ACCEPT_LICENSE (plus any
# package.license overrides). May raise InvalidDependString per docstring.
# NOTE(review): elided listing -- the early return when _accept_license
# is empty, the cpdict guard, and the "?"-free fast path between the
# accept loop and the USE-conditional evaluation are not visible.
2992 def _getMissingLicenses(self, cpv, metadata):
2994 Take a LICENSE string and return a list any licenses that the user may
2995 may need to accept for the given package. The returned list will not
2996 contain any licenses that have already been accepted. This method
2997 can throw an InvalidDependString exception.
2999 @param cpv: The package name (for package.license support)
3001 @param metadata: A dictionary of raw package metadata
3002 @type metadata: dict
3004 @return: A list of licenses that have not been accepted.
3006 if not self._accept_license:
3008 accept_license = self._accept_license
3009 cpdict = self._plicensedict.get(dep_getkey(cpv), None)
# package.license entries matching cpv:SLOT extend the accepted set.
3011 accept_license = list(self._accept_license)
3012 cpv_slot = "%s:%s" % (cpv, metadata["SLOT"])
3013 for atom in match_to_list(cpv_slot, list(cpdict)):
3014 accept_license.extend(cpdict[atom])
3016 licenses = set(flatten(dep.use_reduce(dep.paren_reduce(
3017 metadata["LICENSE"]), matchall=1)))
3018 licenses.discard('||')
# Incremental token evaluation: "*", "-*", "-X", "X" (same scheme as
# _lazy_vars._accept_license above).
3020 acceptable_licenses = set()
3021 for x in accept_license:
3023 acceptable_licenses.update(licenses)
3025 acceptable_licenses.clear()
3027 acceptable_licenses.discard(x[1:])
3029 acceptable_licenses.add(x)
3031 license_str = metadata["LICENSE"]
3032 if "?" in license_str:
# USE-conditional licenses: evaluate against the package's USE.
3033 use = metadata["USE"].split()
3037 license_struct = portage.dep.use_reduce(
3038 portage.dep.paren_reduce(license_str), uselist=use)
3039 license_struct = portage.dep.dep_opconvert(license_struct)
3040 return self._getMaskedLicenses(license_struct, acceptable_licenses)
# Recursively walk a dep_opconvert()-style LICENSE structure and collect
# the licenses not in acceptable_licenses. For "||" groups, any single
# acceptable alternative satisfies the group; otherwise all masked
# alternatives are reported (see comment below).
# NOTE(review): elided -- the empty-structure return, the per-branch
# ret/continue/return lines and the "||"-satisfied early return are
# missing from this listing.
3042 def _getMaskedLicenses(self, license_struct, acceptable_licenses):
3043 if not license_struct:
3045 if license_struct[0] == "||":
3047 for element in license_struct[1:]:
3048 if isinstance(element, list):
3050 ret.append(self._getMaskedLicenses(
3051 element, acceptable_licenses))
3055 if element in acceptable_licenses:
3058 # Return all masked licenses, since we don't know which combination
3059 # (if any) the user will decide to unmask.
3063 for element in license_struct:
3064 if isinstance(element, list):
3066 ret.extend(self._getMaskedLicenses(element,
3067 acceptable_licenses))
3069 if element not in acceptable_licenses:
# PROPERTIES analogue of _getMissingLicenses: return properties of cpv
# not covered by ACCEPT_PROPERTIES / package.properties.
# NOTE(review): elided with the same gaps as _getMissingLicenses (early
# return, cpdict guard, "?"-free fast path).
3073 def _getMissingProperties(self, cpv, metadata):
3075 Take a PROPERTIES string and return a list of any properties the user may
3076 may need to accept for the given package. The returned list will not
3077 contain any properties that have already been accepted. This method
3078 can throw an InvalidDependString exception.
3080 @param cpv: The package name (for package.properties support)
3082 @param metadata: A dictionary of raw package metadata
3083 @type metadata: dict
3085 @return: A list of properties that have not been accepted.
3087 if not self._accept_properties:
3089 accept_properties = self._accept_properties
3090 cpdict = self._ppropertiesdict.get(dep_getkey(cpv), None)
3092 accept_properties = list(self._accept_properties)
3093 cpv_slot = "%s:%s" % (cpv, metadata["SLOT"])
3094 for atom in match_to_list(cpv_slot, list(cpdict)):
3095 accept_properties.extend(cpdict[atom])
3097 properties = set(flatten(dep.use_reduce(dep.paren_reduce(
3098 metadata["PROPERTIES"]), matchall=1)))
3099 properties.discard('||')
# Incremental token evaluation, same scheme as license acceptance.
3101 acceptable_properties = set()
3102 for x in accept_properties:
3104 acceptable_properties.update(properties)
3106 acceptable_properties.clear()
3108 acceptable_properties.discard(x[1:])
3110 acceptable_properties.add(x)
3112 properties_str = metadata["PROPERTIES"]
3113 if "?" in properties_str:
3114 use = metadata["USE"].split()
3118 properties_struct = portage.dep.use_reduce(
3119 portage.dep.paren_reduce(properties_str), uselist=use)
3120 properties_struct = portage.dep.dep_opconvert(properties_struct)
3121 return self._getMaskedProperties(properties_struct, acceptable_properties)
# PROPERTIES analogue of _getMaskedLicenses: recursively collect
# properties not in acceptable_properties, with "||" groups satisfied by
# any single acceptable alternative.
# NOTE(review): elided with the same gaps as _getMaskedLicenses.
3123 def _getMaskedProperties(self, properties_struct, acceptable_properties):
3124 if not properties_struct:
3126 if properties_struct[0] == "||":
3128 for element in properties_struct[1:]:
3129 if isinstance(element, list):
3131 ret.append(self._getMaskedProperties(
3132 element, acceptable_properties))
3136 if element in acceptable_properties:
3139 # Return all masked properties, since we don't know which combination
3140 # (if any) the user will decide to unmask
3144 for element in properties_struct:
3145 if isinstance(element, list):
3147 ret.extend(self._getMaskedProperties(element,
3148 acceptable_properties))
3150 if element not in acceptable_properties:
# Check whether the package's CHOST matches ACCEPT_CHOSTS (falling back
# to this config's own CHOST). The compiled regex is cached in
# self._accept_chost_re; invalid patterns degrade to match-nothing ("^$")
# with a warning, and an empty accept list degrades to match-all (".*").
# NOTE(review): elided -- docstring delimiters, the "if chost:" guard,
# the try: lines before re.compile and the multi-entry else: are missing.
3154 def _accept_chost(self, cpv, metadata):
3156 @return True if pkg CHOST is accepted, False otherwise.
3158 if self._accept_chost_re is None:
3159 accept_chost = self.get("ACCEPT_CHOSTS", "").split()
3160 if not accept_chost:
3161 chost = self.get("CHOST")
3163 accept_chost.append(chost)
3164 if not accept_chost:
3165 self._accept_chost_re = re.compile(".*")
3166 elif len(accept_chost) == 1:
3168 self._accept_chost_re = re.compile(r'^%s$' % accept_chost[0])
3169 except re.error as e:
3170 writemsg(_("!!! Invalid ACCEPT_CHOSTS value: '%s': %s\n") % \
3171 (accept_chost[0], e), noiselevel=-1)
3172 self._accept_chost_re = re.compile("^$")
# Multiple entries: alternation of all accepted CHOST patterns.
3175 self._accept_chost_re = re.compile(
3176 r'^(%s)$' % "|".join(accept_chost))
3177 except re.error as e:
3178 writemsg(_("!!! Invalid ACCEPT_CHOSTS value: '%s': %s\n") % \
3179 (" ".join(accept_chost), e), noiselevel=-1)
3180 self._accept_chost_re = re.compile("^$")
3182 return self._accept_chost_re.match(
3183 metadata.get('CHOST', '')) is not None
# Update old-style virtual preferences from a just-installed package's
# PROVIDE, feeding self._depgraphVirtuals and recompiling self.virtuals.
# NOTE(review): elided listing -- the early return when self.virtuals is
# empty, the "if not provides: return" shortcut, the modified flag and
# the loop header over virts are not visible here.
3185 def setinst(self,mycpv,mydbapi):
3186 """This updates the preferences for old-style virtuals,
3187 affecting the behavior of dep_expand() and dep_check()
3188 calls. It can change dbapi.match() behavior since that
3189 calls dep_expand(). However, dbapi instances have
3190 internal match caches that are not invalidated when
3191 preferences are updated here. This can potentially
3192 lead to some inconsistency (relevant to bug #1343)."""
3194 if len(self.virtuals) == 0:
3196 # Grab the virtuals this package provides and add them into the tree virtuals.
3197 if not hasattr(mydbapi, "aux_get"):
# dict-like db (e.g. a plain metadata mapping) vs. a real dbapi.
3198 provides = mydbapi["PROVIDE"]
3200 provides = mydbapi.aux_get(mycpv, ["PROVIDE"])[0]
# Determine the USE flags to evaluate PROVIDE's USE-conditionals with.
3203 if isinstance(mydbapi, portdbapi):
3204 self.setcpv(mycpv, mydb=mydbapi)
3205 myuse = self["PORTAGE_USE"]
3206 elif not hasattr(mydbapi, "aux_get"):
3207 myuse = mydbapi["USE"]
3209 myuse = mydbapi.aux_get(mycpv, ["USE"])[0]
3210 virts = flatten(portage.dep.use_reduce(portage.dep.paren_reduce(provides), uselist=myuse.split()))
3213 cp = dep.Atom(cpv_getkey(mycpv))
3215 virt = dep_getkey(virt)
3216 providers = self.virtuals.get(virt)
# Skip virtuals this cp already provides in the compiled table.
3217 if providers and cp in providers:
3219 providers = self._depgraphVirtuals.get(virt)
3220 if providers is None:
3222 self._depgraphVirtuals[virt] = providers
3223 if cp not in providers:
3224 providers.append(cp)
3228 self.virtuals = self.__getvirtuals_compile()
# NOTE(review): the def line (original 3230, presumably "def reload(self):")
# is elided. Re-reads ROOT/etc/profile.env into the "env.d" configdict
# without variable expansion; the "if env_d:" guard (original 3235) is
# also elided, matching the comment that env_d may be None.
3231 """Reload things like /etc/profile.env that can change during runtime."""
3232 env_d_filename = os.path.join(self["ROOT"], "etc", "profile.env")
3233 self.configdict["env.d"].clear()
3234 env_d = getconfig(env_d_filename, expand=False)
3236 # env_d will be None if profile.env doesn't exist.
3237 self.configdict["env.d"].update(env_d)
3239 def regenerate(self,useonly=0,use_cache=1):
3242 This involves regenerating valid USE flags, re-expanding USE_EXPAND flags
3243 re-stacking USE flags (-flag and -*), as well as any other INCREMENTAL
3244 variables. This also updates the env.d configdict; useful in case an ebuild
3245 changes the environment.
3247 If FEATURES has already stacked, it is not stacked twice.
3249 @param useonly: Only regenerate USE flags (not any other incrementals)
3250 @type useonly: Boolean
3251 @param use_cache: Enable Caching (only for autouse)
3252 @type use_cache: Boolean
3257 if self.already_in_regenerate:
3258 # XXX: THIS REALLY NEEDS TO GET FIXED. autouse() loops.
3259 writemsg("!!! Looping in regenerate.\n",1)
3262 self.already_in_regenerate = 1
3265 myincrementals=["USE"]
3267 myincrementals = self.incrementals
3268 myincrementals = set(myincrementals)
3269 # If self.features exists, it has already been stacked and may have
3270 # been mutated, so don't stack it again or else any mutations will be
3272 if "FEATURES" in myincrementals and hasattr(self, "features"):
3273 myincrementals.remove("FEATURES")
3275 if "USE" in myincrementals:
3276 # Process USE last because it depends on USE_EXPAND which is also
3278 myincrementals.remove("USE")
3280 mydbs = self.configlist[:-1]
3281 mydbs.append(self.backupenv)
3283 # ACCEPT_LICENSE is a lazily evaluated incremental, so that * can be
3284 # used to match all licenses without every having to explicitly expand
3285 # it to all licenses.
3286 if self.local_config:
3289 mysplit.extend(curdb.get('ACCEPT_LICENSE', '').split())
3290 accept_license_str = ' '.join(mysplit)
3291 if accept_license_str:
3292 self.configlist[-1]['ACCEPT_LICENSE'] = accept_license_str
3293 if accept_license_str != self._accept_license_str:
3294 self._accept_license_str = accept_license_str
3295 self._accept_license = tuple(self.expandLicenseTokens(mysplit))
3297 # repoman will accept any license
3298 self._accept_license = ()
3300 # ACCEPT_PROPERTIES works like ACCEPT_LICENSE, without groups
3301 if self.local_config:
3304 mysplit.extend(curdb.get('ACCEPT_PROPERTIES', '').split())
3306 self.configlist[-1]['ACCEPT_PROPERTIES'] = ' '.join(mysplit)
3307 if tuple(mysplit) != self._accept_properties:
3308 self._accept_properties = tuple(mysplit)
3310 # repoman will accept any property
3311 self._accept_properties = ()
3313 for mykey in myincrementals:
3317 if mykey not in curdb:
3319 #variables are already expanded
3320 mysplit = curdb[mykey].split()
3324 # "-*" is a special "minus" var that means "unset all settings".
3325 # so USE="-* gnome" will have *just* gnome enabled.
3330 # Not legal. People assume too much. Complain.
3331 writemsg(colorize("BAD",
3332 _("USE flags should not start with a '+': %s") % x) \
3333 + "\n", noiselevel=-1)
3339 if (x[1:] in myflags):
3341 del myflags[myflags.index(x[1:])]
3344 # We got here, so add it now.
3345 if x not in myflags:
3349 #store setting in last element of configlist, the original environment:
3350 if myflags or mykey in self:
3351 self.configlist[-1][mykey] = " ".join(myflags)
3354 # Do the USE calculation last because it depends on USE_EXPAND.
3355 if "auto" in self["USE_ORDER"].split(":"):
3356 self.configdict["auto"]["USE"] = autouse(
3357 vartree(root=self["ROOT"], categories=self.categories,
3359 use_cache=use_cache, mysettings=self)
3361 self.configdict["auto"]["USE"] = ""
3363 use_expand = self.get("USE_EXPAND", "").split()
3364 use_expand_dict = self._use_expand_dict
3365 use_expand_dict.clear()
3366 for k in use_expand:
3369 use_expand_dict[k] = v
3372 for x in self["USE_ORDER"].split(":"):
3373 if x in self.configdict:
3374 self.uvlist.append(self.configdict[x])
3375 self.uvlist.reverse()
3377 # For optimal performance, use slice
3378 # comparison instead of startswith().
3380 for curdb in self.uvlist:
3381 cur_use_expand = [x for x in use_expand if x in curdb]
3382 mysplit = curdb.get("USE", "").split()
3383 if not mysplit and not cur_use_expand:
3391 writemsg(colorize("BAD", _("USE flags should not start "
3392 "with a '+': %s\n") % x), noiselevel=-1)
3398 myflags.discard(x[1:])
3403 for var in cur_use_expand:
3404 var_lower = var.lower()
3405 is_not_incremental = var not in myincrementals
3406 if is_not_incremental:
3407 prefix = var_lower + "_"
3408 prefix_len = len(prefix)
3409 for x in list(myflags):
3410 if x[:prefix_len] == prefix:
3412 for x in curdb[var].split():
3414 if is_not_incremental:
3415 writemsg(colorize("BAD", _("Invalid '+' "
3416 "operator in non-incremental variable "
3417 "'%s': '%s'\n") % (var, x)), noiselevel=-1)
3420 writemsg(colorize("BAD", _("Invalid '+' "
3421 "operator in incremental variable "
3422 "'%s': '%s'\n") % (var, x)), noiselevel=-1)
3425 if is_not_incremental:
3426 writemsg(colorize("BAD", _("Invalid '-' "
3427 "operator in non-incremental variable "
3428 "'%s': '%s'\n") % (var, x)), noiselevel=-1)
3430 myflags.discard(var_lower + "_" + x[1:])
3432 myflags.add(var_lower + "_" + x)
3434 if hasattr(self, "features"):
3435 self.features.clear()
3437 self.features = set()
3438 self.features.update(self.configlist[-1].get('FEATURES', '').split())
3439 self['FEATURES'] = ' '.join(sorted(self.features))
3441 myflags.update(self.useforce)
3442 arch = self.configdict["defaults"].get("ARCH")
3446 myflags.difference_update(self.usemask)
3447 self.configlist[-1]["USE"]= " ".join(sorted(myflags))
3449 self.already_in_regenerate = 0
def get_virts_p(self, myroot=None):
    # Build and cache (in self.virts_p) a virtuals mapping keyed by the
    # package-name part of each "cat/pkg" virtual key, instead of the
    # full key. myroot is accepted for backward compatibility.
    virts = self.getvirtuals()
    # NOTE(review): the loop header over the keys of virts is not visible
    # in this fragment; x is one "cat/pkg" style virtual key — confirm.
    vkeysplit = x.split("/")
    if vkeysplit[1] not in self.virts_p:
        # The first provider list seen for a package name wins.
        self.virts_p[vkeysplit[1]] = virts[x]
def getvirtuals(self, myroot=None):
    """myroot is now ignored because, due to caching, it has always been
    broken for all but the first call."""
    # Compute (once) and cache the stacked virtual -> provider-atom-list
    # mapping, combining the profiles' "virtuals" files with virtuals
    # provided by installed packages.
    myroot = self["ROOT"]
    # Cached result from a previous call (guard condition elided in this
    # fragment).
    return self.virtuals
    # Parse the "virtuals" file of every profile directory.
    for x in self.profiles:
        virtuals_file = os.path.join(x, "virtuals")
        virtuals_dict = grabdict(virtuals_file)
        for k, v in virtuals_dict.items():
            # Validate the virtual key as an atom; reject blockers and
            # keys more specific than a plain category/package.
            virt_atom = portage.dep.Atom(k)
            except portage.exception.InvalidAtom:
            if virt_atom.blocker or \
                str(virt_atom) != str(virt_atom.cp):
            if virt_atom is None:
                writemsg(_("--- Invalid virtuals atom in %s: %s\n") % \
                    (virtuals_file, k), noiselevel=-1)
            # allow incrementals
            # Validate each provider atom listed for this virtual.
            atom = portage.dep.Atom(atom)
            except portage.exception.InvalidAtom:
            writemsg(_("--- Invalid atom in %s: %s\n") % \
                (virtuals_file, myatom), noiselevel=-1)
            if atom_orig == str(atom):
                # normal atom, so return as Atom instance
                providers.append(atom)
            # atom has special prefix, so return as string
            providers.append(atom_orig)
            atoms_dict[virt_atom] = providers
        virtuals_list.append(atoms_dict)
    # Stack all per-profile dicts into a single mapping.
    self.dirVirtuals = stack_dictlist(virtuals_list, incremental=True)
    for virt in self.dirVirtuals:
        # Preference for virtuals decreases from left to right.
        self.dirVirtuals[virt].reverse()
    # Repoman does not use user or tree virtuals.
    if self.local_config and not self.treeVirtuals:
        # Gather the virtuals provided by installed packages.
        temp_vartree = vartree(myroot, None,
            categories=self.categories, settings=self)
        self._populate_treeVirtuals(temp_vartree)
    self.virtuals = self.__getvirtuals_compile()
    return self.virtuals
def _populate_treeVirtuals(self, vartree):
    """Reduce the provides into a list by CP."""
    # For each PROVIDE entry from the installed-package tree, record the
    # providing packages (as category/package atoms) keyed by the
    # virtual's cp in self.treeVirtuals.
    for provide, cpv_list in vartree.get_all_provides().items():
        # (try wrapper elided in this fragment)
        provide = dep.Atom(provide)
        except exception.InvalidAtom:
        # Invalid PROVIDE values are skipped.
        self.treeVirtuals[provide.cp] = \
            [dep.Atom(cpv_getkey(cpv)) for cpv in cpv_list]
def __getvirtuals_compile(self):
    """Stack installed and profile virtuals. Preference for virtuals
    decreases from left to right.
    Order of preference:
    1. installed and in profile
    """
    # Virtuals by profile+tree preferences.
    for virt, installed_list in self.treeVirtuals.items():
        profile_list = self.dirVirtuals.get(virt, None)
        if not profile_list:
        # Keep only installed providers that the profile also lists.
        for cp in installed_list:
            if cp in profile_list:
                ptVirtuals.setdefault(virt, [])
                ptVirtuals[virt].append(cp)
    # Stack the layers in decreasing order of preference.
    virtuals = stack_dictlist([ptVirtuals, self.treeVirtuals,
        self.dirVirtuals, self._depgraphVirtuals])
def __delitem__(self,mykey):
    # Delete mykey from every configuration layer that contains it.
    for x in self.lookuplist:
def __getitem__(self,mykey):
    # Search the layered configuration dicts in priority order and
    # return the first value found.
    for d in self.lookuplist:
    # Key not present in any layer.
    return '' # for backward compat, don't raise KeyError
def get(self, k, x=None):
    # dict.get() semantics over the layered configuration: return the
    # value of k from the first layer defining it, else the default x.
    for d in self.lookuplist:
def pop(self, key, *args):
    # dict.pop() semantics across the layered configuration; at most one
    # default argument is accepted.
    # (TypeError raise statement elided in this fragment)
    "pop expected at most 2 arguments, got " + \
        repr(1 + len(args)))
    # Remove the key from every layer, reversed so the highest-priority
    # value is the one ultimately returned.
    for d in reversed(self.lookuplist):
def has_key(self,mykey):
    # Deprecated dict-style membership test, kept for backward
    # compatibility; delegates to __contains__.
    warnings.warn("portage.config.has_key() is deprecated, "
        "use the in operator instead",
    return mykey in self
def __contains__(self, mykey):
    """Called to implement membership test operators (in and not in)."""
    # True when any configuration layer defines mykey.
    for d in self.lookuplist:
def setdefault(self, k, x=None):
    # dict.setdefault() semantics for the layered configuration
    # (body elided in this fragment).
3625 for d in self.lookuplist:
def iteritems(self):
    # Iterate over (key, value) pairs of the effective configuration
    # (body elided in this fragment).
3637 return list(self.iteritems())
def __setitem__(self,mykey,myvalue):
    "set a value; will be thrown away at reset() time"
    # Only string values may be stored in the configuration.
    if not isinstance(myvalue, basestring):
        raise ValueError("Invalid type being used as a value: '%s': '%s'" % (str(mykey),str(myvalue)))

    # Avoid potential UnicodeDecodeError exceptions later.
    mykey = _unicode_decode(mykey)
    myvalue = _unicode_decode(myvalue)

    # Record the modification and store the value in the "env" layer,
    # which overrides the other layers until reset().
    self.modifiedkeys.append(mykey)
    self.configdict["env"][mykey]=myvalue
3653 "return our locally-maintained environment"
3655 environ_filter = self._environ_filter
3657 phase = self.get('EBUILD_PHASE')
3658 filter_calling_env = False
3659 if phase not in ('clean', 'cleanrm', 'depend'):
3660 temp_dir = self.get('T')
3661 if temp_dir is not None and \
3662 os.path.exists(os.path.join(temp_dir, 'environment')):
3663 filter_calling_env = True
3665 environ_whitelist = self._environ_whitelist
3666 env_d = self.configdict["env.d"]
3668 if x in environ_filter:
3671 if not isinstance(myvalue, basestring):
3672 writemsg(_("!!! Non-string value in config: %s=%s\n") % \
3673 (x, myvalue), noiselevel=-1)
3675 if filter_calling_env and \
3676 x not in environ_whitelist and \
3677 not self._environ_whitelist_re.match(x):
3678 # Do not allow anything to leak into the ebuild
3679 # environment unless it is explicitly whitelisted.
3680 # This ensures that variables unset by the ebuild
3684 if "HOME" not in mydict and "BUILD_PREFIX" in mydict:
3685 writemsg("*** HOME not set. Setting to "+mydict["BUILD_PREFIX"]+"\n")
3686 mydict["HOME"]=mydict["BUILD_PREFIX"][:]
3688 if filter_calling_env:
3692 whitelist.append("RPMDIR")
3698 # Filtered by IUSE and implicit IUSE.
3699 mydict["USE"] = self.get("PORTAGE_USE", "")
3701 # sandbox's bashrc sources /etc/profile which unsets ROOTPATH,
3702 # so we have to back it up and restore it.
3703 rootpath = mydict.get("ROOTPATH")
3705 mydict["PORTAGE_ROOTPATH"] = rootpath
def thirdpartymirrors(self):
    """Return the stacked thirdpartymirrors mapping (mirror name ->
    list of mirror URLs), computing and caching it on first use."""
    if getattr(self, "_thirdpartymirrors", None) is None:
        # Overlay profiles are consulted first — later overlays take
        # precedence — with $PORTDIR/profiles consulted last.
        overlay_profiles = [os.path.join(ov, "profiles")
            for ov in self["PORTDIR_OVERLAY"].split()]
        overlay_profiles.reverse()
        profileroots = overlay_profiles + \
            [os.path.join(self["PORTDIR"], "profiles")]
        thirdparty_lists = []
        for root in profileroots:
            thirdparty_lists.append(
                grabdict(os.path.join(root, "thirdpartymirrors")))
        self._thirdpartymirrors = stack_dictlist(
            thirdparty_lists, incremental=True)
    return self._thirdpartymirrors
3719 return flatten([[myarch, "~" + myarch] \
3720 for myarch in self["PORTAGE_ARCHLIST"].split()])
def selinux_enabled(self):
    # Lazily determine — and cache in self._selinux_enabled — whether
    # SELinux support is active: the "selinux" USE flag must be set and
    # the selinux module must report SELinux as enabled.
    if getattr(self, "_selinux_enabled", None) is None:
        self._selinux_enabled = 0
        if "selinux" in self["USE"].split():
            # (try/except around the selinux module use elided in this
            # fragment)
            if selinux.is_selinux_enabled() == 1:
                self._selinux_enabled = 1
            self._selinux_enabled = 0
            writemsg(_("!!! SELinux module not found. Please verify that it was installed.\n"),
            self._selinux_enabled = 0

    return self._selinux_enabled
if sys.hexversion >= 0x3000000:
    # NOTE(review): body elided in this fragment — presumably Python 3
    # compatibility aliases; confirm against the full file.

# In some cases, openpty can be slow when it fails. Therefore,
# stop trying to use it after the first failure.
_disable_openpty = False
if sys.hexversion >= 0x3000000:
    # This is a temporary workaround for http://bugs.python.org/issue5380.
    _disable_openpty = True
def _create_pty_or_pipe(copy_term_size=None):
    """
    Try to create a pty and if that fails then create a normal
    pipe instead.

    @param copy_term_size: If a tty file descriptor is given
        then the term size will be copied to the pty.
    @type copy_term_size: int
    @returns: A tuple of (is_pty, master_fd, slave_fd) where
        is_pty is True if a pty was successfully allocated, and
        False if a normal pipe was allocated.
    """
    global _disable_openpty
    if _disable_openpty:
        # openpty already failed once in this process; don't retry.
        master_fd, slave_fd = os.pipe()
    from pty import openpty
    master_fd, slave_fd = openpty()
    except EnvironmentError as e:
        # Remember the failure and fall back to a plain pipe.
        _disable_openpty = True
        writemsg("openpty failed: '%s'\n" % str(e),
        master_fd, slave_fd = os.pipe()

    # Disable post-processing of output since otherwise weird
    # things like \n -> \r\n transformations may occur.
    mode = termios.tcgetattr(slave_fd)
    mode[1] &= ~termios.OPOST
    termios.tcsetattr(slave_fd, termios.TCSANOW, mode)

    # Propagate the terminal size to the new pty when a tty fd was given.
    copy_term_size is not None and \
        os.isatty(copy_term_size):
    from portage.output import get_term_size, set_term_size
    rows, columns = get_term_size()
    set_term_size(rows, columns, slave_fd)

    return (got_pty, master_fd, slave_fd)
# XXX This would be to replace getstatusoutput completely.
# XXX Issue: cannot block execution. Deadlock condition.
def spawn(mystring, mysettings, debug=0, free=0, droppriv=0, sesandbox=0, fakeroot=0, **keywords):
    """
    Spawn a subprocess with extra portage-specific options.

    Sandbox: Sandbox means the spawned process will be limited in its ability to
    read and write files (normally this means it is restricted to ${IMAGE}/)
    SElinux Sandbox: Enables sandboxing on SElinux
    Reduced Privileges: Drops privileges such that the process runs as portage:portage

    Notes: os.system cannot be used because it messes with signal handling. Instead we
    use the portage.process spawn* family of functions.

    This function waits for the process to terminate.

    @param mystring: Command to run
    @type mystring: String
    @param mysettings: Either a Dict of Key,Value pairs or an instance of portage.config
    @type mysettings: Dictionary or config instance
    @param debug: Ignored
    @type debug: Boolean
    @param free: Run this process without a sandbox (a true value here
        disables sandboxing)
    @param droppriv: Drop to portage:portage when running this command
    @type droppriv: Boolean
    @param sesandbox: Enable SELinux Sandboxing (toggles a context switch)
    @type sesandbox: Boolean
    @param fakeroot: Run this command with faked root privileges
    @type fakeroot: Boolean
    @param keywords: Extra options encoded as a dict, to be passed to spawn
    @type keywords: Dictionary
    @returns:
    1. The return code of the spawned process.
    """

    # Plain dicts get a generic opt_name; config instances are validated
    # and supply the filtered ebuild environment.
    if isinstance(mysettings, dict):
        keywords["opt_name"]="[ %s ]" % "portage"
    check_config_instance(mysettings)
    env=mysettings.environ()
    if mysettings.mycpv is not None:
        keywords["opt_name"] = "[%s]" % mysettings.mycpv
    keywords["opt_name"] = "[%s/%s]" % \
        (mysettings.get("CATEGORY",""), mysettings.get("PF",""))

    fd_pipes = keywords.get("fd_pipes")
    if fd_pipes is None:
        # Default: inherit the standard streams.
        0:sys.stdin.fileno(),
        1:sys.stdout.fileno(),
        2:sys.stderr.fileno(),
    # In some cases the above print statements don't flush stdout, so
    # it needs to be flushed before allowing a child process to use it
    # so that output always shows in the correct order.
    stdout_filenos = (sys.stdout.fileno(), sys.stderr.fileno())
    for fd in fd_pipes.values():
        if fd in stdout_filenos:

    # The default policy for the sesandbox domain only allows entry (via exec)
    # from shells and from binaries that belong to portage (the number of entry
    # points is minimized). The "tee" binary is not among the allowed entry
    # points, so it is spawned outside of the sesandbox domain and reads from a
    # pseudo-terminal that connects two domains.
    logfile = keywords.get("logfile")
    fd_pipes_orig = None
    del keywords["logfile"]
    if 1 not in fd_pipes or 2 not in fd_pipes:
        raise ValueError(fd_pipes)

    fd_pipes.setdefault(0, sys.stdin.fileno())
    fd_pipes_orig = fd_pipes.copy()

    # Create a pty (or pipe fallback) so the child's output can be teed
    # to the console and to the log file.
    got_pty, master_fd, slave_fd = \
        _create_pty_or_pipe(copy_term_size=fd_pipes_orig[1])

    # We must set non-blocking mode before we close the slave_fd
    # since otherwise the fcntl call can fail on FreeBSD (the child
    # process might have already exited and closed slave_fd so we
    # have to keep it open in order to avoid FreeBSD potentially
    # generating an EAGAIN exception).
    fcntl.fcntl(master_fd, fcntl.F_SETFL,
        fcntl.fcntl(master_fd, fcntl.F_GETFL) | os.O_NONBLOCK)

    # Route the child's stdout and stderr through the slave end.
    fd_pipes[0] = fd_pipes_orig[0]
    fd_pipes[1] = slave_fd
    fd_pipes[2] = slave_fd
    keywords["fd_pipes"] = fd_pipes

    features = mysettings.features
    # TODO: Enable fakeroot to be used together with droppriv. The
    # fake ownership/permissions will have to be converted to real
    # permissions in the merge phase.
    fakeroot = fakeroot and uid != 0 and portage.process.fakeroot_capable
    if droppriv and not uid and portage_gid and portage_uid:
        keywords.update({"uid":portage_uid,"gid":portage_gid,
            "groups":userpriv_groups,"umask":0o02})
    # A true "free" means no sandbox wrapper is used at all.
    free=((droppriv and "usersandbox" not in features) or \
        (not droppriv and "sandbox" not in features and \
        "usersandbox" not in features and not fakeroot))

    # Select the spawn wrapper: plain bash, fakeroot, or sandbox.
    if free or "SANDBOX_ACTIVE" in os.environ:
        keywords["opt_name"] += " bash"
        spawn_func = portage.process.spawn_bash
    keywords["opt_name"] += " fakeroot"
    keywords["fakeroot_state"] = os.path.join(mysettings["T"], "fakeroot.state")
    spawn_func = portage.process.spawn_fakeroot
    keywords["opt_name"] += " sandbox"
    spawn_func = portage.process.spawn_sandbox

    # Wrap with an SELinux context switch when sesandbox is in effect.
    spawn_func = selinux.spawn_wrapper(spawn_func,
        mysettings["PORTAGE_SANDBOX_T"])

    returnpid = keywords.get("returnpid")
    keywords["returnpid"] = True
    mypids.extend(spawn_func(mystring, env=env, **keywords))

    # Tee the child's output to both the console and the log file.
    log_file = open(_unicode_encode(logfile), mode='ab')
    stdout_file = os.fdopen(os.dup(fd_pipes_orig[1]), 'wb')
    master_file = os.fdopen(master_fd, 'rb')
    iwtd = [master_file]
    import array, select
    events = select.select(iwtd, owtd, ewtd)
    # Use non-blocking mode to prevent read
    # calls from blocking indefinitely.
    buf = array.array('B')
    buf.fromfile(f, buffsize)
    if f is master_file:
        buf.tofile(stdout_file)
    buf.tofile(log_file)

    # Reap the child and convert its wait status to a shell-style code.
    retval = os.waitpid(pid, 0)[1]
    portage.process.spawned_pids.remove(pid)
    if retval != os.EX_OK:
        return (retval & 0xff) << 8
# Keyword arguments applied to drop privileges to the portage user when
# spawning fetch commands (consumed by _spawn_fetch via kwargs.update()).
_userpriv_spawn_kwargs = (
    ("uid", portage_uid),
    ("gid", portage_gid),
    ("groups", userpriv_groups),
def _spawn_fetch(settings, args, **kwargs):
    """
    Spawn a process with appropriate settings for fetching, including
    userfetch and selinux support.
    """
    global _userpriv_spawn_kwargs

    # Redirect all output to stdout since some fetchers like
    # wget pollute stderr (if portage detects a problem then it
    # can send it's own message to stderr).
    if "fd_pipes" not in kwargs:
        kwargs["fd_pipes"] = {
            0 : sys.stdin.fileno(),
            1 : sys.stdout.fileno(),
            2 : sys.stdout.fileno(),

    # Drop privileges to portage:portage when userfetch is enabled and
    # we are running as root.
    if "userfetch" in settings.features and \
        os.getuid() == 0 and portage_gid and portage_uid:
        kwargs.update(_userpriv_spawn_kwargs)

    spawn_func = portage.process.spawn

    if settings.selinux_enabled():
        spawn_func = selinux.spawn_wrapper(spawn_func,
            settings["PORTAGE_FETCH_T"])

    # bash is an allowed entrypoint, while most binaries are not
    if args[0] != BASH_BINARY:
        args = [BASH_BINARY, "-c", "exec \"$@\"", args[0]] + args

    rval = spawn_func(args, env=dict(iter(settings.items())), **kwargs)
4025 _userpriv_test_write_file_cache = {}
4026 _userpriv_test_write_cmd_script = "touch %(file_path)s 2>/dev/null ; rval=$? ; " + \
4027 "rm -f %(file_path)s ; exit $rval"
def _userpriv_test_write_file(settings, file_path):
    """
    Drop privileges and try to open a file for writing. The file may or
    may not exist, and the parent directory is assumed to exist. The file
    is removed before returning.

    @param settings: A config instance which is passed to _spawn_fetch()
    @param file_path: A file path to open and write.
    @return: True if write succeeds, False otherwise.
    """

    global _userpriv_test_write_file_cache, _userpriv_test_write_cmd_script
    # Return a previously cached result for this path when available.
    rval = _userpriv_test_write_file_cache.get(file_path)
    if rval is not None:

    # Run the touch/rm probe through bash with dropped privileges.
    args = [BASH_BINARY, "-c", _userpriv_test_write_cmd_script % \
        {"file_path" : _shell_quote(file_path)}]

    returncode = _spawn_fetch(settings, args)

    # Cache whether the probe exited successfully.
    rval = returncode == os.EX_OK
    _userpriv_test_write_file_cache[file_path] = rval
def _checksum_failure_temp_file(distdir, basename):
    """
    First try to find a duplicate temp file with the same checksum and return
    that filename if available. Otherwise, use mkstemp to create a new unique
    filename._checksum_failure_.$RANDOM, rename the given file, and return the
    new filename. In any case, filename will be renamed or removed before this
    function returns a temp filename.
    """

    filename = os.path.join(distdir, basename)
    size = os.stat(filename).st_size

    # Look for an existing temp copy with the same size and MD5 digest.
    tempfile_re = re.compile(re.escape(basename) + r'\._checksum_failure_\..*')
    for temp_filename in os.listdir(distdir):
        if not tempfile_re.match(temp_filename):
        temp_filename = os.path.join(distdir, temp_filename)
        if size != os.stat(temp_filename).st_size:
        temp_checksum = portage.checksum.perform_md5(temp_filename)
        except portage.exception.FileNotFound:
        # Apparently the temp file disappeared. Let it go.
        if checksum is None:
            # Compute the source file's MD5 lazily, only once.
            checksum = portage.checksum.perform_md5(filename)
        if checksum == temp_checksum:
            return temp_filename

    # No duplicate found: move the file aside under a fresh temp name.
    from tempfile import mkstemp
    fd, temp_filename = mkstemp("", basename + "._checksum_failure_.", distdir)
    os.rename(filename, temp_filename)
    return temp_filename
def _check_digests(filename, digests, show_errors=1):
    """
    Check digests and display a message if an error occurs.
    @return True if all digests match, False otherwise.
    """
    verified_ok, reason = portage.checksum.verify_all(filename, digests)
    # On mismatch (and when show_errors is set), report the file, the
    # failing digest, and the got/expected values.
    writemsg(_("!!! Previously fetched"
        " file: '%s'\n") % filename, noiselevel=-1)
    writemsg(_("!!! Reason: %s\n") % reason[0],
    writemsg(_("!!! Got: %s\n"
        "!!! Expected: %s\n") % \
        (reason[1], reason[2]), noiselevel=-1)
def _check_distfile(filename, digests, eout, show_errors=1):
    """
    @return a tuple of (match, stat_obj) where match is True if filename
    matches all given digests (if any) and stat_obj is a stat result, or
    None if the file does not exist.
    """
    size = digests.get("size")
    # When "size" is the only digest available, a size comparison alone
    # decides the match.
    if size is not None and len(digests) == 1:
    st = os.stat(filename)
    # Missing file: no match, no stat result.
    return (False, None)
    if size is not None and size != st.st_size:
    if size is not None:
        eout.ebegin(_("%s size ;-)") % os.path.basename(filename))
    elif st.st_size == 0:
        # Zero-byte distfiles are always invalid.
    # Full digest verification path.
    if _check_digests(filename, digests, show_errors=show_errors):
        eout.ebegin("%s %s ;-)" % (os.path.basename(filename),
            " ".join(sorted(digests))))
4145 _fetch_resume_size_re = re.compile('(^[\d]+)([KMGTPEZY]?$)')
# Maps a size-suffix letter to the exponent used to scale
# PORTAGE_FETCH_RESUME_MIN_SIZE (the value is applied as 2 ** exponent).
_size_suffix_map = {
4159 def fetch(myuris, mysettings, listonly=0, fetchonly=0, locks_in_subdir=".locks",use_locks=1, try_mirrors=1):
4160 "fetch files. Will use digest file if available."
4165 features = mysettings.features
4166 restrict = mysettings.get("PORTAGE_RESTRICT","").split()
4168 from portage.data import secpass
4169 userfetch = secpass >= 2 and "userfetch" in features
4170 userpriv = secpass >= 2 and "userpriv" in features
4172 # 'nomirror' is bad/negative logic. You Restrict mirroring, not no-mirroring.
4173 if "mirror" in restrict or \
4174 "nomirror" in restrict:
4175 if ("mirror" in features) and ("lmirror" not in features):
4176 # lmirror should allow you to bypass mirror restrictions.
4177 # XXX: This is not a good thing, and is temporary at best.
4178 print(_(">>> \"mirror\" mode desired and \"mirror\" restriction found; skipping fetch."))
4181 # Generally, downloading the same file repeatedly from
4182 # every single available mirror is a waste of bandwidth
4183 # and time, so there needs to be a cap.
4184 checksum_failure_max_tries = 5
4185 v = checksum_failure_max_tries
4187 v = int(mysettings.get("PORTAGE_FETCH_CHECKSUM_TRY_MIRRORS",
4188 checksum_failure_max_tries))
4189 except (ValueError, OverflowError):
4190 writemsg(_("!!! Variable PORTAGE_FETCH_CHECKSUM_TRY_MIRRORS"
4191 " contains non-integer value: '%s'\n") % \
4192 mysettings["PORTAGE_FETCH_CHECKSUM_TRY_MIRRORS"], noiselevel=-1)
4193 writemsg(_("!!! Using PORTAGE_FETCH_CHECKSUM_TRY_MIRRORS "
4194 "default value: %s\n") % checksum_failure_max_tries,
4196 v = checksum_failure_max_tries
4198 writemsg(_("!!! Variable PORTAGE_FETCH_CHECKSUM_TRY_MIRRORS"
4199 " contains value less than 1: '%s'\n") % v, noiselevel=-1)
4200 writemsg(_("!!! Using PORTAGE_FETCH_CHECKSUM_TRY_MIRRORS "
4201 "default value: %s\n") % checksum_failure_max_tries,
4203 v = checksum_failure_max_tries
4204 checksum_failure_max_tries = v
4207 fetch_resume_size_default = "350K"
4208 fetch_resume_size = mysettings.get("PORTAGE_FETCH_RESUME_MIN_SIZE")
4209 if fetch_resume_size is not None:
4210 fetch_resume_size = "".join(fetch_resume_size.split())
4211 if not fetch_resume_size:
4212 # If it's undefined or empty, silently use the default.
4213 fetch_resume_size = fetch_resume_size_default
4214 match = _fetch_resume_size_re.match(fetch_resume_size)
4215 if match is None or \
4216 (match.group(2).upper() not in _size_suffix_map):
4217 writemsg(_("!!! Variable PORTAGE_FETCH_RESUME_MIN_SIZE"
4218 " contains an unrecognized format: '%s'\n") % \
4219 mysettings["PORTAGE_FETCH_RESUME_MIN_SIZE"], noiselevel=-1)
4220 writemsg(_("!!! Using PORTAGE_FETCH_RESUME_MIN_SIZE "
4221 "default value: %s\n") % fetch_resume_size_default,
4223 fetch_resume_size = None
4224 if fetch_resume_size is None:
4225 fetch_resume_size = fetch_resume_size_default
4226 match = _fetch_resume_size_re.match(fetch_resume_size)
4227 fetch_resume_size = int(match.group(1)) * \
4228 2 ** _size_suffix_map[match.group(2).upper()]
4230 # Behave like the package has RESTRICT="primaryuri" after a
4231 # couple of checksum failures, to increase the probablility
4232 # of success before checksum_failure_max_tries is reached.
4233 checksum_failure_primaryuri = 2
4234 thirdpartymirrors = mysettings.thirdpartymirrors()
4236 # In the background parallel-fetch process, it's safe to skip checksum
4237 # verification of pre-existing files in $DISTDIR that have the correct
4238 # file size. The parent process will verify their checksums prior to
4241 parallel_fetchonly = "PORTAGE_PARALLEL_FETCHONLY" in mysettings
4242 if parallel_fetchonly:
4245 check_config_instance(mysettings)
4247 custommirrors = grabdict(os.path.join(mysettings["PORTAGE_CONFIGROOT"],
4248 CUSTOM_MIRRORS_FILE), recursive=1)
4252 if listonly or ("distlocks" not in features):
4256 if "skiprocheck" in features:
4259 if not os.access(mysettings["DISTDIR"],os.W_OK) and fetch_to_ro:
4261 writemsg(colorize("BAD",
4262 _("!!! For fetching to a read-only filesystem, "
4263 "locking should be turned off.\n")), noiselevel=-1)
4264 writemsg(_("!!! This can be done by adding -distlocks to "
4265 "FEATURES in /etc/make.conf\n"), noiselevel=-1)
4268 # local mirrors are always added
4269 if "local" in custommirrors:
4270 mymirrors += custommirrors["local"]
4272 if "nomirror" in restrict or \
4273 "mirror" in restrict:
4274 # We don't add any mirrors.
4278 mymirrors += [x.rstrip("/") for x in mysettings["GENTOO_MIRRORS"].split() if x]
4280 skip_manifest = mysettings.get("EBUILD_SKIP_MANIFEST") == "1"
4281 pkgdir = mysettings.get("O")
4282 if not (pkgdir is None or skip_manifest):
4283 mydigests = Manifest(
4284 pkgdir, mysettings["DISTDIR"]).getTypeDigests("DIST")
4286 # no digests because fetch was not called for a specific package
4289 ro_distdirs = [x for x in \
4290 util.shlex_split(mysettings.get("PORTAGE_RO_DISTDIRS", "")) \
4291 if os.path.isdir(x)]
4294 for x in range(len(mymirrors)-1,-1,-1):
4295 if mymirrors[x] and mymirrors[x][0]=='/':
4296 fsmirrors += [mymirrors[x]]
4299 restrict_fetch = "fetch" in restrict
4300 custom_local_mirrors = custommirrors.get("local", [])
4302 # With fetch restriction, a normal uri may only be fetched from
4303 # custom local mirrors (if available). A mirror:// uri may also
4304 # be fetched from specific mirrors (effectively overriding fetch
4305 # restriction, but only for specific mirrors).
4306 locations = custom_local_mirrors
4308 locations = mymirrors
4310 file_uri_tuples = []
4311 if isinstance(myuris, dict):
4312 for myfile, uri_set in myuris.items():
4313 for myuri in uri_set:
4314 file_uri_tuples.append((myfile, myuri))
4316 for myuri in myuris:
4317 file_uri_tuples.append((os.path.basename(myuri), myuri))
4320 primaryuri_indexes={}
4321 primaryuri_dict = {}
4322 thirdpartymirror_uris = {}
4323 for myfile, myuri in file_uri_tuples:
4324 if myfile not in filedict:
4326 for y in range(0,len(locations)):
4327 filedict[myfile].append(locations[y]+"/distfiles/"+myfile)
4328 if myuri[:9]=="mirror://":
4329 eidx = myuri.find("/", 9)
4331 mirrorname = myuri[9:eidx]
4332 path = myuri[eidx+1:]
4334 # Try user-defined mirrors first
4335 if mirrorname in custommirrors:
4336 for cmirr in custommirrors[mirrorname]:
4337 filedict[myfile].append(
4338 cmirr.rstrip("/") + "/" + path)
4340 # now try the official mirrors
4341 if mirrorname in thirdpartymirrors:
4342 shuffle(thirdpartymirrors[mirrorname])
4344 uris = [locmirr.rstrip("/") + "/" + path \
4345 for locmirr in thirdpartymirrors[mirrorname]]
4346 filedict[myfile].extend(uris)
4347 thirdpartymirror_uris.setdefault(myfile, []).extend(uris)
4349 if not filedict[myfile]:
4350 writemsg(_("No known mirror by the name: %s\n") % (mirrorname))
4352 writemsg(_("Invalid mirror definition in SRC_URI:\n"), noiselevel=-1)
4353 writemsg(" %s\n" % (myuri), noiselevel=-1)
4356 # Only fetch from specific mirrors is allowed.
4358 if "primaryuri" in restrict:
4359 # Use the source site first.
4360 if myfile in primaryuri_indexes:
4361 primaryuri_indexes[myfile] += 1
4363 primaryuri_indexes[myfile] = 0
4364 filedict[myfile].insert(primaryuri_indexes[myfile], myuri)
4366 filedict[myfile].append(myuri)
4367 primaryuris = primaryuri_dict.get(myfile)
4368 if primaryuris is None:
4370 primaryuri_dict[myfile] = primaryuris
4371 primaryuris.append(myuri)
4373 # Prefer thirdpartymirrors over normal mirrors in cases when
4374 # the file does not yet exist on the normal mirrors.
4375 for myfile, uris in thirdpartymirror_uris.items():
4376 primaryuri_dict.setdefault(myfile, []).extend(uris)
4383 if can_fetch and not fetch_to_ro:
4384 global _userpriv_test_write_file_cache
4388 dir_gid = portage_gid
4389 if "FAKED_MODE" in mysettings:
4390 # When inside fakeroot, directories with portage's gid appear
4391 # to have root's gid. Therefore, use root's gid instead of
4392 # portage's gid to avoid spurrious permissions adjustments
4393 # when inside fakeroot.
4396 if "distlocks" in features:
4397 distdir_dirs.append(".locks")
4400 for x in distdir_dirs:
4401 mydir = os.path.join(mysettings["DISTDIR"], x)
4402 write_test_file = os.path.join(
4403 mydir, ".__portage_test_write__")
4410 if st is not None and stat.S_ISDIR(st.st_mode):
4411 if not (userfetch or userpriv):
4413 if _userpriv_test_write_file(mysettings, write_test_file):
4416 _userpriv_test_write_file_cache.pop(write_test_file, None)
4417 if portage.util.ensure_dirs(mydir, gid=dir_gid, mode=dirmode, mask=modemask):
4419 # The directory has just been created
4420 # and therefore it must be empty.
4422 writemsg(_("Adjusting permissions recursively: '%s'\n") % mydir,
4425 raise # bail out on the first error that occurs during recursion
4426 if not apply_recursive_permissions(mydir,
4427 gid=dir_gid, dirmode=dirmode, dirmask=modemask,
4428 filemode=filemode, filemask=modemask, onerror=onerror):
4429 raise portage.exception.OperationNotPermitted(
4430 _("Failed to apply recursive permissions for the portage group."))
4431 except portage.exception.PortageException as e:
4432 if not os.path.isdir(mysettings["DISTDIR"]):
4433 writemsg("!!! %s\n" % str(e), noiselevel=-1)
4434 writemsg(_("!!! Directory Not Found: DISTDIR='%s'\n") % mysettings["DISTDIR"], noiselevel=-1)
4435 writemsg(_("!!! Fetching will fail!\n"), noiselevel=-1)
4438 not fetch_to_ro and \
4439 not os.access(mysettings["DISTDIR"], os.W_OK):
4440 writemsg(_("!!! No write access to '%s'\n") % mysettings["DISTDIR"],
4444 if can_fetch and use_locks and locks_in_subdir:
4445 distlocks_subdir = os.path.join(mysettings["DISTDIR"], locks_in_subdir)
4446 if not os.access(distlocks_subdir, os.W_OK):
4447 writemsg(_("!!! No write access to write to %s. Aborting.\n") % distlocks_subdir,
4450 del distlocks_subdir
4452 distdir_writable = can_fetch and not fetch_to_ro
4453 failed_files = set()
4454 restrict_fetch_msg = False
4456 for myfile in filedict:
4460 1 partially downloaded
4461 2 completely downloaded
4465 orig_digests = mydigests.get(myfile, {})
4466 size = orig_digests.get("size")
4468 # Zero-byte distfiles are always invalid, so discard their digests.
4469 del mydigests[myfile]
4470 orig_digests.clear()
4472 pruned_digests = orig_digests
4473 if parallel_fetchonly:
4475 if size is not None:
4476 pruned_digests["size"] = size
4478 myfile_path = os.path.join(mysettings["DISTDIR"], myfile)
4480 has_space_superuser = True
4483 writemsg_stdout("\n", noiselevel=-1)
4485 # check if there is enough space in DISTDIR to completely store myfile
4486 # overestimate the filesize so we aren't bitten by FS overhead
4487 if size is not None and hasattr(os, "statvfs"):
4488 vfs_stat = os.statvfs(mysettings["DISTDIR"])
4490 mysize = os.stat(myfile_path).st_size
4491 except OSError as e:
4492 if e.errno not in (errno.ENOENT, errno.ESTALE):
4496 if (size - mysize + vfs_stat.f_bsize) >= \
4497 (vfs_stat.f_bsize * vfs_stat.f_bavail):
4499 if (size - mysize + vfs_stat.f_bsize) >= \
4500 (vfs_stat.f_bsize * vfs_stat.f_bfree):
4501 has_space_superuser = False
4503 if not has_space_superuser:
4511 writemsg(_("!!! Insufficient space to store %s in %s\n") % \
4512 (myfile, mysettings["DISTDIR"]), noiselevel=-1)
4514 if has_space_superuser:
4515 writemsg(_("!!! Insufficient privileges to use "
4516 "remaining space.\n"), noiselevel=-1)
4518 writemsg(_("!!! You may set FEATURES=\"-userfetch\""
4519 " in /etc/make.conf in order to fetch with\n"
4520 "!!! superuser privileges.\n"), noiselevel=-1)
4522 if distdir_writable and use_locks:
4525 lock_file = os.path.join(mysettings["DISTDIR"],
4526 locks_in_subdir, myfile)
4528 lock_file = myfile_path
4532 lock_kwargs["flags"] = os.O_NONBLOCK
4535 file_lock = portage.locks.lockfile(myfile_path,
4536 wantnewlockfile=1, **lock_kwargs)
4537 except portage.exception.TryAgain:
4538 writemsg(_(">>> File '%s' is already locked by "
4539 "another fetcher. Continuing...\n") % myfile,
4545 eout = portage.output.EOutput()
4546 eout.quiet = mysettings.get("PORTAGE_QUIET") == "1"
4547 match, mystat = _check_distfile(
4548 myfile_path, pruned_digests, eout)
4550 if distdir_writable:
4552 apply_secpass_permissions(myfile_path,
4553 gid=portage_gid, mode=0o664, mask=0o2,
4555 except portage.exception.PortageException as e:
4556 if not os.access(myfile_path, os.R_OK):
4557 writemsg(_("!!! Failed to adjust permissions:"
4558 " %s\n") % str(e), noiselevel=-1)
4562 if distdir_writable and mystat is None:
4563 # Remove broken symlinks if necessary.
4565 os.unlink(myfile_path)
4569 if mystat is not None:
4570 if stat.S_ISDIR(mystat.st_mode):
4571 portage.util.writemsg_level(
4572 _("!!! Unable to fetch file since "
4573 "a directory is in the way: \n"
4574 "!!! %s\n") % myfile_path,
4575 level=logging.ERROR, noiselevel=-1)
4578 if mystat.st_size == 0:
4579 if distdir_writable:
4581 os.unlink(myfile_path)
4584 elif distdir_writable:
4585 if mystat.st_size < fetch_resume_size and \
4586 mystat.st_size < size:
4587 # If the file already exists and the size does not
4588 # match the existing digests, it may be that the
4589 # user is attempting to update the digest. In this
4590 # case, the digestgen() function will advise the
4591 # user to use `ebuild --force foo.ebuild manifest`
4592 # in order to force the old digests to be replaced.
4593 # Since the user may want to keep this file, rename
4594 # it instead of deleting it.
4595 writemsg(_(">>> Renaming distfile with size "
4596 "%d (smaller than " "PORTAGE_FETCH_RESU"
4597 "ME_MIN_SIZE)\n") % mystat.st_size)
4599 _checksum_failure_temp_file(
4600 mysettings["DISTDIR"], myfile)
4601 writemsg_stdout(_("Refetching... "
4602 "File renamed to '%s'\n\n") % \
4603 temp_filename, noiselevel=-1)
4604 elif mystat.st_size >= size:
4606 _checksum_failure_temp_file(
4607 mysettings["DISTDIR"], myfile)
4608 writemsg_stdout(_("Refetching... "
4609 "File renamed to '%s'\n\n") % \
4610 temp_filename, noiselevel=-1)
4612 if distdir_writable and ro_distdirs:
4613 readonly_file = None
4614 for x in ro_distdirs:
4615 filename = os.path.join(x, myfile)
4616 match, mystat = _check_distfile(
4617 filename, pruned_digests, eout)
4619 readonly_file = filename
4621 if readonly_file is not None:
4623 os.unlink(myfile_path)
4624 except OSError as e:
4625 if e.errno not in (errno.ENOENT, errno.ESTALE):
4628 os.symlink(readonly_file, myfile_path)
4631 if fsmirrors and not os.path.exists(myfile_path) and has_space:
4632 for mydir in fsmirrors:
4633 mirror_file = os.path.join(mydir, myfile)
4635 shutil.copyfile(mirror_file, myfile_path)
4636 writemsg(_("Local mirror has file: %s\n") % myfile)
4638 except (IOError, OSError) as e:
4639 if e.errno not in (errno.ENOENT, errno.ESTALE):
4644 mystat = os.stat(myfile_path)
4645 except OSError as e:
4646 if e.errno not in (errno.ENOENT, errno.ESTALE):
4651 apply_secpass_permissions(
4652 myfile_path, gid=portage_gid, mode=0o664, mask=0o2,
4654 except portage.exception.PortageException as e:
4655 if not os.access(myfile_path, os.R_OK):
4656 writemsg(_("!!! Failed to adjust permissions:"
4657 " %s\n") % str(e), noiselevel=-1)
4659 # If the file is empty then it's obviously invalid. Remove
4660 # the empty file and try to download if possible.
4661 if mystat.st_size == 0:
4662 if distdir_writable:
4664 os.unlink(myfile_path)
4665 except EnvironmentError:
4667 elif myfile not in mydigests:
4668 # We don't have a digest, but the file exists. We must
4669 # assume that it is fully downloaded.
4672 if mystat.st_size < mydigests[myfile]["size"] and \
4674 fetched = 1 # Try to resume this download.
4675 elif parallel_fetchonly and \
4676 mystat.st_size == mydigests[myfile]["size"]:
4677 eout = portage.output.EOutput()
4679 mysettings.get("PORTAGE_QUIET") == "1"
4681 "%s size ;-)" % (myfile, ))
4685 verified_ok, reason = portage.checksum.verify_all(
4686 myfile_path, mydigests[myfile])
4688 writemsg(_("!!! Previously fetched"
4689 " file: '%s'\n") % myfile, noiselevel=-1)
4690 writemsg(_("!!! Reason: %s\n") % reason[0],
4692 writemsg(_("!!! Got: %s\n"
4693 "!!! Expected: %s\n") % \
4694 (reason[1], reason[2]), noiselevel=-1)
4695 if reason[0] == _("Insufficient data for checksum verification"):
4697 if distdir_writable:
4699 _checksum_failure_temp_file(
4700 mysettings["DISTDIR"], myfile)
4701 writemsg_stdout(_("Refetching... "
4702 "File renamed to '%s'\n\n") % \
4703 temp_filename, noiselevel=-1)
4705 eout = portage.output.EOutput()
4707 mysettings.get("PORTAGE_QUIET", None) == "1"
4708 digests = mydigests.get(myfile)
4710 digests = list(digests)
4713 "%s %s ;-)" % (myfile, " ".join(digests)))
4715 continue # fetch any remaining files
4717 # Create a reversed list since that is optimal for list.pop().
4718 uri_list = filedict[myfile][:]
4720 checksum_failure_count = 0
4721 tried_locations = set()
4723 loc = uri_list.pop()
4724 # Eliminate duplicates here in case we've switched to
4725 # "primaryuri" mode on the fly due to a checksum failure.
4726 if loc in tried_locations:
4728 tried_locations.add(loc)
4730 writemsg_stdout(loc+" ", noiselevel=-1)
4732 # allow different fetchcommands per protocol
4733 protocol = loc[0:loc.find("://")]
4735 missing_file_param = False
4736 fetchcommand_var = "FETCHCOMMAND_" + protocol.upper()
4737 fetchcommand = mysettings.get(fetchcommand_var)
4738 if fetchcommand is None:
4739 fetchcommand_var = "FETCHCOMMAND"
4740 fetchcommand = mysettings.get(fetchcommand_var)
4741 if fetchcommand is None:
4742 portage.util.writemsg_level(
4743 _("!!! %s is unset. It should "
4744 "have been defined in\n!!! %s/make.globals.\n") \
4745 % (fetchcommand_var,
4746 portage.const.GLOBAL_CONFIG_PATH),
4747 level=logging.ERROR, noiselevel=-1)
4749 if "${FILE}" not in fetchcommand:
4750 portage.util.writemsg_level(
4751 _("!!! %s does not contain the required ${FILE}"
4752 " parameter.\n") % fetchcommand_var,
4753 level=logging.ERROR, noiselevel=-1)
4754 missing_file_param = True
4756 resumecommand_var = "RESUMECOMMAND_" + protocol.upper()
4757 resumecommand = mysettings.get(resumecommand_var)
4758 if resumecommand is None:
4759 resumecommand_var = "RESUMECOMMAND"
4760 resumecommand = mysettings.get(resumecommand_var)
4761 if resumecommand is None:
4762 portage.util.writemsg_level(
4763 _("!!! %s is unset. It should "
4764 "have been defined in\n!!! %s/make.globals.\n") \
4765 % (resumecommand_var,
4766 portage.const.GLOBAL_CONFIG_PATH),
4767 level=logging.ERROR, noiselevel=-1)
4769 if "${FILE}" not in resumecommand:
4770 portage.util.writemsg_level(
4771 _("!!! %s does not contain the required ${FILE}"
4772 " parameter.\n") % resumecommand_var,
4773 level=logging.ERROR, noiselevel=-1)
4774 missing_file_param = True
4776 if missing_file_param:
4777 portage.util.writemsg_level(
4778 _("!!! Refer to the make.conf(5) man page for "
4779 "information about how to\n!!! correctly specify "
4780 "FETCHCOMMAND and RESUMECOMMAND.\n"),
4781 level=logging.ERROR, noiselevel=-1)
4782 if myfile != os.path.basename(loc):
4788 mysize = os.stat(myfile_path).st_size
4789 except OSError as e:
4790 if e.errno not in (errno.ENOENT, errno.ESTALE):
4796 writemsg(_("!!! File %s isn't fetched but unable to get it.\n") % myfile,
4798 elif size is None or size > mysize:
4799 writemsg(_("!!! File %s isn't fully fetched, but unable to complete it\n") % myfile,
4802 writemsg(_("!!! File %s is incorrect size, "
4803 "but unable to retry.\n") % myfile, noiselevel=-1)
4808 if fetched != 2 and has_space:
4809 #we either need to resume or start the download
4812 mystat = os.stat(myfile_path)
4813 except OSError as e:
4814 if e.errno not in (errno.ENOENT, errno.ESTALE):
4819 if mystat.st_size < fetch_resume_size:
4820 writemsg(_(">>> Deleting distfile with size "
4821 "%d (smaller than " "PORTAGE_FETCH_RESU"
4822 "ME_MIN_SIZE)\n") % mystat.st_size)
4824 os.unlink(myfile_path)
4825 except OSError as e:
4827 (errno.ENOENT, errno.ESTALE):
4833 writemsg(_(">>> Resuming download...\n"))
4834 locfetch=resumecommand
4835 command_var = resumecommand_var
4838 locfetch=fetchcommand
4839 command_var = fetchcommand_var
4840 writemsg_stdout(_(">>> Downloading '%s'\n") % \
4841 re.sub(r'//(.+):.+@(.+)/',r'//\1:*password*@\2/', loc))
4843 "DISTDIR": mysettings["DISTDIR"],
4848 myfetch = util.shlex_split(locfetch)
4849 myfetch = [varexpand(x, mydict=variables) for x in myfetch]
4853 myret = _spawn_fetch(mysettings, myfetch)
4857 apply_secpass_permissions(myfile_path,
4858 gid=portage_gid, mode=0o664, mask=0o2)
4859 except portage.exception.FileNotFound as e:
4861 except portage.exception.PortageException as e:
4862 if not os.access(myfile_path, os.R_OK):
4863 writemsg(_("!!! Failed to adjust permissions:"
4864 " %s\n") % str(e), noiselevel=-1)
4866 # If the file is empty then it's obviously invalid. Don't
4867 # trust the return value from the fetcher. Remove the
4868 # empty file and try to download again.
4870 if os.stat(myfile_path).st_size == 0:
4871 os.unlink(myfile_path)
4874 except EnvironmentError:
4877 if mydigests is not None and myfile in mydigests:
4879 mystat = os.stat(myfile_path)
4880 except OSError as e:
4881 if e.errno not in (errno.ENOENT, errno.ESTALE):
4887 if stat.S_ISDIR(mystat.st_mode):
4888 # This can happen if FETCHCOMMAND erroneously
4889 # contains wget's -P option where it should
4891 portage.util.writemsg_level(
4892 _("!!! The command specified in the "
4893 "%s variable appears to have\n!!! "
4894 "created a directory instead of a "
4895 "normal file.\n") % command_var,
4896 level=logging.ERROR, noiselevel=-1)
4897 portage.util.writemsg_level(
4898 _("!!! Refer to the make.conf(5) "
4899 "man page for information about how "
4900 "to\n!!! correctly specify "
4901 "FETCHCOMMAND and RESUMECOMMAND.\n"),
4902 level=logging.ERROR, noiselevel=-1)
4905 # no exception? file exists. let digestcheck() report
4906 # an appropriately for size or checksum errors
4908 # If the fetcher reported success and the file is
4909 # too small, it's probably because the digest is
4910 # bad (upstream changed the distfile). In this
4911 # case we don't want to attempt to resume. Show a
4912 # digest verification failure to that the user gets
4913 # a clue about what just happened.
4914 if myret != os.EX_OK and \
4915 mystat.st_size < mydigests[myfile]["size"]:
4916 # Fetch failed... Try the next one... Kill 404 files though.
4917 if (mystat[stat.ST_SIZE]<100000) and (len(myfile)>4) and not ((myfile[-5:]==".html") or (myfile[-4:]==".htm")):
4918 html404=re.compile("<title>.*(not found|404).*</title>",re.I|re.M)
4919 if html404.search(codecs.open(
4920 _unicode_encode(myfile_path,
4921 encoding=_encodings['fs'], errors='strict'),
4922 mode='r', encoding=_encodings['content'], errors='replace'
4925 os.unlink(mysettings["DISTDIR"]+"/"+myfile)
4926 writemsg(_(">>> Deleting invalid distfile. (Improper 404 redirect from server.)\n"))
4929 except (IOError, OSError):
4934 # File is the correct size--check the checksums for the fetched
4935 # file NOW, for those users who don't have a stable/continuous
4936 # net connection. This way we have a chance to try to download
4937 # from another mirror...
4938 verified_ok,reason = portage.checksum.verify_all(mysettings["DISTDIR"]+"/"+myfile, mydigests[myfile])
4941 writemsg(_("!!! Fetched file: %s VERIFY FAILED!\n") % myfile,
4943 writemsg(_("!!! Reason: %s\n") % reason[0],
4945 writemsg(_("!!! Got: %s\n!!! Expected: %s\n") % \
4946 (reason[1], reason[2]), noiselevel=-1)
4947 if reason[0] == _("Insufficient data for checksum verification"):
4950 _checksum_failure_temp_file(
4951 mysettings["DISTDIR"], myfile)
4952 writemsg_stdout(_("Refetching... "
4953 "File renamed to '%s'\n\n") % \
4954 temp_filename, noiselevel=-1)
4956 checksum_failure_count += 1
4957 if checksum_failure_count == \
4958 checksum_failure_primaryuri:
4959 # Switch to "primaryuri" mode in order
4960 # to increase the probablility of
4963 primaryuri_dict.get(myfile)
4966 reversed(primaryuris))
4967 if checksum_failure_count >= \
4968 checksum_failure_max_tries:
4971 eout = portage.output.EOutput()
4972 eout.quiet = mysettings.get("PORTAGE_QUIET", None) == "1"
4973 digests = mydigests.get(myfile)
4975 eout.ebegin("%s %s ;-)" % \
4976 (myfile, " ".join(sorted(digests))))
4984 elif mydigests!=None:
4985 writemsg(_("No digest file available and download failed.\n\n"),
4988 if use_locks and file_lock:
4989 portage.locks.unlockfile(file_lock)
4992 writemsg_stdout("\n", noiselevel=-1)
4994 if restrict_fetch and not restrict_fetch_msg:
4995 restrict_fetch_msg = True
4996 msg = _("\n!!! %s/%s"
4997 " has fetch restriction turned on.\n"
4998 "!!! This probably means that this "
4999 "ebuild's files must be downloaded\n"
5000 "!!! manually. See the comments in"
5001 " the ebuild for more information.\n\n") % \
5002 (mysettings["CATEGORY"], mysettings["PF"])
5003 portage.util.writemsg_level(msg,
5004 level=logging.ERROR, noiselevel=-1)
5005 have_builddir = "PORTAGE_BUILDDIR" in mysettings and \
5006 os.path.isdir(mysettings["PORTAGE_BUILDDIR"])
5008 global_tmpdir = mysettings["PORTAGE_TMPDIR"]
5009 private_tmpdir = None
5010 if not parallel_fetchonly and not have_builddir:
5011 # When called by digestgen(), it's normal that
5012 # PORTAGE_BUILDDIR doesn't exist. It's helpful
5013 # to show the pkg_nofetch output though, so go
5014 # ahead and create a temporary PORTAGE_BUILDDIR.
5015 # Use a temporary config instance to avoid altering
5016 # the state of the one that's been passed in.
5017 mysettings = config(clone=mysettings)
5018 from tempfile import mkdtemp
5020 private_tmpdir = mkdtemp("", "._portage_fetch_.",
5022 except OSError as e:
5023 if e.errno != portage.exception.PermissionDenied.errno:
5025 raise portage.exception.PermissionDenied(global_tmpdir)
5026 mysettings["PORTAGE_TMPDIR"] = private_tmpdir
5027 mysettings.backup_changes("PORTAGE_TMPDIR")
5028 debug = mysettings.get("PORTAGE_DEBUG") == "1"
5029 portage.doebuild_environment(mysettings["EBUILD"], "fetch",
5030 mysettings["ROOT"], mysettings, debug, 1, None)
5031 prepare_build_dirs(mysettings["ROOT"], mysettings, 0)
5032 have_builddir = True
5034 if not parallel_fetchonly and have_builddir:
5035 # To spawn pkg_nofetch requires PORTAGE_BUILDDIR for
5036 # ensuring sane $PWD (bug #239560) and storing elog
5037 # messages. Therefore, calling code needs to ensure that
5038 # PORTAGE_BUILDDIR is already clean and locked here.
5040 # All the pkg_nofetch goes to stderr since it's considered
5041 # to be an error message.
5043 0 : sys.stdin.fileno(),
5044 1 : sys.stderr.fileno(),
5045 2 : sys.stderr.fileno(),
5048 ebuild_phase = mysettings.get("EBUILD_PHASE")
5050 mysettings["EBUILD_PHASE"] = "nofetch"
5051 spawn(_shell_quote(EBUILD_SH_BINARY) + \
5052 " nofetch", mysettings, fd_pipes=fd_pipes)
5054 if ebuild_phase is None:
5055 mysettings.pop("EBUILD_PHASE", None)
5057 mysettings["EBUILD_PHASE"] = ebuild_phase
5058 if private_tmpdir is not None:
5059 shutil.rmtree(private_tmpdir)
5061 elif restrict_fetch:
5065 elif not filedict[myfile]:
5066 writemsg(_("Warning: No mirrors available for file"
5067 " '%s'\n") % (myfile), noiselevel=-1)
5069 writemsg(_("!!! Couldn't download '%s'. Aborting.\n") % myfile,
5075 failed_files.add(myfile)
5082 def digestgen(myarchives, mysettings, overwrite=1, manifestonly=0, myportdb=None):
# Regenerates the package Manifest via portage.manifest.Manifest, fetching any
# distfiles whose entries are missing or stale first.
# NOTE(review): this chunk is missing interleaved lines (e.g. the docstring's
# opening quotes, several try:/return statements) — comments describe only
# what is visible here.
5084 Generates a digest file if missing. Assumes all files are available.
5085 DEPRECATED: this now only is a compability wrapper for
5086 portage.manifest.Manifest()
5087 NOTE: manifestonly and overwrite are useless with manifest2 and
5088 are therefore ignored."""
5089 if myportdb is None:
5090 writemsg("Warning: myportdb not specified to digestgen\n")
# Counter consulted by doebuild() to exempt Manifest checks while we are
# regenerating; decremented again at the bottom of this function.
5093 global _doebuild_manifest_exempt_depend
5095 _doebuild_manifest_exempt_depend += 1
# Build distfiles_map: distfile name -> list of cpvs that reference it.
5097 fetchlist_dict = FetchlistDict(mysettings["O"], mysettings, myportdb)
5098 for cpv in fetchlist_dict:
5100 for myfile in fetchlist_dict[cpv]:
5101 distfiles_map.setdefault(myfile, []).append(cpv)
5102 except portage.exception.InvalidDependString as e:
5103 writemsg("!!! %s\n" % str(e), noiselevel=-1)
5106 mytree = os.path.dirname(os.path.dirname(mysettings["O"]))
5107 manifest1_compat = False
5108 mf = Manifest(mysettings["O"], mysettings["DISTDIR"],
5109 fetchlist_dict=fetchlist_dict, manifest1_compat=manifest1_compat)
5110 # Don't require all hashes since that can trigger excessive
5111 # fetches when sufficient digests already exist. To ease transition
5112 # while Manifest 1 is being removed, only require hashes that will
5113 # exist before and after the transition.
5114 required_hash_types = set()
5115 required_hash_types.add("size")
5116 required_hash_types.add(portage.const.MANIFEST2_REQUIRED_HASH)
5117 dist_hashes = mf.fhashdict.get("DIST", {})
5119 # To avoid accidental regeneration of digests with the incorrect
5120 # files (such as partially downloaded files), trigger the fetch
5121 # code if the file exists and it's size doesn't match the current
5122 # manifest entry. If there really is a legitimate reason for the
5123 # digest to change, `ebuild --force digest` can be used to avoid
5124 # triggering this code (or else the old digests can be manually
5125 # removed from the Manifest).
# Queue a distfile in missing_files when its Manifest entry is absent, lacks
# one of the required hash types, or disagrees with the on-disk size.
5127 for myfile in distfiles_map:
5128 myhashes = dist_hashes.get(myfile)
5131 st = os.stat(os.path.join(mysettings["DISTDIR"], myfile))
5134 if st is None or st.st_size == 0:
5135 missing_files.append(myfile)
5137 size = myhashes.get("size")
5140 st = os.stat(os.path.join(mysettings["DISTDIR"], myfile))
5141 except OSError as e:
5142 if e.errno != errno.ENOENT:
5146 missing_files.append(myfile)
5148 if required_hash_types.difference(myhashes):
5149 missing_files.append(myfile)
5152 if st.st_size == 0 or size is not None and size != st.st_size:
5153 missing_files.append(myfile)
# Fetch each queued distfile, preparing a per-cpv environment so that
# RESTRICT=fetch/mirror behavior is honored during the fetch.
5157 mytree = os.path.realpath(os.path.dirname(
5158 os.path.dirname(mysettings["O"])))
5159 fetch_settings = config(clone=mysettings)
5160 debug = mysettings.get("PORTAGE_DEBUG") == "1"
5161 for myfile in missing_files:
5163 for cpv in distfiles_map[myfile]:
5164 myebuild = os.path.join(mysettings["O"],
5165 catsplit(cpv)[1] + ".ebuild")
5166 # for RESTRICT=fetch, mirror, etc...
5167 doebuild_environment(myebuild, "fetch",
5168 mysettings["ROOT"], fetch_settings,
5170 uris.update(myportdb.getFetchMap(
5171 cpv, mytree=mytree)[myfile])
5173 fetch_settings["A"] = myfile # for use by pkg_nofetch()
5176 st = os.stat(os.path.join(
5177 mysettings["DISTDIR"],myfile))
5181 if not fetch({myfile : uris}, fetch_settings):
5182 writemsg(_("!!! Fetch failed for %s, can't update "
5183 "Manifest\n") % myfile, noiselevel=-1)
5184 if myfile in dist_hashes and \
5185 st is not None and st.st_size > 0:
5186 # stat result is obtained before calling fetch(),
5187 # since fetch may rename the existing file if the
5188 # digest does not match.
5189 writemsg(_("!!! If you would like to "
5190 "forcefully replace the existing "
5191 "Manifest entry\n!!! for %s, use "
5192 "the following command:\n") % myfile + \
5193 "!!! " + colorize("INFORM",
5194 "ebuild --force %s manifest" % \
5195 os.path.basename(myebuild)) + "\n",
# Write the regenerated Manifest; assume-digests controls whether existing
# DIST hashes may be reused without the file being present.
5198 writemsg_stdout(_(">>> Creating Manifest for %s\n") % mysettings["O"])
5200 mf.create(requiredDistfiles=myarchives,
5201 assumeDistHashesSometimes=True,
5202 assumeDistHashesAlways=(
5203 "assume-digests" in mysettings.features))
5204 except portage.exception.FileNotFound as e:
5205 writemsg(_("!!! File %s doesn't exist, can't update "
5206 "Manifest\n") % e, noiselevel=-1)
5208 except portage.exception.PortagePackageException as e:
5209 writemsg(("!!! %s\n") % (e,), noiselevel=-1)
5212 mf.write(sign=False)
5213 except portage.exception.PermissionDenied as e:
5214 writemsg(_("!!! Permission Denied: %s\n") % (e,), noiselevel=-1)
# When assume-digests is disabled, report DIST entries whose files are not
# actually present in DISTDIR (their hashes were assumed).
5216 if "assume-digests" not in mysettings.features:
5217 distlist = list(mf.fhashdict.get("DIST", {}))
5220 for filename in distlist:
5221 if not os.path.exists(
5222 os.path.join(mysettings["DISTDIR"], filename)):
5223 auto_assumed.append(filename)
5225 mytree = os.path.realpath(
5226 os.path.dirname(os.path.dirname(mysettings["O"])))
5227 cp = os.path.sep.join(mysettings["O"].split(os.path.sep)[-2:])
5228 pkgs = myportdb.cp_list(cp, mytree=mytree)
5230 writemsg_stdout(" digest.assumed" + portage.output.colorize("WARN",
5231 str(len(auto_assumed)).rjust(18)) + "\n")
5232 for pkg_key in pkgs:
5233 fetchlist = myportdb.getFetchMap(pkg_key, mytree=mytree)
5234 pv = pkg_key.split("/")[1]
5235 for filename in auto_assumed:
5236 if filename in fetchlist:
5238 " %s::%s\n" % (pv, filename))
# Pairs with the += 1 near the top — presumably inside a finally: block in
# the full source (the try: line is not visible in this chunk).
5241 _doebuild_manifest_exempt_depend -= 1
5243 def digestParseFile(myfilename, mysettings=None):
5244 """(filename) -- Parses a given file for entries matching:
5245 <checksumkey> <checksum_hex_string> <filename> <filesize>
5246 Ignores lines that don't start with a valid checksum identifier
5247 and returns a dict with the filenames as keys and {checksumkey:checksum}
5249 DEPRECATED: this function is now only a compability wrapper for
5250 portage.manifest.Manifest()."""
# Derive the package directory from the path of the digest/Manifest file:
# either <pkgdir>/files/digest-* or <pkgdir>/Manifest.
5252 mysplit = myfilename.split(os.sep)
5253 if mysplit[-2] == "files" and mysplit[-1].startswith("digest-"):
5254 pkgdir = os.sep + os.sep.join(mysplit[:-2]).strip(os.sep)
5255 elif mysplit[-1] == "Manifest":
5256 pkgdir = os.sep + os.sep.join(mysplit[:-1]).strip(os.sep)
# NOTE(review): no fallback branch is visible when neither pattern matches
# (lines appear to be elided from this chunk) — verify against full source.
5258 if mysettings is None:
5260 mysettings = config(clone=settings)
# Delegate to manifest2: return all digests recorded in the package Manifest.
5262 return Manifest(pkgdir, mysettings["DISTDIR"]).getDigests()
5264 def digestcheck(myfiles, mysettings, strict=0, justmanifest=0):
5265 """Verifies checksums. Assumes all files have been downloaded.
5266 DEPRECATED: this is now only a compability wrapper for
5267 portage.manifest.Manifest()."""
# NOTE(review): several lines (returns, try: headers) are elided from this
# chunk; comments describe only the visible control flow.
5268 if mysettings.get("EBUILD_SKIP_MANIFEST") == "1":
5270 pkgdir = mysettings["O"]
5271 manifest_path = os.path.join(pkgdir, "Manifest")
5272 if not os.path.exists(manifest_path):
5273 writemsg(_("!!! Manifest file not found: '%s'\n") % manifest_path,
# An all-empty fhashdict means the Manifest recorded no files at all.
5279 mf = Manifest(pkgdir, mysettings["DISTDIR"])
5280 manifest_empty = True
5281 for d in mf.fhashdict.values():
5283 manifest_empty = False
5286 writemsg(_("!!! Manifest is empty: '%s'\n") % manifest_path,
5292 eout = portage.output.EOutput()
5293 eout.quiet = mysettings.get("PORTAGE_QUIET", None) == "1"
# Strict mode (unless parallel-fetch-only): verify EBUILD/AUX/MISC hashes,
# then each requested file individually.
5295 if strict and "PORTAGE_PARALLEL_FETCHONLY" not in mysettings:
5296 eout.ebegin(_("checking ebuild checksums ;-)"))
5297 mf.checkTypeHashes("EBUILD")
5299 eout.ebegin(_("checking auxfile checksums ;-)"))
5300 mf.checkTypeHashes("AUX")
5302 eout.ebegin(_("checking miscfile checksums ;-)"))
5303 mf.checkTypeHashes("MISC", ignoreMissingFiles=True)
5306 eout.ebegin(_("checking %s ;-)") % f)
5307 ftype = mf.findFile(f)
5310 mf.checkFileHashes(ftype, f)
5312 except KeyError as e:
5314 writemsg(_("\n!!! Missing digest for %s\n") % str(e), noiselevel=-1)
5316 except portage.exception.FileNotFound as e:
5318 writemsg(_("\n!!! A file listed in the Manifest could not be found: %s\n") % str(e),
5321 except portage.exception.DigestException as e:
5323 writemsg(_("\n!!! Digest verification failed:\n"), noiselevel=-1)
5324 writemsg("!!! %s\n" % e.value[0], noiselevel=-1)
5325 writemsg(_("!!! Reason: %s\n") % e.value[1], noiselevel=-1)
5326 writemsg(_("!!! Got: %s\n") % e.value[2], noiselevel=-1)
5327 writemsg(_("!!! Expected: %s\n") % e.value[3], noiselevel=-1)
5329 # Make sure that all of the ebuilds are actually listed in the Manifest.
5330 glep55 = 'parse-eapi-glep-55' in mysettings.features
5331 for f in os.listdir(pkgdir):
5334 pf, eapi = _split_ebuild_name_glep55(f)
5335 elif f[-7:] == '.ebuild':
5337 if pf is not None and not mf.hasFile("EBUILD", f):
5338 writemsg(_("!!! A file is not listed in the Manifest: '%s'\n") % \
5339 os.path.join(pkgdir, f), noiselevel=-1)
5342 """ epatch will just grab all the patches out of a directory, so we have to
5343 make sure there aren't any foreign files that it might grab."""
5344 filesdir = os.path.join(pkgdir, "files")
# Walk files/, rejecting paths/names that don't decode in the filesystem
# encoding and any file not recorded as AUX in the Manifest.
5346 for parent, dirs, files in os.walk(filesdir):
5348 parent = _unicode_decode(parent,
5349 encoding=_encodings['fs'], errors='strict')
5350 except UnicodeDecodeError:
5351 parent = _unicode_decode(parent,
5352 encoding=_encodings['fs'], errors='replace')
5353 writemsg(_("!!! Path contains invalid "
5354 "character(s) for encoding '%s': '%s'") \
5355 % (_encodings['fs'], parent), noiselevel=-1)
# Hidden directories and CVS metadata are skipped.
5360 if d.startswith(".") or d == "CVS":
5364 f = _unicode_decode(f,
5365 encoding=_encodings['fs'], errors='strict')
5366 except UnicodeDecodeError:
5367 f = _unicode_decode(f,
5368 encoding=_encodings['fs'], errors='replace')
5369 if f.startswith("."):
5371 f = os.path.join(parent, f)[len(filesdir) + 1:]
5372 writemsg(_("!!! File name contains invalid "
5373 "character(s) for encoding '%s': '%s'") \
5374 % (_encodings['fs'], f), noiselevel=-1)
5378 if f.startswith("."):
5380 f = os.path.join(parent, f)[len(filesdir) + 1:]
5381 file_type = mf.findFile(f)
5382 if file_type != "AUX" and not f.startswith("digest-"):
5383 writemsg(_("!!! A file is not listed in the Manifest: '%s'\n") % \
5384 os.path.join(filesdir, f), noiselevel=-1)
5389 # parse actionmap to spawn ebuild with the appropriate args
5390 def spawnebuild(mydo, actionmap, mysettings, debug, alwaysdep=0,
5391 logfile=None, fd_pipes=None, returnpid=False):
# Run ebuild phase `mydo` via spawn(); unless noauto (or returnpid), run the
# phase's "dep" prerequisite from actionmap first, recursively.
# NOTE(review): returns for skipped phases and error paths are elided from
# this chunk.
5392 if not returnpid and \
5393 (alwaysdep or "noauto" not in mysettings.features):
5394 # process dependency first
5395 if "dep" in actionmap[mydo]:
5396 retval = spawnebuild(actionmap[mydo]["dep"], actionmap,
5397 mysettings, debug, alwaysdep=alwaysdep, logfile=logfile,
5398 fd_pipes=fd_pipes, returnpid=returnpid)
# Phases that do not exist in older EAPIs are skipped (configure/prepare
# for EAPI 0-1, pretend for EAPI 0-2).
5402 eapi = mysettings["EAPI"]
5404 if mydo == "configure" and eapi in ("0", "1"):
5407 if mydo == "prepare" and eapi in ("0", "1"):
5410 if mydo == "pretend" and eapi in ("0", "1", "2"):
5413 kwargs = actionmap[mydo]["args"]
5414 mysettings["EBUILD_PHASE"] = mydo
# Clear any stale exit-status file before spawning the phase command.
5415 _doebuild_exit_status_unlink(
5416 mysettings.get("EBUILD_EXIT_STATUS_FILE"))
5419 phase_retval = spawn(actionmap[mydo]["cmd"] % mydo,
5420 mysettings, debug=debug, logfile=logfile,
5421 fd_pipes=fd_pipes, returnpid=returnpid, **kwargs)
5423 mysettings["EBUILD_PHASE"] = ""
# Cross-check the recorded exit status; on mismatch, log the message via
# elog eerror (wrapped to 72 columns).
5428 msg = _doebuild_exit_status_check(mydo, mysettings)
5430 if phase_retval == os.EX_OK:
5432 from textwrap import wrap
5433 from portage.elog.messages import eerror
5434 for l in wrap(msg, 72):
5435 eerror(l, phase=mydo, key=mysettings.mycpv)
# Post-phase fixups: restore userpriv permissions; after src_install, run
# build-log QA scanning and install checks.
5437 _post_phase_userpriv_perms(mysettings)
5438 if mydo == "install":
5439 _check_build_log(mysettings)
5440 if phase_retval == os.EX_OK:
5441 _post_src_install_chost_fix(mysettings)
5442 phase_retval = _post_src_install_checks(mysettings)
# FEATURES=test-fail-continue downgrades a failing test phase to success.
5444 if mydo == "test" and phase_retval != os.EX_OK and \
5445 "test-fail-continue" in mysettings.features:
5446 phase_retval = os.EX_OK
# Shell function lists run by _spawn_misc_sh after particular phases
# (this chunk shows only fragments of the mapping; "install" is consumed
# by _post_src_install_checks below).
5450 _post_phase_cmds = {
5454 "install_symlink_html_docs"],
5459 "preinst_selinux_labels",
5460 "preinst_suid_scan",
5464 "postinst_bsdflags"]
def _post_phase_userpriv_perms(mysettings):
	"""
	Make files left behind in ${T} by privileged ebuild phases writable
	by the less-privileged portage user/group (FEATURES=userpriv).

	No-op unless "userpriv" is enabled and we are running with
	sufficient privileges (secpass >= 2).

	@param mysettings: build-time config; only ${T} and .features are read
	"""
	# Fix: the original placed this explanation in a bare string literal
	# inside the `if` body — a no-op expression statement, not a docstring.
	# It is now a real docstring/comment; runtime behavior is unchanged.
	if "userpriv" in mysettings.features and secpass >= 2:
		# Privileged phases may have left files that need to be made
		# writable to a less privileged user.
		apply_recursive_permissions(mysettings["T"],
			uid=portage_uid, gid=portage_gid, dirmode=0o70, dirmask=0,
			filemode=0o60, filemask=0)
5475 def _post_src_install_checks(mysettings):
# After src_install: fix ownership/permissions of installed files, then run
# the install QA checks (misc-functions.sh commands from _post_phase_cmds).
# NOTE(review): the tail of this function (including its return) is elided
# from this chunk.
5476 _post_src_install_uid_fix(mysettings)
5477 global _post_phase_cmds
5478 retval = _spawn_misc_sh(mysettings, _post_phase_cmds["install"])
5479 if retval != os.EX_OK:
5480 writemsg(_("!!! install_qa_check failed; exiting.\n"),
5484 def _check_build_log(mysettings, out=None):
5486 Search the content of $PORTAGE_LOG_FILE if it exists
5487 and generate the following QA Notices when appropriate:
5489 * Automake "maintainer mode"
5491 * Unrecognized configure options
# (Also scans for "command not found", missing helper files, and make
# jobserver warnings — see the patterns below.)
5493 logfile = mysettings.get("PORTAGE_LOG_FILE")
# Open the log in the content encoding, replacing undecodable bytes; bail
# out silently if the log cannot be opened.
5497 f = codecs.open(_unicode_encode(logfile,
5498 encoding=_encodings['fs'], errors='strict'),
5499 mode='r', encoding=_encodings['content'], errors='replace')
5500 except EnvironmentError:
# One accumulator list + compiled pattern per QA check; patterns are
# compiled once before the scan loop.
5503 am_maintainer_mode = []
5504 bash_command_not_found = []
5505 bash_command_not_found_re = re.compile(
5506 r'(.*): line (\d*): (.*): command not found$')
5507 command_not_found_exclude_re = re.compile(r'/configure: line ')
5508 helper_missing_file = []
5509 helper_missing_file_re = re.compile(
5510 r'^!!! (do|new).*: .* does not exist$')
5512 configure_opts_warn = []
5513 configure_opts_warn_re = re.compile(
5514 r'^configure: WARNING: [Uu]nrecognized options: ')
5516 # Exclude output from dev-libs/yaz-3.0.47 which looks like this:
5519 # Automake: ${SHELL} /var/tmp/portage/dev-libs/yaz-3.0.47/work/yaz-3.0.47/config/missing --run automake-1.10
5520 am_maintainer_mode_re = re.compile(r'/missing --run ')
5521 am_maintainer_mode_exclude_re = \
5522 re.compile(r'(/missing --run (autoheader|makeinfo)|^\s*Automake:\s)')
5524 make_jobserver_re = \
5525 re.compile(r'g?make\[\d+\]: warning: jobserver unavailable:')
# Single pass over the log: collect each matching line (newline stripped)
# into its category's list.
5530 if am_maintainer_mode_re.search(line) is not None and \
5531 am_maintainer_mode_exclude_re.search(line) is None:
5532 am_maintainer_mode.append(line.rstrip("\n"))
5534 if bash_command_not_found_re.match(line) is not None and \
5535 command_not_found_exclude_re.search(line) is None:
5536 bash_command_not_found.append(line.rstrip("\n"))
5538 if helper_missing_file_re.match(line) is not None:
5539 helper_missing_file.append(line.rstrip("\n"))
5541 if configure_opts_warn_re.match(line) is not None:
5542 configure_opts_warn.append(line.rstrip("\n"))
5544 if make_jobserver_re.match(line) is not None:
5545 make_jobserver.append(line.rstrip("\n"))
# Helper that routes each message line through elog's eqawarn for the
# package being installed.
5550 from portage.elog.messages import eqawarn
5551 def _eqawarn(lines):
5553 eqawarn(line, phase="install", key=mysettings.mycpv, out=out)
5554 from textwrap import wrap
# Emit one QA Notice block per category that matched anything.
5557 if am_maintainer_mode:
5558 msg = [_("QA Notice: Automake \"maintainer mode\" detected:")]
5560 msg.extend("\t" + line for line in am_maintainer_mode)
5563 "If you patch Makefile.am, "
5564 "configure.in, or configure.ac then you "
5565 "should use autotools.eclass and "
5566 "eautomake or eautoreconf. Exceptions "
5567 "are limited to system packages "
5568 "for which it is impossible to run "
5569 "autotools during stage building. "
5570 "See http://www.gentoo.org/p"
5571 "roj/en/qa/autofailure.xml for more information."),
5575 if bash_command_not_found:
5576 msg = [_("QA Notice: command not found:")]
5578 msg.extend("\t" + line for line in bash_command_not_found)
5581 if helper_missing_file:
5582 msg = [_("QA Notice: file does not exist:")]
# line[4:] strips the leading "!!! " prefix from the matched log line.
5584 msg.extend("\t" + line[4:] for line in helper_missing_file)
5587 if configure_opts_warn:
5588 msg = [_("QA Notice: Unrecognized configure options:")]
5590 msg.extend("\t" + line for line in configure_opts_warn)
5594 msg = [_("QA Notice: make jobserver unavailable:")]
5596 msg.extend("\t" + line for line in make_jobserver)
5599 def _post_src_install_chost_fix(settings):
# Rewrites build-info/CHOST with the configured CHOST value, since the
# ebuild may have changed the variable during the build.
# NOTE(review): the docstring delimiters and the guard line between
# settings.get('CHOST') and write_atomic() are elided from this chunk —
# presumably the write only happens when chost is non-empty; verify
# against the full source.
5601 It's possible that the ebuild has changed the
5602 CHOST variable, so revert it to the initial
5605 chost = settings.get('CHOST')
5607 write_atomic(os.path.join(settings['PORTAGE_BUILDDIR'],
5608 'build-info', 'CHOST'), chost + '\n')
def _post_src_install_uid_fix(mysettings, out=None):
	"""
	Files in $D with user and group bits that match the "portage"
	user or group are automatically mapped to PORTAGE_INST_UID and
	PORTAGE_INST_GID if necessary. The chown system call may clear
	S_ISUID and S_ISGID bits, so those bits are restored if
	necessary.

	NOTE(review): several control-flow lines of this function are
	elided from this view (the try:/else: pairs, the bsd_chflags
	conditionals, the outer retry loop, and the initialization of
	`size` and `unicode_errors`). Comments only; code untouched.
	"""

	inst_uid = int(mysettings["PORTAGE_INST_UID"])
	inst_gid = int(mysettings["PORTAGE_INST_GID"])

	# BSD file flags such as schg/uchg would cause EPERM in the
	# ownership fixups below, so save them with mtree and strip them.
	# Temporarily remove all of the flags in order to avoid EPERM errors.
	os.system("mtree -c -p %s -k flags > %s" % \
		(_shell_quote(mysettings["D"]),
		_shell_quote(os.path.join(mysettings["T"], "bsdflags.mtree"))))
	os.system("chflags -R noschg,nouchg,nosappnd,nouappnd %s" % \
		(_shell_quote(mysettings["D"]),))
	os.system("chflags -R nosunlnk,nouunlnk %s 2>/dev/null" % \
		(_shell_quote(mysettings["D"]),))

	destdir = mysettings["D"]
	unicode_error = False
	counted_inodes = set()

	for parent, dirs, files in os.walk(destdir):
		# (try: elided) Directory names that fail strict decoding for
		# the 'merge' encoding get renamed to a backslash-escaped form.
		parent = _unicode_decode(parent,
			encoding=_encodings['merge'], errors='strict')
		except UnicodeDecodeError:
			# Round-trip replace -> backslashreplace -> replace yields a
			# printable name that encodes cleanly in the merge encoding.
			new_parent = _unicode_decode(parent,
				encoding=_encodings['merge'], errors='replace')
			new_parent = _unicode_encode(new_parent,
				encoding=_encodings['merge'], errors='backslashreplace')
			new_parent = _unicode_decode(new_parent,
				encoding=_encodings['merge'], errors='replace')
			os.rename(parent, new_parent)
			unicode_error = True
			unicode_errors.append(new_parent[len(destdir):])

		for fname in chain(dirs, files):
			# (try: elided) Same strict-decode check per entry name.
			fname = _unicode_decode(fname,
				encoding=_encodings['merge'], errors='strict')
			except UnicodeDecodeError:
				# Build the rename source from raw bytes, since fname
				# could not be decoded.
				fpath = _os.path.join(
					parent.encode(_encodings['merge']), fname)
				new_fname = _unicode_decode(fname,
					encoding=_encodings['merge'], errors='replace')
				new_fname = _unicode_encode(new_fname,
					encoding=_encodings['merge'], errors='backslashreplace')
				new_fname = _unicode_decode(new_fname,
					encoding=_encodings['merge'], errors='replace')
				new_fpath = os.path.join(parent, new_fname)
				os.rename(fpath, new_fpath)
				unicode_error = True
				unicode_errors.append(new_fpath[len(destdir):])

			# (else: elided) Normal path when decoding succeeded.
			fpath = os.path.join(parent, fname)

			mystat = os.lstat(fpath)
			# Count each regular file's size only once per inode so
			# hard links do not inflate the installed-size total.
			if stat.S_ISREG(mystat.st_mode) and \
				mystat.st_ino not in counted_inodes:
				counted_inodes.add(mystat.st_ino)
				size += mystat.st_size
			# Files owned by neither portage uid nor gid need no
			# remapping. (continue elided)
			if mystat.st_uid != portage_uid and \
				mystat.st_gid != portage_gid:

			# Map portage uid/gid to the configured install ids.
			# (myuid/mygid assignment lines elided)
			if mystat.st_uid == portage_uid:
			if mystat.st_gid == portage_gid:
			# apply_secpass_permissions also restores S_ISUID/S_ISGID
			# bits that chown may have cleared.
			apply_secpass_permissions(
				_unicode_encode(fpath, encoding=_encodings['merge']),
				uid=myuid, gid=mygid,
				mode=mystat.st_mode, stat_cached=mystat,

	# A rename during the walk invalidates the walk; the enclosing
	# (elided) loop restarts until a clean pass completes.
	if not unicode_error:

	# Report any renamed paths via the install-phase error log.
	from portage.elog.messages import eerror
	for l in _merge_unicode_error(unicode_errors):
		eerror(l, phase='install', key=mysettings.mycpv, out=out)

	# Record the total installed size for the vardb SIZE entry.
	open(_unicode_encode(os.path.join(mysettings['PORTAGE_BUILDDIR'],
		'build-info', 'SIZE')), 'w').write(str(size) + '\n')

	# Restore all of the flags saved above.
	os.system("mtree -e -p %s -U -k flags < %s > /dev/null" % \
		(_shell_quote(mysettings["D"]),
		_shell_quote(os.path.join(mysettings["T"], "bsdflags.mtree"))))
def _merge_unicode_error(errors):
	"""
	Format a human-readable report for file names that had to be
	renamed because they did not decode with the current filesystem
	encoding. Returns a list of output lines.

	NOTE(review): the initialization of `lines`, the blank-line
	separators, and the final `return lines` are elided in this view.
	"""
	from textwrap import wrap
	msg = _("This package installs one or more file names containing "
		"characters that do not match your current locale "
		"settings. The current setting for filesystem encoding is '%s'.") \
		% _encodings['merge']
	lines.extend(wrap(msg, 72))

	# One tab-indented line per offending (already renamed) path.
	lines.extend("\t" + x for x in errors)

	# Normalize "UTF-8"/"utf_8"/"utf8" spellings before comparing.
	if _encodings['merge'].lower().replace('_', '').replace('-', '') != 'utf8':
		msg = _("For best results, UTF-8 encoding is recommended. See "
			"the Gentoo Linux Localization Guide for instructions "
			"about how to configure your locale for UTF-8 encoding:")
		lines.extend(wrap(msg, 72))
		lines.append("\t" + \
			"http://www.gentoo.org/doc/en/guide-localization.xml")
def _post_pkg_preinst_cmd(mysettings):
	"""
	Post phase logic and tasks that have been factored out of
	ebuild.sh. Call preinst_mask last so that INSTALL_MASK can
	be used to wipe out any gmon.out files created during
	previous functions (in case any tools were built with -pg
	in CFLAGS).

	NOTE(review): the function's return statement (presumably
	`return myargs`) is elided in this view.
	"""
	# PORTAGE_BIN_PATH may differ from the global constant when
	# portage is reinstalling itself.
	portage_bin_path = mysettings["PORTAGE_BIN_PATH"]
	misc_sh_binary = os.path.join(portage_bin_path,
		os.path.basename(MISC_SH_BINARY))

	mysettings["EBUILD_PHASE"] = ""
	global _post_phase_cmds
	# Command line: misc-functions.sh followed by the registered
	# preinst helper function names.
	myargs = [_shell_quote(misc_sh_binary)] + _post_phase_cmds["preinst"]
def _post_pkg_postinst_cmd(mysettings):
	"""
	Post phase logic and tasks that have been factored out of
	ebuild.sh, run after the pkg_postinst phase.

	NOTE(review): the function's return statement (presumably
	`return myargs`) is elided in this view.
	"""
	# PORTAGE_BIN_PATH may differ from the global constant when
	# portage is reinstalling itself.
	portage_bin_path = mysettings["PORTAGE_BIN_PATH"]
	misc_sh_binary = os.path.join(portage_bin_path,
		os.path.basename(MISC_SH_BINARY))

	mysettings["EBUILD_PHASE"] = ""
	global _post_phase_cmds
	# Command line: misc-functions.sh followed by the registered
	# postinst helper function names.
	myargs = [_shell_quote(misc_sh_binary)] + _post_phase_cmds["postinst"]
def _spawn_misc_sh(mysettings, commands, **kwargs):
	"""
	Spawn misc-functions.sh with the given list of commands.

	@param mysettings: the ebuild config
	@type mysettings: config
	@param commands: a list of function names to call in misc-functions.sh
	@type commands: list
	@returns: the return value from the spawn() call

	NOTE(review): the `if msg:` guard, the coercion of rval to 1, and
	the final `return rval` appear to be elided from this view.
	"""
	# Note: PORTAGE_BIN_PATH may differ from the global
	# constant when portage is reinstalling itself.
	portage_bin_path = mysettings["PORTAGE_BIN_PATH"]
	misc_sh_binary = os.path.join(portage_bin_path,
		os.path.basename(MISC_SH_BINARY))
	mycommand = " ".join([_shell_quote(misc_sh_binary)] + commands)
	# Remove any stale exit-status marker so that its absence after
	# spawn() reliably indicates an abnormal shell exit.
	_doebuild_exit_status_unlink(
		mysettings.get("EBUILD_EXIT_STATUS_FILE"))
	debug = mysettings.get("PORTAGE_DEBUG") == "1"
	logfile = mysettings.get("PORTAGE_LOG_FILE")
	mydo = mysettings["EBUILD_PHASE"]
	rval = spawn(mycommand, mysettings, debug=debug,
		logfile=logfile, **kwargs)
	msg = _doebuild_exit_status_check(mydo, mysettings)
	if rval == os.EX_OK:
		# Shell reported success but died before writing its exit
		# marker; log the diagnostic via the elog framework.
		from textwrap import wrap
		from portage.elog.messages import eerror
		for l in wrap(msg, 72):
			eerror(l, phase=mydo, key=mysettings.mycpv)
5821 _testing_eapis = frozenset(["3_pre1"])
5822 _deprecated_eapis = frozenset(["2_pre3", "2_pre2", "2_pre1"])
5824 def _eapi_is_deprecated(eapi):
5825 return eapi in _deprecated_eapis
def eapi_is_supported(eapi):
	"""
	Return True if this version of portage can handle ebuilds of the
	given EAPI. Deprecated and testing EAPIs are reported as supported.

	NOTE(review): the `return True` bodies of the two guards below, and
	the conversion/validation of `eapi` to an int before the final
	comparison, are elided from this view. Comments only.
	"""
	# Accept non-string inputs (e.g. ints) by normalizing first.
	eapi = str(eapi).strip()

	if _eapi_is_deprecated(eapi):

	if eapi in _testing_eapis:

	# Numeric comparison against the highest officially supported EAPI.
	return eapi <= portage.const.EAPI
5844 # Generally, it's best not to assume that cache entries for unsupported EAPIs
5845 # can be validated. However, the current package manager specification does not
5846 # guarantee that the EAPI can be parsed without sourcing the ebuild, so
5847 # it's too costly to discard existing cache entries for unsupported EAPIs.
5848 # Therefore, by default, assume that cache entries for unsupported EAPIs can be
5849 # validated. If FEATURES=parse-eapi-* is enabled, this assumption is discarded
# since the EAPI can be determined without incurring the cost of sourcing
5852 _validate_cache_for_unsupported_eapis = True
5854 _parse_eapi_ebuild_head_re = re.compile(r'^EAPI=[\'"]?([^\'"#]*)')
5855 _parse_eapi_ebuild_head_max_lines = 30
5857 def _parse_eapi_ebuild_head(f):
5860 m = _parse_eapi_ebuild_head_re.match(line)
5862 return m.group(1).strip()
5864 if count >= _parse_eapi_ebuild_head_max_lines:
# True when FEATURES=parse-eapi-glep-55 is enabled.
# GLEP 55 proposed encoding the EAPI in the ebuild file name
# (pkg-ver.ebuild-<eapi>); this flag gates that experimental support.
_glep_55_enabled = False
5871 _split_ebuild_name_glep55_re = re.compile(r'^(.*)\.ebuild(-([^.]+))?$')
5873 def _split_ebuild_name_glep55(name):
5875 @returns: (pkg-ver-rev, eapi)
5877 m = _split_ebuild_name_glep55_re.match(name)
5880 return (m.group(1), m.group(3))
def doebuild_environment(myebuild, mydo, myroot, mysettings, debug, use_cache, mydbapi):
	"""
	Populate mysettings with all per-ebuild variables required to run
	phase `mydo` for `myebuild`: CATEGORY, P/PN/PV/PR/PF/PVR, EAPI,
	PORTAGE_BUILDDIR and its subpaths (HOME, WORKDIR, D, T), PATH, KV,
	and the color map.

	NOTE(review): several `else:` branches and guard lines of this
	function are elided from this view (marked below); comments only,
	code untouched.
	"""
	ebuild_path = os.path.abspath(myebuild)
	pkg_dir = os.path.dirname(ebuild_path)

	# Prefer the CATEGORY already recorded in the pkg config layer;
	# otherwise derive it from the .../category/pkg/pkg-ver.ebuild layout.
	if "CATEGORY" in mysettings.configdict["pkg"]:
		cat = mysettings.configdict["pkg"]["CATEGORY"]
	# (else: elided)
		cat = os.path.basename(normalize_path(os.path.join(pkg_dir, "..")))

	# With GLEP 55 naming, the EAPI may be encoded in the file name.
	if 'parse-eapi-glep-55' in mysettings.features:
		mypv, eapi = portage._split_ebuild_name_glep55(
			os.path.basename(myebuild))
	# (else: elided) Strip the fixed ".ebuild" suffix (7 chars).
		mypv = os.path.basename(ebuild_path)[:-7]

	mycpv = cat+"/"+mypv
	mysplit=pkgsplit(mypv,silent=0)
	# (guard elided: raised when pkgsplit fails)
	raise portage.exception.IncorrectParameter(
		_("Invalid ebuild path: '%s'") % myebuild)

	# Make a backup of PORTAGE_TMPDIR prior to calling config.reset()
	# so that the caller can override it.
	tmpdir = mysettings["PORTAGE_TMPDIR"]

	if mydo == 'depend':
		if mycpv != mysettings.mycpv:
			# Don't pass in mydbapi here since the resulting aux_get
			# call would lead to infinite 'depend' phase recursion.
			mysettings.setcpv(mycpv)
	# (else: elided)
		# If IUSE isn't in configdict['pkg'], it means that setcpv()
		# hasn't been called with the mydb argument, so we have to
		# call it here (portage code always calls setcpv properly,
		# but api consumers might not).
		if mycpv != mysettings.mycpv or \
			'IUSE' not in mysettings.configdict['pkg']:
			# Reload env.d variables and reset any previous settings.
			# (reload/reset calls elided)
			mysettings.setcpv(mycpv, mydb=mydbapi)

	# config.reset() might have reverted a change made by the caller,
	# so restore it to it's original value.
	mysettings["PORTAGE_TMPDIR"] = tmpdir

	mysettings.pop("EBUILD_PHASE", None) # remove from backupenv
	mysettings["EBUILD_PHASE"] = mydo

	mysettings["PORTAGE_MASTER_PID"] = str(os.getpid())

	# We are disabling user-specific bashrc files.
	mysettings["BASH_ENV"] = INVALID_ENV_FILE

	if debug: # Otherwise it overrides emerge's settings.
		# We have no other way to set debug... debug can't be passed in
		# due to how it's coded... Don't overwrite this so we can use it.
		mysettings["PORTAGE_DEBUG"] = "1"

	mysettings["ROOT"] = myroot
	mysettings["STARTDIR"] = getcwd()
	mysettings["EBUILD"] = ebuild_path
	mysettings["O"] = pkg_dir
	mysettings.configdict["pkg"]["CATEGORY"] = cat
	mysettings["FILESDIR"] = pkg_dir+"/files"
	mysettings["PF"] = mypv

	# When the dbapi carries per-tree repo info, take PORTDIR and the
	# overlays from the repository containing this ebuild.
	if hasattr(mydbapi, '_repo_info'):
		mytree = os.path.dirname(os.path.dirname(pkg_dir))
		repo_info = mydbapi._repo_info[mytree]
		mysettings['PORTDIR'] = repo_info.portdir
		mysettings['PORTDIR_OVERLAY'] = repo_info.portdir_overlay

	mysettings["PORTDIR"] = os.path.realpath(mysettings["PORTDIR"])
	mysettings["DISTDIR"] = os.path.realpath(mysettings["DISTDIR"])
	mysettings["RPMDIR"] = os.path.realpath(mysettings["RPMDIR"])

	mysettings["ECLASSDIR"] = mysettings["PORTDIR"]+"/eclass"
	# "/" is invalid in a file name, so flatten the cpv for the log name.
	mysettings["SANDBOX_LOG"] = mycpv.replace("/", "_-_")

	mysettings["PROFILE_PATHS"] = "\n".join(mysettings.profiles)
	mysettings["P"] = mysplit[0]+"-"+mysplit[1]
	mysettings["PN"] = mysplit[0]
	mysettings["PV"] = mysplit[1]
	mysettings["PR"] = mysplit[2]

	if portage.util.noiselimit < 0:
		mysettings["PORTAGE_QUIET"] = "1"

	# For the 'depend' phase the EAPI may not be known yet; try the
	# cheap detection paths before sourcing the ebuild.
	if mydo == 'depend' and \
		'EAPI' not in mysettings.configdict['pkg']:
		if eapi is not None:
			# From parse-eapi-glep-55 above.
		elif 'parse-eapi-ebuild-head' in mysettings.features:
			eapi = _parse_eapi_ebuild_head(
				codecs.open(_unicode_encode(ebuild_path,
				encoding=_encodings['fs'], errors='strict'),
				mode='r', encoding=_encodings['content'], errors='replace'))

		if eapi is not None:
			if not eapi_is_supported(eapi):
				raise portage.exception.UnsupportedAPIException(mycpv, eapi)
			mysettings.configdict['pkg']['EAPI'] = eapi

	if mydo != "depend":
		# Metadata vars such as EAPI and RESTRICT are
		# set by the above config.setcpv() call.
		eapi = mysettings["EAPI"]
		if not eapi_is_supported(eapi):
			# can't do anything with this.
			raise portage.exception.UnsupportedAPIException(mycpv, eapi)

	# PVR omits the revision suffix for the default "r0" revision.
	if mysplit[2] == "r0":
		mysettings["PVR"]=mysplit[1]
	# (else: elided)
		mysettings["PVR"]=mysplit[1]+"-"+mysplit[2]

	if "PATH" in mysettings:
		mysplit=mysettings["PATH"].split(":")
	# (else: elided)

	# Note: PORTAGE_BIN_PATH may differ from the global constant
	# when portage is reinstalling itself.
	portage_bin_path = mysettings["PORTAGE_BIN_PATH"]
	if portage_bin_path not in mysplit:
		mysettings["PATH"] = portage_bin_path + ":" + mysettings["PATH"]

	# Sandbox needs canonical paths.
	mysettings["PORTAGE_TMPDIR"] = os.path.realpath(
		mysettings["PORTAGE_TMPDIR"])
	mysettings["BUILD_PREFIX"] = mysettings["PORTAGE_TMPDIR"]+"/portage"
	mysettings["PKG_TMPDIR"]   = mysettings["PORTAGE_TMPDIR"]+"/binpkgs"

	# Package {pre,post}inst and {pre,post}rm may overlap, so they must have separate
	# locations in order to prevent interference.
	if mydo in ("unmerge", "prerm", "postrm", "cleanrm"):
		mysettings["PORTAGE_BUILDDIR"] = os.path.join(
			mysettings["PKG_TMPDIR"],
			mysettings["CATEGORY"], mysettings["PF"])
	# (else: elided)
		mysettings["PORTAGE_BUILDDIR"] = os.path.join(
			mysettings["BUILD_PREFIX"],
			mysettings["CATEGORY"], mysettings["PF"])

	mysettings["HOME"] = os.path.join(mysettings["PORTAGE_BUILDDIR"], "homedir")
	mysettings["WORKDIR"] = os.path.join(mysettings["PORTAGE_BUILDDIR"], "work")
	mysettings["D"] = os.path.join(mysettings["PORTAGE_BUILDDIR"], "image") + os.sep
	mysettings["T"] = os.path.join(mysettings["PORTAGE_BUILDDIR"], "temp")

	mysettings["PORTAGE_BASHRC"] = os.path.join(
		mysettings["PORTAGE_CONFIGROOT"], EBUILD_SH_ENV_FILE)
	mysettings["EBUILD_EXIT_STATUS_FILE"] = os.path.join(
		mysettings["PORTAGE_BUILDDIR"], ".exit_status")

	#set up KV variable -- DEP SPEEDUP :: Don't waste time. Keep var persistent.
	if eapi not in ('0', '1', '2'):
		# Discard KV for EAPIs that don't support it. Cache KV is restored
		# from the backupenv whenever config.reset() is called.
		mysettings.pop('KV', None)
	elif mydo != 'depend' and 'KV' not in mysettings and \
		mydo in ('compile', 'config', 'configure', 'info',
		'install', 'nofetch', 'postinst', 'postrm', 'preinst',
		'prepare', 'prerm', 'setup', 'test', 'unpack'):
		mykv,err1=ExtractKernelVersion(os.path.join(myroot, "usr/src/linux"))
		# (guard elided: only assigned when a version was extracted)
			# Regular source tree
			mysettings["KV"]=mykv
		# Persist KV across config.reset() calls.
		mysettings.backup_changes("KV")

	# Allow color.map to control colors associated with einfo, ewarn, etc...
	# (mycolors initialization elided)
	for c in ("GOOD", "WARN", "BAD", "HILITE", "BRACKET"):
		mycolors.append("%s=$'%s'" % \
			(c, portage.output.style_to_ansi_code(c)))
	mysettings["PORTAGE_COLORMAP"] = "\n".join(mycolors)
def prepare_build_dirs(myroot, mysettings, cleanup):
	"""
	Create (and optionally clean) the directory tree used by the build:
	the builddir parents, HOME, T, PKG_LOGDIR, then the work dir and
	FEATURES dirs via the helpers called at the bottom.

	NOTE(review): several try:/return lines are elided from this view
	(marked below); comments only, code untouched.

	@param cleanup: when true, HOME and T are removed first so stale
		state (e.g. an old environment) cannot leak into this phase
	"""
	clean_dirs = [mysettings["HOME"]]

	# We enable cleanup when we want to make sure old cruft (such as the old
	# environment) doesn't interfere with the current phase.
	# (if cleanup: elided)
	clean_dirs.append(mysettings["T"])

	for clean_dir in clean_dirs:
		# (try: elided)
		shutil.rmtree(clean_dir)
		except OSError as oe:
			# A missing dir is fine; EPERM is reported to the user.
			if errno.ENOENT == oe.errno:
			elif errno.EPERM == oe.errno:
				writemsg("%s\n" % oe, noiselevel=-1)
				writemsg(_("Operation Not Permitted: rmtree('%s')\n") % \
					clean_dir, noiselevel=-1)

	def makedirs(dir_path):
		# Local helper: create dir_path, tolerating EEXIST and
		# reporting EPERM. (try/return lines elided)
		os.makedirs(dir_path)
		except OSError as oe:
			if errno.EEXIST == oe.errno:
			elif errno.EPERM == oe.errno:
				writemsg("%s\n" % oe, noiselevel=-1)
				writemsg(_("Operation Not Permitted: makedirs('%s')\n") % \
					dir_path, noiselevel=-1)

	mysettings["PKG_LOGDIR"] = os.path.join(mysettings["T"], "logging")

	# The builddir's parent and grandparent (.../portage and
	# .../portage/<category>) also need portage-group access.
	mydirs = [os.path.dirname(mysettings["PORTAGE_BUILDDIR"])]
	mydirs.append(os.path.dirname(mydirs[-1]))

	# (try: elided)
	for mydir in mydirs:
		portage.util.ensure_dirs(mydir)
		portage.util.apply_secpass_permissions(mydir,
			gid=portage_gid, uid=portage_uid, mode=0o70, mask=0)
	for dir_key in ("PORTAGE_BUILDDIR", "HOME", "PKG_LOGDIR", "T"):
		# These directories don't necessarily need to be group writable.
		# However, the setup phase is commonly run as a privileged user
		# prior to the other phases being run by an unprivileged user.
		# Currently, we use the portage group to ensure that the
		# unprivileged user still has write access to these directories
		# in any case.
		portage.util.ensure_dirs(mysettings[dir_key], mode=0o775)
		portage.util.apply_secpass_permissions(mysettings[dir_key],
			uid=portage_uid, gid=portage_gid)
	except portage.exception.PermissionDenied as e:
		writemsg(_("Permission Denied: %s\n") % str(e), noiselevel=-1)
	except portage.exception.OperationNotPermitted as e:
		writemsg(_("Operation Not Permitted: %s\n") % str(e), noiselevel=-1)
	except portage.exception.FileNotFound as e:
		writemsg(_("File Not Found: '%s'\n") % str(e), noiselevel=-1)

	_prepare_workdir(mysettings)
	if mysettings.get('EBUILD_PHASE') != 'fetch':
		# Avoid spurious permissions adjustments when fetching with
		# a temporary PORTAGE_TMPDIR setting (for fetchonly).
		_prepare_features_dirs(mysettings)
def _adjust_perms_msg(settings, msg):
	"""
	Emit a permissions-adjustment warning either to the console or,
	when portage runs in the background, by appending it to
	PORTAGE_LOG_FILE.

	NOTE(review): the nested write() helper definition, the try/except
	around the log open, and the finally/close structure are elided
	from this view; comments only, code untouched.
	"""
		# Default writer: the console, at error noise level.
		writemsg(msg, noiselevel=-1)

	background = settings.get("PORTAGE_BACKGROUND") == "1"
	log_path = settings.get("PORTAGE_LOG_FILE")

	if background and log_path is not None:
		# In the background, append to the build log instead of the
		# console. (try: elided)
		log_file = codecs.open(_unicode_encode(log_path,
			encoding=_encodings['fs'], errors='strict'),
			mode='a', encoding=_encodings['content'], errors='replace')

	log_file.write(_unicode_decode(msg))

	# (finally: elided) Always close the log file if one was opened.
	if log_file is not None:
def _prepare_features_dirs(mysettings):
	"""
	Create and fix permissions on the directories needed by the
	ccache/distcc FEATURES, disabling a feature (and removing it from
	FEATURES) if its directories cannot be prepared.

	NOTE(review): the features_dirs dict opener, the per-feature mode
	constants, the try: lines, the st = os.lstat(...) assignment and
	the onerror() helper are elided from this view; comments only.
	"""
			# Directory prepended to PATH so the ccache wrappers win.
			"path_dir": "/usr/lib/ccache/bin",
			"basedir_var":"CCACHE_DIR",
			"default_dir":os.path.join(mysettings["PORTAGE_TMPDIR"], "ccache"),
			"always_recurse":False},
			"path_dir": "/usr/lib/distcc/bin",
			"basedir_var":"DISTCC_DIR",
			"default_dir":os.path.join(mysettings["BUILD_PREFIX"], ".distcc"),
			# distcc keeps per-host lock and state subdirectories.
			"subdirs":("lock", "state"),
			"always_recurse":True}

	restrict = mysettings.get("PORTAGE_RESTRICT","").split()
	from portage.data import secpass
	# Privileges are only dropped when we run as root (secpass >= 2)
	# with userpriv enabled and not restricted by the ebuild.
	droppriv = secpass >= 2 and \
		"userpriv" in mysettings.features and \
		"userpriv" not in restrict
	for myfeature, kwargs in features_dirs.items():
		if myfeature in mysettings.features:
			# (try: elided)
			basedir = mysettings.get(kwargs["basedir_var"])
			if basedir is None or not basedir.strip():
				basedir = kwargs["default_dir"]
				mysettings[kwargs["basedir_var"]] = basedir

			path_dir = kwargs["path_dir"]
			if not os.path.isdir(path_dir):
				raise portage.exception.DirectoryNotFound(path_dir)

			mydirs = [mysettings[kwargs["basedir_var"]]]
			if "subdirs" in kwargs:
				for subdir in kwargs["subdirs"]:
					mydirs.append(os.path.join(basedir, subdir))
			for mydir in mydirs:
				modified = portage.util.ensure_dirs(mydir)
				# Generally, we only want to apply permissions for
				# initial creation. Otherwise, we don't know exactly what
				# permissions the user wants, so should leave them as-is.
				droppriv_fix = False
				# (droppriv guard and st = os.lstat(mydir) elided)
				if st.st_gid != portage_gid or \
					not dirmode == (stat.S_IMODE(st.st_mode) & dirmode):
				if not droppriv_fix:
					# Check permissions of files in the directory.
					for filename in os.listdir(mydir):
						# (try/except around lstat elided)
						subdir_st = os.lstat(
							os.path.join(mydir, filename))
						if subdir_st.st_gid != portage_gid or \
							((stat.S_ISDIR(subdir_st.st_mode) and \
							not dirmode == (stat.S_IMODE(subdir_st.st_mode) & dirmode))):

				# Announce which adjustment is being made.
				_adjust_perms_msg(mysettings,
					colorize("WARN", " * ") + \
					_("Adjusting permissions "
					"for FEATURES=userpriv: '%s'\n") % mydir)
				_adjust_perms_msg(mysettings,
					colorize("WARN", " * ") + \
					_("Adjusting permissions "
					"for FEATURES=%s: '%s'\n") % (myfeature, mydir))

				if modified or kwargs["always_recurse"] or droppriv_fix:
					# (def onerror(e): elided)
						raise # The feature is disabled if a single error
						# occurs during permissions adjustment.
					if not apply_recursive_permissions(mydir,
						gid=portage_gid, dirmode=dirmode, dirmask=modemask,
						filemode=filemode, filemask=modemask, onerror=onerror):
						raise portage.exception.OperationNotPermitted(
							_("Failed to apply recursive permissions for the portage group."))

			except portage.exception.DirectoryNotFound as e:
				writemsg(_("\n!!! Directory does not exist: '%s'\n") % \
					(e,), noiselevel=-1)
				writemsg(_("!!! Disabled FEATURES='%s'\n") % myfeature,

			except portage.exception.PortageException as e:
				writemsg("\n!!! %s\n" % str(e), noiselevel=-1)
				writemsg(_("!!! Failed resetting perms on %s='%s'\n") % \
					(kwargs["basedir_var"], basedir), noiselevel=-1)
				writemsg(_("!!! Disabled FEATURES='%s'\n") % myfeature,

				# Keep mysettings.features and FEATURES consistent.
				mysettings.features.remove(myfeature)
				mysettings['FEATURES'] = ' '.join(sorted(mysettings.features))
def _prepare_workdir(mysettings):
	"""
	Apply PORTAGE_WORKDIR_MODE to WORKDIR, then set up PORT_LOGDIR
	based logging (or fall back to $T/build.log).

	NOTE(review): the try: lines, the mode.isdigit()/else validation
	branch, and some else: lines are elided from this view; comments
	only, code untouched.
	"""
	workdir_mode = 0o700
	# (try: elided) Parse the user-configured octal mode string.
	mode = mysettings["PORTAGE_WORKDIR_MODE"]
	parsed_mode = int(mode, 8)
	# Reject modes with bits outside the permission/suid/sticky range.
	if parsed_mode & 0o7777 != parsed_mode:
		raise ValueError("Invalid file mode: %s" % mode)
	workdir_mode = parsed_mode
	except KeyError as e:
		writemsg(_("!!! PORTAGE_WORKDIR_MODE is unset, using %s.\n") % oct(workdir_mode))
	except ValueError as e:
		writemsg("%s\n" % e)
		writemsg(_("!!! Unable to parse PORTAGE_WORKDIR_MODE='%s', using %s.\n") % \
			(mysettings["PORTAGE_WORKDIR_MODE"], oct(workdir_mode)))
	# Normalize back to a plain octal string (strip the "0o" prefix).
	mysettings["PORTAGE_WORKDIR_MODE"] = oct(workdir_mode).replace('o', '')
	# (try: elided)
	apply_secpass_permissions(mysettings["WORKDIR"],
		uid=portage_uid, gid=portage_gid, mode=workdir_mode)
	except portage.exception.FileNotFound:
		pass # ebuild.sh will create it

	# An empty PORT_LOGDIR is equivalent to unset.
	if mysettings.get("PORT_LOGDIR", "") == "":
		while "PORT_LOGDIR" in mysettings:
			del mysettings["PORT_LOGDIR"]
	if "PORT_LOGDIR" in mysettings:
		# (try: elided)
		modified = portage.util.ensure_dirs(mysettings["PORT_LOGDIR"])
		apply_secpass_permissions(mysettings["PORT_LOGDIR"],
			uid=portage_uid, gid=portage_gid, mode=0o2770)
		except portage.exception.PortageException as e:
			writemsg("!!! %s\n" % str(e), noiselevel=-1)
			writemsg(_("!!! Permission issues with PORT_LOGDIR='%s'\n") % \
				mysettings["PORT_LOGDIR"], noiselevel=-1)
			writemsg(_("!!! Disabling logging.\n"), noiselevel=-1)
			while "PORT_LOGDIR" in mysettings:
				del mysettings["PORT_LOGDIR"]
	if "PORT_LOGDIR" in mysettings and \
		os.access(mysettings["PORT_LOGDIR"], os.W_OK):
		logid_path = os.path.join(mysettings["PORTAGE_BUILDDIR"], ".logid")
		if not os.path.exists(logid_path):
			# NOTE(review): this file handle is never explicitly closed
			# (it only touches the file); relies on refcount cleanup.
			open(_unicode_encode(logid_path), 'w')
		# Timestamp the log name from the .logid mtime (UTC).
		logid_time = _unicode_decode(time.strftime("%Y%m%d-%H%M%S",
			time.gmtime(os.stat(logid_path).st_mtime)),
			encoding=_encodings['content'], errors='replace')
		mysettings["PORTAGE_LOG_FILE"] = os.path.join(
			mysettings["PORT_LOGDIR"], "%s:%s:%s.log" % \
			(mysettings["CATEGORY"], mysettings["PF"], logid_time))
		del logid_path, logid_time
	# (else: elided)
		# NOTE: When sesandbox is enabled, the local SELinux security policies
		# may not allow output to be piped out of the sesandbox domain. The
		# current policy will allow it to work when a pty is available, but
		# not through a normal pipe. See bug #162404.
		mysettings["PORTAGE_LOG_FILE"] = os.path.join(
			mysettings["T"], "build.log")
def _doebuild_exit_status_check(mydo, settings):
	"""
	Check whether the ebuild shell exited abnormally. The shell writes
	EBUILD_EXIT_STATUS_FILE as its final action, so its absence after a
	phase indicates that bash died before finishing.

	@param mydo: the ebuild phase name
	@param settings: the ebuild config
	@returns: an error string if the shell appeared to exit
		unsuccessfully, None otherwise
	"""
	exit_status_file = settings.get("EBUILD_EXIT_STATUS_FILE")
	# No marker configured: nothing to verify.
	if not exit_status_file:
		return None
	# Marker present: the shell completed normally.
	if os.path.exists(exit_status_file):
		return None
	return _("The ebuild phase '%s' has exited "
		"unexpectedly. This type of behavior "
		"is known to be triggered "
		"by things such as failed variable "
		"assignments (bug #190128) or bad substitution "
		"errors (bug #200313). Normally, before exiting, bash should "
		"have displayed an error message above. If bash did not "
		"produce an error message above, it's possible "
		"that the ebuild has called `exit` when it "
		"should have called `die` instead. This behavior may also "
		"be triggered by a corrupt bash binary or a hardware "
		"problem such as memory or cpu malfunction. If the problem is not "
		"reproducible or it appears to occur randomly, then it is likely "
		"to be triggered by a hardware problem. "
		"If you suspect a hardware problem then you should "
		"try some basic hardware diagnostics such as memtest. "
		"Please do not report this as a bug unless it is consistently "
		"reproducible and you are sure that your bash binary and hardware "
		"are functioning properly.") % mydo
def _doebuild_exit_status_check_and_log(settings, mydo, retval):
	"""
	Run _doebuild_exit_status_check() and, when it reports a problem,
	log the diagnostic via eerror and force a nonzero return value.

	@returns: retval, coerced to 1 when it was os.EX_OK but the exit
		status check failed
	"""
	msg = _doebuild_exit_status_check(mydo, settings)
	if not msg:
		return retval
	# A "successful" retval is not trustworthy when the shell died
	# before writing its exit marker.
	if retval == os.EX_OK:
		retval = 1
	from textwrap import wrap
	from portage.elog.messages import eerror
	for line in wrap(msg, 72):
		eerror(line, phase=mydo, key=settings.mycpv)
	return retval
6375 def _doebuild_exit_status_unlink(exit_status_file):
6377 Double check to make sure it really doesn't exist
6378 and raise an OSError if it still does (it shouldn't).
6379 OSError if necessary.
6381 if not exit_status_file:
6384 os.unlink(exit_status_file)
6387 if os.path.exists(exit_status_file):
6388 os.unlink(exit_status_file)
# Incremented while manifest checks must be skipped (e.g. during the
# digest/manifest/help phases, which may themselves regenerate digests).
_doebuild_manifest_exempt_depend = 0
# Most recently verified Manifest object, reused so consecutive ebuilds
# from the same package directory don't re-verify the same Manifest.
_doebuild_manifest_cache = None
# Ebuild paths that already failed checksum/digest verification.
_doebuild_broken_ebuilds = set()
# Manifest paths known to be bad (e.g. stray ebuilds not listed in them).
_doebuild_broken_manifests = set()
6395 def doebuild(myebuild, mydo, myroot, mysettings, debug=0, listonly=0,
6396 fetchonly=0, cleanup=0, dbkey=None, use_cache=1, fetchall=0, tree=None,
6397 mydbapi=None, vartree=None, prev_mtimes=None,
6398 fd_pipes=None, returnpid=False):
6401 Wrapper function that invokes specific ebuild phases through the spawning
6404 @param myebuild: name of the ebuild to invoke the phase on (CPV)
6405 @type myebuild: String
6406 @param mydo: Phase to run
6408 @param myroot: $ROOT (usually '/', see man make.conf)
6409 @type myroot: String
6410 @param mysettings: Portage Configuration
6411 @type mysettings: instance of portage.config
6412 @param debug: Turns on various debug information (eg, debug for spawn)
6413 @type debug: Boolean
6414 @param listonly: Used to wrap fetch(); passed such that fetch only lists files required.
6415 @type listonly: Boolean
6416 @param fetchonly: Used to wrap fetch(); passed such that files are only fetched (no other actions)
6417 @type fetchonly: Boolean
6418 @param cleanup: Passed to prepare_build_dirs (TODO: what does it do?)
6419 @type cleanup: Boolean
6420 @param dbkey: A dict (usually keys and values from the depend phase, such as KEYWORDS, USE, etc..)
6421 @type dbkey: Dict or String
6422 @param use_cache: Enables the cache
6423 @type use_cache: Boolean
6424 @param fetchall: Used to wrap fetch(), fetches all URI's (even ones invalid due to USE conditionals)
6425 @type fetchall: Boolean
6426 @param tree: Which tree to use ('vartree','porttree','bintree', etc..), defaults to 'porttree'
6428 @param mydbapi: a dbapi instance to pass to various functions; this should be a portdbapi instance.
6429 @type mydbapi: portdbapi instance
6430 @param vartree: A instance of vartree; used for aux_get calls, defaults to db[myroot]['vartree']
6431 @type vartree: vartree instance
6432 @param prev_mtimes: A dict of { filename:mtime } keys used by merge() to do config_protection
6433 @type prev_mtimes: dictionary
6434 @param fd_pipes: A dict of mapping for pipes, { '0': stdin, '1': stdout }
6436 @type fd_pipes: Dictionary
6437 @param returnpid: Return a list of process IDs for a successful spawn, or
6438 an integer value if spawn is unsuccessful. NOTE: This requires the
6439 caller clean up all returned PIDs.
6440 @type returnpid: Boolean
6446 Most errors have an accompanying error message.
6448 listonly and fetchonly are only really necessary for operations involving 'fetch'
6449 prev_mtimes are only necessary for merge operations.
6450 Other variables may not be strictly required, many have defaults that are set inside of doebuild.
6455 writemsg("Warning: tree not specified to doebuild\n")
6459 # chunked out deps for each phase, so that ebuild binary can use it
6460 # to collapse targets down.
6463 "unpack": ["setup"],
6464 "prepare": ["unpack"],
6465 "configure": ["prepare"],
6466 "compile":["configure"],
6467 "test": ["compile"],
6470 "package":["install"],
6474 mydbapi = db[myroot][tree].dbapi
6476 if vartree is None and mydo in ("merge", "qmerge", "unmerge"):
6477 vartree = db[myroot]["vartree"]
6479 features = mysettings.features
6480 noauto = "noauto" in features
6481 from portage.data import secpass
6483 clean_phases = ("clean", "cleanrm")
6484 validcommands = ["help","clean","prerm","postrm","cleanrm","preinst","postinst",
6485 "config", "info", "setup", "depend", "pretend",
6486 "fetch", "fetchall", "digest",
6487 "unpack", "prepare", "configure", "compile", "test",
6488 "install", "rpm", "qmerge", "merge",
6489 "package","unmerge", "manifest"]
6491 if mydo not in validcommands:
6492 validcommands.sort()
6493 writemsg("!!! doebuild: '%s' is not one of the following valid commands:" % mydo,
6495 for vcount in range(len(validcommands)):
6497 writemsg("\n!!! ", noiselevel=-1)
6498 writemsg(validcommands[vcount].ljust(11), noiselevel=-1)
6499 writemsg("\n", noiselevel=-1)
6502 if mydo == "fetchall":
6506 parallel_fetchonly = mydo in ("fetch", "fetchall") and \
6507 "PORTAGE_PARALLEL_FETCHONLY" in mysettings
6509 if mydo not in clean_phases and not os.path.exists(myebuild):
6510 writemsg("!!! doebuild: %s not found for %s\n" % (myebuild, mydo),
6514 global _doebuild_manifest_exempt_depend
6516 if "strict" in features and \
6517 "digest" not in features and \
6518 tree == "porttree" and \
6519 mydo not in ("digest", "manifest", "help") and \
6520 not _doebuild_manifest_exempt_depend:
6521 # Always verify the ebuild checksums before executing it.
6522 global _doebuild_manifest_cache, _doebuild_broken_ebuilds, \
6523 _doebuild_broken_ebuilds
6525 if myebuild in _doebuild_broken_ebuilds:
6528 pkgdir = os.path.dirname(myebuild)
6529 manifest_path = os.path.join(pkgdir, "Manifest")
6531 # Avoid checking the same Manifest several times in a row during a
6532 # regen with an empty cache.
6533 if _doebuild_manifest_cache is None or \
6534 _doebuild_manifest_cache.getFullname() != manifest_path:
6535 _doebuild_manifest_cache = None
6536 if not os.path.exists(manifest_path):
6537 out = portage.output.EOutput()
6538 out.eerror(_("Manifest not found for '%s'") % (myebuild,))
6539 _doebuild_broken_ebuilds.add(myebuild)
6541 mf = Manifest(pkgdir, mysettings["DISTDIR"])
6544 mf = _doebuild_manifest_cache
6547 mf.checkFileHashes("EBUILD", os.path.basename(myebuild))
6549 out = portage.output.EOutput()
6550 out.eerror(_("Missing digest for '%s'") % (myebuild,))
6551 _doebuild_broken_ebuilds.add(myebuild)
6553 except portage.exception.FileNotFound:
6554 out = portage.output.EOutput()
6555 out.eerror(_("A file listed in the Manifest "
6556 "could not be found: '%s'") % (myebuild,))
6557 _doebuild_broken_ebuilds.add(myebuild)
6559 except portage.exception.DigestException as e:
6560 out = portage.output.EOutput()
6561 out.eerror(_("Digest verification failed:"))
6562 out.eerror("%s" % e.value[0])
6563 out.eerror(_("Reason: %s") % e.value[1])
6564 out.eerror(_("Got: %s") % e.value[2])
6565 out.eerror(_("Expected: %s") % e.value[3])
6566 _doebuild_broken_ebuilds.add(myebuild)
6569 if mf.getFullname() in _doebuild_broken_manifests:
6572 if mf is not _doebuild_manifest_cache:
6574 # Make sure that all of the ebuilds are
6575 # actually listed in the Manifest.
6576 glep55 = 'parse-eapi-glep-55' in mysettings.features
6577 for f in os.listdir(pkgdir):
6580 pf, eapi = _split_ebuild_name_glep55(f)
6581 elif f[-7:] == '.ebuild':
6583 if pf is not None and not mf.hasFile("EBUILD", f):
6584 f = os.path.join(pkgdir, f)
6585 if f not in _doebuild_broken_ebuilds:
6586 out = portage.output.EOutput()
6587 out.eerror(_("A file is not listed in the "
6588 "Manifest: '%s'") % (f,))
6589 _doebuild_broken_manifests.add(manifest_path)
6592 # Only cache it if the above stray files test succeeds.
6593 _doebuild_manifest_cache = mf
6595 def exit_status_check(retval):
6596 msg = _doebuild_exit_status_check(mydo, mysettings)
6598 if retval == os.EX_OK:
6600 from textwrap import wrap
6601 from portage.elog.messages import eerror
6602 for l in wrap(msg, 72):
6603 eerror(l, phase=mydo, key=mysettings.mycpv)
6606 # Note: PORTAGE_BIN_PATH may differ from the global
6607 # constant when portage is reinstalling itself.
6608 portage_bin_path = mysettings["PORTAGE_BIN_PATH"]
6609 ebuild_sh_binary = os.path.join(portage_bin_path,
6610 os.path.basename(EBUILD_SH_BINARY))
6611 misc_sh_binary = os.path.join(portage_bin_path,
6612 os.path.basename(MISC_SH_BINARY))
6615 builddir_lock = None
6620 if mydo in ("digest", "manifest", "help"):
6621 # Temporarily exempt the depend phase from manifest checks, in case
6622 # aux_get calls trigger cache generation.
6623 _doebuild_manifest_exempt_depend += 1
6625 # If we don't need much space and we don't need a constant location,
6626 # we can temporarily override PORTAGE_TMPDIR with a random temp dir
6627 # so that there's no need for locking and it can be used even if the
6628 # user isn't in the portage group.
6629 if mydo in ("info",):
6630 from tempfile import mkdtemp
6632 tmpdir_orig = mysettings["PORTAGE_TMPDIR"]
6633 mysettings["PORTAGE_TMPDIR"] = tmpdir
6635 doebuild_environment(myebuild, mydo, myroot, mysettings, debug,
6638 if mydo in clean_phases:
6639 retval = spawn(_shell_quote(ebuild_sh_binary) + " clean",
6640 mysettings, debug=debug, fd_pipes=fd_pipes, free=1,
6641 logfile=None, returnpid=returnpid)
6644 restrict = set(mysettings.get('PORTAGE_RESTRICT', '').split())
6645 # get possible slot information from the deps file
6646 if mydo == "depend":
6647 writemsg("!!! DEBUG: dbkey: %s\n" % str(dbkey), 2)
6648 droppriv = "userpriv" in mysettings.features
6650 mypids = spawn(_shell_quote(ebuild_sh_binary) + " depend",
6651 mysettings, fd_pipes=fd_pipes, returnpid=True,
6654 elif isinstance(dbkey, dict):
6655 mysettings["dbkey"] = ""
6658 0:sys.stdin.fileno(),
6659 1:sys.stdout.fileno(),
6660 2:sys.stderr.fileno(),
6662 mypids = spawn(_shell_quote(ebuild_sh_binary) + " depend",
6664 fd_pipes=fd_pipes, returnpid=True, droppriv=droppriv)
6665 os.close(pw) # belongs exclusively to the child process now
6666 f = os.fdopen(pr, 'rb')
6667 for k, v in zip(auxdbkeys,
6668 (_unicode_decode(line).rstrip('\n') for line in f)):
6671 retval = os.waitpid(mypids[0], 0)[1]
6672 portage.process.spawned_pids.remove(mypids[0])
6673 # If it got a signal, return the signal that was sent, but
6674 # shift in order to distinguish it from a return value. (just
6675 # like portage.process.spawn() would do).
6677 retval = (retval & 0xff) << 8
6679 # Otherwise, return its exit code.
6680 retval = retval >> 8
6681 if retval == os.EX_OK and len(dbkey) != len(auxdbkeys):
6682 # Don't trust bash's returncode if the
6683 # number of lines is incorrect.
6687 mysettings["dbkey"] = dbkey
6689 mysettings["dbkey"] = \
6690 os.path.join(mysettings.depcachedir, "aux_db_key_temp")
6692 return spawn(_shell_quote(ebuild_sh_binary) + " depend",
6696 # Validate dependency metadata here to ensure that ebuilds with invalid
6697 # data are never installed via the ebuild command. Don't bother when
6698 # returnpid == True since there's no need to do this every time emerge
6701 rval = _validate_deps(mysettings, myroot, mydo, mydbapi)
6702 if rval != os.EX_OK:
6705 if "PORTAGE_TMPDIR" not in mysettings or \
6706 not os.path.isdir(mysettings["PORTAGE_TMPDIR"]):
6707 writemsg(_("The directory specified in your "
6708 "PORTAGE_TMPDIR variable, '%s',\n"
6709 "does not exist. Please create this directory or "
6710 "correct your PORTAGE_TMPDIR setting.\n") % mysettings.get("PORTAGE_TMPDIR", ""), noiselevel=-1)
6713 # as some people use a separate PORTAGE_TMPDIR mount
6714 # we prefer that as the checks below would otherwise be pointless
6716 if os.path.exists(os.path.join(mysettings["PORTAGE_TMPDIR"], "portage")):
6717 checkdir = os.path.join(mysettings["PORTAGE_TMPDIR"], "portage")
6719 checkdir = mysettings["PORTAGE_TMPDIR"]
6721 if not os.access(checkdir, os.W_OK):
6722 writemsg(_("%s is not writable.\n"
6723 "Likely cause is that you've mounted it as readonly.\n") % checkdir,
6727 from tempfile import NamedTemporaryFile
6728 fd = NamedTemporaryFile(prefix="exectest-", dir=checkdir)
6729 os.chmod(fd.name, 0o755)
6730 if not os.access(fd.name, os.X_OK):
6731 writemsg(_("Can not execute files in %s\n"
6732 "Likely cause is that you've mounted it with one of the\n"
6733 "following mount options: 'noexec', 'user', 'users'\n\n"
6734 "Please make sure that portage can execute files in this directory.\n") % checkdir,
6741 if mydo == "unmerge":
6742 return unmerge(mysettings["CATEGORY"],
6743 mysettings["PF"], myroot, mysettings, vartree=vartree)
6745 # Build directory creation isn't required for any of these.
6746 # In the fetch phase, the directory is needed only for RESTRICT=fetch
6747 # in order to satisfy the sane $PWD requirement (from bug #239560)
6748 # when pkg_nofetch is spawned.
6749 have_build_dirs = False
6750 if not parallel_fetchonly and \
6751 mydo not in ('digest', 'help', 'manifest') and \
6752 not (mydo == 'fetch' and 'fetch' not in restrict):
6753 mystatus = prepare_build_dirs(myroot, mysettings, cleanup)
6756 have_build_dirs = True
6758 # emerge handles logging externally
6760 # PORTAGE_LOG_FILE is set by the
6761 # above prepare_build_dirs() call.
6762 logfile = mysettings.get("PORTAGE_LOG_FILE")
6765 env_file = os.path.join(mysettings["T"], "environment")
6769 env_stat = os.stat(env_file)
6770 except OSError as e:
6771 if e.errno != errno.ENOENT:
6775 saved_env = os.path.join(
6776 os.path.dirname(myebuild), "environment.bz2")
6777 if not os.path.isfile(saved_env):
6781 "bzip2 -dc %s > %s" % \
6782 (_shell_quote(saved_env),
6783 _shell_quote(env_file)))
6785 env_stat = os.stat(env_file)
6786 except OSError as e:
6787 if e.errno != errno.ENOENT:
6790 if os.WIFEXITED(retval) and \
6791 os.WEXITSTATUS(retval) == os.EX_OK and \
6792 env_stat and env_stat.st_size > 0:
6793 # This is a signal to ebuild.sh, so that it knows to filter
6794 # out things like SANDBOX_{DENY,PREDICT,READ,WRITE} that
6795 # would be preserved between normal phases.
6796 open(_unicode_encode(env_file + '.raw'), 'w')
6798 writemsg(_("!!! Error extracting saved "
6799 "environment: '%s'\n") % \
6800 saved_env, noiselevel=-1)
6803 except OSError as e:
6804 if e.errno != errno.ENOENT:
6811 for var in ("ARCH", ):
6812 value = mysettings.get(var)
6813 if value and value.strip():
6815 msg = _("%(var)s is not set... "
6816 "Are you missing the '%(configroot)setc/make.profile' symlink? "
6817 "Is the symlink correct? "
6818 "Is your portage tree complete?") % \
6819 {"var": var, "configroot": mysettings["PORTAGE_CONFIGROOT"]}
6820 from portage.elog.messages import eerror
6821 from textwrap import wrap
6822 for line in wrap(msg, 70):
6823 eerror(line, phase="setup", key=mysettings.mycpv)
6824 from portage.elog import elog_process
6825 elog_process(mysettings.mycpv, mysettings)
6827 del env_file, env_stat, saved_env
6828 _doebuild_exit_status_unlink(
6829 mysettings.get("EBUILD_EXIT_STATUS_FILE"))
6831 mysettings.pop("EBUILD_EXIT_STATUS_FILE", None)
6833 # if any of these are being called, handle them -- running them out of
6834 # the sandbox -- and stop now.
6836 return spawn(_shell_quote(ebuild_sh_binary) + " " + mydo,
6837 mysettings, debug=debug, free=1, logfile=logfile)
6838 elif mydo == "setup":
6840 _shell_quote(ebuild_sh_binary) + " " + mydo, mysettings,
6841 debug=debug, free=1, logfile=logfile, fd_pipes=fd_pipes,
6842 returnpid=returnpid)
6845 retval = exit_status_check(retval)
6847 """ Privileged phases may have left files that need to be made
6848 writable to a less privileged user."""
6849 apply_recursive_permissions(mysettings["T"],
6850 uid=portage_uid, gid=portage_gid, dirmode=0o70, dirmask=0,
6851 filemode=0o60, filemask=0)
6853 elif mydo == "preinst":
6854 phase_retval = spawn(
6855 _shell_quote(ebuild_sh_binary) + " " + mydo,
6856 mysettings, debug=debug, free=1, logfile=logfile,
6857 fd_pipes=fd_pipes, returnpid=returnpid)
6862 phase_retval = exit_status_check(phase_retval)
6863 if phase_retval == os.EX_OK:
6864 _doebuild_exit_status_unlink(
6865 mysettings.get("EBUILD_EXIT_STATUS_FILE"))
6866 mysettings.pop("EBUILD_PHASE", None)
6867 phase_retval = spawn(
6868 " ".join(_post_pkg_preinst_cmd(mysettings)),
6869 mysettings, debug=debug, free=1, logfile=logfile)
6870 phase_retval = exit_status_check(phase_retval)
6871 if phase_retval != os.EX_OK:
6872 writemsg(_("!!! post preinst failed; exiting.\n"),
6875 elif mydo == "postinst":
6876 phase_retval = spawn(
6877 _shell_quote(ebuild_sh_binary) + " " + mydo,
6878 mysettings, debug=debug, free=1, logfile=logfile,
6879 fd_pipes=fd_pipes, returnpid=returnpid)
6884 phase_retval = exit_status_check(phase_retval)
6885 if phase_retval == os.EX_OK:
6886 _doebuild_exit_status_unlink(
6887 mysettings.get("EBUILD_EXIT_STATUS_FILE"))
6888 mysettings.pop("EBUILD_PHASE", None)
6889 phase_retval = spawn(" ".join(_post_pkg_postinst_cmd(mysettings)),
6890 mysettings, debug=debug, free=1, logfile=logfile)
6891 phase_retval = exit_status_check(phase_retval)
6892 if phase_retval != os.EX_OK:
6893 writemsg(_("!!! post postinst failed; exiting.\n"),
6896 elif mydo in ("prerm", "postrm", "config", "info"):
6898 _shell_quote(ebuild_sh_binary) + " " + mydo,
6899 mysettings, debug=debug, free=1, logfile=logfile,
6900 fd_pipes=fd_pipes, returnpid=returnpid)
6905 retval = exit_status_check(retval)
6908 mycpv = "/".join((mysettings["CATEGORY"], mysettings["PF"]))
6910 emerge_skip_distfiles = returnpid
6911 emerge_skip_digest = returnpid
6912 # Only try and fetch the files if we are going to need them ...
6913 # otherwise, if user has FEATURES=noauto and they run `ebuild clean
6914 # unpack compile install`, we will try and fetch 4 times :/
6915 need_distfiles = not emerge_skip_distfiles and \
6916 (mydo in ("fetch", "unpack") or \
6917 mydo not in ("digest", "manifest") and "noauto" not in features)
6918 alist = mysettings.configdict["pkg"].get("A")
6919 aalist = mysettings.configdict["pkg"].get("AA")
6920 if need_distfiles or alist is None or aalist is None:
6921 # Make sure we get the correct tree in case there are overlays.
6922 mytree = os.path.realpath(
6923 os.path.dirname(os.path.dirname(mysettings["O"])))
6924 useflags = mysettings["PORTAGE_USE"].split()
6926 alist = mydbapi.getFetchMap(mycpv, useflags=useflags,
6928 aalist = mydbapi.getFetchMap(mycpv, mytree=mytree)
6929 except portage.exception.InvalidDependString as e:
6930 writemsg("!!! %s\n" % str(e), noiselevel=-1)
6931 writemsg(_("!!! Invalid SRC_URI for '%s'.\n") % mycpv,
6935 mysettings.configdict["pkg"]["A"] = " ".join(alist)
6936 mysettings.configdict["pkg"]["AA"] = " ".join(aalist)
6938 alist = set(alist.split())
6939 aalist = set(aalist.split())
6940 if ("mirror" in features) or fetchall:
6948 # Files are already checked inside fetch(),
6949 # so do not check them again.
6952 if not emerge_skip_distfiles and \
6953 need_distfiles and not fetch(
6954 fetchme, mysettings, listonly=listonly, fetchonly=fetchonly):
6957 if mydo == "fetch" and listonly:
6961 if mydo == "manifest":
6962 return not digestgen(aalist, mysettings, overwrite=1,
6963 manifestonly=1, myportdb=mydbapi)
6964 elif mydo == "digest":
6965 return not digestgen(aalist, mysettings, overwrite=1,
6967 elif mydo != 'fetch' and not emerge_skip_digest and \
6968 "digest" in mysettings.features:
6969 # Don't do this when called by emerge or when called just
6970 # for fetch (especially parallel-fetch) since it's not needed
6971 # and it can interfere with parallel tasks.
6972 digestgen(aalist, mysettings, overwrite=0, myportdb=mydbapi)
6973 except portage.exception.PermissionDenied as e:
6974 writemsg(_("!!! Permission Denied: %s\n") % (e,), noiselevel=-1)
6975 if mydo in ("digest", "manifest"):
6978 # See above comment about fetching only when needed
6979 if not emerge_skip_distfiles and \
6980 not digestcheck(checkme, mysettings, "strict" in features):
6986 # remove PORTAGE_ACTUAL_DISTDIR once cvs/svn is supported via SRC_URI
6987 if (mydo != "setup" and "noauto" not in features) or mydo == "unpack":
6988 orig_distdir = mysettings["DISTDIR"]
6989 mysettings["PORTAGE_ACTUAL_DISTDIR"] = orig_distdir
6990 edpath = mysettings["DISTDIR"] = \
6991 os.path.join(mysettings["PORTAGE_BUILDDIR"], "distdir")
6992 portage.util.ensure_dirs(edpath, gid=portage_gid, mode=0o755)
6994 # Remove any unexpected files or directories.
6995 for x in os.listdir(edpath):
6996 symlink_path = os.path.join(edpath, x)
6997 st = os.lstat(symlink_path)
6998 if x in alist and stat.S_ISLNK(st.st_mode):
7000 if stat.S_ISDIR(st.st_mode):
7001 shutil.rmtree(symlink_path)
7003 os.unlink(symlink_path)
7005 # Check for existing symlinks and recreate if necessary.
7007 symlink_path = os.path.join(edpath, x)
7008 target = os.path.join(orig_distdir, x)
7010 link_target = os.readlink(symlink_path)
7012 os.symlink(target, symlink_path)
7014 if link_target != target:
7015 os.unlink(symlink_path)
7016 os.symlink(target, symlink_path)
7018 #initial dep checks complete; time to process main commands
7020 restrict = mysettings["PORTAGE_RESTRICT"].split()
7021 nosandbox = (("userpriv" in features) and \
7022 ("usersandbox" not in features) and \
7023 "userpriv" not in restrict and \
7024 "nouserpriv" not in restrict)
7025 if nosandbox and ("userpriv" not in features or \
7026 "userpriv" in restrict or \
7027 "nouserpriv" in restrict):
7028 nosandbox = ("sandbox" not in features and \
7029 "usersandbox" not in features)
7031 sesandbox = mysettings.selinux_enabled() and \
7032 "sesandbox" in mysettings.features
7034 droppriv = "userpriv" in mysettings.features and \
7035 "userpriv" not in restrict and \
7038 fakeroot = "fakeroot" in mysettings.features
7040 ebuild_sh = _shell_quote(ebuild_sh_binary) + " %s"
7041 misc_sh = _shell_quote(misc_sh_binary) + " dyn_%s"
7043 # args are for the to spawn function
7045 "pretend": {"cmd":ebuild_sh, "args":{"droppriv":0, "free":1, "sesandbox":0, "fakeroot":0}},
7046 "setup": {"cmd":ebuild_sh, "args":{"droppriv":0, "free":1, "sesandbox":0, "fakeroot":0}},
7047 "unpack": {"cmd":ebuild_sh, "args":{"droppriv":droppriv, "free":0, "sesandbox":sesandbox, "fakeroot":0}},
7048 "prepare": {"cmd":ebuild_sh, "args":{"droppriv":droppriv, "free":0, "sesandbox":sesandbox, "fakeroot":0}},
7049 "configure":{"cmd":ebuild_sh, "args":{"droppriv":droppriv, "free":nosandbox, "sesandbox":sesandbox, "fakeroot":0}},
7050 "compile": {"cmd":ebuild_sh, "args":{"droppriv":droppriv, "free":nosandbox, "sesandbox":sesandbox, "fakeroot":0}},
7051 "test": {"cmd":ebuild_sh, "args":{"droppriv":droppriv, "free":nosandbox, "sesandbox":sesandbox, "fakeroot":0}},
7052 "install": {"cmd":ebuild_sh, "args":{"droppriv":0, "free":0, "sesandbox":sesandbox, "fakeroot":fakeroot}},
7053 "rpm": {"cmd":misc_sh, "args":{"droppriv":0, "free":0, "sesandbox":0, "fakeroot":fakeroot}},
7054 "package": {"cmd":misc_sh, "args":{"droppriv":0, "free":0, "sesandbox":0, "fakeroot":fakeroot}},
7057 # merge the deps in so we have again a 'full' actionmap
7058 # be glad when this can die.
7060 if len(actionmap_deps.get(x, [])):
7061 actionmap[x]["dep"] = ' '.join(actionmap_deps[x])
7063 if mydo in actionmap:
7064 if mydo == "package":
7065 # Make sure the package directory exists before executing
7066 # this phase. This can raise PermissionDenied if
7067 # the current user doesn't have write access to $PKGDIR.
7068 parent_dir = os.path.join(mysettings["PKGDIR"],
7069 mysettings["CATEGORY"])
7070 portage.util.ensure_dirs(parent_dir)
7071 if not os.access(parent_dir, os.W_OK):
7072 raise portage.exception.PermissionDenied(
7073 "access('%s', os.W_OK)" % parent_dir)
7074 retval = spawnebuild(mydo,
7075 actionmap, mysettings, debug, logfile=logfile,
7076 fd_pipes=fd_pipes, returnpid=returnpid)
7077 elif mydo=="qmerge":
7078 # check to ensure install was run. this *only* pops up when users
7079 # forget it and are using ebuild
7080 if not os.path.exists(
7081 os.path.join(mysettings["PORTAGE_BUILDDIR"], ".installed")):
7082 writemsg(_("!!! mydo=qmerge, but the install phase has not been run\n"),
7085 # qmerge is a special phase that implies noclean.
7086 if "noclean" not in mysettings.features:
7087 mysettings.features.add("noclean")
7088 #qmerge is specifically not supposed to do a runtime dep check
7090 mysettings["CATEGORY"], mysettings["PF"], mysettings["D"],
7091 os.path.join(mysettings["PORTAGE_BUILDDIR"], "build-info"),
7092 myroot, mysettings, myebuild=mysettings["EBUILD"], mytree=tree,
7093 mydbapi=mydbapi, vartree=vartree, prev_mtimes=prev_mtimes)
7095 retval = spawnebuild("install", actionmap, mysettings, debug,
7096 alwaysdep=1, logfile=logfile, fd_pipes=fd_pipes,
7097 returnpid=returnpid)
7098 retval = exit_status_check(retval)
7099 if retval != os.EX_OK:
7100 # The merge phase handles this already. Callers don't know how
7101 # far this function got, so we have to call elog_process() here
7102 # so that it's only called once.
7103 from portage.elog import elog_process
7104 elog_process(mysettings.mycpv, mysettings)
7105 if retval == os.EX_OK:
7106 retval = merge(mysettings["CATEGORY"], mysettings["PF"],
7107 mysettings["D"], os.path.join(mysettings["PORTAGE_BUILDDIR"],
7108 "build-info"), myroot, mysettings,
7109 myebuild=mysettings["EBUILD"], mytree=tree, mydbapi=mydbapi,
7110 vartree=vartree, prev_mtimes=prev_mtimes)
7112 print(_("!!! Unknown mydo: %s") % mydo)
7120 mysettings["PORTAGE_TMPDIR"] = tmpdir_orig
7121 shutil.rmtree(tmpdir)
7123 portage.locks.unlockdir(builddir_lock)
7125 # Make sure that DISTDIR is restored to it's normal value before we return!
7126 if "PORTAGE_ACTUAL_DISTDIR" in mysettings:
7127 mysettings["DISTDIR"] = mysettings["PORTAGE_ACTUAL_DISTDIR"]
7128 del mysettings["PORTAGE_ACTUAL_DISTDIR"]
7132 if os.stat(logfile).st_size == 0:
7137 if mydo in ("digest", "manifest", "help"):
7138 # If necessary, depend phase has been triggered by aux_get calls
7139 # and the exemption is no longer needed.
7140 _doebuild_manifest_exempt_depend -= 1
7142 def _validate_deps(mysettings, myroot, mydo, mydbapi):
7144 invalid_dep_exempt_phases = \
7145 set(["clean", "cleanrm", "help", "prerm", "postrm"])
7146 dep_keys = ["DEPEND", "RDEPEND", "PDEPEND"]
7147 misc_keys = ["LICENSE", "PROPERTIES", "PROVIDE", "RESTRICT", "SRC_URI"]
7148 other_keys = ["SLOT"]
7149 all_keys = dep_keys + misc_keys + other_keys
7150 metadata = dict(zip(all_keys,
7151 mydbapi.aux_get(mysettings.mycpv, all_keys)))
7153 class FakeTree(object):
7154 def __init__(self, mydb):
7156 dep_check_trees = {myroot:{}}
7157 dep_check_trees[myroot]["porttree"] = \
7158 FakeTree(fakedbapi(settings=mysettings))
7161 for dep_type in dep_keys:
7162 mycheck = dep_check(metadata[dep_type], None, mysettings,
7163 myuse="all", myroot=myroot, trees=dep_check_trees)
7165 msgs.append(" %s: %s\n %s\n" % (
7166 dep_type, metadata[dep_type], mycheck[1]))
7170 portage.dep.use_reduce(
7171 portage.dep.paren_reduce(metadata[k]), matchall=True)
7172 except portage.exception.InvalidDependString as e:
7173 msgs.append(" %s: %s\n %s\n" % (
7174 k, metadata[k], str(e)))
7176 if not metadata["SLOT"]:
7177 msgs.append(_(" SLOT is undefined\n"))
7180 portage.util.writemsg_level(_("Error(s) in metadata for '%s':\n") % \
7181 (mysettings.mycpv,), level=logging.ERROR, noiselevel=-1)
7183 portage.util.writemsg_level(x,
7184 level=logging.ERROR, noiselevel=-1)
7185 if mydo not in invalid_dep_exempt_phases:
def _movefile(src, dest, **kwargs):
	"""Wrapper around movefile() that converts failure into an exception.

	movefile() signals failure by returning None; this helper raises a
	portage.exception.PortageException in that case so callers can rely
	on exception-based error handling instead of checking the return.
	All keyword arguments are forwarded to movefile() unchanged.
	"""
	result = movefile(src, dest, **kwargs)
	if result is None:
		raise portage.exception.PortageException(
			"mv '%s' '%s'" % (src, dest))
7198 def movefile(src, dest, newmtime=None, sstat=None, mysettings=None,
7199 hardlink_candidates=None, encoding=_encodings['fs']):
7200 """moves a file from src to dest, preserving all permissions and attributes; mtime will
7201 be preserved even when moving across filesystems. Returns true on success and false on
7202 failure. Move is atomic."""
7203 #print "movefile("+str(src)+","+str(dest)+","+str(newmtime)+","+str(sstat)+")"
7205 if mysettings is None:
7207 mysettings = settings
7209 selinux_enabled = mysettings.selinux_enabled()
7211 selinux = _unicode_module_wrapper(_selinux, encoding=encoding)
7213 lchown = _unicode_func_wrapper(data.lchown, encoding=encoding)
7214 os = _unicode_module_wrapper(_os,
7215 encoding=encoding, overrides=_os_overrides)
7216 shutil = _unicode_module_wrapper(_shutil, encoding=encoding)
7222 except SystemExit as e:
7224 except Exception as e:
7225 print(_("!!! Stating source file failed... movefile()"))
7231 dstat=os.lstat(dest)
7232 except (OSError, IOError):
7233 dstat=os.lstat(os.path.dirname(dest))
7237 if destexists and dstat.st_flags != 0:
7238 bsd_chflags.lchflags(dest, 0)
7239 # Use normal stat/chflags for the parent since we want to
7240 # follow any symlinks to the real parent directory.
7241 pflags = os.stat(os.path.dirname(dest)).st_flags
7243 bsd_chflags.chflags(os.path.dirname(dest), 0)
7246 if stat.S_ISLNK(dstat[stat.ST_MODE]):
7250 except SystemExit as e:
7252 except Exception as e:
7255 if stat.S_ISLNK(sstat[stat.ST_MODE]):
7257 target=os.readlink(src)
7258 if mysettings and mysettings["D"]:
7259 if target.find(mysettings["D"])==0:
7260 target=target[len(mysettings["D"]):]
7261 if destexists and not stat.S_ISDIR(dstat[stat.ST_MODE]):
7264 selinux.symlink(target, dest, src)
7266 os.symlink(target,dest)
7267 lchown(dest,sstat[stat.ST_UID],sstat[stat.ST_GID])
7268 # utime() only works on the target of a symlink, so it's not
7269 # possible to perserve mtime on symlinks.
7270 return os.lstat(dest)[stat.ST_MTIME]
7271 except SystemExit as e:
7273 except Exception as e:
7274 print(_("!!! failed to properly create symlink:"))
7275 print("!!!",dest,"->",target)
7280 # Since identical files might be merged to multiple filesystems,
7281 # so os.link() calls might fail for some paths, so try them all.
7282 # For atomic replacement, first create the link as a temp file
7283 # and them use os.rename() to replace the destination.
7284 if hardlink_candidates:
7285 head, tail = os.path.split(dest)
7286 hardlink_tmp = os.path.join(head, ".%s._portage_merge_.%s" % \
7287 (tail, os.getpid()))
7289 os.unlink(hardlink_tmp)
7290 except OSError as e:
7291 if e.errno != errno.ENOENT:
7292 writemsg(_("!!! Failed to remove hardlink temp file: %s\n") % \
7293 (hardlink_tmp,), noiselevel=-1)
7294 writemsg("!!! %s\n" % (e,), noiselevel=-1)
7297 for hardlink_src in hardlink_candidates:
7299 os.link(hardlink_src, hardlink_tmp)
7304 os.rename(hardlink_tmp, dest)
7305 except OSError as e:
7306 writemsg(_("!!! Failed to rename %s to %s\n") % \
7307 (hardlink_tmp, dest), noiselevel=-1)
7308 writemsg("!!! %s\n" % (e,), noiselevel=-1)
7315 renamefailed = False
7316 if not hardlinked and (selinux_enabled or sstat.st_dev == dstat.st_dev):
7319 ret = selinux.rename(src, dest)
7321 ret=os.rename(src,dest)
7323 except SystemExit as e:
7325 except Exception as e:
7326 if e[0]!=errno.EXDEV:
7327 # Some random error.
7328 print(_("!!! Failed to move %(src)s to %(dest)s") % {"src": src, "dest": dest})
7331 # Invalid cross-device-link 'bind' mounted or actually Cross-Device
7334 if stat.S_ISREG(sstat[stat.ST_MODE]):
7335 try: # For safety copy then move it over.
7337 selinux.copyfile(src, dest + "#new")
7338 selinux.rename(dest + "#new", dest)
7340 shutil.copyfile(src,dest+"#new")
7341 os.rename(dest+"#new",dest)
7343 except SystemExit as e:
7345 except Exception as e:
7346 print(_('!!! copy %(src)s -> %(dest)s failed.') % {"src": src, "dest": dest})
7350 #we don't yet handle special, so we need to fall back to /bin/mv
7351 a = process.spawn([MOVE_BINARY, '-f', src, dest], env=os.environ)
7353 writemsg(_("!!! Failed to move special file:\n"), noiselevel=-1)
7354 writemsg(_("!!! '%(src)s' to '%(dest)s'\n") % \
7355 {"src": _unicode_decode(src, encoding=encoding),
7356 "dest": _unicode_decode(dest, encoding=encoding)}, noiselevel=-1)
7357 writemsg("!!! %s\n" % a, noiselevel=-1)
7358 return None # failure
7361 if stat.S_ISLNK(sstat[stat.ST_MODE]):
7362 lchown(dest,sstat[stat.ST_UID],sstat[stat.ST_GID])
7364 os.chown(dest,sstat[stat.ST_UID],sstat[stat.ST_GID])
7365 os.chmod(dest, stat.S_IMODE(sstat[stat.ST_MODE])) # Sticky is reset on chown
7367 except SystemExit as e:
7369 except Exception as e:
7370 print(_("!!! Failed to chown/chmod/unlink in movefile()"))
7377 newmtime = long(os.stat(dest).st_mtime)
7379 if newmtime is not None:
7380 os.utime(dest, (newmtime, newmtime))
7382 os.utime(dest, (sstat.st_atime, sstat.st_mtime))
7383 newmtime = long(sstat.st_mtime)
7385 # The utime can fail here with EPERM even though the move succeeded.
7386 # Instead of failing, use stat to return the mtime if possible.
7388 newmtime = long(os.stat(dest).st_mtime)
7389 except OSError as e:
7390 writemsg(_("!!! Failed to stat in movefile()\n"), noiselevel=-1)
7391 writemsg("!!! %s\n" % dest, noiselevel=-1)
7392 writemsg("!!! %s\n" % str(e), noiselevel=-1)
7396 # Restore the flags we saved before moving
7398 bsd_chflags.chflags(os.path.dirname(dest), pflags)
7402 def merge(mycat, mypkg, pkgloc, infloc, myroot, mysettings, myebuild=None,
7403 mytree=None, mydbapi=None, vartree=None, prev_mtimes=None, blockers=None,
7405 if not os.access(myroot, os.W_OK):
7406 writemsg(_("Permission denied: access('%s', W_OK)\n") % myroot,
7409 mylink = dblink(mycat, mypkg, myroot, mysettings, treetype=mytree,
7410 vartree=vartree, blockers=blockers, scheduler=scheduler)
7411 return mylink.merge(pkgloc, infloc, myroot, myebuild,
7412 mydbapi=mydbapi, prev_mtimes=prev_mtimes)
7414 def unmerge(cat, pkg, myroot, mysettings, mytrimworld=1, vartree=None,
7415 ldpath_mtimes=None, scheduler=None):
7416 mylink = dblink(cat, pkg, myroot, mysettings, treetype="vartree",
7417 vartree=vartree, scheduler=scheduler)
7418 vartree = mylink.vartree
7422 vartree.dbapi.plib_registry.load()
7423 vartree.dbapi.plib_registry.pruneNonExisting()
7424 retval = mylink.unmerge(trimworld=mytrimworld, cleanup=1,
7425 ldpath_mtimes=ldpath_mtimes)
7426 if retval == os.EX_OK:
7431 vartree.dbapi.linkmap._clear_cache()
7434 def dep_virtual(mysplit, mysettings):
7435 "Does virtual dependency conversion"
7437 myvirtuals = mysettings.getvirtuals()
7439 if isinstance(x, list):
7440 newsplit.append(dep_virtual(x, mysettings))
7443 mychoices = myvirtuals.get(mykey, None)
7445 if len(mychoices) == 1:
7446 a = x.replace(mykey, dep_getkey(mychoices[0]), 1)
7449 # blocker needs "and" not "or(||)".
7454 a.append(x.replace(mykey, dep_getkey(y), 1))
7460 def _expand_new_virtuals(mysplit, edebug, mydbapi, mysettings, myroot="/",
7461 trees=None, use_mask=None, use_force=None, **kwargs):
7462 """Recursively expand new-style virtuals so as to collapse one or more
7463 levels of indirection. In dep_zapdeps, new-style virtuals will be assigned
7464 zero cost regardless of whether or not they are currently installed. Virtual
7465 blockers are supported but only when the virtual expands to a single
7466 atom because it wouldn't necessarily make sense to block all the components
7467 of a compound virtual. When more than one new-style virtual is matched,
7468 the matches are sorted from highest to lowest versions and the atom is
7469 expanded to || ( highest match ... lowest match )."""
7471 mytrees = trees[myroot]
7472 portdb = mytrees["porttree"].dbapi
7473 atom_graph = mytrees.get("atom_graph")
7474 parent = mytrees.get("parent")
7475 virt_parent = mytrees.get("virt_parent")
7478 if parent is not None:
7479 if virt_parent is not None:
7480 graph_parent = virt_parent
7481 eapi = virt_parent[0].metadata['EAPI']
7483 graph_parent = parent
7484 eapi = parent.metadata["EAPI"]
7485 repoman = not mysettings.local_config
7486 if kwargs["use_binaries"]:
7487 portdb = trees[myroot]["bintree"].dbapi
7488 myvirtuals = mysettings.getvirtuals()
7489 pprovideddict = mysettings.pprovideddict
7490 myuse = kwargs["myuse"]
7495 elif isinstance(x, list):
7496 newsplit.append(_expand_new_virtuals(x, edebug, mydbapi,
7497 mysettings, myroot=myroot, trees=trees, use_mask=use_mask,
7498 use_force=use_force, **kwargs))
7501 if not isinstance(x, portage.dep.Atom):
7503 x = portage.dep.Atom(x)
7504 except portage.exception.InvalidAtom:
7505 if portage.dep._dep_check_strict:
7506 raise portage.exception.ParseError(
7507 _("invalid atom: '%s'") % x)
7509 # Only real Atom instances are allowed past this point.
7512 if x.blocker and x.blocker.overlap.forbid and \
7513 eapi in ("0", "1") and portage.dep._dep_check_strict:
7514 raise portage.exception.ParseError(
7515 _("invalid atom: '%s'") % (x,))
7516 if x.use and eapi in ("0", "1") and \
7517 portage.dep._dep_check_strict:
7518 raise portage.exception.ParseError(
7519 _("invalid atom: '%s'") % (x,))
7521 if repoman and x.use and x.use.conditional:
7522 evaluated_atom = portage.dep.remove_slot(x)
7524 evaluated_atom += ":%s" % x.slot
7525 evaluated_atom += str(x.use._eval_qa_conditionals(
7526 use_mask, use_force))
7527 x = portage.dep.Atom(evaluated_atom)
7529 if not repoman and \
7530 myuse is not None and isinstance(x, portage.dep.Atom) and x.use:
7531 if x.use.conditional:
7532 evaluated_atom = portage.dep.remove_slot(x)
7534 evaluated_atom += ":%s" % x.slot
7535 evaluated_atom += str(x.use.evaluate_conditionals(myuse))
7536 x = portage.dep.Atom(evaluated_atom)
7539 if not mykey.startswith("virtual/"):
7541 if atom_graph is not None:
7542 atom_graph.add(x, graph_parent)
7544 mychoices = myvirtuals.get(mykey, [])
7546 # Virtual blockers are no longer expanded here since
7547 # the un-expanded virtual atom is more useful for
7548 # maintaining a cache of blocker atoms.
7550 if atom_graph is not None:
7551 atom_graph.add(x, graph_parent)
7554 if repoman or not hasattr(portdb, 'match_pkgs'):
7555 if portdb.cp_list(x.cp):
7558 # TODO: Add PROVIDE check for repoman.
7561 a.append(dep.Atom(x.replace(x.cp, y.cp, 1)))
7565 newsplit.append(a[0])
7567 newsplit.append(['||'] + a)
7571 # Ignore USE deps here, since otherwise we might not
7572 # get any matches. Choices with correct USE settings
7573 # will be preferred in dep_zapdeps().
7574 matches = portdb.match_pkgs(x.without_use)
7575 # Use descending order to prefer higher versions.
7578 # only use new-style matches
7579 if pkg.cp.startswith("virtual/"):
7581 if not (pkgs or mychoices):
7582 # This one couldn't be expanded as a new-style virtual. Old-style
7583 # virtuals have already been expanded by dep_virtual, so this one
7584 # is unavailable and dep_zapdeps will identify it as such. The
7585 # atom is not eliminated here since it may still represent a
7586 # dependency that needs to be satisfied.
7588 if atom_graph is not None:
7589 atom_graph.add(x, graph_parent)
7594 virt_atom = '=' + pkg.cpv
7596 virt_atom += str(x.use)
7597 virt_atom = dep.Atom(virt_atom)
7598 # According to GLEP 37, RDEPEND is the only dependency
7599 # type that is valid for new-style virtuals. Repoman
7600 # should enforce this.
7601 depstring = pkg.metadata['RDEPEND']
7602 pkg_kwargs = kwargs.copy()
7603 pkg_kwargs["myuse"] = pkg.use.enabled
7605 util.writemsg_level(_("Virtual Parent: %s\n") \
7606 % (pkg,), noiselevel=-1, level=logging.DEBUG)
7607 util.writemsg_level(_("Virtual Depstring: %s\n") \
7608 % (depstring,), noiselevel=-1, level=logging.DEBUG)
7610 # Set EAPI used for validation in dep_check() recursion.
7611 mytrees["virt_parent"] = (pkg, virt_atom)
7614 mycheck = dep_check(depstring, mydbapi, mysettings,
7615 myroot=myroot, trees=trees, **pkg_kwargs)
7617 # Restore previous EAPI after recursion.
7618 if virt_parent is not None:
7619 mytrees["virt_parent"] = virt_parent
7621 del mytrees["virt_parent"]
7624 raise portage.exception.ParseError(
7625 "%s: %s '%s'" % (y[0], mycheck[1], depstring))
7627 # pull in the new-style virtual
7628 mycheck[1].append(virt_atom)
7629 a.append(mycheck[1])
7630 if atom_graph is not None:
7631 atom_graph.add(virt_atom, graph_parent)
7632 # Plain old-style virtuals. New-style virtuals are preferred.
7635 new_atom = dep.Atom(x.replace(x.cp, y.cp, 1))
7636 matches = portdb.match(new_atom)
7637 # portdb is an instance of depgraph._dep_check_composite_db, so
7638 # USE conditionals are already evaluated.
7639 if matches and mykey in \
7640 portdb.aux_get(matches[-1], ['PROVIDE'])[0].split():
7642 if atom_graph is not None:
7643 atom_graph.add(new_atom, graph_parent)
7645 if not a and mychoices:
7646 # Check for a virtual package.provided match.
7648 new_atom = dep.Atom(x.replace(x.cp, y.cp, 1))
7649 if match_from_list(new_atom,
7650 pprovideddict.get(new_atom.cp, [])):
7652 if atom_graph is not None:
7653 atom_graph.add(new_atom, graph_parent)
7657 if atom_graph is not None:
7658 atom_graph.add(x, graph_parent)
7660 newsplit.append(a[0])
7662 newsplit.append(['||'] + a)
def dep_eval(deplist):
	"""Evaluate a reduced dependency list to 1 (satisfied) or 0 (not).

	The list contains truth values (ints/bools) and nested sub-lists; a
	sub-list starting with "||" has OR semantics, anything else is AND.
	NOTE(review): this excerpt was decimated (gaps in the embedded line
	numbers); body reconstructed to match the canonical upstream function.
	"""
	if not deplist:
		return 1
	if deplist[0] == "||":
		# OR group: one satisfied member is enough.
		for member in deplist[1:]:
			value = dep_eval(member) if isinstance(member, list) else member
			if value == 1:
				return 1
		# XXX: an empty OR group counts as satisfied, because some
		# ebuilds rely on an old bug that behaved this way.
		return 1 if len(deplist) == 1 else 0
	# AND semantics: every member must hold (2 marks a failed member).
	for member in deplist:
		if isinstance(member, list):
			if dep_eval(member) == 0:
				return 0
		elif member == 0 or member == 2:
			return 0
	return 1
# dep_zapdeps: for each || group, pick the preferred alternative based on
# what is installed, in the dependency graph, or available; non-|| deps are
# passed through recursively.
# NOTE(review): this excerpt is decimated (gaps in the embedded line numbers,
# e.g. 7694 -> 7698), so source lines are missing; code is left byte-identical
# and only comments were added.
7692 def dep_zapdeps(unreduced, reduced, myroot, use_binaries=0, trees=None):
7693 """Takes an unreduced and reduced deplist and removes satisfied dependencies.
7694 Returned deplist contains steps that must be taken to satisfy dependencies."""
7698 writemsg("ZapDeps -- %s\n" % (use_binaries), 2)
7699 if not reduced or unreduced == ["||"] or dep_eval(reduced):
7702 if unreduced[0] != "||":
# Non-|| level: recurse into sub-lists, collect unsatisfied atoms.
7704 for x, satisfied in zip(unreduced, reduced):
7705 if isinstance(x, list):
7706 unresolved += dep_zapdeps(x, satisfied, myroot,
7707 use_binaries=use_binaries, trees=trees)
7709 unresolved.append(x)
7712 # We're at a ( || atom ... ) type level and need to make a choice
7713 deps = unreduced[1:]
7714 satisfieds = reduced[1:]
7716 # Our preference order is for an the first item that:
7717 # a) contains all unmasked packages with the same key as installed packages
7718 # b) contains all unmasked packages
7719 # c) contains masked installed packages
7720 # d) is the first item
# Buckets in descending preference; choices fall into exactly one.
7722 preferred_installed = []
7723 preferred_in_graph = []
7724 preferred_any_slot = []
7725 preferred_non_installed = []
7726 unsat_use_in_graph = []
7727 unsat_use_installed = []
7728 unsat_use_non_installed = []
7731 # Alias the trees we'll be checking availability against
7732 parent = trees[myroot].get("parent")
7733 priority = trees[myroot].get("priority")
7734 graph_db = trees[myroot].get("graph_db")
7736 if "vartree" in trees[myroot]:
7737 vardb = trees[myroot]["vartree"].dbapi
7739 mydbapi = trees[myroot]["bintree"].dbapi
7741 mydbapi = trees[myroot]["porttree"].dbapi
7743 # Sort the deps into installed, not installed but already
7744 # in the graph and other, not installed and not in the graph
7745 # and other, with values of [[required_atom], availablility]
7746 for x, satisfied in zip(deps, satisfieds):
7747 if isinstance(x, list):
7748 atoms = dep_zapdeps(x, satisfied, myroot,
7749 use_binaries=use_binaries, trees=trees)
7754 other.append((atoms, None, False))
7757 all_available = True
7758 all_use_satisfied = True
7763 # Ignore USE dependencies here since we don't want USE
7764 # settings to adversely affect || preference evaluation.
7765 avail_pkg = mydbapi.match(atom.without_use)
7767 avail_pkg = avail_pkg[-1] # highest (ascending order)
7768 avail_slot = dep.Atom("%s:%s" % (atom.cp,
7769 mydbapi.aux_get(avail_pkg, ["SLOT"])[0]))
7771 all_available = False
7772 all_use_satisfied = False
# Re-match with USE deps to see if the USE-conditional form matches too.
7776 avail_pkg_use = mydbapi.match(atom)
7777 if not avail_pkg_use:
7778 all_use_satisfied = False
7780 # highest (ascending order)
7781 avail_pkg_use = avail_pkg_use[-1]
7782 if avail_pkg_use != avail_pkg:
7783 avail_pkg = avail_pkg_use
7784 avail_slot = dep.Atom("%s:%s" % (atom.cp,
7785 mydbapi.aux_get(avail_pkg, ["SLOT"])[0]))
7787 versions[avail_slot] = avail_pkg
7789 this_choice = (atoms, versions, all_available)
7791 # The "all installed" criterion is not version or slot specific.
7792 # If any version of a package is already in the graph then we
7793 # assume that it is preferred over other possible packages choices.
7794 all_installed = True
7795 for atom in set(dep.Atom(atom.cp) for atom in atoms \
7796 if not atom.blocker):
7797 # New-style virtuals have zero cost to install.
7798 if not vardb.match(atom) and not atom.startswith("virtual/"):
7799 all_installed = False
7801 all_installed_slots = False
7803 all_installed_slots = True
7804 for slot_atom in versions:
7805 # New-style virtuals have zero cost to install.
7806 if not vardb.match(slot_atom) and \
7807 not slot_atom.startswith("virtual/"):
7808 all_installed_slots = False
# Without a graph db, classify purely on installed/USE-satisfied state.
7810 if graph_db is None:
7811 if all_use_satisfied:
7813 if all_installed_slots:
7814 preferred_installed.append(this_choice)
7816 preferred_any_slot.append(this_choice)
7818 preferred_non_installed.append(this_choice)
7820 if all_installed_slots:
7821 unsat_use_installed.append(this_choice)
7823 unsat_use_non_installed.append(this_choice)
7826 for slot_atom in versions:
7827 # New-style virtuals have zero cost to install.
7828 if not graph_db.match(slot_atom) and \
7829 not slot_atom.startswith("virtual/"):
7830 all_in_graph = False
7832 circular_atom = None
7834 if parent is None or priority is None:
7836 elif priority.buildtime:
7837 # Check if the atom would result in a direct circular
7838 # dependency and try to avoid that if it seems likely
7839 # to be unresolvable. This is only relevant for
7840 # buildtime deps that aren't already satisfied by an
7841 # installed package.
7842 cpv_slot_list = [parent]
7846 if vardb.match(atom):
7847 # If the atom is satisfied by an installed
7848 # version then it's not a circular dep.
7850 if atom.cp != parent.cp:
7852 if match_from_list(atom, cpv_slot_list):
7853 circular_atom = atom
7855 if circular_atom is not None:
7856 other.append(this_choice)
7858 if all_use_satisfied:
7860 preferred_in_graph.append(this_choice)
7862 if all_installed_slots:
7863 preferred_installed.append(this_choice)
7865 preferred_any_slot.append(this_choice)
7867 preferred_non_installed.append(this_choice)
7870 unsat_use_in_graph.append(this_choice)
7871 elif all_installed_slots:
7872 unsat_use_installed.append(this_choice)
7874 unsat_use_non_installed.append(this_choice)
7876 other.append(this_choice)
7878 # unsat_use_* must come after preferred_non_installed
7879 # for correct ordering in cases like || ( foo[a] foo[b] ).
7880 preferred = preferred_in_graph + preferred_installed + \
7881 preferred_any_slot + preferred_non_installed + \
7882 unsat_use_in_graph + unsat_use_installed + unsat_use_non_installed + \
# First pass requires all packages unmasked; second pass allows masked.
7885 for allow_masked in (False, True):
7886 for atoms, versions, all_available in preferred:
7887 if all_available or allow_masked:
7890 assert(False) # This point should not be reachable
# dep_expand: expand a possibly category-less dependency string into a full
# cat/pkg Atom via cpv_expand; a "null/" placeholder category is inserted
# first so the string parses as an Atom.
# NOTE(review): excerpt decimated (gaps in embedded line numbers); code left
# byte-identical, comments only.
7892 def dep_expand(mydep, mydb=None, use_cache=1, settings=None):
7901 if isinstance(orig_dep, dep.Atom):
# Not already an Atom: detect whether a category is present.
7905 has_cat = '/' in orig_dep
7907 alphanum = re.search(r'\w', orig_dep)
# Insert the placeholder category just before the first word character,
# i.e. after any operator prefix such as ">=".
7909 mydep = orig_dep[:alphanum.start()] + "null/" + \
7910 orig_dep[alphanum.start():]
7912 mydep = dep.Atom(mydep)
7913 except exception.InvalidAtom:
7914 # Missing '=' prefix is allowed for backward compatibility.
7915 if not dep.isvalidatom("=" + mydep):
7917 mydep = dep.Atom('=' + mydep)
7918 orig_dep = '=' + orig_dep
7920 null_cat, pn = catsplit(mydep.cp)
7924 expanded = cpv_expand(mydep, mydb=mydb,
7925 use_cache=use_cache, settings=settings)
# Splice the expanded cat/pkg back into the original dep string.
7926 return portage.dep.Atom(orig_dep.replace(mydep, expanded, 1))
# dep_check: top-level dependency-string evaluator. Parses depstring,
# applies USE reduction, expands new-style virtuals, word-reduces against
# the dbapi, and returns [1, selected_atoms] on success or [0, message].
# NOTE(review): excerpt decimated (gaps in embedded line numbers); code left
# byte-identical, comments only.
7928 def dep_check(depstring, mydbapi, mysettings, use="yes", mode=None, myuse=None,
7929 use_cache=1, use_binaries=0, myroot="/", trees=None):
7930 """Takes a depend string and parses the condition."""
7931 edebug = mysettings.get("PORTAGE_DEBUG", None) == "1"
7932 #check_config_instance(mysettings)
7934 trees = globals()["db"]
7938 myusesplit = mysettings["PORTAGE_USE"].split()
7941 # We've been given useflags to use.
7942 #print "USE FLAGS PASSED IN."
7944 #if "bindist" in myusesplit:
7945 # print "BINDIST is set!"
7947 # print "BINDIST NOT set."
7949 #we are being run by autouse(), don't consult USE vars yet.
7950 # WE ALSO CANNOT USE SETTINGS
7953 #convert parenthesis to sublists
7955 mysplit = portage.dep.paren_reduce(depstring)
7956 except portage.exception.InvalidDependString as e:
7961 useforce.add(mysettings["ARCH"])
7963 # This masking/forcing is only for repoman. In other cases, relevant
7964 # masking/forcing should have already been applied via
7965 # config.regenerate(). Also, binary or installed packages may have
7966 # been built with flags that are now masked, and it would be
7967 # inconsistent to mask them now. Additionally, myuse may consist of
7968 # flags from a parent package that is being merged to a $ROOT that is
7969 # different from the one that mysettings represents.
7970 mymasks.update(mysettings.usemask)
7971 mymasks.update(mysettings.archlist())
7972 mymasks.discard(mysettings["ARCH"])
7973 useforce.update(mysettings.useforce)
7974 useforce.difference_update(mymasks)
# Strip USE-conditional branches that do not apply.
7976 mysplit = portage.dep.use_reduce(mysplit, uselist=myusesplit,
7977 masklist=mymasks, matchall=(use=="all"), excludeall=useforce)
7978 except portage.exception.InvalidDependString as e:
7981 # Do the || conversions
7982 mysplit=portage.dep.dep_opconvert(mysplit)
7985 #dependencies were reduced to nothing
7988 # Recursively expand new-style virtuals so as to
7989 # collapse one or more levels of indirection.
7991 mysplit = _expand_new_virtuals(mysplit, edebug, mydbapi, mysettings,
7992 use=use, mode=mode, myuse=myuse,
7993 use_force=useforce, use_mask=mymasks, use_cache=use_cache,
7994 use_binaries=use_binaries, myroot=myroot, trees=trees)
7995 except portage.exception.ParseError as e:
# mysplit2 holds the word-reduced (True/False) mirror of mysplit.
7999 mysplit2=dep_wordreduce(mysplit2,mysettings,mydbapi,mode,use_cache=use_cache)
8000 if mysplit2 is None:
8001 return [0, _("Invalid token")]
8003 writemsg("\n\n\n", 1)
8004 writemsg("mysplit: %s\n" % (mysplit), 1)
8005 writemsg("mysplit2: %s\n" % (mysplit2), 1)
8008 selected_atoms = dep_zapdeps(mysplit, mysplit2, myroot,
8009 use_binaries=use_binaries, trees=trees)
8010 except portage.exception.InvalidAtom as e:
8011 if portage.dep._dep_check_strict:
8012 raise # This shouldn't happen.
8013 # dbapi.match() failed due to an invalid atom in
8014 # the dependencies of an installed package.
8015 return [0, _("Invalid atom: '%s'") % (e,)]
8017 return [1, selected_atoms]
# dep_wordreduce: replace each atom in the (nested) deplist with True/False
# according to whether it is satisfied by package.provided or the dbapi;
# "||" markers are kept as-is. Returns None on an invalid token.
# NOTE(review): excerpt decimated (gaps in embedded line numbers); code left
# byte-identical, comments only.
8019 def dep_wordreduce(mydeplist,mysettings,mydbapi,mode,use_cache=1):
8020 "Reduces the deplist to ones and zeros"
8021 deplist=mydeplist[:]
8022 for mypos, token in enumerate(deplist):
8023 if isinstance(deplist[mypos], list):
# Nested group: recurse.
8025 deplist[mypos]=dep_wordreduce(deplist[mypos],mysettings,mydbapi,mode,use_cache=use_cache)
8026 elif deplist[mypos]=="||":
8028 elif token[:1] == "!":
# Blocker atoms are treated as unsatisfied here.
8029 deplist[mypos] = False
8031 mykey = deplist[mypos].cp
8032 if mysettings and mykey in mysettings.pprovideddict and \
8033 match_from_list(deplist[mypos], mysettings.pprovideddict[mykey]):
8035 elif mydbapi is None:
8036 # Assume nothing is satisfied. This forces dep_zapdeps to
8037 # return all of deps the deps that have been selected
8038 # (excluding those satisfied by package.provided).
8039 deplist[mypos] = False
8042 x = mydbapi.xmatch(mode, deplist[mypos])
8043 if mode.startswith("minimum-"):
8050 mydep=mydbapi.match(deplist[mypos],use_cache=use_cache)
8053 if deplist[mypos][0]=="!":
8057 #encountered invalid string
# Pre-compiled matcher for a full cpv string; group 2 is the cat/pkg part.
# NOTE(review): excerpt decimated (gaps in embedded line numbers); code left
# byte-identical, comments only.
8061 _cpv_key_re = re.compile('^' + dep._cpv + '$', re.VERBOSE)
8062 def cpv_getkey(mycpv):
8063 """Calls pkgsplit on a cpv and returns only the cp."""
8064 m = _cpv_key_re.match(mycpv)
# Fallback path: split off the category and pkgsplit the remainder.
8067 myslash = mycpv.split("/", 1)
8068 mysplit=pkgsplit(myslash[-1])
8073 return myslash[0]+"/"+mysplit[0]
# Backward-compatible alias for the old public name.
8077 getCPFromCPV = cpv_getkey
# key_expand: deprecated category expansion helper; returns the first match
# rather than raising AmbiguousPackageName like cpv_expand does.
# NOTE(review): excerpt decimated (gaps in embedded line numbers); code left
# byte-identical, comments only.
8079 def key_expand(mykey, mydb=None, use_cache=1, settings=None):
8080 """This is deprecated because it just returns the first match instead of
8081 raising AmbiguousPackageName like cpv_expand does."""
8082 warnings.warn("portage.key_expand() is deprecated", DeprecationWarning)
8083 mysplit=mykey.split("/")
8084 if settings is None:
8085 settings = globals()["settings"]
8086 virts = settings.getvirtuals("/")
8087 virts_p = settings.get_virts_p("/")
# Category-less key: probe every category in the db for a match.
8089 if hasattr(mydb, "cp_list"):
8090 for x in mydb.categories:
8091 if mydb.cp_list(x+"/"+mykey,use_cache=use_cache):
8092 return dep.Atom(x + "/" + mykey)
8093 if mykey in virts_p:
8094 return(virts_p[mykey][0])
8095 return dep.Atom("null/" + mykey)
# Key already has a category: only virtual expansion may apply.
8097 if hasattr(mydb, "cp_list"):
8098 if not mydb.cp_list(mykey, use_cache=use_cache) and \
8099 virts and mykey in virts:
8100 return virts[mykey][0]
8101 if not isinstance(mykey, dep.Atom):
8102 mykey = dep.Atom(mykey)
# NOTE(review): excerpt decimated (gaps in embedded line numbers); code left
# byte-identical, comments only.
8105 def cpv_expand(mycpv, mydb=None, use_cache=1, settings=None):
8106 """Given a string (packagename or virtual) expand it into a valid
8107 cat/package string. Virtuals use the mydb to determine which provided
8108 virtual is a valid choice and defaults to the first element when there
8109 are no installed/available candidates."""
8110 myslash=mycpv.split("/")
8111 mysplit=pkgsplit(myslash[-1])
8112 if settings is None:
8113 settings = globals()["settings"]
8114 virts = settings.getvirtuals("/")
8115 virts_p = settings.get_virts_p("/")
8117 # this is illegal case.
8120 elif len(myslash)==2:
8122 mykey=myslash[0]+"/"+mysplit[0]
# Category present: expand only if the key is a known virtual with no
# real ebuilds in the db.
8125 if mydb and virts and mykey in virts:
8126 writemsg("mydb.__class__: %s\n" % (mydb.__class__), 1)
8127 if hasattr(mydb, "cp_list"):
8128 if not mydb.cp_list(mykey, use_cache=use_cache):
8129 writemsg("virts[%s]: %s\n" % (str(mykey),virts[mykey]), 1)
8130 mykey_orig = mykey[:]
8131 for vkey in virts[mykey]:
8132 # The virtuals file can contain a versioned atom, so
8133 # it may be necessary to remove the operator and
8134 # version from the atom before it is passed into
8136 if mydb.cp_list(dep_getkey(vkey), use_cache=use_cache):
8138 writemsg(_("virts chosen: %s\n") % (mykey), 1)
8140 if mykey == mykey_orig:
# No provider found in the db; fall back to the first virtual entry.
8141 mykey = str(virts[mykey][0])
8142 writemsg(_("virts defaulted: %s\n") % (mykey), 1)
8143 #we only perform virtual expansion if we are passed a dbapi
8145 #specific cpv, no category, ie. "foo-1.0"
# Category-less name: search all db categories and detect ambiguity.
8153 if mydb and hasattr(mydb, "categories"):
8154 for x in mydb.categories:
8155 if mydb.cp_list(x+"/"+myp,use_cache=use_cache):
8156 matches.append(x+"/"+myp)
8157 if len(matches) > 1:
8158 virtual_name_collision = False
8159 if len(matches) == 2:
8161 if not x.startswith("virtual/"):
8162 # Assume that the non-virtual is desired. This helps
8163 # avoid the ValueError for invalid deps that come from
8164 # installed packages (during reverse blocker detection,
8168 virtual_name_collision = True
8169 if not virtual_name_collision:
8170 # AmbiguousPackageName inherits from ValueError,
8171 # for backward compatibility with calling code
8172 # that already handles ValueError.
8173 raise portage.exception.AmbiguousPackageName(matches)
8177 if not mykey and not isinstance(mydb, list):
8179 mykey=virts_p[myp][0]
8180 #again, we only perform virtual expansion if we have a dbapi (not a list)
# Reattach version (and revision unless it is the implicit r0).
8184 if mysplit[2]=="r0":
8185 return mykey+"-"+mysplit[1]
8187 return mykey+"-"+mysplit[1]+"-"+mysplit[2]
# getmaskingreason: locate the package.mask entry (and its preceding comment
# block) that masks mycpv; optionally also return the file location.
# NOTE(review): excerpt decimated (gaps in embedded line numbers); code left
# byte-identical, comments only.
8191 def getmaskingreason(mycpv, metadata=None, settings=None, portdb=None, return_location=False):
8192 from portage.util import grablines
8193 if settings is None:
8194 settings = globals()["settings"]
8196 portdb = globals()["portdb"]
8197 mysplit = catpkgsplit(mycpv)
8199 raise ValueError(_("invalid CPV: %s") % mycpv)
8200 if metadata is None:
8201 db_keys = list(portdb._aux_cache_keys)
8203 metadata = dict(zip(db_keys, portdb.aux_get(mycpv, db_keys)))
8205 if not portdb.cpv_exists(mycpv):
8207 if metadata is None:
8208 # Can't access SLOT due to corruption.
8209 cpv_slot_list = [mycpv]
8211 cpv_slot_list = ["%s:%s" % (mycpv, metadata["SLOT"])]
8212 mycp=mysplit[0]+"/"+mysplit[1]
8214 # XXX- This is a temporary duplicate of code from the config constructor.
8215 locations = [os.path.join(settings["PORTDIR"], "profiles")]
8216 locations.extend(settings.profiles)
8217 for ov in settings["PORTDIR_OVERLAY"].split():
8218 profdir = os.path.join(normalize_path(ov), "profiles")
8219 if os.path.isdir(profdir):
8220 locations.append(profdir)
8221 locations.append(os.path.join(settings["PORTAGE_CONFIGROOT"],
# Read every package.mask file from all collected locations.
8224 pmasklists = [(x, grablines(os.path.join(x, "package.mask"), recursive=1)) for x in locations]
8226 if mycp in settings.pmaskdict:
8227 for x in settings.pmaskdict[mycp]:
8228 if match_from_list(x, cpv_slot_list):
# Scan the raw file lines to recover the comment preceding the mask atom.
8232 for pmask in pmasklists:
8233 pmask_filename = os.path.join(pmask[0], "package.mask")
8234 for i in range(len(pmask[1])):
8235 l = pmask[1][i].strip()
8241 comment_valid = i + 1
8243 if comment_valid != i:
8246 return (comment, pmask_filename)
8249 elif comment_valid != -1:
8250 # Apparently this comment applies to muliple masks, so
8251 # it remains valid until a blank line is encountered.
# getmaskingstatus: return a list of human-readable reasons why mycpv is
# masked (profile, package.mask, EAPI, keywords, licenses, properties);
# an empty list means not masked.
# NOTE(review): excerpt decimated (gaps in embedded line numbers); code left
# byte-identical, comments only.
8258 def getmaskingstatus(mycpv, settings=None, portdb=None):
8259 if settings is None:
8260 settings = config(clone=globals()["settings"])
8262 portdb = globals()["portdb"]
8266 if not isinstance(mycpv, basestring):
8267 # emerge passed in a Package instance
8270 metadata = pkg.metadata
8271 installed = pkg.installed
8273 mysplit = catpkgsplit(mycpv)
8275 raise ValueError(_("invalid CPV: %s") % mycpv)
8276 if metadata is None:
8277 db_keys = list(portdb._aux_cache_keys)
8279 metadata = dict(zip(db_keys, portdb.aux_get(mycpv, db_keys)))
8281 if not portdb.cpv_exists(mycpv):
8283 return ["corruption"]
# A '?' in LICENSE means it is USE-conditional, so USE must be computed.
8284 if "?" in metadata["LICENSE"]:
8285 settings.setcpv(mycpv, mydb=metadata)
8286 metadata["USE"] = settings["PORTAGE_USE"]
8288 metadata["USE"] = ""
8289 mycp=mysplit[0]+"/"+mysplit[1]
8294 if settings._getProfileMaskAtom(mycpv, metadata):
8295 rValue.append("profile")
8297 # package.mask checking
8298 if settings._getMaskAtom(mycpv, metadata):
8299 rValue.append("package.mask")
8302 eapi = metadata["EAPI"]
8303 mygroups = settings._getKeywords(mycpv, metadata)
8304 licenses = metadata["LICENSE"]
8305 properties = metadata["PROPERTIES"]
8306 slot = metadata["SLOT"]
8307 if eapi.startswith("-"):
8309 if not eapi_is_supported(eapi):
8310 return ["EAPI %s" % eapi]
8311 elif _eapi_is_deprecated(eapi) and not installed:
8312 return ["EAPI %s" % eapi]
8313 egroups = settings.configdict["backupenv"].get(
8314 "ACCEPT_KEYWORDS", "").split()
8315 pgroups = settings["ACCEPT_KEYWORDS"].split()
8316 myarch = settings["ARCH"]
8317 if pgroups and myarch not in pgroups:
8318 """For operating systems other than Linux, ARCH is not necessarily a
8320 myarch = pgroups[0].lstrip("~")
8322 cp = dep_getkey(mycpv)
8323 pkgdict = settings.pkeywordsdict.get(cp)
8326 cpv_slot_list = ["%s:%s" % (mycpv, metadata["SLOT"])]
8327 for atom, pkgkeywords in pkgdict.items():
8328 if match_from_list(atom, cpv_slot_list):
8330 pgroups.extend(pkgkeywords)
8331 if matches or egroups:
8332 pgroups.extend(egroups)
# Apply incremental "-kw" removals to the accumulated keyword groups.
8335 if x.startswith("-"):
8339 inc_pgroups.discard(x[1:])
8342 pgroups = inc_pgroups
8350 for keyword in pgroups:
8351 if keyword in mygroups:
8361 elif gp=="-"+myarch and myarch in pgroups:
8364 elif gp=="~"+myarch and myarch in pgroups:
# License check: report only the missing license tokens.
8369 missing_licenses = settings._getMissingLicenses(mycpv, metadata)
8370 if missing_licenses:
8371 allowed_tokens = set(["||", "(", ")"])
8372 allowed_tokens.update(missing_licenses)
8373 license_split = licenses.split()
8374 license_split = [x for x in license_split \
8375 if x in allowed_tokens]
8376 msg = license_split[:]
8377 msg.append("license(s)")
8378 rValue.append(" ".join(msg))
8379 except portage.exception.InvalidDependString as e:
8380 rValue.append("LICENSE: "+str(e))
8383 missing_properties = settings._getMissingProperties(mycpv, metadata)
8384 if missing_properties:
8385 allowed_tokens = set(["||", "(", ")"])
8386 allowed_tokens.update(missing_properties)
8387 properties_split = properties.split()
8388 properties_split = [x for x in properties_split \
8389 if x in allowed_tokens]
8390 msg = properties_split[:]
8391 msg.append("properties")
8392 rValue.append(" ".join(msg))
8393 except portage.exception.InvalidDependString as e:
8394 rValue.append("PROPERTIES: "+str(e))
8396 # Only show KEYWORDS masks for installed packages
8397 # if they're not masked for any other reason.
8398 if kmask and (not installed or not rValue):
8399 rValue.append(kmask+" keyword")
# NOTE(review): the opening line of this tuple literal (presumably
# "auxdbkeys=(...") is missing from this decimated excerpt; the entries
# below are the ordered metadata cache keys.
8404 'DEPEND', 'RDEPEND', 'SLOT', 'SRC_URI',
8405 'RESTRICT', 'HOMEPAGE', 'LICENSE', 'DESCRIPTION',
8406 'KEYWORDS', 'INHERITED', 'IUSE', 'CDEPEND',
8407 'PDEPEND', 'PROVIDE', 'EAPI',
8408 'PROPERTIES', 'DEFINED_PHASES', 'UNUSED_05', 'UNUSED_04',
8409 'UNUSED_03', 'UNUSED_02', 'UNUSED_01',
# Number of cache keys, used when reading/writing flat cache entries.
8411 auxdbkeylen=len(auxdbkeys)
8413 from portage.dbapi import dbapi
8414 from portage.dbapi.virtual import fakedbapi
8415 from portage.dbapi.bintree import bindbapi, binarytree
8416 from portage.dbapi.vartree import vardbapi, vartree, dblink
8417 from portage.dbapi.porttree import close_portdbapi_caches, portdbapi, portagetree
# NOTE(review): excerpt decimated (gaps in embedded line numbers; e.g. the
# def lines for __iter__/__len__ appear to be missing); code left
# byte-identical, comments only.
8419 class FetchlistDict(portage.cache.mappings.Mapping):
8420 """This provide a mapping interface to retrieve fetch lists. It's used
8421 to allow portage.manifest.Manifest to access fetch lists via a standard
8422 mapping interface rather than use the dbapi directly."""
8423 def __init__(self, pkgdir, settings, mydbapi):
8424 """pkgdir is a directory containing ebuilds and settings is passed into
8425 portdbapi.getfetchlist for __getitem__ calls."""
8426 self.pkgdir = pkgdir
# cp is the "category/package" derived from the last two path components.
8427 self.cp = os.sep.join(pkgdir.split(os.sep)[-2:])
8428 self.settings = settings
8429 self.mytree = os.path.realpath(os.path.dirname(os.path.dirname(pkgdir)))
8430 self.portdb = mydbapi
8431 def __getitem__(self, pkg_key):
8432 """Returns the complete fetch list for a given package."""
8433 return list(self.portdb.getFetchMap(pkg_key, mytree=self.mytree))
8434 def __contains__(self, cpv):
8435 return cpv in self.__iter__()
8436 def has_key(self, pkg_key):
8437 """Returns true if the given package exists within pkgdir."""
8438 return pkg_key in self
8441 return iter(self.portdb.cp_list(self.cp, mytree=self.mytree))
8444 """Returns keys for all packages within pkgdir"""
8445 return self.portdb.cp_list(self.cp, mytree=self.mytree)
# Python 3 compatibility shim — presumably aliases mapping methods;
# the aliased names are missing from this excerpt. TODO confirm upstream.
8447 if sys.hexversion >= 0x3000000:
# NOTE(review): excerpt decimated (gaps in embedded line numbers); code left
# byte-identical, comments only.
8450 def pkgmerge(mytbz2, myroot, mysettings, mydbapi=None,
8451 vartree=None, prev_mtimes=None, blockers=None):
8452 """will merge a .tbz2 file, returning a list of runtime dependencies
8453 that must be satisfied, or None if there was a merge error. This
8454 code assumes the package exists."""
8457 mydbapi = db[myroot]["bintree"].dbapi
8459 vartree = db[myroot]["vartree"]
8460 if mytbz2[-5:]!=".tbz2":
8461 print(_("!!! Not a .tbz2 file"))
8467 did_merge_phase = False
8470 """ Don't lock the tbz2 file because the filesytem could be readonly or
8471 shared by a cluster."""
8472 #tbz2_lock = portage.locks.lockfile(mytbz2, wantnewlockfile=1)
8474 mypkg = os.path.basename(mytbz2)[:-5]
8475 xptbz2 = portage.xpak.tbz2(mytbz2)
8476 mycat = xptbz2.getfile("CATEGORY")
8478 writemsg(_("!!! CATEGORY info missing from info chunk, aborting...\n"),
8481 mycat = mycat.strip()
8483 # These are the same directories that would be used at build time.
8484 builddir = os.path.join(
8485 mysettings["PORTAGE_TMPDIR"], "portage", mycat, mypkg)
8486 catdir = os.path.dirname(builddir)
8487 pkgloc = os.path.join(builddir, "image")
8488 infloc = os.path.join(builddir, "build-info")
8489 myebuild = os.path.join(
8490 infloc, os.path.basename(mytbz2)[:-4] + "ebuild")
8491 portage.util.ensure_dirs(os.path.dirname(catdir),
8492 uid=portage_uid, gid=portage_gid, mode=0o70, mask=0)
8493 catdir_lock = portage.locks.lockdir(catdir)
8494 portage.util.ensure_dirs(catdir,
8495 uid=portage_uid, gid=portage_gid, mode=0o70, mask=0)
# Remove any stale build directory left over from a previous attempt.
8497 shutil.rmtree(builddir)
8498 except (IOError, OSError) as e:
8499 if e.errno != errno.ENOENT:
8502 for mydir in (builddir, pkgloc, infloc):
8503 portage.util.ensure_dirs(mydir, uid=portage_uid,
8504 gid=portage_gid, mode=0o755)
8505 writemsg_stdout(_(">>> Extracting info\n"))
8506 xptbz2.unpackinfo(infloc)
8507 mysettings.setcpv(mycat + "/" + mypkg, mydb=mydbapi)
8508 # Store the md5sum in the vdb.
8509 fp = open(_unicode_encode(os.path.join(infloc, 'BINPKGMD5')), 'w')
8510 fp.write(str(portage.checksum.perform_md5(mytbz2))+"\n")
8513 # This gives bashrc users an opportunity to do various things
8514 # such as remove binary packages after they're installed.
8515 mysettings["PORTAGE_BINPKG_FILE"] = mytbz2
8516 mysettings.backup_changes("PORTAGE_BINPKG_FILE")
8517 debug = mysettings.get("PORTAGE_DEBUG", "") == "1"
8519 # Eventually we'd like to pass in the saved ebuild env here.
8520 retval = doebuild(myebuild, "setup", myroot, mysettings, debug=debug,
8521 tree="bintree", mydbapi=mydbapi, vartree=vartree)
8522 if retval != os.EX_OK:
8523 writemsg(_("!!! Setup failed: %s\n") % retval, noiselevel=-1)
8526 writemsg_stdout(_(">>> Extracting %s\n") % mypkg)
8527 retval = portage.process.spawn_bash(
8528 "bzip2 -dqc -- '%s' | tar -xp -C '%s' -f -" % (mytbz2, pkgloc),
8529 env=mysettings.environ())
8530 if retval != os.EX_OK:
8531 writemsg(_("!!! Error Extracting '%s'\n") % mytbz2, noiselevel=-1)
8533 #portage.locks.unlockfile(tbz2_lock)
# Merge the extracted image into the live filesystem via dblink.
8536 mylink = dblink(mycat, mypkg, myroot, mysettings, vartree=vartree,
8537 treetype="bintree", blockers=blockers)
8538 retval = mylink.merge(pkgloc, infloc, myroot, myebuild, cleanup=0,
8539 mydbapi=mydbapi, prev_mtimes=prev_mtimes)
8540 did_merge_phase = True
8541 success = retval == os.EX_OK
8544 mysettings.pop("PORTAGE_BINPKG_FILE", None)
8546 portage.locks.unlockfile(tbz2_lock)
8548 if not did_merge_phase:
8549 # The merge phase handles this already. Callers don't know how
8550 # far this function got, so we have to call elog_process() here
8551 # so that it's only called once.
8552 from portage.elog import elog_process
8553 elog_process(mycat + "/" + mypkg, mysettings)
8556 shutil.rmtree(builddir)
8557 except (IOError, OSError) as e:
8558 if e.errno != errno.ENOENT:
# deprecated_profile_check: warn the user when the active profile has a
# "deprecated" marker file, printing the suggested replacement profile and
# any upgrade instructions contained in that file.
# NOTE(review): excerpt decimated (gaps in embedded line numbers); code left
# byte-identical, comments only.
8562 def deprecated_profile_check(settings=None):
8564 if settings is not None:
8565 config_root = settings["PORTAGE_CONFIGROOT"]
8566 deprecated_profile_file = os.path.join(config_root,
8567 DEPRECATED_PROFILE_FILE)
8568 if not os.access(deprecated_profile_file, os.R_OK):
8570 dcontent = codecs.open(_unicode_encode(deprecated_profile_file,
8571 encoding=_encodings['fs'], errors='strict'),
8572 mode='r', encoding=_encodings['content'], errors='replace').readlines()
8573 writemsg(colorize("BAD", _("\n!!! Your current profile is "
8574 "deprecated and not supported anymore.")) + "\n", noiselevel=-1)
8576 writemsg(colorize("BAD", _("!!! Please refer to the "
8577 "Gentoo Upgrading Guide.")) + "\n", noiselevel=-1)
# First line of the marker file names the replacement profile.
8579 newprofile = dcontent[0]
8580 writemsg(colorize("BAD", _("!!! Please upgrade to the "
8581 "following profile if possible:")) + "\n", noiselevel=-1)
8582 writemsg(8*" " + colorize("GOOD", newprofile) + "\n", noiselevel=-1)
8583 if len(dcontent) > 1:
8584 writemsg(_("To upgrade do the following steps:\n"), noiselevel=-1)
8585 for myline in dcontent[1:]:
8586 writemsg(myline, noiselevel=-1)
8587 writemsg("\n\n", noiselevel=-1)
8590 # gets virtual package settings
# Deprecated shim: delegates to the global settings object.
8591 def getvirtuals(myroot):
8593 writemsg("--- DEPRECATED call to getvirtual\n")
8594 return settings.getvirtuals(myroot)
# commit_mtimedb: pickle the global (or given) mtime database to disk
# atomically and apply standard permissions.
# NOTE(review): excerpt decimated (gaps in embedded line numbers; the
# "def portageexit():" header for the second function appears to be among
# the missing lines); code left byte-identical, comments only.
8596 def commit_mtimedb(mydict=None, filename=None):
8599 if "mtimedb" not in globals() or mtimedb is None:
8603 if filename is None:
8605 filename = mtimedbfile
8606 mydict["version"] = VERSION
8607 d = {} # for full backward compat, pickle it as a plain dict object.
8610 f = atomic_ofstream(filename, mode='wb')
8611 pickle.dump(d, f, protocol=2)
8613 portage.util.apply_secpass_permissions(filename,
8614 uid=uid, gid=portage_gid, mode=0o644)
8615 except (IOError, OSError) as e:
# portageexit body: flush portdb caches at interpreter exit when
# privileged and not inside a sandbox.
8619 global uid,portage_gid,portdb,db
8620 if secpass and os.environ.get("SANDBOX_ON") != "1":
8621 close_portdbapi_caches()
8624 atexit_register(portageexit)
# NOTE(review): excerpt decimated (gaps in embedded line numbers); code left
# byte-identical, comments only.
8626 def _global_updates(trees, prev_mtimes):
8628 Perform new global updates if they exist in $PORTDIR/profiles/updates/.
8630 @param trees: A dictionary containing portage trees.
8632 @param prev_mtimes: A dictionary containing mtimes of files located in
8633 $PORTDIR/profiles/updates/.
8634 @type prev_mtimes: dict
8635 @rtype: None or List
8636 @return: None if no were no updates, otherwise a list of update commands
8637 that have been performed.
8639 # only do this if we're root and not running repoman/ebuild digest
8641 if secpass < 2 or "SANDBOX_ACTIVE" in os.environ:
8644 mysettings = trees["/"]["vartree"].settings
8645 updpath = os.path.join(mysettings["PORTDIR"], "profiles", "updates")
# fixpackages ignores prior mtimes and reprocesses everything.
8648 if mysettings["PORTAGE_CALLER"] == "fixpackages":
8649 update_data = grab_updates(updpath)
8651 update_data = grab_updates(updpath, prev_mtimes)
8652 except portage.exception.DirectoryNotFound:
8653 writemsg(_("--- 'profiles/updates' is empty or "
8654 "not available. Empty portage tree?\n"), noiselevel=1)
8657 if len(update_data) > 0:
8658 do_upgrade_packagesmessage = 0
8661 for mykey, mystat, mycontent in update_data:
8662 writemsg_stdout("\n\n")
8663 writemsg_stdout(colorize("GOOD",
8664 _("Performing Global Updates: "))+bold(mykey)+"\n")
8665 writemsg_stdout(_("(Could take a couple of minutes if you have a lot of binary packages.)\n"))
8666 writemsg_stdout(_(" %s='update pass' %s='binary update' "
8667 "%s='/var/db update' %s='/var/db move'\n"
8668 " %s='/var/db SLOT move' %s='binary move' "
8669 "%s='binary SLOT move'\n %s='update /etc/portage/package.*'\n") % \
8670 (bold("."), bold("*"), bold("#"), bold("@"), bold("s"), bold("%"), bold("S"), bold("p")))
8671 valid_updates, errors = parse_updates(mycontent)
8672 myupd.extend(valid_updates)
8673 writemsg_stdout(len(valid_updates) * "." + "\n")
8674 if len(errors) == 0:
8675 # Update our internal mtime since we
8676 # processed all of our directives.
8677 timestamps[mykey] = long(mystat.st_mtime)
8680 writemsg("%s\n" % msg, noiselevel=-1)
# Rewrite world-file entries affected by the moves.
8682 world_file = os.path.join(root, WORLD_FILE)
8683 world_list = grabfile(world_file)
8684 world_modified = False
8685 for update_cmd in myupd:
8686 for pos, atom in enumerate(world_list):
8687 new_atom = update_dbentry(update_cmd, atom)
8688 if atom != new_atom:
8689 world_list[pos] = new_atom
8690 world_modified = True
8693 write_atomic(world_file,
8694 "".join("%s\n" % (x,) for x in world_list))
8696 update_config_files("/",
8697 mysettings.get("CONFIG_PROTECT","").split(),
8698 mysettings.get("CONFIG_PROTECT_MASK","").split(),
8701 trees["/"]["bintree"] = binarytree("/", mysettings["PKGDIR"],
8702 settings=mysettings)
8703 vardb = trees["/"]["vartree"].dbapi
8704 bindb = trees["/"]["bintree"].dbapi
8705 if not os.access(bindb.bintree.pkgdir, os.W_OK):
# Apply move/slotmove commands to the installed and binary package dbs;
# progress characters match the legend printed above.
8707 for update_cmd in myupd:
8708 if update_cmd[0] == "move":
8709 moves = vardb.move_ent(update_cmd)
8711 writemsg_stdout(moves * "@")
8713 moves = bindb.move_ent(update_cmd)
8715 writemsg_stdout(moves * "%")
8716 elif update_cmd[0] == "slotmove":
8717 moves = vardb.move_slot_ent(update_cmd)
8719 writemsg_stdout(moves * "s")
8721 moves = bindb.move_slot_ent(update_cmd)
8723 writemsg_stdout(moves * "S")
8725 # The above global updates proceed quickly, so they
8726 # are considered a single mtimedb transaction.
8727 if len(timestamps) > 0:
8728 # We do not update the mtime in the mtimedb
8729 # until after _all_ of the above updates have
8730 # been processed because the mtimedb will
8731 # automatically commit when killed by ctrl C.
8732 for mykey, mtime in timestamps.items():
8733 prev_mtimes[mykey] = mtime
8735 # We gotta do the brute force updates for these now.
8736 if mysettings["PORTAGE_CALLER"] == "fixpackages" or \
8737 "fixpackages" in mysettings.features:
8738 def onUpdate(maxval, curval):
8740 writemsg_stdout("#")
8741 vardb.update_ents(myupd, onUpdate=onUpdate)
8743 def onUpdate(maxval, curval):
8745 writemsg_stdout("*")
8746 bindb.update_ents(myupd, onUpdate=onUpdate)
8748 do_upgrade_packagesmessage = 1
8750 # Update progress above is indicated by characters written to stdout so
8751 # we print a couple new lines here to separate the progress output from
8756 if do_upgrade_packagesmessage and bindb and \
8758 writemsg_stdout(_(" ** Skipping packages. Run 'fixpackages' or set it in FEATURES to fix the tbz2's in the packages directory.\n"))
8759 writemsg_stdout(bold(_("Note: This can take a very long time.")))
8760 writemsg_stdout("\n")
class MtimeDB(dict):
	"""
	A dict of mtime-related state backed by an on-disk pickle file.
	Used so that update directives are only reprocessed when their
	source files have actually changed.
	"""

	def __init__(self, filename):
		# Path of the pickle file that backs this dict.
		self.filename = filename
		self._load(filename)

	def _load(self, filename):
		"""Populate self from the pickled file; unreadable or corrupt
		files yield empty data, and invalid keys are deleted."""
			f = open(_unicode_encode(filename), 'rb')
			mypickle = pickle.Unpickler(f)
				# Disable lookup of global objects during unpickling so a
				# tampered pickle cannot instantiate arbitrary classes
				# (python2-only attribute, hence the AttributeError guard).
				mypickle.find_global = None
			except AttributeError:
				# TODO: If py3k, override Unpickler.find_class().
		except (IOError, OSError, EOFError, ValueError, pickle.UnpicklingError) as e:
			# A missing/unreadable file silently yields empty data; only
			# genuine unpickling errors are reported to the user.
			if isinstance(e, pickle.UnpicklingError):
				writemsg(_("!!! Error loading '%s': %s\n") % \
					(filename, str(e)), noiselevel=-1)

			# Migrate the legacy "old" key to its modern "updates" name.
			d["updates"] = d["old"]
		d.setdefault("starttime", 0)
		d.setdefault("version", "")
		for k in ("info", "ldpath", "updates"):

		# Whitelist of keys this db is allowed to contain.
		mtimedbkeys = set(("info", "ldpath", "resume", "resume_backup",
			"starttime", "updates", "version"))

			if k not in mtimedbkeys:
				writemsg(_("Deleting invalid mtimedb key: %s\n") % str(k))
		# Deep-copied snapshot of the loaded state; commit() compares
		# against this to avoid rewriting an unchanged file.
		self._clean_data = copy.deepcopy(d)

		# NOTE(review): belongs to commit(); def line elided above.
		if not self.filename:
		# Only commit if the internal state has changed.
		if d != self._clean_data:
			commit_mtimedb(mydict=d, filename=self.filename)
			self._clean_data = copy.deepcopy(d)
def create_trees(config_root=None, target_root=None, trees=None):
	"""
	Build (or rebuild) the per-root trees mapping of lazily-created
	vartree/porttree/bintree/virtuals singletons.

	@param config_root: passed through to config() (PORTAGE_CONFIGROOT)
	@param target_root: passed through to config() (ROOT)
	@param trees: existing trees dict to recycle, if any
	"""
	# clean up any existing portdbapi instances
	for myroot in trees:
		portdb = trees[myroot]["porttree"].dbapi
		portdb.close_caches()
		# Deregister so the stale instance can be garbage collected.
		portdbapi.portdbapi_instances.remove(portdb)
		del trees[myroot]["porttree"], myroot, portdb

	settings = config(config_root=config_root, target_root=target_root,
		config_incrementals=portage.const.INCREMENTALS)

	myroots = [(settings["ROOT"], settings)]
	if settings["ROOT"] != "/":
		# When ROOT != "/" we only want overrides from the calling
		# environment to apply to the config that's associated
		# with ROOT != "/", so pass an empty dict for the env parameter.
		settings = config(config_root=None, target_root="/", env={})
		myroots.append((settings["ROOT"], settings))

	# Register each tree as a lazy singleton so nothing is constructed
	# until it is actually accessed.
	for myroot, mysettings in myroots:
		trees[myroot] = portage.util.LazyItemsDict(trees.get(myroot, {}))
		trees[myroot].addLazySingleton("virtuals", mysettings.getvirtuals, myroot)
		trees[myroot].addLazySingleton(
			"vartree", vartree, myroot, categories=mysettings.categories,
			settings=mysettings)
		trees[myroot].addLazySingleton("porttree",
			portagetree, myroot, settings=mysettings)
		trees[myroot].addLazySingleton("bintree",
			binarytree, myroot, mysettings["PKGDIR"], settings=mysettings)
class _LegacyGlobalProxy(proxy.objectproxy.ObjectProxy):
	"""
	Proxy for a lazily-initialized legacy global variable.

	Dereferencing an instance triggers init_legacy_globals() and then
	forwards to the real module-level object of the same name.
	"""

	__slots__ = ('_name',)

	def __init__(self, name):
		proxy.objectproxy.ObjectProxy.__init__(self)
		# __setattr__ goes through object directly because the proxy
		# intercepts normal attribute assignment.
		object.__setattr__(self, '_name', name)

	def _get_target(self):
		init_legacy_globals()
		return globals()[object.__getattribute__(self, '_name')]
class _PortdbProxy(proxy.objectproxy.ObjectProxy):
	The portdb is initialized separately from the rest
	of the variables, since sometimes the other variables
	are needed while the portdb is not.

	def _get_target(self):
		# Ensure db/root exist before they are dereferenced below.
		init_legacy_globals()
		global db, portdb, root, _portdb_initialized
		if not _portdb_initialized:
			# First access: cache the porttree dbapi for ROOT.
			portdb = db[root]["porttree"].dbapi
			_portdb_initialized = True
class _MtimedbProxy(proxy.objectproxy.ObjectProxy):
	"""
	The mtimedb is independent from the portdb and other globals.
	"""

	__slots__ = ('_name',)

	def __init__(self, name):
		proxy.objectproxy.ObjectProxy.__init__(self)
		# Bypass the proxy's attribute interception for our own slot.
		object.__setattr__(self, '_name', name)

	def _get_target(self):
		global mtimedb, mtimedbfile, _mtimedb_initialized
		if not _mtimedb_initialized:
			# First access: build the MtimeDB from its on-disk path.
			mtimedbfile = os.path.join(os.path.sep,
				CACHE_PATH, "mtimedb")
			mtimedb = MtimeDB(mtimedbfile)
			_mtimedb_initialized = True
		return globals()[object.__getattribute__(self, '_name')]
8914 _legacy_global_var_names = ("archlist", "db", "features",
8915 "groups", "mtimedb", "mtimedbfile", "pkglines",
8916 "portdb", "profiledir", "root", "selinux_enabled",
8917 "settings", "thirdpartymirrors", "usedefaults")
def _disable_legacy_globals():
	"""
	Remove the ObjectProxy instances that provide lazy initialization
	of the legacy global variables.  Deleting them prevents new code
	from referencing these deprecated variables.
	"""
	module_globals = globals()
	for var_name in _legacy_global_var_names:
		module_globals.pop(var_name, None)
8930 # Initialization of legacy globals. No functions/classes below this point
8931 # please! When the above functions and classes become independent of the
8932 # below global variables, it will be possible to make the below code
8933 # conditional on a backward compatibility flag (backward compatibility could
8934 # be disabled via an environment variable, for example). This will enable new
8935 # code that is aware of this flag to import portage without the unnecessary
8936 # overhead (and other issues!) of initializing the legacy globals.
def init_legacy_globals():
	"""
	Initialize the deprecated module-level globals (db, settings,
	root, portdb, etc.).  Runs its body at most once per process.
	"""
	global _globals_initialized
	if _globals_initialized:
	_globals_initialized = True

	global db, settings, root, portdb, selinux_enabled, mtimedbfile, mtimedb, \
		archlist, features, groups, pkglines, thirdpartymirrors, usedefaults, \
		profiledir, flushmtimedb

	# Portage needs to ensure a sane umask for the files it creates.

	# Honor PORTAGE_CONFIGROOT and ROOT from the environment, both
	# defaulting to "/".
	for k, envvar in (("config_root", "PORTAGE_CONFIGROOT"), ("target_root", "ROOT")):
		kwargs[k] = os.environ.get(envvar, "/")

	# Flag set for the duration of tree creation and removed afterwards.
	# NOTE(review): presumably consumed elsewhere to detect re-entrant
	# initialization — confirm against config/create_trees.
	global _initializing_globals
	_initializing_globals = True
	db = create_trees(**kwargs)
	del _initializing_globals

	settings = db["/"]["vartree"].settings
		settings = db[myroot]["vartree"].settings

	root = settings["ROOT"]
	output._init(config_root=settings['PORTAGE_CONFIGROOT'])

	# ========================================================================
	# These attributes should not be used
	# within Portage under any circumstances.
	# ========================================================================
	archlist = settings.archlist()
	features = settings.features
	groups = settings["ACCEPT_KEYWORDS"].split()
	pkglines = settings.packages
	selinux_enabled = settings.selinux_enabled()
	thirdpartymirrors = settings.thirdpartymirrors()
	usedefaults = settings.use_defs
	profiledir = os.path.join(settings["PORTAGE_CONFIGROOT"], PROFILE_PATH)
	if not os.path.isdir(profiledir):

	# Deprecated shim kept only for backward compatibility.
	def flushmtimedb(record):
		writemsg("portage.flushmtimedb() is DEPRECATED\n")
	# ========================================================================
	# These attributes should not be used
	# within Portage under any circumstances.
	# ========================================================================
# Lazy mtimedb globals: the real objects are constructed on first
# attribute access through the proxies.
_mtimedb_initialized = False
mtimedb = _MtimedbProxy("mtimedb")
mtimedbfile = _MtimedbProxy("mtimedbfile")

# Lazy portdb global: resolved from db[root] on first access.
_portdb_initialized = False
portdb = _PortdbProxy()

_globals_initialized = False

# Install proxies for the remaining legacy globals; dereferencing any
# of them triggers init_legacy_globals().
for k in ("db", "settings", "root", "selinux_enabled",
	"archlist", "features", "groups",
	"pkglines", "thirdpartymirrors", "usedefaults", "profiledir",
	globals()[k] = _LegacyGlobalProxy(k)
9013 # ============================================================================
9014 # ============================================================================