1 # portage.py -- core Portage functionality
2 # Copyright 1998-2009 Gentoo Foundation
3 # Distributed under the terms of the GNU General Public License v2
6 from __future__ import print_function
8 VERSION="$Rev$"[6:-2] + "-svn"
10 # ===========================================================================
11 # START OF IMPORTS -- START OF IMPORTS -- START OF IMPORTS -- START OF IMPORT
12 # ===========================================================================
19 if not hasattr(errno, 'ESTALE'):
20 # ESTALE may not be defined on some systems, such as interix.
27 import cPickle as pickle
33 from subprocess import getstatusoutput as subprocess_getstatusoutput
35 from commands import getstatusoutput as subprocess_getstatusoutput
36 from time import sleep
37 from random import shuffle
38 from itertools import chain
42 # Temporarily delete these imports, to ensure that only the
43 # wrapped versions are imported by portage internals.
49 except ImportError as e:
50 sys.stderr.write("\n\n")
51 sys.stderr.write("!!! Failed to complete python imports. These are internal modules for\n")
52 sys.stderr.write("!!! python and failure here indicates that you have a problem with python\n")
53 sys.stderr.write("!!! itself and thus portage is not able to continue processing.\n\n")
55 sys.stderr.write("!!! You might consider starting python with verbose flags to see what has\n")
56 sys.stderr.write("!!! gone wrong. Here is the information we got for this exception:\n")
57 sys.stderr.write(" "+str(e)+"\n\n");
61 from portage.cache.cache_errors import CacheError
62 import portage.proxy.lazyimport
63 import portage.proxy as proxy
64 proxy.lazyimport.lazyimport(globals(),
66 'portage.checksum:perform_checksum,perform_md5,prelink_capable',
69 'portage.data:lchown,ostype,portage_gid,portage_uid,secpass,' + \
70 'uid,userland,userpriv_groups,wheelgid',
72 'portage.dep:best_match_to_list,dep_getcpv,dep_getkey,' + \
73 'get_operator,isjustname,isspecific,isvalidatom,' + \
74 'match_from_list,match_to_list',
75 'portage.eclass_cache',
76 'portage.env.loaders',
80 'portage.locks:lockdir,lockfile,unlockdir,unlockfile',
83 'portage.output:bold,colorize',
85 'portage.process:atexit_register,run_exitfuncs',
86 'portage.update:dep_transform,fixdbentries,grab_updates,' + \
87 'parse_updates,update_config_files,update_dbentries,' + \
90 'portage.util:atomic_ofstream,apply_secpass_permissions,' + \
91 'apply_recursive_permissions,dump_traceback,getconfig,' + \
92 'grabdict,grabdict_package,grabfile,grabfile_package,' + \
93 'map_dictlist_vals,new_protect_filename,normalize_path,' + \
94 'pickle_read,pickle_write,stack_dictlist,stack_dicts,' + \
95 'stack_lists,unique_array,varexpand,writedict,writemsg,' + \
96 'writemsg_stdout,write_atomic',
98 'portage.versions:best,catpkgsplit,catsplit,endversion_keys,' + \
99 'suffix_value@endversion,pkgcmp,pkgsplit,vercmp,ververify',
104 from portage.const import VDB_PATH, PRIVATE_PATH, CACHE_PATH, DEPCACHE_PATH, \
105 USER_CONFIG_PATH, MODULES_FILE_PATH, CUSTOM_PROFILE_PATH, PORTAGE_BASE_PATH, \
106 PORTAGE_BIN_PATH, PORTAGE_PYM_PATH, PROFILE_PATH, LOCALE_DATA_PATH, \
107 EBUILD_SH_BINARY, SANDBOX_BINARY, BASH_BINARY, \
108 MOVE_BINARY, PRELINK_BINARY, WORLD_FILE, MAKE_CONF_FILE, MAKE_DEFAULTS_FILE, \
109 DEPRECATED_PROFILE_FILE, USER_VIRTUALS_FILE, EBUILD_SH_ENV_FILE, \
110 INVALID_ENV_FILE, CUSTOM_MIRRORS_FILE, CONFIG_MEMORY_FILE,\
111 INCREMENTALS, EAPI, MISC_SH_BINARY, REPO_NAME_LOC, REPO_NAME_FILE
113 from portage.localization import _
115 except ImportError as e:
116 sys.stderr.write("\n\n")
117 sys.stderr.write("!!! Failed to complete portage imports. There are internal modules for\n")
118 sys.stderr.write("!!! portage and failure here indicates that you have a problem with your\n")
119 sys.stderr.write("!!! installation of portage. Please try a rescue portage located in the\n")
120 sys.stderr.write("!!! portage tree under '/usr/portage/sys-apps/portage/files/' (default).\n")
121 sys.stderr.write("!!! There is a README.RESCUE file that details the steps required to perform\n")
122 sys.stderr.write("!!! a recovery of portage.\n")
123 sys.stderr.write(" "+str(e)+"\n\n")
# Version-dependent text/bytes handling: _encodings maps logical contexts
# (e.g. 'merge', 'repo.content') to codec names.
if sys.hexversion >= 0x3000000:
    # Assume utf_8 fs encoding everywhere except in merge code, where the
    # user's locale is respected.
    # NOTE(review): the lines below are entries of the module-level
    # _encodings dict; the surrounding dict literal is elided in this view.
    'merge' : sys.getfilesystemencoding(),
    'repo.content' : 'utf_8',
# This can happen if python is built with USE=build (stage 1).
if _encodings['merge'] is None:
    _encodings['merge'] = 'ascii'

if sys.hexversion >= 0x3000000:
    def _unicode_encode(s, encoding=_encodings['content'], errors='backslashreplace'):
        # Python 3: str objects are encoded to bytes with the given codec.
        if isinstance(s, str):
            s = s.encode(encoding, errors)
    def _unicode_decode(s, encoding=_encodings['content'], errors='replace'):
        # Python 3: bytes objects are decoded to str.
        if isinstance(s, bytes):
            s = str(s, encoding=encoding, errors=errors)
    # NOTE(review): the two definitions below are the Python 2 variants
    # (presumably from an elided else branch of the version check above).
    def _unicode_encode(s, encoding=_encodings['content'], errors='backslashreplace'):
        # Python 2: only unicode objects are encoded to byte strings.
        if isinstance(s, unicode):
            s = s.encode(encoding, errors)
    def _unicode_decode(s, encoding=_encodings['content'], errors='replace'):
        # Python 2: byte strings are decoded into unicode objects.
        if isinstance(s, bytes):
            s = unicode(s, encoding=encoding, errors=errors)
class _unicode_func_wrapper(object):
    """
    Wraps a function, converts arguments from unicode to bytes,
    and return values to unicode from bytes. Function calls
    will raise UnicodeEncodeError if an argument fails to be
    encoded with the required encoding. Return values that
    are single strings are decoded with errors='replace'. Return
    values that are lists of strings are decoded with errors='strict'
    and elements that fail to be decoded are omitted from the returned
    """
    __slots__ = ('_func', '_encoding')

    def __init__(self, func, encoding=_encodings['fs']):
        # Store the codec used for all argument/return conversions.
        self._encoding = encoding

    def __call__(self, *args, **kwargs):
        # Encode all positional and keyword arguments strictly, so an
        # unencodable argument raises UnicodeEncodeError before the call.
        encoding = self._encoding
        wrapped_args = [_unicode_encode(x, encoding=encoding, errors='strict')
        wrapped_kwargs = dict(
            (k, _unicode_encode(v, encoding=encoding, errors='strict'))
            for k, v in kwargs.items())

        rval = self._func(*wrapped_args, **wrapped_kwargs)

        # List/tuple results are decoded element-wise; per the class
        # docstring, elements that fail strict decoding are omitted.
        if isinstance(rval, (list, tuple)):
                x = _unicode_decode(x, encoding=encoding, errors='strict')
            except UnicodeDecodeError:
                decoded_rval.append(x)
            if isinstance(rval, tuple):
                rval = tuple(decoded_rval)
            # Single-string results use errors='replace' instead.
            rval = _unicode_decode(rval, encoding=encoding, errors='replace')
class _unicode_module_wrapper(object):
    """
    Wraps a module and wraps all functions with _unicode_func_wrapper.
    """
    __slots__ = ('_mod', '_encoding', '_overrides', '_cache')

    def __init__(self, mod, encoding=_encodings['fs'], overrides=None, cache=True):
        # object.__setattr__ is required because __getattribute__ is
        # overridden below and the class uses __slots__.
        object.__setattr__(self, '_mod', mod)
        object.__setattr__(self, '_encoding', encoding)
        object.__setattr__(self, '_overrides', overrides)
        object.__setattr__(self, '_cache', cache)

    def __getattribute__(self, attr):
        # Serve previously wrapped attributes from the cache when enabled.
        cache = object.__getattribute__(self, '_cache')
        if cache is not None:
            result = cache.get(attr)
            if result is not None:
        result = getattr(object.__getattribute__(self, '_mod'), attr)
        encoding = object.__getattribute__(self, '_encoding')
        overrides = object.__getattribute__(self, '_overrides')
        # Precedence: explicit overrides (keyed by id() of the original
        # object), then submodules (wrapped recursively), then callables
        # (wrapped with the unicode argument/return converter).
        if overrides is not None:
            override = overrides.get(id(result))
            if override is not None:
        elif isinstance(result, type):
        elif type(result) is types.ModuleType:
            result = _unicode_module_wrapper(result,
                encoding=encoding, overrides=overrides)
        elif hasattr(result, '__call__'):
            result = _unicode_func_wrapper(result, encoding=encoding)
        if cache is not None:
258 id(_os.fdopen) : _os.fdopen,
259 id(_os.popen) : _os.popen,
260 id(_os.read) : _os.read,
261 id(_os.system) : _os.system,
264 if hasattr(_os, 'statvfs'):
265 _os_overrides[id(_os.statvfs)] = _os.statvfs
267 os = _unicode_module_wrapper(_os, overrides=_os_overrides,
268 encoding=_encodings['fs'])
269 _os_merge = _unicode_module_wrapper(_os,
270 encoding=_encodings['merge'], overrides=_os_overrides)
272 import shutil as _shutil
273 shutil = _unicode_module_wrapper(_shutil, encoding=_encodings['fs'])
275 # Imports below this point rely on the above unicode wrapper definitions.
278 _selinux_merge = None
280 import portage._selinux
281 selinux = _unicode_module_wrapper(_selinux,
282 encoding=_encodings['fs'])
283 _selinux_merge = _unicode_module_wrapper(_selinux,
284 encoding=_encodings['merge'])
286 sys.stderr.write("!!! SELinux not loaded: %s\n" % str(e))
291 from portage.manifest import Manifest
293 # ===========================================================================
294 # END OF IMPORTS -- END OF IMPORTS -- END OF IMPORTS -- END OF IMPORTS -- END
295 # ===========================================================================
def _gen_missing_encodings(missing_encodings):
    """
    Construct minimal CodecInfo objects for any of the named codecs
    ('ascii', 'utf_8') that are missing from the running python, and
    record them in a local dict under their standard aliases.
    """
    if 'ascii' in missing_encodings:

        class AsciiIncrementalEncoder(codecs.IncrementalEncoder):
            def encode(self, input, final=False):
                return codecs.ascii_encode(input, self.errors)[0]

        class AsciiIncrementalDecoder(codecs.IncrementalDecoder):
            def decode(self, input, final=False):
                return codecs.ascii_decode(input, self.errors)[0]

        class AsciiStreamWriter(codecs.StreamWriter):
            encode = codecs.ascii_encode

        class AsciiStreamReader(codecs.StreamReader):
            decode = codecs.ascii_decode

        codec_info = codecs.CodecInfo(
            encode=codecs.ascii_encode,
            decode=codecs.ascii_decode,
            incrementalencoder=AsciiIncrementalEncoder,
            incrementaldecoder=AsciiIncrementalDecoder,
            streamwriter=AsciiStreamWriter,
            streamreader=AsciiStreamReader,

        # Register the same codec under every common ascii alias.
        for alias in ('ascii', '646', 'ansi_x3.4_1968', 'ansi_x3_4_1968',
            'ansi_x3.4_1986', 'cp367', 'csascii', 'ibm367', 'iso646_us',
            'iso_646.irv_1991', 'iso_ir_6', 'us', 'us_ascii'):
            encodings[alias] = codec_info

    if 'utf_8' in missing_encodings:

        # Final=True so that incomplete multi-byte sequences raise.
        def utf8decode(input, errors='strict'):
            return codecs.utf_8_decode(input, errors, True)

        class Utf8IncrementalEncoder(codecs.IncrementalEncoder):
            def encode(self, input, final=False):
                return codecs.utf_8_encode(input, self.errors)[0]

        class Utf8IncrementalDecoder(codecs.BufferedIncrementalDecoder):
            _buffer_decode = codecs.utf_8_decode

        class Utf8StreamWriter(codecs.StreamWriter):
            encode = codecs.utf_8_encode

        class Utf8StreamReader(codecs.StreamReader):
            decode = codecs.utf_8_decode

        codec_info = codecs.CodecInfo(
            encode=codecs.utf_8_encode,
            incrementalencoder=Utf8IncrementalEncoder,
            incrementaldecoder=Utf8IncrementalDecoder,
            streamreader=Utf8StreamReader,
            streamwriter=Utf8StreamWriter,

        # Register the same codec under every common utf-8 alias.
        for alias in ('utf_8', 'u8', 'utf', 'utf8', 'utf8_ucs2', 'utf8_ucs4'):
            encodings[alias] = codec_info
def _ensure_default_encoding():
    """
    The python that's inside stage 1 or 2 is built with a minimal
    configuration which does not include the /usr/lib/pythonX.Y/encodings
    directory. This results in error like the following:

      LookupError: no codec search functions registered: can't find encoding

    In order to solve this problem, detect it early and manually register
    a search function for the ascii and utf_8 codecs. Starting with python-3.0
    this problem is more noticeable because of stricter handling of encoding
    and decoding between strings of characters and bytes.
    """

    default_fallback = 'utf_8'
    # Normalize codec names the same way the codecs module aliases do.
    default_encoding = sys.getdefaultencoding().lower().replace('-', '_')
    filesystem_encoding = _encodings['merge'].lower().replace('-', '_')
    # The interpreter default and filesystem encodings must resolve in
    # addition to the ascii/utf_8 baseline.
    required_encodings = set(['ascii', 'utf_8'])
    required_encodings.add(default_encoding)
    required_encodings.add(filesystem_encoding)
    missing_encodings = set()
    for codec_name in required_encodings:
            codecs.lookup(codec_name)
            missing_encodings.add(codec_name)

    # Nothing to do when all required codecs resolve.
    if not missing_encodings:

    encodings = _gen_missing_encodings(missing_encodings)

    if default_encoding in missing_encodings and \
        default_encoding not in encodings:
        # Make the fallback codec correspond to whatever name happens
        # to be returned by sys.getfilesystemencoding().
            encodings[default_encoding] = codecs.lookup(default_fallback)
            encodings[default_encoding] = encodings[default_fallback]

    if filesystem_encoding in missing_encodings and \
        filesystem_encoding not in encodings:
        # Make the fallback codec correspond to whatever name happens
        # to be returned by sys.getdefaultencoding().
            encodings[filesystem_encoding] = codecs.lookup(default_fallback)
            encodings[filesystem_encoding] = encodings[default_fallback]

    def search_function(name):
        # Codec lookups arrive with either '-' or '_' separators;
        # normalize to underscores before consulting our table.
        name = name.replace('-', '_')
        codec_info = encodings.get(name)
        if codec_info is not None:
            return codecs.CodecInfo(
                name=codec_info.name,
                encode=codec_info.encode,
                decode=codec_info.decode,
                incrementalencoder=codec_info.incrementalencoder,
                incrementaldecoder=codec_info.incrementaldecoder,
                streamreader=codec_info.streamreader,
                streamwriter=codec_info.streamwriter,

    codecs.register(search_function)

    # codecs.register holds its own reference to search_function, so the
    # locals can be released early.
    del codec_name, default_encoding, default_fallback, \
        filesystem_encoding, missing_encodings, \
        required_encodings, search_function
439 # Do this ASAP since writemsg() might not work without it.
440 _ensure_default_encoding()
444 Quote a string in double-quotes and use backslashes to
445 escape any backslashes, double-quotes, dollar signs, or
446 backquotes in the string.
448 for letter in "\\\"$`":
450 s = s.replace(letter, "\\" + letter)
# FreeBSD file flags (schg, uchg, ...) support, via the chflags(1) command.
if platform.system() in ('FreeBSD',):

    class bsd_chflags(object):

        # NOTE(review): defined with a cls parameter — presumably a
        # classmethod; the decorator line is not visible in this chunk.
        def chflags(cls, path, flags, opts=""):
            # flags is rendered in octal, matching chflags(1) input.
            cmd = 'chflags %s %o %s' % (opts, flags, _shell_quote(path))
            status, output = subprocess_getstatusoutput(cmd)
            if os.WIFEXITED(status) and os.WEXITSTATUS(status) == os.EX_OK:
            # Try to generate an ENOENT error if appropriate.
                _os_merge.lstat(path)
            # Make sure the binary exists.
            if not portage.process.find_binary('chflags'):
                raise portage.exception.CommandNotFound('chflags')
            # Now we're not sure exactly why it failed or what
            # the real errno was, so just report EPERM.
            e = OSError(errno.EPERM, output)
            e.errno = errno.EPERM

        def lchflags(cls, path, flags):
            # Like chflags, but -h operates on the symlink itself.
            return cls.chflags(path, flags, opts='-h')
486 modname = ".".join(name.split(".")[:-1])
487 mod = __import__(modname)
488 components = name.split('.')
489 for comp in components[1:]:
490 mod = getattr(mod, comp)
def best_from_dict(key, top_dict, key_order, EmptyOnError=1, FullCopy=1, AllowEmpty=1):
    # Search top_dict's sub-dicts in key_order and return the first value
    # found for key; FullCopy selects a deep copy of the value.
    if x in top_dict and key in top_dict[x]:
            return copy.deepcopy(top_dict[x][key])
            return top_dict[x][key]
    # Key was found in none of the sub-dicts (and EmptyOnError handling,
    # elided from this view, did not apply).
    raise KeyError("Key not found in list; '%s'" % key)
506 "this fixes situations where the current directory doesn't exist"
509 except OSError: #dir doesn't exist
def abssymlink(symlink):
    "This reads symlinks, resolving the relative symlinks, and returning the absolute."
    mylink=os.readlink(symlink)
    # A relative target is resolved against the symlink's own directory.
    # NOTE(review): the guard that skips this for absolute targets is not
    # visible in this chunk — confirm against the full source.
    mydir=os.path.dirname(symlink)
    mylink=mydir+"/"+mylink
    return os.path.normpath(mylink)
def cacheddir(my_original_path, ignorecvs, ignorelist, EmptyOnError, followSymlinks=True):
    """
    os.listdir with an mtime-validated cache (module-level dircache),
    returning a (names, types) pair filtered by ignorelist.
    """
    global cacheHit,cacheMiss,cacheStale
    mypath = normalize_path(my_original_path)
    if mypath in dircache:
        cached_mtime, list, ftype = dircache[mypath]
        # Cache miss: sentinel values force a rescan below.
        cached_mtime, list, ftype = -1, [], []
        pathstat = os.stat(mypath)
        if stat.S_ISDIR(pathstat[stat.ST_MODE]):
            mtime = pathstat.st_mtime
            raise portage.exception.DirectoryNotFound(mypath)
    except EnvironmentError as e:
        if e.errno == portage.exception.PermissionDenied.errno:
            raise portage.exception.PermissionDenied(mypath)
    except portage.exception.PortageException:
    # Python returns mtime in seconds, so if it was changed in the last few seconds, it could be invalid
    if mtime != cached_mtime or time.time() - mtime < 4:
        if mypath in dircache:
            list = os.listdir(mypath)
        except EnvironmentError as e:
            if e.errno != errno.EACCES:
            raise portage.exception.PermissionDenied(mypath)
            # Classify each entry; lstat is used when not following symlinks.
                pathstat = os.stat(mypath+"/"+x)
                pathstat = os.lstat(mypath+"/"+x)
            if stat.S_ISREG(pathstat[stat.ST_MODE]):
            elif stat.S_ISDIR(pathstat[stat.ST_MODE]):
            elif stat.S_ISLNK(pathstat[stat.ST_MODE]):
            except (IOError, OSError):
        dircache[mypath] = mtime, list, ftype
    # Filter ignorelist entries; ".#"-prefixed (lock/backup) names are
    # also dropped in the visible branch below.
    for x in range(0, len(list)):
        if list[x] in ignorelist:
        if list[x][:2] != ".#":
            ret_list.append(list[x])
            ret_ftype.append(ftype[x])
            ret_list.append(list[x])
            ret_ftype.append(ftype[x])

    writemsg("cacheddirStats: H:%d/M:%d/S:%d\n" % (cacheHit, cacheMiss, cacheStale),10)
    return ret_list, ret_ftype
# Directory names treated as VCS metadata when ignorecvs is requested.
_ignorecvs_dirs = ('CVS', 'SCCS', '.svn', '.git')

def listdir(mypath, recursive=False, filesonly=False, ignorecvs=False, ignorelist=[], followSymlinks=True,
    EmptyOnError=False, dirsonly=False):
    """
    Portage-specific implementation of os.listdir

    @param mypath: Path whose contents you wish to list

    @param recursive: Recursively scan directories contained within mypath
    @type recursive: Boolean
    @param filesonly; Only return files, not more directories
    @type filesonly: Boolean
    @param ignorecvs: Ignore CVS directories ('CVS','SCCS','.svn','.git')
    @type ignorecvs: Boolean
    @param ignorelist: List of filenames/directories to exclude
    @type ignorelist: List
    @param followSymlinks: Follow Symlink'd files and directories
    @type followSymlinks: Boolean
    @param EmptyOnError: Return [] if an error occurs.
    @type EmptyOnError: Boolean
    @param dirsonly: Only return directories.
    @type dirsonly: Boolean

    @returns: A list of files and directories (or just files or just directories) or an empty list.
    """

    list, ftype = cacheddir(mypath, ignorecvs, ignorelist, EmptyOnError, followSymlinks)

    # Fast path: no filtering or recursion requested.
    if not (filesonly or dirsonly or recursive):

    # Recursive case: descend into type-1 (directory) entries, skipping
    # VCS metadata directories when ignorecvs is set.
        if ftype[x] == 1 and not \
            (ignorecvs and os.path.basename(list[x]) in _ignorecvs_dirs):
            l,f = cacheddir(mypath+"/"+list[x], ignorecvs, ignorelist, EmptyOnError,
            # Prefix child entries with their parent directory name.
            for y in range(0,len(l)):
                l[y]=list[x]+"/"+l[y]

    # filesonly / dirsonly selection by entry type code.
    for x in range(0,len(ftype)):
            rlist=rlist+[list[x]]

    for x in range(0, len(ftype)):
            rlist = rlist + [list[x]]
def flatten(mytokens):
    """this function now turns a [1,[2,3]] list into
    a [1,2,3] list and returns it."""
    # Nested lists are flattened recursively into newlist.
    if isinstance(x, list):
        newlist.extend(flatten(x))
#beautiful directed graph object

class digraph(object):
    # NOTE(review): several method headers and body lines are elided from
    # this view of the file; comments describe only the visible code.

    """Create an empty digraph"""

    # { node : ( { child : priority } , { parent : priority } ) }

    def add(self, node, parent, priority=0):
        """Adds the specified node with the specified parent.

        If the dep is a soft-dep and the node already has a hard
        relationship to the parent, the relationship is left as hard."""

        # Register both endpoints; self.order preserves insertion order.
        if node not in self.nodes:
            self.nodes[node] = ({}, {}, node)
            self.order.append(node)

        if parent not in self.nodes:
            self.nodes[parent] = ({}, {}, parent)
            self.order.append(parent)

        # The same priority list object is installed in both the parent's
        # child map and the node's parent map, so it stays shared.
        priorities = self.nodes[node][1].get(parent)
        if priorities is None:
            self.nodes[node][1][parent] = priorities
            self.nodes[parent][0][node] = priorities
        priorities.append(priority)

    def remove(self, node):
        """Removes the specified node from the digraph, also removing
        any ties to other nodes in the digraph. Raises KeyError if the
        node doesn't exist."""

        if node not in self.nodes:

        # Drop every edge referencing this node, in both directions.
        for parent in self.nodes[node][1]:
            del self.nodes[parent][0][node]
        for child in self.nodes[node][0]:
            del self.nodes[child][1][node]

        self.order.remove(node)

    def difference_update(self, t):
        """
        Remove all given nodes from node_set. This is more efficient
        than multiple calls to the remove() method.
        """
        # Ensure t supports fast membership tests before the sweep below.
        if isinstance(t, (list, tuple)) or \
            not hasattr(t, "__contains__"):
        for node in self.order:
            for parent in self.nodes[node][1]:
                del self.nodes[parent][0][node]
            for child in self.nodes[node][0]:
                del self.nodes[child][1][node]

    def remove_edge(self, child, parent):
        """
        Remove edge in the direction from child to parent. Note that it is
        possible for a remaining edge to exist in the opposite direction.
        Any endpoint vertices that become isolated will remain in the graph.
        """

        # Nothing should be modified when a KeyError is raised.
        for k in parent, child:
            if k not in self.nodes:

        # Make sure the edge exists.
        if child not in self.nodes[parent][0]:
            raise KeyError(child)
        if parent not in self.nodes[child][1]:
            raise KeyError(parent)

        # Remove the edge from both adjacency maps.
        del self.nodes[child][1][parent]
        del self.nodes[parent][0][child]

        # (body of __iter__; its def line is elided in this view)
        return iter(self.order)

    def contains(self, node):
        """Checks if the digraph contains mynode"""
        return node in self.nodes

    def get(self, key, default=None):
        # self acts as a sentinel so that None can be stored as a node.
        node_data = self.nodes.get(key, self)
        if node_data is self:

        # (docstring of all_nodes; its def line is elided in this view)
        """Return a list of all nodes in the graph"""

    def child_nodes(self, node, ignore_priority=None):
        """Return all children of the specified node"""
        if ignore_priority is None:
            return list(self.nodes[node][0])
        # ignore_priority is either a callable filter or a comparable value.
        if hasattr(ignore_priority, '__call__'):
            for child, priorities in self.nodes[node][0].items():
                for priority in priorities:
                    if not ignore_priority(priority):
                        children.append(child)
            for child, priorities in self.nodes[node][0].items():
                if ignore_priority < priorities[-1]:
                    children.append(child)

    def parent_nodes(self, node, ignore_priority=None):
        """Return all parents of the specified node"""
        if ignore_priority is None:
            return list(self.nodes[node][1])
        if hasattr(ignore_priority, '__call__'):
            for parent, priorities in self.nodes[node][1].items():
                for priority in priorities:
                    if not ignore_priority(priority):
                        parents.append(parent)
            for parent, priorities in self.nodes[node][1].items():
                if ignore_priority < priorities[-1]:
                    parents.append(parent)

    def leaf_nodes(self, ignore_priority=None):
        """Return all nodes that have no children

        If ignore_soft_deps is True, soft deps are not counted as
        children in calculations."""

        if ignore_priority is None:
            for node in self.order:
                if not self.nodes[node][0]:
                    leaf_nodes.append(node)
        elif hasattr(ignore_priority, '__call__'):
            for node in self.order:
                for child, priorities in self.nodes[node][0].items():
                    for priority in priorities:
                        if not ignore_priority(priority):
                leaf_nodes.append(node)
            for node in self.order:
                for child, priorities in self.nodes[node][0].items():
                    if ignore_priority < priorities[-1]:
                leaf_nodes.append(node)

    def root_nodes(self, ignore_priority=None):
        """Return all nodes that have no parents.

        If ignore_soft_deps is True, soft deps are not counted as
        parents in calculations."""

        if ignore_priority is None:
            for node in self.order:
                if not self.nodes[node][1]:
                    root_nodes.append(node)
        elif hasattr(ignore_priority, '__call__'):
            for node in self.order:
                for parent, priorities in self.nodes[node][1].items():
                    for priority in priorities:
                        if not ignore_priority(priority):
                root_nodes.append(node)
            for node in self.order:
                for parent, priorities in self.nodes[node][1].items():
                    if ignore_priority < priorities[-1]:
                root_nodes.append(node)

        # (body of is_empty; its def line is elided in this view)
        """Checks if the digraph is empty"""
        return len(self.nodes) == 0

        # (clone/copy body: each shared priority list is duplicated exactly
        # once via the memo table, so the sharing structure is preserved)
        for children, parents, node in self.nodes.values():
            for child, priorities in children.items():
                priorities_clone = memo.get(id(priorities))
                if priorities_clone is None:
                    priorities_clone = priorities[:]
                    memo[id(priorities)] = priorities_clone
                children_clone[child] = priorities_clone
            for parent, priorities in parents.items():
                priorities_clone = memo.get(id(priorities))
                if priorities_clone is None:
                    priorities_clone = priorities[:]
                    memo[id(priorities)] = priorities_clone
                parents_clone[parent] = priorities_clone
            clone.nodes[node] = (children_clone, parents_clone, node)
        clone.order = self.order[:]

    # Backward compatibility
    allzeros = leaf_nodes
    __contains__ = contains

    def delnode(self, node):

        # (visible fragment of a sibling method; its def line is elided)
        leaf_nodes = self.leaf_nodes()

    def hasallzeros(self, ignore_priority=None):
        # True when every node is currently a leaf (continuation elided).
        return len(self.leaf_nodes(ignore_priority=ignore_priority)) == \
    def debug_print(self):
        # writemsg at noiselevel=-1 is the output sink (the enclosing
        # helper def is elided in this view).
        writemsg(s, noiselevel=-1)
        for node in self.nodes:
            output("%s " % (node,))
            if self.nodes[node][0]:
                output("depends on\n")
                output("(no children)\n")
            for child, priorities in self.nodes[node][0].items():
                # Only the highest (last) priority of each edge is shown.
                output("  %s (%s)\n" % (child, priorities[-1],))
#parse /etc/env.d and generate /etc/profile.env

def env_update(makelinks=1, target_root=None, prev_mtimes=None, contents=None,
    env=None, writemsg_level=None):
    """
    Regenerate the environment produced from /etc/env.d: writes
    ld.so.conf, prelink.conf, /etc/profile.env and /etc/csh.env under
    target_root, and runs ldconfig when the library path setup changed.
    prev_mtimes is consulted/updated to detect modified library dirs.
    """
    if writemsg_level is None:
        writemsg_level = portage.util.writemsg_level
    if target_root is None:
        target_root = settings["ROOT"]
    if prev_mtimes is None:
        prev_mtimes = mtimedb["ldpath"]
    envd_dir = os.path.join(target_root, "etc", "env.d")
    portage.util.ensure_dirs(envd_dir, mode=0o755)
    fns = listdir(envd_dir, EmptyOnError=1)

    # env.d files must start with two digits; hidden/backup files are skipped.
    if not x[0].isdigit() or not x[1].isdigit():
    if x.startswith(".") or x.endswith("~") or x.endswith(".bak"):

    # Variables accumulated across env.d files rather than overwritten.
    space_separated = set(["CONFIG_PROTECT", "CONFIG_PROTECT_MASK"])
    colon_separated = set(["ADA_INCLUDE_PATH", "ADA_OBJECTS_PATH",
        "CLASSPATH", "INFODIR", "INFOPATH", "KDEDIRS", "LDPATH", "MANPATH",
        "PATH", "PKG_CONFIG_PATH", "PRELINK_PATH", "PRELINK_PATH_MASK",
        "PYTHONPATH", "ROOTPATH"])

    # Parse each env.d file; unparseable or vanished files are reported
    # and skipped rather than aborting the whole update.
        file_path = os.path.join(envd_dir, x)
            myconfig = getconfig(file_path, expand=False)
        except portage.exception.ParseError as e:
            writemsg("!!! '%s'\n" % str(e), noiselevel=-1)
            # broken symlink or file removed by a concurrent process
            writemsg("!!! File Not Found: '%s'\n" % file_path, noiselevel=-1)

        config_list.append(myconfig)
        # env.d files may extend the cumulative-variable sets themselves.
        if "SPACE_SEPARATED" in myconfig:
            space_separated.update(myconfig["SPACE_SEPARATED"].split())
            del myconfig["SPACE_SEPARATED"]
        if "COLON_SEPARATED" in myconfig:
            colon_separated.update(myconfig["COLON_SEPARATED"].split())
            del myconfig["COLON_SEPARATED"]

    # Merge cumulative variables across all files, de-duplicating items.
    for var in space_separated:
        for myconfig in config_list:
                for item in myconfig[var].split():
                    if item and not item in mylist:
                del myconfig[var] # prepare for env.update(myconfig)
            env[var] = " ".join(mylist)
        specials[var] = mylist

    for var in colon_separated:
        for myconfig in config_list:
                for item in myconfig[var].split(":"):
                    if item and not item in mylist:
                del myconfig[var] # prepare for env.update(myconfig)
            env[var] = ":".join(mylist)
        specials[var] = mylist

    for myconfig in config_list:
        """Cumulative variables have already been deleted from myconfig so that
        they won't be overwritten by this dict.update call."""
        env.update(myconfig)

    # Read the existing ld.so.conf to decide whether it needs rewriting.
    ldsoconf_path = os.path.join(target_root, "etc", "ld.so.conf")
        myld = codecs.open(_unicode_encode(ldsoconf_path,
            encoding=_encodings['fs'], errors='strict'),
            mode='r', encoding=_encodings['content'], errors='replace')
        myldlines=myld.readlines()
            #each line has at least one char (a newline)
            oldld.append(x[:-1])
    except (IOError, OSError) as e:
        if e.errno != errno.ENOENT:

    ld_cache_update=False

    newld = specials["LDPATH"]
        #ld.so.conf needs updating and ldconfig needs to be run
        myfd = atomic_ofstream(ldsoconf_path)
        myfd.write("# ld.so.conf autogenerated by env-update; make all changes to\n")
        myfd.write("# contents of /etc/env.d directory\n")
        for x in specials["LDPATH"]:
        ld_cache_update=True

    # Update prelink.conf if we are prelink-enabled
        newprelink = atomic_ofstream(
            os.path.join(target_root, "etc", "prelink.conf"))
        newprelink.write("# prelink.conf autogenerated by env-update; make all changes to\n")
        newprelink.write("# contents of /etc/env.d directory\n")

        for x in ["/bin","/sbin","/usr/bin","/usr/sbin","/lib","/usr/lib"]:
            newprelink.write("-l "+x+"\n");
        for x in specials["LDPATH"]+specials["PATH"]+specials["PRELINK_PATH"]:
            # Paths covered by PRELINK_PATH_MASK are emitted as -h instead.
                for y in specials["PRELINK_PATH_MASK"]:
                newprelink.write("-h "+x+"\n")
        for x in specials["PRELINK_PATH_MASK"]:
            newprelink.write("-b "+x+"\n")

    # Portage stores mtimes with 1 second granularity but in >=python-2.5 finer
    # granularity is possible. In order to avoid the potential ambiguity of
    # mtimes that differ by less than 1 second, sleep here if any of the
    # directories have been modified during the current second.
    sleep_for_mtime_granularity = False
    current_time = long(time.time())
    mtime_changed = False
    for lib_dir in portage.util.unique_array(specials["LDPATH"]+['usr/lib','usr/lib64','usr/lib32','lib','lib64','lib32']):
        x = os.path.join(target_root, lib_dir.lstrip(os.sep))
            newldpathtime = long(os.stat(x).st_mtime)
            lib_dirs.add(normalize_path(x))
        except OSError as oe:
            if oe.errno == errno.ENOENT:
                # ignore this path because it doesn't exist
        if newldpathtime == current_time:
            sleep_for_mtime_granularity = True
        if x in prev_mtimes:
            if prev_mtimes[x] == newldpathtime:
                prev_mtimes[x] = newldpathtime
                mtime_changed = True
            prev_mtimes[x] = newldpathtime
            mtime_changed = True

        ld_cache_update = True

    # With per-package contents available, skip ldconfig when no library
    # directory actually gained or lost an object/symlink.
        not ld_cache_update and \
        contents is not None:
        libdir_contents_changed = False
        for mypath, mydata in contents.items():
            if mydata[0] not in ("obj","sym"):
            head, tail = os.path.split(mypath)
            if head in lib_dirs:
                libdir_contents_changed = True
        if not libdir_contents_changed:

    # Cross-compilation uses the CHOST-prefixed ldconfig when available.
    ldconfig = "/sbin/ldconfig"
    if "CHOST" in env and "CBUILD" in env and \
        env["CHOST"] != env["CBUILD"]:
        from portage.process import find_binary
        ldconfig = find_binary("%s-ldconfig" % env["CHOST"])

    # Only run ldconfig as needed
    if (ld_cache_update or makelinks) and ldconfig:
        # ldconfig has very different behaviour between FreeBSD and Linux
        if ostype=="Linux" or ostype.lower().endswith("gnu"):
            # We can't update links if we haven't cleaned other versions first, as
            # an older package installed ON TOP of a newer version will cause ldconfig
            # to overwrite the symlinks we just made. -X means no links. After 'clean'
            # we can safely create links.
            writemsg_level(_(">>> Regenerating %setc/ld.so.cache...\n") % \
                os.system("cd / ; %s -r '%s'" % (ldconfig, target_root))
                os.system("cd / ; %s -X -r '%s'" % (ldconfig, target_root))
        elif ostype in ("FreeBSD","DragonFly"):
            writemsg_level(_(">>> Regenerating %svar/run/ld-elf.so.hints...\n") % \
            os.system(("cd / ; %s -elf -i " + \
                "-f '%svar/run/ld-elf.so.hints' '%setc/ld.so.conf'") % \
                (ldconfig, target_root, target_root))

    del specials["LDPATH"]

    penvnotice  = "# THIS FILE IS AUTOMATICALLY GENERATED BY env-update.\n"
    penvnotice += "# DO NOT EDIT THIS FILE. CHANGES TO STARTUP PROFILES\n"
    cenvnotice  = penvnotice[:]
    penvnotice += "# GO INTO /etc/profile NOT /etc/profile.env\n\n"
    cenvnotice += "# GO INTO /etc/csh.cshrc NOT /etc/csh.env\n\n"

    #create /etc/profile.env for bash support
    outfile = atomic_ofstream(os.path.join(target_root, "etc", "profile.env"))
    outfile.write(penvnotice)

    env_keys = [ x for x in env if x != "LDPATH" ]
        # Values beginning with a bare '$' are exported via $'...' quoting.
        if v.startswith('$') and not v.startswith('${'):
            outfile.write("export %s=$'%s'\n" % (k, v[1:]))
            outfile.write("export %s='%s'\n" % (k, v))

    #create /etc/csh.env for (t)csh support
    outfile = atomic_ofstream(os.path.join(target_root, "etc", "csh.env"))
    outfile.write(cenvnotice)
        outfile.write("setenv %s '%s'\n" % (x, env[x]))

    # See the mtime-granularity note above: wait out the current second.
    if sleep_for_mtime_granularity:
        while current_time == long(time.time()):
def ExtractKernelVersion(base_dir):
    """
    Try to figure out what kernel version we are running
    @param base_dir: Path to sources (usually /usr/src/linux)
    @type base_dir: string
    @rtype: tuple( version[string], error[string])

    1. tuple( version[string], error[string])
    Either version or error is populated (but never both)
    """
    pathname = os.path.join(base_dir, 'Makefile')
        f = codecs.open(_unicode_encode(pathname,
            encoding=_encodings['fs'], errors='strict'), mode='r',
            encoding=_encodings['content'], errors='replace')
    except OSError as details:
        return (None, str(details))
    except IOError as details:
        return (None, str(details))

    # Only the first few lines of the Makefile are needed (VERSION,
    # PATCHLEVEL, SUBLEVEL, EXTRAVERSION).
        lines.append(f.readline())
    except OSError as details:
        return (None, str(details))
    except IOError as details:
        return (None, str(details))

    lines = [l.strip() for l in lines]

    #XXX: The following code relies on the ordering of vars within the Makefile

        # split on the '=' then remove annoying whitespace
        items = line.split("=")
        items = [i.strip() for i in items]
        if items[0] == 'VERSION' or \
            items[0] == 'PATCHLEVEL':
        elif items[0] == 'SUBLEVEL':
        elif items[0] == 'EXTRAVERSION' and \
            items[-1] != items[0]:

    # Grab a list of files named localversion* and sort them
    localversions = os.listdir(base_dir)
    # Iterate in reverse so deletions don't shift unvisited indices.
    for x in range(len(localversions)-1,-1,-1):
        if localversions[x][:12] != "localversion":
            del localversions[x]
    localversions.sort()

    # Append the contents of each to the version string, stripping ALL whitespace
    for lv in localversions:
        version += "".join( " ".join( grabfile( base_dir+ "/" + lv ) ).split() )

    # Check the .config for a CONFIG_LOCALVERSION and append that too, also stripping whitespace
    kernelconfig = getconfig(base_dir+"/.config")
    if kernelconfig and "CONFIG_LOCALVERSION" in kernelconfig:
        version += "".join(kernelconfig["CONFIG_LOCALVERSION"].split())

    return (version,None)
def autouse(myvartree, use_cache=1, mysettings=None):
	"""
	autuse returns a list of USE variables auto-enabled to packages being installed

	@param myvartree: Instance of the vartree class (from /var/db/pkg...)
	@type myvartree: vartree
	@param use_cache: read values from cache
	@type use_cache: Boolean
	@param mysettings: Instance of config
	@type mysettings: config
	@rtype: string
	@returns: A string containing a list of USE variables that are enabled via use.defaults
	"""
	if mysettings is None:
		# Fall back to the module-level settings instance.
		mysettings = settings
	if mysettings.profile_path is None:
	usedefaults = mysettings.use_defs
	for myuse in usedefaults:
		# A USE flag is auto-enabled only when every dep listed for it in
		# use.defaults is matched by an installed package.
		for mydep in usedefaults[myuse]:
			if not myvartree.dep_match(mydep,use_cache=True):
			myusevars += " "+myuse
def check_config_instance(test):
	"""Validate that *test* is a config instance; raise TypeError otherwise."""
	if isinstance(test, config):
		return
	raise TypeError("Invalid type for config object: %s (should be %s)" % (test.__class__, config))
1314 def _lazy_iuse_regex(iuse_implicit):
1316 The PORTAGE_IUSE value is lazily evaluated since re.escape() is slow
1317 and the value is only used when an ebuild phase needs to be executed
1318 (it's used only to generate QA notices).
1320 # Escape anything except ".*" which is supposed to pass through from
1321 # _get_implicit_iuse().
1322 regex = sorted(re.escape(x) for x in iuse_implicit)
1323 regex = "^(%s)$" % "|".join(regex)
1324 regex = regex.replace("\\.\\*", ".*")
1327 class _local_repo_config(object):
1328 __slots__ = ('aliases', 'eclass_overrides', 'masters', 'name',)
1329 def __init__(self, name, repo_opts):
1332 aliases = repo_opts.get('aliases')
1333 if aliases is not None:
1334 aliases = tuple(aliases.split())
1335 self.aliases = aliases
1337 eclass_overrides = repo_opts.get('eclass-overrides')
1338 if eclass_overrides is not None:
1339 eclass_overrides = tuple(eclass_overrides.split())
1340 self.eclass_overrides = eclass_overrides
1342 masters = repo_opts.get('masters')
1343 if masters is not None:
1344 masters = tuple(masters.split())
1345 self.masters = masters
class config(object):
	"""
	This class encompasses the main portage configuration. Data is pulled from
	ROOT/PORTDIR/profiles/, from ROOT/etc/make.profile incrementally through all
	parent profiles as well as from ROOT/PORTAGE_CONFIGROOT/* for user specified
	overrides.

	Generally if you need data like USE flags, FEATURES, environment variables,
	virtuals ...etc you look in here.
	"""

	# Don't include anything that could be extremely long here (like SRC_URI)
	# since that could cause execve() calls to fail with E2BIG errors. For
	# example, see bug #262647.
	_setcpv_aux_keys = ('SLOT', 'RESTRICT', 'LICENSE',
		'KEYWORDS', 'INHERITED', 'IUSE', 'PROVIDE', 'EAPI',
		'PROPERTIES', 'DEFINED_PHASES', 'repository')

	# NOTE(review): the following names look like the contents of the
	# _env_blacklist set (variables stripped from all config layers in
	# __init__) — confirm against the enclosing assignment.
		"A", "AA", "CATEGORY", "DEPEND", "DESCRIPTION", "EAPI",
		"EBUILD_PHASE", "EMERGE_FROM", "HOMEPAGE", "INHERITED", "IUSE",
		"KEYWORDS", "LICENSE", "PDEPEND", "PF", "PKGUSE",
		"PORTAGE_CONFIGROOT", "PORTAGE_IUSE",
		"PORTAGE_NONFATAL", "PORTAGE_REPO_NAME",
		"PORTAGE_USE", "PROPERTIES", "PROVIDE", "RDEPEND", "RESTRICT",
		"ROOT", "SLOT", "SRC_URI"

	_environ_whitelist = []

	# Whitelisted variables are always allowed to enter the ebuild
	# environment. Generally, this only includes special portage
	# variables. Ebuilds can unset variables that are not whitelisted
	# and rely on them remaining unset for future phases, without them
	# leaking back in from various locations (bug #189417). It's very
	# important to set our special BASH_ENV variable in the ebuild
	# environment in order to prevent sandbox from sourcing /etc/profile
	# in it's bashrc (causing major leakage).
	_environ_whitelist += [
		"ACCEPT_LICENSE", "BASH_ENV", "BUILD_PREFIX", "D",
		"DISTDIR", "DOC_SYMLINKS_DIR", "EBUILD",
		"EBUILD_EXIT_STATUS_FILE", "EBUILD_FORCE_TEST",
		"EBUILD_PHASE", "ECLASSDIR", "ECLASS_DEPTH", "EMERGE_FROM",
		"FEATURES", "FILESDIR", "HOME", "NOCOLOR", "PATH",
		"PKGUSE", "PKG_LOGDIR", "PKG_TMPDIR",
		"PORTAGE_ACTUAL_DISTDIR", "PORTAGE_ARCHLIST",
		"PORTAGE_BINPKG_FILE", "PORTAGE_BINPKG_TAR_OPTS",
		"PORTAGE_BINPKG_TMPFILE",
		"PORTAGE_BUILDDIR", "PORTAGE_COLORMAP",
		"PORTAGE_CONFIGROOT", "PORTAGE_DEBUG", "PORTAGE_DEPCACHEDIR",
		"PORTAGE_GID", "PORTAGE_INST_GID", "PORTAGE_INST_UID",
		"PORTAGE_LOG_FILE", "PORTAGE_MASTER_PID",
		"PORTAGE_PYM_PATH", "PORTAGE_QUIET",
		"PORTAGE_REPO_NAME", "PORTAGE_RESTRICT",
		"PORTAGE_TMPDIR", "PORTAGE_UPDATE_ENV",
		"PORTAGE_VERBOSE", "PORTAGE_WORKDIR_MODE",
		"PORTDIR", "PORTDIR_OVERLAY", "PREROOTPATH", "PROFILE_PATHS",
		"ROOT", "ROOTPATH", "STARTDIR", "T", "TMP", "TMPDIR",
		"USE_EXPAND", "USE_ORDER", "WORKDIR",

	# user config variables
	_environ_whitelist += [
		"DOC_SYMLINKS_DIR", "INSTALL_MASK", "PKG_INSTALL_MASK"

	_environ_whitelist += [
		"A", "AA", "CATEGORY", "P", "PF", "PN", "PR", "PV", "PVR"

	# misc variables inherited from the calling environment
	_environ_whitelist += [
		"COLORTERM", "DISPLAY", "EDITOR", "LESS",
		"LESSOPEN", "LOGNAME", "LS_COLORS", "PAGER",
		"TERM", "TERMCAP", "USER",

	# other variables inherited from the calling environment
	_environ_whitelist += [
		"CVS_RSH", "ECHANGELOG_USER",
		"SSH_AGENT_PID", "SSH_AUTH_SOCK",
		"STY", "WINDOW", "XAUTHORITY",

	_environ_whitelist = frozenset(_environ_whitelist)

	# Prefixed variables (ccache/distcc knobs) are whitelisted by pattern.
	_environ_whitelist_re = re.compile(r'^(CCACHE_|DISTCC_).*')

	# Filter selected variables in the config.environ() method so that
	# they don't needlessly propagate down into the ebuild environment.
	_environ_filter = []

	# misc variables inherited from the calling environment
	_environ_filter += [
		"INFOPATH", "MANPATH",

	# variables that break bash
	_environ_filter += [
		"HISTFILE", "POSIXLY_CORRECT",

	# portage config variables and variables set directly by portage
	_environ_filter += [
		"ACCEPT_KEYWORDS", "ACCEPT_PROPERTIES", "AUTOCLEAN",
		"CLEAN_DELAY", "COLLISION_IGNORE", "CONFIG_PROTECT",
		"CONFIG_PROTECT_MASK", "EGENCACHE_DEFAULT_OPTS", "EMERGE_DEFAULT_OPTS",
		"EMERGE_WARNING_DELAY", "FETCHCOMMAND", "FETCHCOMMAND_FTP",
		"FETCHCOMMAND_HTTP", "FETCHCOMMAND_SFTP",
		"GENTOO_MIRRORS", "NOCONFMEM", "O",
		"PORTAGE_BACKGROUND",
		"PORTAGE_BINHOST_CHUNKSIZE", "PORTAGE_CALLER",
		"PORTAGE_ELOG_CLASSES",
		"PORTAGE_ELOG_MAILFROM", "PORTAGE_ELOG_MAILSUBJECT",
		"PORTAGE_ELOG_MAILURI", "PORTAGE_ELOG_SYSTEM",
		"PORTAGE_FETCH_CHECKSUM_TRY_MIRRORS", "PORTAGE_FETCH_RESUME_MIN_SIZE",
		"PORTAGE_GPG_KEY", "PORTAGE_IONICE_COMMAND",
		"PORTAGE_PACKAGE_EMPTY_ABORT",
		"PORTAGE_REPO_DUPLICATE_WARN",
		"PORTAGE_RO_DISTDIRS",
		"PORTAGE_RSYNC_EXTRA_OPTS", "PORTAGE_RSYNC_OPTS",
		"PORTAGE_RSYNC_RETRIES", "PORTAGE_USE", "PORT_LOGDIR",
		"QUICKPKG_DEFAULT_OPTS",
		"RESUMECOMMAND", "RESUMECOMMAND_HTTP", "RESUMECOMMAND_HTTP",
		"RESUMECOMMAND_SFTP", "SYNC", "USE_EXPAND_HIDDEN", "USE_ORDER",

	_environ_filter = frozenset(_environ_filter)

	# License groups referenced by ACCEPT_LICENSE but not defined anywhere;
	# shared across instances.
	_undef_lic_groups = set()
	_default_globals = (
		('ACCEPT_LICENSE',           '* -@EULA'),
		('ACCEPT_PROPERTIES',        '*'),

	# To enhance usability, make some vars case insensitive
	# by forcing them to lower case.
	_case_insensitive_vars = ('AUTOCLEAN', 'NOCOLOR',)
	def __init__(self, clone=None, mycpv=None, config_profile_path=None,
		config_incrementals=None, config_root=None, target_root=None,
		local_config=True, env=None):
		"""
		@param clone: If provided, init will use deepcopy to copy by value the instance.
		@type clone: Instance of config class.
		@param mycpv: CPV to load up (see setcpv), this is the same as calling init with mycpv=None
		and then calling instance.setcpv(mycpv).
		@param config_profile_path: Configurable path to the profile (usually PROFILE_PATH from portage.const)
		@type config_profile_path: String
		@param config_incrementals: List of incremental variables
			(defaults to portage.const.INCREMENTALS)
		@type config_incrementals: List
		@param config_root: path to read local config from (defaults to "/", see PORTAGE_CONFIGROOT)
		@type config_root: String
		@param target_root: __init__ override of $ROOT env variable.
		@type target_root: String
		@param local_config: Enables loading of local config (/etc/portage); used most by repoman to
		ignore local config (keywording and unmasking)
		@type local_config: Boolean
		@param env: The calling environment which is used to override settings.
			Defaults to os.environ if unspecified.
		"""

		# When initializing the global portage.settings instance, avoid
		# raising exceptions whenever possible since exceptions thrown
		# from 'import portage' or 'import portage.exceptions' statements
		# can practically render the api unusable for api consumers.
		tolerant = "_initializing_globals" in globals()

		self.already_in_regenerate = 0

		self._setcpv_args_hash = None

		self.modifiedkeys = []

		self._accept_chost_re = None
		self._accept_license = None
		self._accept_license_str = None
		self._license_groups = {}
		self._accept_properties = None

		self.dirVirtuals = None

		# Virtuals obtained from the vartree
		self.treeVirtuals = {}
		# Virtuals by user specification. Includes negatives.
		self.userVirtuals = {}
		# Virtual negatives from user specifications.
		self.negVirtuals = {}
		# Virtuals added by the depgraph via self.setinst().
		self._depgraphVirtuals = {}

		self.user_profile_dir = None
		self.local_config = local_config
		self._local_repo_configs = None
		self._local_repo_conf_path = None

			# --- clone branch: deep-copy every piece of state from the
			# source config so the new instance is fully independent. ---
			self.incrementals = copy.deepcopy(clone.incrementals)
			self.profile_path = copy.deepcopy(clone.profile_path)
			self.user_profile_dir = copy.deepcopy(clone.user_profile_dir)
			self.local_config = copy.deepcopy(clone.local_config)
			self._local_repo_configs = \
				copy.deepcopy(clone._local_repo_configs)
			self._local_repo_conf_path = \
				copy.deepcopy(clone._local_repo_conf_path)
			self.module_priority = copy.deepcopy(clone.module_priority)
			self.modules = copy.deepcopy(clone.modules)
			self.depcachedir = copy.deepcopy(clone.depcachedir)
			self.packages = copy.deepcopy(clone.packages)
			self.virtuals = copy.deepcopy(clone.virtuals)
			self.dirVirtuals = copy.deepcopy(clone.dirVirtuals)
			self.treeVirtuals = copy.deepcopy(clone.treeVirtuals)
			self.userVirtuals = copy.deepcopy(clone.userVirtuals)
			self.negVirtuals = copy.deepcopy(clone.negVirtuals)
			self._depgraphVirtuals = copy.deepcopy(clone._depgraphVirtuals)
			self.use_defs = copy.deepcopy(clone.use_defs)
			self.usemask = copy.deepcopy(clone.usemask)
			self.usemask_list = copy.deepcopy(clone.usemask_list)
			self.pusemask_list = copy.deepcopy(clone.pusemask_list)
			self.useforce = copy.deepcopy(clone.useforce)
			self.useforce_list = copy.deepcopy(clone.useforce_list)
			self.puseforce_list = copy.deepcopy(clone.puseforce_list)
			self.puse = copy.deepcopy(clone.puse)
			self.make_defaults_use = copy.deepcopy(clone.make_defaults_use)
			self.pkgprofileuse = copy.deepcopy(clone.pkgprofileuse)
			self.mycpv = copy.deepcopy(clone.mycpv)
			self._setcpv_args_hash = copy.deepcopy(clone._setcpv_args_hash)
			self.configdict = copy.deepcopy(clone.configdict)
				self.configdict['env.d'],
				self.configdict['pkginternal'],
				self.configdict['globals'],
				self.configdict['defaults'],
				self.configdict['conf'],
				self.configdict['pkg'],
				self.configdict['auto'],
				self.configdict['env'],
			# lookuplist is the reversed configlist: highest-priority
			# layer first.
			self.lookuplist = self.configlist[:]
			self.lookuplist.reverse()
			self._use_expand_dict = copy.deepcopy(clone._use_expand_dict)
			self.profiles = copy.deepcopy(clone.profiles)
			self.backupenv = self.configdict["backupenv"]
			self.pusedict = copy.deepcopy(clone.pusedict)
			self.categories = copy.deepcopy(clone.categories)
			self.pkeywordsdict = copy.deepcopy(clone.pkeywordsdict)
			self._pkeywords_list = copy.deepcopy(clone._pkeywords_list)
			self.pmaskdict = copy.deepcopy(clone.pmaskdict)
			self.punmaskdict = copy.deepcopy(clone.punmaskdict)
			self.prevmaskdict = copy.deepcopy(clone.prevmaskdict)
			self.pprovideddict = copy.deepcopy(clone.pprovideddict)
			self.features = copy.deepcopy(clone.features)
			self._accept_license = copy.deepcopy(clone._accept_license)
			self._plicensedict = copy.deepcopy(clone._plicensedict)
			self._license_groups = copy.deepcopy(clone._license_groups)
			self._accept_properties = copy.deepcopy(clone._accept_properties)
			self._ppropertiesdict = copy.deepcopy(clone._ppropertiesdict)

			# --- non-clone initialization: read profiles, make.conf and
			# friends from disk. ---
			def check_var_directory(varname, var):
				# Abort early when a critical path setting isn't a directory.
				if not os.path.isdir(var):
					writemsg(_("!!! Error: %s='%s' is not a directory. "
						"Please correct this.\n") % (varname, var),
					raise portage.exception.DirectoryNotFound(var)

			if config_root is None:
			config_root = normalize_path(os.path.abspath(
				config_root)).rstrip(os.path.sep) + os.path.sep

			check_var_directory("PORTAGE_CONFIGROOT", config_root)

			self.depcachedir = DEPCACHE_PATH

			if not config_profile_path:
				config_profile_path = \
					os.path.join(config_root, PROFILE_PATH)
				if os.path.isdir(config_profile_path):
					self.profile_path = config_profile_path
					self.profile_path = None
				self.profile_path = config_profile_path[:]

			if config_incrementals is None:
				self.incrementals = copy.deepcopy(portage.const.INCREMENTALS)
				self.incrementals = copy.deepcopy(config_incrementals)

			self.module_priority = ["user","default"]
			modules_loader = portage.env.loaders.KeyValuePairFileLoader(
				os.path.join(config_root, MODULES_FILE_PATH), None, None)
			modules_dict, modules_errors = modules_loader.load()
			self.modules["user"] = modules_dict
			if self.modules["user"] is None:
				self.modules["user"] = {}
			self.modules["default"] = {
				"portdbapi.metadbmodule": "portage.cache.metadata.database",
				"portdbapi.auxdbmodule": "portage.cache.flat_hash.database",

			# back up our incremental variables:
			self._use_expand_dict = {}
			# configlist will contain: [ env.d, globals, defaults, conf, pkg, auto, backupenv, env ]
			self.configlist.append({})
			self.configdict["env.d"] = self.configlist[-1]

			self.configlist.append({})
			self.configdict["pkginternal"] = self.configlist[-1]

			# The symlink might not exist or might not be a symlink.
			if self.profile_path is None:

			# Recursively walk profile 'parent' links, validating each
			# profile's EAPI before appending it to self.profiles.
			def addProfile(currentPath):
				parentsFile = os.path.join(currentPath, "parent")
				eapi_file = os.path.join(currentPath, "eapi")
					eapi = codecs.open(_unicode_encode(eapi_file,
						encoding=_encodings['fs'], errors='strict'),
						mode='r', encoding=_encodings['content'], errors='replace'
						).readline().strip()
					if not eapi_is_supported(eapi):
						raise portage.exception.ParseError(_(
							"Profile contains unsupported "
							"EAPI '%s': '%s'") % \
							(eapi, os.path.realpath(eapi_file),))
				if os.path.exists(parentsFile):
					parents = grabfile(parentsFile)
						raise portage.exception.ParseError(
							_("Empty parent file: '%s'") % parentsFile)
					for parentPath in parents:
						parentPath = normalize_path(os.path.join(
							currentPath, parentPath))
						if os.path.exists(parentPath):
							addProfile(parentPath)
							raise portage.exception.ParseError(
								_("Parent '%s' not found: '%s'") % \
								(parentPath, parentsFile))
				self.profiles.append(currentPath)

				addProfile(os.path.realpath(self.profile_path))
			except portage.exception.ParseError as e:
				writemsg(_("!!! Unable to parse profile: '%s'\n") % \
					self.profile_path, noiselevel=-1)
				writemsg("!!! ParseError: %s\n" % str(e), noiselevel=-1)

			if local_config and self.profiles:
				custom_prof = os.path.join(
					config_root, CUSTOM_PROFILE_PATH)
				if os.path.exists(custom_prof):
					self.user_profile_dir = custom_prof
					self.profiles.append(custom_prof)

			self.packages_list = [grabfile_package(os.path.join(x, "packages")) for x in self.profiles]
			self.packages = stack_lists(self.packages_list, incremental=1)
			del self.packages_list
			#self.packages = grab_stacked("packages", self.profiles, grabfile, incremental_lines=1)

			self.prevmaskdict={}
			for x in self.packages:
				# Negative atoms are filtered by the above stack_lists() call.
				if not isinstance(x, dep.Atom):
					x = dep.Atom(x.lstrip('*'))
				self.prevmaskdict.setdefault(x.cp, []).append(x)

			self._pkeywords_list = []
			rawpkeywords = [grabdict_package(
				os.path.join(x, "package.keywords"), recursive=1) \
				for x in self.profiles]
			for pkeyworddict in rawpkeywords:
				for k, v in pkeyworddict.items():
					cpdict.setdefault(k.cp, {})[k] = v
				self._pkeywords_list.append(cpdict)

			# get profile-masked use flags -- INCREMENTAL Child over parent
			self.usemask_list = [grabfile(os.path.join(x, "use.mask"),
				recursive=1) for x in self.profiles]
			self.usemask  = set(stack_lists(
				self.usemask_list, incremental=True))
			use_defs_lists = [grabdict(os.path.join(x, "use.defaults")) for x in self.profiles]
			self.use_defs = stack_dictlist(use_defs_lists, incremental=True)

			self.pusemask_list = []
			rawpusemask = [grabdict_package(os.path.join(x, "package.use.mask"),
				recursive=1) for x in self.profiles]
			for pusemaskdict in rawpusemask:
				for k, v in pusemaskdict.items():
					cpdict.setdefault(k.cp, {})[k] = v
				self.pusemask_list.append(cpdict)

			self.pkgprofileuse = []
			rawprofileuse = [grabdict_package(os.path.join(x, "package.use"),
				juststrings=True, recursive=1) for x in self.profiles]
			for rawpusedict in rawprofileuse:
				for k, v in rawpusedict.items():
					cpdict.setdefault(k.cp, {})[k] = v
				self.pkgprofileuse.append(cpdict)

			self.useforce_list = [grabfile(os.path.join(x, "use.force"),
				recursive=1) for x in self.profiles]
			self.useforce  = set(stack_lists(
				self.useforce_list, incremental=True))

			self.puseforce_list = []
			rawpuseforce = [grabdict_package(
				os.path.join(x, "package.use.force"), recursive=1) \
				for x in self.profiles]
			for rawpusefdict in rawpuseforce:
				for k, v in rawpusefdict.items():
					cpdict.setdefault(k.cp, {})[k] = v
				self.puseforce_list.append(cpdict)

			make_conf = getconfig(
				os.path.join(config_root, MAKE_CONF_FILE),
				tolerant=tolerant, allow_sourcing=True)
			if make_conf is None:

			# Allow ROOT setting to come from make.conf if it's not overridden
			# by the constructor argument (from the calling environment).
			if target_root is None and "ROOT" in make_conf:
				target_root = make_conf["ROOT"]
				if not target_root.strip():
			if target_root is None:

			target_root = normalize_path(os.path.abspath(
				target_root)).rstrip(os.path.sep) + os.path.sep

			portage.util.ensure_dirs(target_root)
			check_var_directory("ROOT", target_root)

			# The expand_map is used for variable substitution
			# in getconfig() calls, and the getconfig() calls
			# update expand_map with the value of each variable
			# assignment that occurs. Variable substitution occurs
			# in the following order, which corresponds to the
			# order of appearance in self.lookuplist:
			# Notably absent is "env", since we want to avoid any
			# interaction with the calling environment that might
			# lead to unexpected results.

			env_d = getconfig(os.path.join(target_root, "etc", "profile.env"),
			# env_d will be None if profile.env doesn't exist.
				self.configdict["env.d"].update(env_d)
				expand_map.update(env_d)

			# backupenv is used for calculating incremental variables.

			# Avoid potential UnicodeDecodeError exceptions later.
			env_unicode = dict((_unicode_decode(k), _unicode_decode(v))
				for k, v in env.items())

			self.backupenv = env_unicode

				# Remove duplicate values so they don't override updated
				# profile.env values later (profile.env is reloaded in each
				# call to self.regenerate).
				for k, v in env_d.items():
						if self.backupenv[k] == v:
							del self.backupenv[k]

			self.configdict["env"] = util.LazyItemsDict(self.backupenv)

			# make.globals should not be relative to config_root
			# because it only contains constants.
			for x in (portage.const.GLOBAL_CONFIG_PATH, "/etc"):
				self.mygcfg = getconfig(os.path.join(x, "make.globals"),

			if self.mygcfg is None:

			for k, v in self._default_globals:
				self.mygcfg.setdefault(k, v)

			self.configlist.append(self.mygcfg)
			self.configdict["globals"]=self.configlist[-1]

			self.make_defaults_use = []
				mygcfg_dlists = [getconfig(os.path.join(x, "make.defaults"),
					expand=expand_map) for x in self.profiles]
				for cfg in mygcfg_dlists:
						self.make_defaults_use.append(cfg.get("USE", ""))
						self.make_defaults_use.append("")
				self.mygcfg = stack_dicts(mygcfg_dlists,
					incrementals=portage.const.INCREMENTALS)
				if self.mygcfg is None:
			self.configlist.append(self.mygcfg)
			self.configdict["defaults"]=self.configlist[-1]

			self.mygcfg = getconfig(
				os.path.join(config_root, MAKE_CONF_FILE),
				tolerant=tolerant, allow_sourcing=True, expand=expand_map)
			if self.mygcfg is None:

			# Don't allow the user to override certain variables in make.conf
			profile_only_variables = self.configdict["defaults"].get(
				"PROFILE_ONLY_VARIABLES", "").split()
			for k in profile_only_variables:
				self.mygcfg.pop(k, None)

			self.configlist.append(self.mygcfg)
			self.configdict["conf"]=self.configlist[-1]

			self.configlist.append(util.LazyItemsDict())
			self.configdict["pkg"]=self.configlist[-1]

			self.configlist.append({})
			self.configdict["auto"]=self.configlist[-1]

			self.configdict["backupenv"] = self.backupenv

			# Don't allow the user to override certain variables in the env
			for k in profile_only_variables:
				self.backupenv.pop(k, None)

			self.configlist.append(self.configdict["env"])

			# make lookuplist for loading package.*
			self.lookuplist=self.configlist[:]
			self.lookuplist.reverse()

			# Blacklist vars that could interfere with portage internals.
			for blacklisted in self._env_blacklist:
				for cfg in self.lookuplist:
					cfg.pop(blacklisted, None)
				self.backupenv.pop(blacklisted, None)
			del blacklisted, cfg

			self["PORTAGE_CONFIGROOT"] = config_root
			self.backup_changes("PORTAGE_CONFIGROOT")
			self["ROOT"] = target_root
			self.backup_changes("ROOT")

			self.pkeywordsdict = {}
			self._plicensedict = {}
			self._ppropertiesdict = {}
			self.punmaskdict = {}
			abs_user_config = os.path.join(config_root, USER_CONFIG_PATH)

			# locations for "categories" and "arch.list" files
			locations = [os.path.join(self["PORTDIR"], "profiles")]
			pmask_locations = [os.path.join(self["PORTDIR"], "profiles")]
			pmask_locations.extend(self.profiles)

			""" repoman controls PORTDIR_OVERLAY via the environment, so no
			special cases are needed here."""
			overlay_profiles = []
			for ov in self["PORTDIR_OVERLAY"].split():
				ov = normalize_path(ov)
				profiles_dir = os.path.join(ov, "profiles")
				if os.path.isdir(profiles_dir):
					overlay_profiles.append(profiles_dir)
			locations += overlay_profiles

			pmask_locations.extend(overlay_profiles)

				# --- user config (/etc/portage) package.* files ---
				locations.append(abs_user_config)
				pmask_locations.append(abs_user_config)
				pusedict = grabdict_package(
					os.path.join(abs_user_config, "package.use"), recursive=1)
				for k, v in pusedict.items():
					self.pusedict.setdefault(k.cp, {})[k] = v

				pkgdict = grabdict_package(
					os.path.join(abs_user_config, "package.keywords"),
				for k, v in pkgdict.items():
					# default to ~arch if no specific keyword is given
						if self.configdict["defaults"] and \
							"ACCEPT_KEYWORDS" in self.configdict["defaults"]:
							groups = self.configdict["defaults"]["ACCEPT_KEYWORDS"].split()
						for keyword in groups:
							if not keyword[0] in "~-":
								mykeywordlist.append("~"+keyword)
					self.pkeywordsdict.setdefault(k.cp, {})[k] = v

				licdict = grabdict_package(os.path.join(
					abs_user_config, "package.license"), recursive=1)
				for k, v in licdict.items():
					cp_dict = self._plicensedict.get(cp)
						self._plicensedict[cp] = cp_dict
					cp_dict[k] = self.expandLicenseTokens(v)

				propdict = grabdict_package(os.path.join(
					abs_user_config, "package.properties"), recursive=1)
				for k, v in propdict.items():
					cp_dict = self._ppropertiesdict.get(cp)
						self._ppropertiesdict[cp] = cp_dict

				# --- repos.conf: per-repository user settings ---
				self._local_repo_configs = {}
				self._local_repo_conf_path = \
					os.path.join(abs_user_config, 'repos.conf')
					from configparser import SafeConfigParser, ParsingError
					from ConfigParser import SafeConfigParser, ParsingError
				repo_conf_parser = SafeConfigParser()
					repo_conf_parser.readfp(
							_unicode_encode(self._local_repo_conf_path,
							encoding=_encodings['fs'], errors='strict'),
							mode='r', encoding=_encodings['content'], errors='replace')
				except EnvironmentError as e:
					# A missing repos.conf is fine; anything else is re-raised.
					if e.errno != errno.ENOENT:
				except ParsingError as e:
					portage.util.writemsg_level(
						_("!!! Error parsing '%s': %s\n") % \
						(self._local_repo_conf_path, e),
						level=logging.ERROR, noiselevel=-1)
					repo_defaults = repo_conf_parser.defaults()
						self._local_repo_configs['DEFAULT'] = \
							_local_repo_config('DEFAULT', repo_defaults)
					for repo_name in repo_conf_parser.sections():
						# Section options override the [DEFAULT] options.
						repo_opts = repo_defaults.copy()
						for opt_name in repo_conf_parser.options(repo_name):
							repo_opts[opt_name] = \
								repo_conf_parser.get(repo_name, opt_name)
						self._local_repo_configs[repo_name] = \
							_local_repo_config(repo_name, repo_opts)

			#getting categories from an external file now
			categories = [grabfile(os.path.join(x, "categories")) for x in locations]
			self.categories = tuple(sorted(
				stack_lists(categories, incremental=1)))

			archlist = [grabfile(os.path.join(x, "arch.list")) for x in locations]
			archlist = stack_lists(archlist, incremental=1)
			self.configdict["conf"]["PORTAGE_ARCHLIST"] = " ".join(archlist)

			# package.mask and package.unmask
			for x in pmask_locations:
				pkgmasklines.append(grabfile_package(
					os.path.join(x, "package.mask"), recursive=1))
				pkgunmasklines.append(grabfile_package(
					os.path.join(x, "package.unmask"), recursive=1))
			pkgmasklines = stack_lists(pkgmasklines, incremental=1)
			pkgunmasklines = stack_lists(pkgunmasklines, incremental=1)

			for x in pkgmasklines:
				self.pmaskdict.setdefault(x.cp, []).append(x)

			for x in pkgunmasklines:
				self.punmaskdict.setdefault(x.cp, []).append(x)

			pkgprovidedlines = [grabfile(os.path.join(x, "package.provided"), recursive=1) for x in self.profiles]
			pkgprovidedlines = stack_lists(pkgprovidedlines, incremental=1)
			has_invalid_data = False
			# Iterate backwards so invalid entries can be deleted in place.
			for x in range(len(pkgprovidedlines)-1, -1, -1):
				myline = pkgprovidedlines[x]
				if not isvalidatom("=" + myline):
					writemsg(_("Invalid package name in package.provided: %s\n") % \
						myline, noiselevel=-1)
					has_invalid_data = True
					del pkgprovidedlines[x]
				cpvr = catpkgsplit(pkgprovidedlines[x])
				if not cpvr or cpvr[0] == "null":
					writemsg(_("Invalid package name in package.provided: ")+pkgprovidedlines[x]+"\n",
					has_invalid_data = True
					del pkgprovidedlines[x]
				if cpvr[0] == "virtual":
					writemsg(_("Virtual package in package.provided: %s\n") % \
						myline, noiselevel=-1)
					has_invalid_data = True
					del pkgprovidedlines[x]
			if has_invalid_data:
				writemsg(_("See portage(5) for correct package.provided usage.\n"),
			self.pprovideddict = {}
			for x in pkgprovidedlines:
				mycatpkg = cpv_getkey(x)
				if mycatpkg in self.pprovideddict:
					self.pprovideddict[mycatpkg].append(x)
					self.pprovideddict[mycatpkg]=[x]

			# parse licensegroups
				self._license_groups.update(
					grabdict(os.path.join(x, "license_groups")))

			# reasonable defaults; this is important as without USE_ORDER,
			# USE will always be "" (nothing set)!
			if "USE_ORDER" not in self:
				self.backupenv["USE_ORDER"] = "env:pkg:conf:defaults:pkginternal:env.d"

			self["PORTAGE_GID"] = str(portage_gid)
			self.backup_changes("PORTAGE_GID")

			if self.get("PORTAGE_DEPCACHEDIR", None):
				self.depcachedir = self["PORTAGE_DEPCACHEDIR"]
			self["PORTAGE_DEPCACHEDIR"] = self.depcachedir
			self.backup_changes("PORTAGE_DEPCACHEDIR")

			overlays = self.get("PORTDIR_OVERLAY","").split()
					ov = normalize_path(ov)
					if os.path.isdir(ov):
						writemsg(_("!!! Invalid PORTDIR_OVERLAY"
							" (not a dir): '%s'\n") % ov, noiselevel=-1)
				self["PORTDIR_OVERLAY"] = " ".join(new_ov)
				self.backup_changes("PORTDIR_OVERLAY")

			if "CBUILD" not in self and "CHOST" in self:
				self["CBUILD"] = self["CHOST"]
				self.backup_changes("CBUILD")

			self["PORTAGE_BIN_PATH"] = PORTAGE_BIN_PATH
			self.backup_changes("PORTAGE_BIN_PATH")
			self["PORTAGE_PYM_PATH"] = PORTAGE_PYM_PATH
			self.backup_changes("PORTAGE_PYM_PATH")

			# Coerce ownership overrides to valid integers, backing them up.
			for var in ("PORTAGE_INST_UID", "PORTAGE_INST_GID"):
					self[var] = str(int(self.get(var, "0")))
					writemsg(_("!!! %s='%s' is not a valid integer. "
						"Falling back to '0'.\n") % (var, self[var]),
				self.backup_changes(var)

			# initialize self.features
			if not portage.process.sandbox_capable and \
				("sandbox" in self.features or "usersandbox" in self.features):
				if self.profile_path is not None and \
					os.path.realpath(self.profile_path) == \
					os.path.realpath(os.path.join(config_root, PROFILE_PATH)):
					""" Don't show this warning when running repoman and the
					sandbox feature came from a profile that doesn't belong to
					writemsg(colorize("BAD", _("!!! Problem with sandbox"
						" binary. Disabling...\n\n")), noiselevel=-1)
				if "sandbox" in self.features:
					self.features.remove("sandbox")
				if "usersandbox" in self.features:
					self.features.remove("usersandbox")

				self.features.add('chflags')

			self["FEATURES"] = " ".join(sorted(self.features))
			self.backup_changes("FEATURES")
			# These FEATURES toggles flip module-level cache/EAPI globals.
			global _glep_55_enabled, _validate_cache_for_unsupported_eapis
			if 'parse-eapi-ebuild-head' in self.features:
				_validate_cache_for_unsupported_eapis = False
			if 'parse-eapi-glep-55' in self.features:
				_validate_cache_for_unsupported_eapis = False
				_glep_55_enabled = True

			# Force the case-insensitive vars to lower case (see
			# _case_insensitive_vars).
			for k in self._case_insensitive_vars:
					self[k] = self[k].lower()
					self.backup_changes(k)
# _init_dirs: create a few directories critical to portage operation
# (tmp, var/tmp, PRIVATE_PATH, CACHE_PATH) under ROOT via
# portage.util.ensure_dirs, logging PortageException failures with
# writemsg instead of raising.
# NOTE(review): gaps in the fused line numbering (2225, 2229-2232,
# 2237-2238, 2246-2247, 2251, 2253-2254) show lines elided by extraction
# — e.g. the `dir_mode_map = {` assignment, the early return when ROOT
# is not writable, and the `try:` opener are missing from this view.
2224 def _init_dirs(self):
2226 Create a few directories that are critical to portage operation
# Skip entirely when ROOT is not writable (body elided here).
2228 if not os.access(self["ROOT"], os.W_OK):
2231 # gid, mode, mask, preserve_perms
2233 "tmp" : ( -1, 0o1777, 0, True),
2234 "var/tmp" : ( -1, 0o1777, 0, True),
2235 PRIVATE_PATH : (portage_gid, 0o2750, 0o2, False),
2236 CACHE_PATH : (portage_gid, 0o755, 0o2, False)
2239 for mypath, (gid, mode, modemask, preserve_perms) \
2240 in dir_mode_map.items():
2241 mydir = os.path.join(self["ROOT"], mypath)
2242 if preserve_perms and os.path.isdir(mydir):
2243 # Only adjust permissions on some directories if
2244 # they don't exist yet. This gives freedom to the
2245 # user to adjust permissions to suit their taste.
2248 portage.util.ensure_dirs(mydir, gid=gid, mode=mode, mask=modemask)
2249 except portage.exception.PortageException as e:
# Best-effort: failures are reported, not fatal.
2250 writemsg(_("!!! Directory initialization failed: '%s'\n") % mydir,
2252 writemsg("!!! %s\n" % str(e),
# expandLicenseTokens: map each ACCEPT_LICENSE/package.license token
# through _expandLicenseToken (group expansion) and collect the results.
# NOTE(review): the loop header iterating `tokens` (fused line 2260,
# presumably `for x in tokens:`) is elided in this extract.
2255 def expandLicenseTokens(self, tokens):
2256 """ Take a token from ACCEPT_LICENSE or package.license and expand it
2257 if it's a group token (indicated by @) or just return it if it's not a
2258 group. If a group is negated then negate all group elements."""
2259 expanded_tokens = []
2261 expanded_tokens.extend(self._expandLicenseToken(x, None))
2262 return expanded_tokens
# _expandLicenseToken: recursively expand a single license token.
# "@group" references are resolved via self._license_groups; cycles are
# detected with traversed_groups and reported via writemsg; undefined
# groups are warned about once (tracked in self._undef_lic_groups).
# A leading "-" on the token negates every expanded element (last line).
# NOTE(review): several lines are elided by extraction (the rValue
# initialization, the negation flag assignment, `else:` branches and the
# final return), so the control flow shown here is incomplete.
2264 def _expandLicenseToken(self, token, traversed_groups):
2267 if token.startswith("-"):
2269 license_name = token[1:]
2271 license_name = token
# Plain (non-@) tokens pass through unchanged.
2272 if not license_name.startswith("@"):
2273 rValue.append(token)
2275 group_name = license_name[1:]
2276 if not traversed_groups:
2277 traversed_groups = set()
2278 license_group = self._license_groups.get(group_name)
# Cycle guard: a group already on the traversal path is not re-expanded.
2279 if group_name in traversed_groups:
2280 writemsg(_("Circular license group reference"
2281 " detected in '%s'\n") % group_name, noiselevel=-1)
2282 rValue.append("@"+group_name)
2284 traversed_groups.add(group_name)
2285 for l in license_group:
2286 if l.startswith("-"):
2287 writemsg(_("Skipping invalid element %s"
2288 " in license group '%s'\n") % (l, group_name),
2291 rValue.extend(self._expandLicenseToken(l, traversed_groups))
2293 if self._license_groups and \
2294 group_name not in self._undef_lic_groups:
2295 self._undef_lic_groups.add(group_name)
2296 writemsg(_("Undefined license group '%s'\n") % group_name,
2298 rValue.append("@"+group_name)
2300 rValue = ["-" + token for token in rValue]
# NOTE(review): the `def` line for this method (fused line 2303 —
# presumably portage's config.validate) is elided by extraction; only
# its body is visible. It warns (via writemsg, never raises) about:
# invalid ACCEPT_KEYWORDS entries, a profile path that is neither a
# symlink nor a directory with a `parent` file, the deprecated
# /etc/portage/virtuals location, and FEATURES=fakeroot when the
# fakeroot binary is unavailable.
2304 """Validate miscellaneous settings and display warnings if necessary.
2305 (This code was previously in the global scope of portage.py)"""
2307 groups = self["ACCEPT_KEYWORDS"].split()
2308 archlist = self.archlist()
2310 writemsg(_("--- 'profiles/arch.list' is empty or "
2311 "not available. Empty portage tree?\n"), noiselevel=1)
# Keywords must be a known arch, a "-arch" negation, or a wildcard form.
2313 for group in groups:
2314 if group not in archlist and \
2315 not (group.startswith("-") and group[1:] in archlist) and \
2316 group not in ("*", "~*", "**"):
2317 writemsg(_("!!! INVALID ACCEPT_KEYWORDS: %s\n") % str(group),
2320 abs_profile_path = os.path.join(self["PORTAGE_CONFIGROOT"],
2322 if not self.profile_path or (not os.path.islink(abs_profile_path) and \
2323 not os.path.exists(os.path.join(abs_profile_path, "parent")) and \
2324 os.path.exists(os.path.join(self["PORTDIR"], "profiles"))):
2325 writemsg(_("\a\n\n!!! %s is not a symlink and will probably prevent most merges.\n") % abs_profile_path,
2327 writemsg(_("!!! It should point into a profile within %s/profiles/\n") % self["PORTDIR"])
2328 writemsg(_("!!! (You can safely ignore this message when syncing. It's harmless.)\n\n\n"))
2330 abs_user_virtuals = os.path.join(self["PORTAGE_CONFIGROOT"],
2332 if os.path.exists(abs_user_virtuals):
2333 writemsg("\n!!! /etc/portage/virtuals is deprecated in favor of\n")
2334 writemsg("!!! /etc/portage/profile/virtuals. Please move it to\n")
2335 writemsg("!!! this new location.\n\n")
2337 if "fakeroot" in self.features and \
2338 not portage.process.fakeroot_capable:
2339 writemsg(_("!!! FEATURES=fakeroot is enabled, but the "
2340 "fakeroot binary is not installed.\n"), noiselevel=-1)
def loadVirtuals(self,root):
	"""Deprecated compatibility shim: emits a deprecation notice and
	forwards to getvirtuals(). Not currently used by portage itself."""
	writemsg("DEPRECATED: portage.config.loadVirtuals\n")
	self.getvirtuals(root)
# load_best_module: pick the highest-priority module name for
# property_string from self.modules (via best_from_dict) and import it
# with load_mod; legacy "cache.*" names are retried under the
# "portage." prefix.
# NOTE(review): the try/except wrapping the first load_mod call and the
# trailing error handling / `return mod` (fused lines 2349-2350, 2352,
# 2355, 2357+) are elided from this extract.
2347 def load_best_module(self,property_string):
2348 best_mod = best_from_dict(property_string,self.modules,self.module_priority)
2351 mod = load_mod(best_mod)
# Fallback: retry old-style "cache." module paths as "portage.cache.".
2353 if best_mod.startswith("cache."):
2354 best_mod = "portage." + best_mod
2356 mod = load_mod(best_mod)
# modifying: guard used by mutators; raises when the config is locked.
# NOTE(review): the guarding `if` (fused line 2370, presumably testing
# the instance's locked flag) is elided — as shown, the raise looks
# unconditional, which cannot be the intended behavior.
2369 def modifying(self):
2371 raise Exception(_("Configuration is locked."))
# backup_changes: persist the current configdict["env"] value of `key`
# into self.backupenv (deep-copied) so later reset() calls restore it;
# raises KeyError when the key is absent from the env dict.
# NOTE(review): fused lines 2374 and 2377 are elided (presumably a
# self.modifying() lock check and the `else:` before the raise).
2373 def backup_changes(self,key=None):
2375 if key and key in self.configdict["env"]:
2376 self.backupenv[key] = copy.deepcopy(self.configdict["env"][key])
2378 raise KeyError(_("No such key defined in environment: %s") % key)
# reset: restore configdict["env"] from self.backupenv and regenerate
# incrementals; optionally clears per-package state.
# NOTE(review): fused lines 2381, 2387-2389, 2392, 2394-2396 are elided
# (docstring delimiters, the modifying() guard, and the
# `if not keeping_pkg:` branch header for the pkg-clearing section).
2380 def reset(self,keeping_pkg=0,use_cache=1):
2382 Restore environment from self.backupenv, call self.regenerate()
2383 @param keeping_pkg: Should we keep the set_cpv() data or delete it.
2384 @type keeping_pkg: Boolean
2385 @param use_cache: Should self.regenerate use the cache or not
2386 @type use_cache: Boolean
2390 self.configdict["env"].clear()
2391 self.configdict["env"].update(self.backupenv)
2393 self.modifiedkeys = []
# Per-package state is discarded and profile USE defaults / masks /
# forces are restacked before regenerating.
2397 self.configdict["pkg"].clear()
2398 self.configdict["pkginternal"].clear()
2399 self.configdict["defaults"]["USE"] = \
2400 " ".join(self.make_defaults_use)
2401 self.usemask = set(stack_lists(
2402 self.usemask_list, incremental=True))
2403 self.useforce = set(stack_lists(
2404 self.useforce_list, incremental=True))
2405 self.regenerate(use_cache=use_cache)
# load_infodir: deprecated stub; only issues a DeprecationWarning.
# NOTE(review): the warning call's remaining arguments (fused lines
# 2409-2410, presumably DeprecationWarning and stacklevel) are elided.
2407 def load_infodir(self,infodir):
2408 warnings.warn("portage.config.load_infodir() is deprecated",
# _lazy_vars: nested helper giving dict-like lazy access to
# ACCEPT_LICENSE and PORTAGE_RESTRICT for a settings instance. The
# ACCEPT_LICENSE value is pruned by intersection with the package's
# LICENSE so it stays below ARG_MAX (bug #262647, per the docstring).
# NOTE(review): multiple lines are elided by extraction (the
# `self.values = None` init, `values = {}`, `if use is None:`, the
# `try:` openers, `matchall=1` arguments, returns for the
# InvalidDependString fallbacks, and the wildcard/negation branch
# headers in _accept_license), so bodies below are incomplete.
2412 class _lazy_vars(object):
2414 __slots__ = ('built_use', 'settings', 'values')
2416 def __init__(self, built_use, settings):
2417 self.built_use = built_use
2418 self.settings = settings
# Lazy computation: values are built on first __getitem__ access.
2421 def __getitem__(self, k):
2422 if self.values is None:
2423 self.values = self._init_values()
2424 return self.values[k]
2426 def _init_values(self):
2428 settings = self.settings
2429 use = self.built_use
# Fall back to the currently calculated PORTAGE_USE when no
# built USE set was supplied (branch header elided).
2431 use = frozenset(settings['PORTAGE_USE'].split())
2432 values['ACCEPT_LICENSE'] = self._accept_license(use, settings)
2433 values['PORTAGE_RESTRICT'] = self._restrict(use, settings)
2436 def _accept_license(self, use, settings):
2438 Generate a pruned version of ACCEPT_LICENSE, by intersection with
2439 LICENSE. This is required since otherwise ACCEPT_LICENSE might be
2440 too big (bigger than ARG_MAX), causing execve() calls to fail with
2441 E2BIG errors as in bug #262647.
2444 licenses = set(flatten(
2445 dep.use_reduce(dep.paren_reduce(
2446 settings['LICENSE']),
2448 except exception.InvalidDependString:
2450 licenses.discard('||')
2451 if settings._accept_license:
2452 acceptable_licenses = set()
# Incremental semantics: "*" accepts all, "-*" clears, "-x"
# removes x, a plain token adds it (branch headers elided).
2453 for x in settings._accept_license:
2455 acceptable_licenses.update(licenses)
2457 acceptable_licenses.clear()
2459 acceptable_licenses.discard(x[1:])
2461 acceptable_licenses.add(x)
2463 licenses = acceptable_licenses
2464 return ' '.join(sorted(licenses))
2466 def _restrict(self, use, settings):
2468 restrict = set(flatten(
2469 dep.use_reduce(dep.paren_reduce(
2470 settings['RESTRICT']),
2472 except exception.InvalidDependString:
2474 return ' '.join(sorted(restrict))
# _lazy_use_expand: nested helper that computes USE_EXPAND variable
# values (e.g. LINGUAS) on demand from the final USE set, keeping them
# consistent with USE and filtered against IUSE.
# NOTE(review): many lines are elided by extraction (`self._use`
# assignment in __init__, the `if has_wildcard:` headers, the
# `has_iuse = set()` init, the per-suffix flag construction, the
# empty-value / unset handling at the end and the final return), so the
# __getitem__ flow shown here is incomplete.
2476 class _lazy_use_expand(object):
2478 Lazily evaluate USE_EXPAND variables since they are only needed when
2479 an ebuild shell is spawned. Variables values are made consistent with
2480 the previously calculated USE settings.
2483 def __init__(self, use, usemask, iuse_implicit,
2484 use_expand_split, use_expand_dict):
2486 self._usemask = usemask
2487 self._iuse_implicit = iuse_implicit
2488 self._use_expand_split = use_expand_split
2489 self._use_expand_dict = use_expand_dict
2491 def __getitem__(self, key):
# Flags belonging to this variable carry the "<var>_" prefix in USE.
2492 prefix = key.lower() + '_'
2493 prefix_len = len(prefix)
2494 expand_flags = set( x[prefix_len:] for x in self._use \
2495 if x[:prefix_len] == prefix )
2496 var_split = self._use_expand_dict.get(key, '').split()
2497 # Preserve the order of var_split because it can matter for things
2499 var_split = [ x for x in var_split if x in expand_flags ]
2500 var_split.extend(expand_flags.difference(var_split))
2501 has_wildcard = '*' in expand_flags
2503 var_split = [ x for x in var_split if x != "*" ]
2505 for x in self._iuse_implicit:
2506 if x[:prefix_len] == prefix:
2507 has_iuse.add(x[prefix_len:])
2509 # * means to enable everything in IUSE that's not masked
2511 usemask = self._usemask
2512 for suffix in has_iuse:
2514 if x not in usemask:
2515 if suffix not in expand_flags:
2516 var_split.append(suffix)
2518 # If there is a wildcard and no matching flags in IUSE then
2519 # LINGUAS should be unset so that all .mo files are
2522 # Make the flags unique and filter them according to IUSE.
2523 # Also, continue to preserve order for things like LINGUAS
2524 # and filter any duplicates that variable may contain.
2525 filtered_var_split = []
2526 remaining = has_iuse.intersection(var_split)
2530 filtered_var_split.append(x)
2531 var_split = filtered_var_split
2534 value = ' '.join(var_split)
2536 # Don't export empty USE_EXPAND vars unless the user config
2537 # exports them as empty. This is required for vars such as
2538 # LINGUAS, where unset and empty have different meanings.
2540 # ebuild.sh will see this and unset the variable so
2541 # that things like LINGUAS work properly
2547 # It's not in IUSE, so just allow the variable content
2548 # to pass through if it is defined somewhere. This
2549 # allows packages that support LINGUAS but don't
2550 # declare it in IUSE to use the variable outside of the
2551 # USE_EXPAND context.
# setcpv: load one package (cpv) into this config so per-package USE
# defaults (profile pkgprofileuse, package.use, IUSE "+/-" defaults,
# use.mask/use.force) are all visible, then recompute the effective
# USE / PORTAGE_USE and lazily-expanded USE_EXPAND variables.
# Results are memoized via self._setcpv_args_hash.
# NOTE(review): this extract has many elided lines (the `pkg`/`built_use`
# setup, docstring delimiters, `has_changed` tracking, `else:` branches,
# early return, the pusedict/oldpuse initialization, the FEATURES=test
# filtering condition, and the `remaining` iteration), so the flow shown
# is incomplete; code lines below are byte-identical to the extract.
2556 def setcpv(self, mycpv, use_cache=1, mydb=None):
2558 Load a particular CPV into the config, this lets us see the
2559 Default USE flags for a particular ebuild as well as the USE
2560 flags from package.use.
2562 @param mycpv: A cpv to load
2564 @param use_cache: Enables caching
2565 @type use_cache: Boolean
2566 @param mydb: a dbapi instance that supports aux_get with the IUSE key.
2567 @type mydb: dbapi or derivative.
2575 if not isinstance(mycpv, basestring):
2579 args_hash = (mycpv, id(pkg))
2581 built_use = pkg.use.enabled
2583 args_hash = (mycpv, id(mydb))
# Memoization: same (cpv, source) pair as last call short-circuits.
2585 if args_hash == self._setcpv_args_hash:
2587 self._setcpv_args_hash = args_hash
2591 cat, pf = catsplit(mycpv)
2592 cp = dep_getkey(mycpv)
2593 cpv_slot = self.mycpv
2596 pkg_configdict = self.configdict["pkg"]
2597 previous_iuse = pkg_configdict.get("IUSE")
2599 aux_keys = self._setcpv_aux_keys
2601 # Discard any existing metadata from the previous package, but
2602 # preserve things like USE_EXPAND values and PORTAGE_USE which
2605 pkg_configdict.pop(k, None)
2607 pkg_configdict["CATEGORY"] = cat
2608 pkg_configdict["PF"] = pf
2610 if not hasattr(mydb, "aux_get"):
2613 # Make these lazy, since __getitem__ triggers
2614 # evaluation of USE conditionals which can't
2615 # occur until PORTAGE_USE is calculated below.
2616 pkg_configdict.addLazySingleton(k,
2617 mydb.__getitem__, k)
2619 for k, v in zip(aux_keys, mydb.aux_get(self.mycpv, aux_keys)):
2620 pkg_configdict[k] = v
2621 repository = pkg_configdict.pop("repository", None)
2622 if repository is not None:
2623 pkg_configdict["PORTAGE_REPO_NAME"] = repository
2624 slot = pkg_configdict["SLOT"]
2625 iuse = pkg_configdict["IUSE"]
2627 cpv_slot = "%s:%s" % (self.mycpv, slot)
# IUSE defaults: "+flag" enables, "-flag" disables, via the
# "pkginternal" config layer.
2631 for x in iuse.split():
2632 if x.startswith("+"):
2633 pkginternaluse.append(x[1:])
2634 elif x.startswith("-"):
2635 pkginternaluse.append(x)
2636 pkginternaluse = " ".join(pkginternaluse)
2637 if pkginternaluse != self.configdict["pkginternal"].get("USE", ""):
2638 self.configdict["pkginternal"]["USE"] = pkginternaluse
# Profile package.use: stack the best atom match from each
# pkgprofileuse layer together with make.defaults USE.
2643 for i, pkgprofileuse_dict in enumerate(self.pkgprofileuse):
2644 cpdict = pkgprofileuse_dict.get(cp)
2648 bestmatch = best_match_to_list(cpv_slot, keys)
2650 keys.remove(bestmatch)
2651 defaults.insert(pos, cpdict[bestmatch])
2655 if self.make_defaults_use[i]:
2656 defaults.insert(pos, self.make_defaults_use[i])
2658 defaults = " ".join(defaults)
2659 if defaults != self.configdict["defaults"].get("USE",""):
2660 self.configdict["defaults"]["USE"] = defaults
2663 useforce = self._getUseForce(cpv_slot)
2664 if useforce != self.useforce:
2665 self.useforce = useforce
2668 usemask = self._getUseMask(cpv_slot)
2669 if usemask != self.usemask:
2670 self.usemask = usemask
# User package.use: best atom match contributes to self.puse.
2674 cpdict = self.pusedict.get(cp)
2678 self.pusekey = best_match_to_list(cpv_slot, keys)
2680 keys.remove(self.pusekey)
2681 self.puse = (" ".join(cpdict[self.pusekey])) + " " + self.puse
2685 if oldpuse != self.puse:
2687 self.configdict["pkg"]["PKGUSE"] = self.puse[:] # For saving to PUSE file
2688 self.configdict["pkg"]["USE"] = self.puse[:] # this gets appended to USE
2691 self.reset(keeping_pkg=1,use_cache=use_cache)
2693 # Ensure that "pkg" values are always preferred over "env" values.
2694 # This must occur _after_ the above reset() call, since reset()
2695 # copies values from self.backupenv.
2696 env_configdict = self.configdict['env']
2697 for k in pkg_configdict:
2699 env_configdict.pop(k, None)
2701 lazy_vars = self._lazy_vars(built_use, self)
2702 env_configdict.addLazySingleton('ACCEPT_LICENSE',
2703 lazy_vars.__getitem__, 'ACCEPT_LICENSE')
2704 env_configdict.addLazySingleton('PORTAGE_RESTRICT',
2705 lazy_vars.__getitem__, 'PORTAGE_RESTRICT')
2707 # If reset() has not been called, it's safe to return
2708 # early if IUSE has not changed.
2709 if not has_changed and previous_iuse == iuse:
2712 # Filter out USE flags that aren't part of IUSE. This has to
2713 # be done for every setcpv() call since practically every
2714 # package has different IUSE.
2715 use = set(self["USE"].split())
2716 iuse_implicit = self._get_implicit_iuse()
2717 iuse_implicit.update(x.lstrip("+-") for x in iuse.split())
2719 # PORTAGE_IUSE is not always needed so it's lazily evaluated.
2720 self.configdict["pkg"].addLazySingleton(
2721 "PORTAGE_IUSE", _lazy_iuse_regex, iuse_implicit)
2723 ebuild_force_test = self.get("EBUILD_FORCE_TEST") == "1"
2724 if ebuild_force_test and \
2725 not hasattr(self, "_ebuild_force_test_msg_shown"):
2726 self._ebuild_force_test_msg_shown = True
2727 writemsg(_("Forcing test.\n"), noiselevel=-1)
2728 if "test" in self.features:
2729 if "test" in self.usemask and not ebuild_force_test:
2730 # "test" is in IUSE and USE=test is masked, so execution
2731 # of src_test() probably is not reliable. Therefore,
2732 # temporarily disable FEATURES=test just for this package.
2733 self["FEATURES"] = " ".join(x for x in self.features \
2738 if ebuild_force_test:
2739 self.usemask.discard("test")
2741 # Allow _* flags from USE_EXPAND wildcards to pass through here.
2742 use.difference_update([x for x in use \
2743 if x not in iuse_implicit and x[-2:] != '_*'])
2745 # Use the calculated USE flags to regenerate the USE_EXPAND flags so
2746 # that they are consistent. For optimal performance, use slice
2747 # comparison instead of startswith().
2748 use_expand_split = set(x.lower() for \
2749 x in self.get('USE_EXPAND', '').split())
2750 lazy_use_expand = self._lazy_use_expand(use, self.usemask,
2751 iuse_implicit, use_expand_split, self._use_expand_dict)
2753 use_expand_iuses = {}
2754 for x in iuse_implicit:
2755 x_split = x.split('_')
2756 if len(x_split) == 1:
2758 for i in range(len(x_split) - 1):
2759 k = '_'.join(x_split[:i+1])
2760 if k in use_expand_split:
2761 v = use_expand_iuses.get(k)
2764 use_expand_iuses[k] = v
2768 # If it's not in IUSE, variable content is allowed
2769 # to pass through if it is defined somewhere. This
2770 # allows packages that support LINGUAS but don't
2771 # declare it in IUSE to use the variable outside of the
2772 # USE_EXPAND context.
2773 for k, use_expand_iuse in use_expand_iuses.items():
2775 use.update( x for x in use_expand_iuse if x not in usemask )
2777 self.configdict['env'].addLazySingleton(k,
2778 lazy_use_expand.__getitem__, k)
2780 # Filtered for the ebuild environment. Store this in a separate
2781 # attribute since we still want to be able to see global USE
2782 # settings for things like emerge --info.
2784 self.configdict["pkg"]["PORTAGE_USE"] = \
2785 " ".join(sorted(x for x in use if x[-2:] != '_*'))
# _get_implicit_iuse: build the set of flags treated as implicit IUSE
# members (ARCH flags, USE_EXPAND_HIDDEN "<var>_.*" regex fragments,
# masked/forced flags, "build"/"bootstrap", and "test").
# NOTE(review): a few lines are elided (docstring delimiters and the
# `if arch:` guard before fused line 2801).
2787 def _get_implicit_iuse(self):
2789 Some flags are considered to
2790 be implicit members of IUSE:
2791 * Flags derived from ARCH
2792 * Flags derived from USE_EXPAND_HIDDEN variables
2793 * Masked flags, such as those from {,package}use.mask
2794 * Forced flags, such as those from {,package}use.force
2795 * build and bootstrap flags used by bootstrap.sh
2797 iuse_implicit = set()
2798 # Flags derived from ARCH.
2799 arch = self.configdict["defaults"].get("ARCH")
2801 iuse_implicit.add(arch)
2802 iuse_implicit.update(self.get("PORTAGE_ARCHLIST", "").split())
2804 # Flags derived from USE_EXPAND_HIDDEN variables
2805 # such as ELIBC, KERNEL, and USERLAND.
2806 use_expand_hidden = self.get("USE_EXPAND_HIDDEN", "").split()
2807 for x in use_expand_hidden:
# "_.*" makes these entries usable as regex fragments (see the
# PORTAGE_IUSE lazy regex in setcpv).
2808 iuse_implicit.add(x.lower() + "_.*")
2810 # Flags that have been masked or forced.
2811 iuse_implicit.update(self.usemask)
2812 iuse_implicit.update(self.useforce)
2814 # build and bootstrap flags used by bootstrap.sh
2815 iuse_implicit.add("build")
2816 iuse_implicit.add("bootstrap")
2818 # Controlled by FEATURES=test. Make this implicit, so handling
2819 # of FEATURES=test is consistent regardless of explicit IUSE.
2820 # Users may use use.mask/package.use.mask to control
2821 # FEATURES=test for all ebuilds, regardless of explicit IUSE.
2822 iuse_implicit.add("test")
2824 return iuse_implicit
# _getUseMask: stack the profile use.mask layers with any matching
# package.use.mask entries for `pkg` and return the resulting flag set.
# NOTE(review): elided lines include the `if cp is None:` guard, the
# usemask list initialization, the `if cpdict:` / keys setup and the
# `while`/`else` structure around the best-match loop.
2826 def _getUseMask(self, pkg):
2827 cp = getattr(pkg, "cp", None)
2829 cp = dep_getkey(pkg)
2832 for i, pusemask_dict in enumerate(self.pusemask_list):
2833 cpdict = pusemask_dict.get(cp)
# More specific atom matches are inserted ahead of the layer's
# general use.mask entries.
2837 best_match = best_match_to_list(pkg, keys)
2839 keys.remove(best_match)
2840 usemask.insert(pos, cpdict[best_match])
2844 if self.usemask_list[i]:
2845 usemask.insert(pos, self.usemask_list[i])
2847 return set(stack_lists(usemask, incremental=True))
# _getUseForce: mirror image of _getUseMask for use.force /
# package.use.force; stacks all layers and returns the forced flag set.
# NOTE(review): same elisions as _getUseMask (guard, initialization,
# loop scaffolding).
2849 def _getUseForce(self, pkg):
2850 cp = getattr(pkg, "cp", None)
2852 cp = dep_getkey(pkg)
2855 for i, puseforce_dict in enumerate(self.puseforce_list):
2856 cpdict = puseforce_dict.get(cp)
2860 best_match = best_match_to_list(pkg, keys)
2862 keys.remove(best_match)
2863 useforce.insert(pos, cpdict[best_match])
2867 if self.useforce_list[i]:
2868 useforce.insert(pos, self.useforce_list[i])
2870 return set(stack_lists(useforce, incremental=True))
# _getMaskAtom: return the package.mask atom matching cpv (using its
# SLOT from metadata), unless cancelled by a package.unmask atom.
# NOTE(review): elided lines include the docstring delimiters/@type for
# cpv, the `continue` after a non-match, and the return statements, so
# the visible body ends mid-flow.
2872 def _getMaskAtom(self, cpv, metadata):
2874 Take a package and return a matching package.mask atom, or None if no
2875 such atom exists or it has been cancelled by package.unmask. PROVIDE
2876 is not checked, so atoms will not be found for old-style virtuals.
2878 @param cpv: The package name
2880 @param metadata: A dictionary of raw package metadata
2881 @type metadata: dict
2883 @return: An matching atom string or None if one is not found.
2886 cp = cpv_getkey(cpv)
2887 mask_atoms = self.pmaskdict.get(cp)
# Matching is done against "cpv:slot" so slot-atoms work.
2889 pkg_list = ["%s:%s" % (cpv, metadata["SLOT"])]
2890 unmask_atoms = self.punmaskdict.get(cp)
2891 for x in mask_atoms:
2892 if not match_from_list(x, pkg_list):
2895 for y in unmask_atoms:
2896 if match_from_list(y, pkg_list):
# _getProfileMaskAtom: return the profile `packages`-derived atom
# (self.prevmaskdict) matching cpv, if any.
# NOTE(review): returns and the `if profile_atoms:` guard are elided.
2901 def _getProfileMaskAtom(self, cpv, metadata):
2903 Take a package and return a matching profile atom, or None if no
2904 such atom exists. Note that a profile atom may or may not have a "*"
2905 prefix. PROVIDE is not checked, so atoms will not be found for
2908 @param cpv: The package name
2910 @param metadata: A dictionary of raw package metadata
2911 @type metadata: dict
2913 @return: An matching profile atom string or None if one is not found.
2916 cp = cpv_getkey(cpv)
2917 profile_atoms = self.prevmaskdict.get(cp)
2919 pkg_list = ["%s:%s" % (cpv, metadata["SLOT"])]
2920 for x in profile_atoms:
2921 if match_from_list(x, pkg_list):
# _getKeywords: stack the package's KEYWORDS (with "-*" dropped from
# the base layer) with matching profile package.keywords entries and
# return the incremental result.
# NOTE(review): the `pos = 0` initialization and the `if cpdict:` /
# keys / while scaffolding around the best-match loop are elided.
2926 def _getKeywords(self, cpv, metadata):
2927 cp = cpv_getkey(cpv)
2928 pkg = "%s:%s" % (cpv, metadata["SLOT"])
2929 keywords = [[x for x in metadata["KEYWORDS"].split() if x != "-*"]]
2931 for pkeywords_dict in self._pkeywords_list:
2932 cpdict = pkeywords_dict.get(cp)
2936 best_match = best_match_to_list(pkg, keys)
2938 keys.remove(best_match)
2939 keywords.insert(pos, cpdict[best_match])
2943 return stack_lists(keywords, incremental=True)
# _getMissingKeywords: compute which of the package's KEYWORDS are not
# covered by the effective ACCEPT_KEYWORDS (global + backupenv +
# per-package package.keywords), applying incremental "-"/"-*"
# semantics; empty KEYWORDS yields ["**"] so it is distinguishable
# from "none missing".
# NOTE(review): elided lines include docstring delimiters, the
# `match = False` / hastesting / hasstable initialization, the
# incremental stacking loop header, and the final returns.
2945 def _getMissingKeywords(self, cpv, metadata):
2947 Take a package and return a list of any KEYWORDS that the user may
2948 need to accept for the given package. If the KEYWORDS are empty
2949 and the ** keyword has not been accepted, the returned list will
2950 contain ** alone (in order to distinguish from the case of "none
2953 @param cpv: The package name (for package.keywords support)
2955 @param metadata: A dictionary of raw package metadata
2956 @type metadata: dict
2958 @return: A list of KEYWORDS that have not been accepted.
2961 # Hack: Need to check the env directly here as otherwise stacking
2962 # doesn't work properly as negative values are lost in the config
2963 # object (bug #139600)
2964 egroups = self.configdict["backupenv"].get(
2965 "ACCEPT_KEYWORDS", "").split()
2966 mygroups = self._getKeywords(cpv, metadata)
2967 # Repoman may modify this attribute as necessary.
2968 pgroups = self["ACCEPT_KEYWORDS"].split()
2970 cp = cpv_getkey(cpv)
2971 pkgdict = self.pkeywordsdict.get(cp)
2974 cpv_slot_list = ["%s:%s" % (cpv, metadata["SLOT"])]
2975 for atom, pkgkeywords in pkgdict.items():
2976 if match_from_list(atom, cpv_slot_list):
2978 pgroups.extend(pkgkeywords)
2979 if matches or egroups:
2980 pgroups.extend(egroups)
# Incremental stacking: "-kw" removes, "-*" clears (headers elided).
2983 if x.startswith("-"):
2987 inc_pgroups.discard(x[1:])
2990 pgroups = inc_pgroups
2995 if gp == "*" or (gp == "-*" and len(mygroups) == 1):
2996 writemsg(_("--- WARNING: Package '%(cpv)s' uses"
2997 " '%(keyword)s' keyword.\n") % {"cpv": cpv, "keyword": gp}, noiselevel=-1)
3004 elif gp.startswith("~"):
3006 elif not gp.startswith("-"):
3009 ((hastesting and "~*" in pgroups) or \
3010 (hasstable and "*" in pgroups) or "**" in pgroups):
3016 # If KEYWORDS is empty then we still have to return something
3017 # in order to distinguish from the case of "none missing".
3018 mygroups.append("**")
# _getMissingLicenses: determine which licenses in the package's
# LICENSE the user has not accepted, honoring per-package
# package.license entries and USE-conditional LICENSE syntax; may raise
# InvalidDependString (per the docstring).
# NOTE(review): elided lines include the `if cpdict:` guard, the
# incremental branch headers ("*", "-*", "-x"), the non-conditional
# early-return path, and the USE filtering before use_reduce.
3022 def _getMissingLicenses(self, cpv, metadata):
3024 Take a LICENSE string and return a list of any licenses that the user
3025 may need to accept for the given package. The returned list will not
3026 contain any licenses that have already been accepted. This method
3027 can throw an InvalidDependString exception.
3029 @param cpv: The package name (for package.license support)
3031 @param metadata: A dictionary of raw package metadata
3032 @type metadata: dict
3034 @return: A list of licenses that have not been accepted.
3036 accept_license = self._accept_license
3037 cpdict = self._plicensedict.get(dep_getkey(cpv), None)
3039 accept_license = list(self._accept_license)
3040 cpv_slot = "%s:%s" % (cpv, metadata["SLOT"])
3041 for atom in match_to_list(cpv_slot, list(cpdict)):
3042 accept_license.extend(cpdict[atom])
3044 licenses = set(flatten(dep.use_reduce(dep.paren_reduce(
3045 metadata["LICENSE"]), matchall=1)))
3046 licenses.discard('||')
3048 acceptable_licenses = set()
3049 for x in accept_license:
3051 acceptable_licenses.update(licenses)
3053 acceptable_licenses.clear()
3055 acceptable_licenses.discard(x[1:])
3057 acceptable_licenses.add(x)
# "?" indicates USE-conditional licenses, which must be reduced
# against the package's USE before masking is evaluated.
3059 license_str = metadata["LICENSE"]
3060 if "?" in license_str:
3061 use = metadata["USE"].split()
3065 license_struct = portage.dep.use_reduce(
3066 portage.dep.paren_reduce(license_str), uselist=use)
3067 license_struct = portage.dep.dep_opconvert(license_struct)
3068 return self._getMaskedLicenses(license_struct, acceptable_licenses)
# _getMaskedLicenses: recursively walk an opconverted LICENSE structure
# and collect elements not in acceptable_licenses; for "||" groups, one
# acceptable alternative satisfies the group, otherwise all masked
# alternatives are reported (user may unmask any combination).
# NOTE(review): elided lines include the `ret = []` inits, the
# satisfied-"||" early return, and appends/returns at the branch ends.
3070 def _getMaskedLicenses(self, license_struct, acceptable_licenses):
3071 if not license_struct:
3073 if license_struct[0] == "||":
3075 for element in license_struct[1:]:
3076 if isinstance(element, list):
3078 ret.append(self._getMaskedLicenses(
3079 element, acceptable_licenses))
3083 if element in acceptable_licenses:
3086 # Return all masked licenses, since we don't know which combination
3087 # (if any) the user will decide to unmask.
3091 for element in license_struct:
3092 if isinstance(element, list):
3094 ret.extend(self._getMaskedLicenses(element,
3095 acceptable_licenses))
3097 if element not in acceptable_licenses:
# _getMissingProperties: PROPERTIES analogue of _getMissingLicenses —
# compute which PROPERTIES values are not covered by ACCEPT_PROPERTIES
# plus per-package package.properties entries; may raise
# InvalidDependString (per the docstring).
# NOTE(review): same elisions as _getMissingLicenses (guards, "*"/"-*"
# branch headers, early-return path, USE filtering).
3101 def _getMissingProperties(self, cpv, metadata):
3103 Take a PROPERTIES string and return a list of any properties the user
3104 may need to accept for the given package. The returned list will not
3105 contain any properties that have already been accepted. This method
3106 can throw an InvalidDependString exception.
3108 @param cpv: The package name (for package.properties support)
3110 @param metadata: A dictionary of raw package metadata
3111 @type metadata: dict
3113 @return: A list of properties that have not been accepted.
3115 accept_properties = self._accept_properties
3116 cpdict = self._ppropertiesdict.get(dep_getkey(cpv), None)
3118 accept_properties = list(self._accept_properties)
3119 cpv_slot = "%s:%s" % (cpv, metadata["SLOT"])
3120 for atom in match_to_list(cpv_slot, list(cpdict)):
3121 accept_properties.extend(cpdict[atom])
3123 properties = set(flatten(dep.use_reduce(dep.paren_reduce(
3124 metadata["PROPERTIES"]), matchall=1)))
3125 properties.discard('||')
3127 acceptable_properties = set()
3128 for x in accept_properties:
3130 acceptable_properties.update(properties)
3132 acceptable_properties.clear()
3134 acceptable_properties.discard(x[1:])
3136 acceptable_properties.add(x)
3138 properties_str = metadata["PROPERTIES"]
3139 if "?" in properties_str:
3140 use = metadata["USE"].split()
3144 properties_struct = portage.dep.use_reduce(
3145 portage.dep.paren_reduce(properties_str), uselist=use)
3146 properties_struct = portage.dep.dep_opconvert(properties_struct)
3147 return self._getMaskedProperties(properties_struct, acceptable_properties)
# _getMaskedProperties: PROPERTIES analogue of _getMaskedLicenses —
# recursive walk collecting elements absent from acceptable_properties,
# with "||" groups satisfied by any single acceptable alternative.
# NOTE(review): the `ret = []` inits, satisfied-"||" early return, and
# appends/returns at branch ends are elided here.
3149 def _getMaskedProperties(self, properties_struct, acceptable_properties):
3150 if not properties_struct:
3152 if properties_struct[0] == "||":
3154 for element in properties_struct[1:]:
3155 if isinstance(element, list):
3157 ret.append(self._getMaskedProperties(
3158 element, acceptable_properties))
3162 if element in acceptable_properties:
3165 # Return all masked properties, since we don't know which combination
3166 # (if any) the user will decide to unmask
3170 for element in properties_struct:
3171 if isinstance(element, list):
3173 ret.extend(self._getMaskedProperties(element,
3174 acceptable_properties))
3176 if element not in acceptable_properties:
# _accept_chost: lazily compile an anchored regex from ACCEPT_CHOSTS
# (falling back to CHOST, then to match-anything) and test the
# package's CHOST metadata against it. Invalid patterns are reported
# via writemsg and replaced by a never-matching "^$" regex.
# NOTE(review): elided lines include docstring delimiters, the
# `if chost:` guard, and the multi-value `else:`/`try:` openers.
3180 def _accept_chost(self, cpv, metadata):
3182 @return True if pkg CHOST is accepted, False otherwise.
# Regex is compiled once and cached on the instance.
3184 if self._accept_chost_re is None:
3185 accept_chost = self.get("ACCEPT_CHOSTS", "").split()
3186 if not accept_chost:
3187 chost = self.get("CHOST")
3189 accept_chost.append(chost)
3190 if not accept_chost:
3191 self._accept_chost_re = re.compile(".*")
3192 elif len(accept_chost) == 1:
3194 self._accept_chost_re = re.compile(r'^%s$' % accept_chost[0])
3195 except re.error as e:
3196 writemsg(_("!!! Invalid ACCEPT_CHOSTS value: '%s': %s\n") % \
3197 (accept_chost[0], e), noiselevel=-1)
3198 self._accept_chost_re = re.compile("^$")
3201 self._accept_chost_re = re.compile(
3202 r'^(%s)$' % "|".join(accept_chost))
3203 except re.error as e:
3204 writemsg(_("!!! Invalid ACCEPT_CHOSTS value: '%s': %s\n") % \
3205 (" ".join(accept_chost), e), noiselevel=-1)
3206 self._accept_chost_re = re.compile("^$")
3208 return self._accept_chost_re.match(
3209 metadata.get('CHOST', '')) is not None
# setinst: record the old-style virtuals (PROVIDE) supplied by mycpv
# into self._depgraphVirtuals and recompile self.virtuals, so
# dep_expand()/dep_check() prefer this provider; dbapi match caches are
# NOT invalidated (see docstring, bug #1343).
# NOTE(review): elided lines include the early return when
# self.virtuals is empty, `if not provides: return`, the `else:`
# openers, `continue` in the provider loop, and the
# `self._depgraphVirtuals` modification bookkeeping around line 3251.
3211 def setinst(self,mycpv,mydbapi):
3212 """This updates the preferences for old-style virtuals,
3213 affecting the behavior of dep_expand() and dep_check()
3214 calls. It can change dbapi.match() behavior since that
3215 calls dep_expand(). However, dbapi instances have
3216 internal match caches that are not invalidated when
3217 preferences are updated here. This can potentially
3218 lead to some inconsistency (relevant to bug #1343)."""
3220 if len(self.virtuals) == 0:
3222 # Grab the virtuals this package provides and add them into the tree virtuals.
3223 if not hasattr(mydbapi, "aux_get"):
3224 provides = mydbapi["PROVIDE"]
3226 provides = mydbapi.aux_get(mycpv, ["PROVIDE"])[0]
# PROVIDE may be USE-conditional, so the effective USE is needed.
3229 if isinstance(mydbapi, portdbapi):
3230 self.setcpv(mycpv, mydb=mydbapi)
3231 myuse = self["PORTAGE_USE"]
3232 elif not hasattr(mydbapi, "aux_get"):
3233 myuse = mydbapi["USE"]
3235 myuse = mydbapi.aux_get(mycpv, ["USE"])[0]
3236 virts = flatten(portage.dep.use_reduce(portage.dep.paren_reduce(provides), uselist=myuse.split()))
3239 cp = dep.Atom(cpv_getkey(mycpv))
3241 virt = dep_getkey(virt)
3242 providers = self.virtuals.get(virt)
3243 if providers and cp in providers:
3245 providers = self._depgraphVirtuals.get(virt)
3246 if providers is None:
3248 self._depgraphVirtuals[virt] = providers
3249 if cp not in providers:
3250 providers.append(cp)
3254 self.virtuals = self.__getvirtuals_compile()
# NOTE(review): the `def` line for this method (fused line 3256,
# presumably portage's config.reload) is elided by extraction. It
# re-reads ROOT/etc/profile.env into the "env.d" config layer;
# getconfig returns None when the file is absent, in which case the
# layer is simply left cleared.
3257 """Reload things like /etc/profile.env that can change during runtime."""
3258 env_d_filename = os.path.join(self["ROOT"], "etc", "profile.env")
3259 self.configdict["env.d"].clear()
3260 env_d = getconfig(env_d_filename, expand=False)
3262 # env_d will be None if profile.env doesn't exist.
3263 self.configdict["env.d"].update(env_d)
# _prune_incremental: drop everything before the last "*" or "-*"
# occurrence in an incremental variable's token list, since such a
# token makes earlier settings irrelevant.
# NOTE(review): elided lines include docstring delimiters, the
# condition matching "*"/"-*" inside the reversed scan, and the
# return of the (possibly pruned) split.
3265 def _prune_incremental(self, split):
3267 Prune off any parts of an incremental variable that are
3268 made irrelevant by the latest occuring * or -*. This
3269 could be more aggressive but that might be confusing
3270 and the point is just to reduce noise a bit.
3272 for i, x in enumerate(reversed(split)):
3274 split = split[-i-1:]
# Re-stack all INCREMENTAL configuration variables (USE, FEATURES,
# ACCEPT_LICENSE, ACCEPT_PROPERTIES, USE_EXPAND, ...) across the layered
# config dicts, per the method's own docstring below.
# NOTE(review): extraction gaps — many original lines are missing from
# this chunk (loop headers, else branches, blank lines); comments below
# only describe what the visible lines demonstrate.
3284 def regenerate(self,useonly=0,use_cache=1):
3287 This involves regenerating valid USE flags, re-expanding USE_EXPAND flags
3288 re-stacking USE flags (-flag and -*), as well as any other INCREMENTAL
3289 variables. This also updates the env.d configdict; useful in case an ebuild
3290 changes the environment.
3292 If FEATURES has already stacked, it is not stacked twice.
3294 @param useonly: Only regenerate USE flags (not any other incrementals)
3295 @type useonly: Boolean
3296 @param use_cache: Enable Caching (only for autouse)
3297 @type use_cache: Boolean
# Re-entrancy guard: autouse() can recurse back into regenerate().
3302 if self.already_in_regenerate:
3303 # XXX: THIS REALLY NEEDS TO GET FIXED. autouse() loops.
3304 writemsg("!!! Looping in regenerate.\n",1)
3307 self.already_in_regenerate = 1
3310 myincrementals=["USE"]
3312 myincrementals = self.incrementals
3313 myincrementals = set(myincrementals)
3314 # If self.features exists, it has already been stacked and may have
3315 # been mutated, so don't stack it again or else any mutations will be
3317 if "FEATURES" in myincrementals and hasattr(self, "features"):
3318 myincrementals.remove("FEATURES")
3320 if "USE" in myincrementals:
3321 # Process USE last because it depends on USE_EXPAND which is also
3323 myincrementals.remove("USE")
# Stack over all config layers, with backupenv replacing the last layer.
3325 mydbs = self.configlist[:-1]
3326 mydbs.append(self.backupenv)
3328 # ACCEPT_LICENSE is a lazily evaluated incremental, so that * can be
3329 # used to match all licenses without every having to explicitly expand
3330 # it to all licenses.
3331 if self.local_config:
3334 mysplit.extend(curdb.get('ACCEPT_LICENSE', '').split())
3335 mysplit = self._prune_incremental(mysplit)
3336 accept_license_str = ' '.join(mysplit)
3337 self.configlist[-1]['ACCEPT_LICENSE'] = accept_license_str
# Only re-expand license tokens when the stacked string actually changed.
3338 if accept_license_str != self._accept_license_str:
3339 self._accept_license_str = accept_license_str
3340 self._accept_license = tuple(self.expandLicenseTokens(mysplit))
3342 # repoman will accept any license
3343 self._accept_license = ('*',)
3345 # ACCEPT_PROPERTIES works like ACCEPT_LICENSE, without groups
3346 if self.local_config:
3349 mysplit.extend(curdb.get('ACCEPT_PROPERTIES', '').split())
3350 mysplit = self._prune_incremental(mysplit)
3351 self.configlist[-1]['ACCEPT_PROPERTIES'] = ' '.join(mysplit)
3352 if tuple(mysplit) != self._accept_properties:
3353 self._accept_properties = tuple(mysplit)
3355 # repoman will accept any property
3356 self._accept_properties = ('*',)
# Stack every remaining incremental variable layer by layer.
3358 for mykey in myincrementals:
3362 if mykey not in curdb:
3364 #variables are already expanded
3365 mysplit = curdb[mykey].split()
3369 # "-*" is a special "minus" var that means "unset all settings".
3370 # so USE="-* gnome" will have *just* gnome enabled.
3375 # Not legal. People assume too much. Complain.
3376 writemsg(colorize("BAD",
3377 _("USE flags should not start with a '+': %s") % x) \
3378 + "\n", noiselevel=-1)
# '-flag' removes a previously stacked 'flag'.
3384 if (x[1:] in myflags):
3386 del myflags[myflags.index(x[1:])]
3389 # We got here, so add it now.
3390 if x not in myflags:
3394 #store setting in last element of configlist, the original environment:
3395 if myflags or mykey in self:
3396 self.configlist[-1][mykey] = " ".join(myflags)
3399 # Do the USE calculation last because it depends on USE_EXPAND.
3400 if "auto" in self["USE_ORDER"].split(":"):
3401 self.configdict["auto"]["USE"] = autouse(
3402 vartree(root=self["ROOT"], categories=self.categories,
3404 use_cache=use_cache, mysettings=self)
3406 self.configdict["auto"]["USE"] = ""
3408 use_expand = self.get("USE_EXPAND", "").split()
3409 use_expand_dict = self._use_expand_dict
3410 use_expand_dict.clear()
3411 for k in use_expand:
3414 use_expand_dict[k] = v
# Build the USE lookup order; reversed so highest priority comes last.
3417 for x in self["USE_ORDER"].split(":"):
3418 if x in self.configdict:
3419 self.uvlist.append(self.configdict[x])
3420 self.uvlist.reverse()
3422 # For optimal performance, use slice
3423 # comparison instead of startswith().
3425 for curdb in self.uvlist:
3426 cur_use_expand = [x for x in use_expand if x in curdb]
3427 mysplit = curdb.get("USE", "").split()
3428 if not mysplit and not cur_use_expand:
3436 writemsg(colorize("BAD", _("USE flags should not start "
3437 "with a '+': %s\n") % x), noiselevel=-1)
3443 myflags.discard(x[1:])
# Expand each USE_EXPAND variable into prefixed USE flags (var_lower + "_").
3448 for var in cur_use_expand:
3449 var_lower = var.lower()
3450 is_not_incremental = var not in myincrementals
3451 if is_not_incremental:
# Non-incremental USE_EXPAND variables override: drop stale prefixed flags.
3452 prefix = var_lower + "_"
3453 prefix_len = len(prefix)
3454 for x in list(myflags):
3455 if x[:prefix_len] == prefix:
3457 for x in curdb[var].split():
3459 if is_not_incremental:
3460 writemsg(colorize("BAD", _("Invalid '+' "
3461 "operator in non-incremental variable "
3462 "'%s': '%s'\n") % (var, x)), noiselevel=-1)
3465 writemsg(colorize("BAD", _("Invalid '+' "
3466 "operator in incremental variable "
3467 "'%s': '%s'\n") % (var, x)), noiselevel=-1)
3470 if is_not_incremental:
3471 writemsg(colorize("BAD", _("Invalid '-' "
3472 "operator in non-incremental variable "
3473 "'%s': '%s'\n") % (var, x)), noiselevel=-1)
3475 myflags.discard(var_lower + "_" + x[1:])
3477 myflags.add(var_lower + "_" + x)
# Mutate self.features in place if it exists so external references stay valid.
3479 if hasattr(self, "features"):
3480 self.features.clear()
3482 self.features = set()
3483 self.features.update(self.configlist[-1].get('FEATURES', '').split())
3484 self['FEATURES'] = ' '.join(sorted(self.features))
# Apply forced flags, then masked flags, then store the final sorted USE.
3486 myflags.update(self.useforce)
3487 arch = self.configdict["defaults"].get("ARCH")
3491 myflags.difference_update(self.usemask)
3492 self.configlist[-1]["USE"]= " ".join(sorted(myflags))
3494 self.already_in_regenerate = 0
# Build (presumably lazily — the cache-check lines are missing from this
# extraction) self.virts_p: a mapping from the package-name part of each
# virtual key ("category/pkg" -> "pkg") to its provider list.
# NOTE(review): extraction gaps — lines 3497-3498, 3500-3501 and the
# return are missing.
3496 def get_virts_p(self, myroot=None):
3499 virts = self.getvirtuals()
# Key on the second path component; first writer wins for duplicate names.
3502 vkeysplit = x.split("/")
3503 if vkeysplit[1] not in self.virts_p:
3504 self.virts_p[vkeysplit[1]] = virts[x]
# Compute and cache the stacked virtuals mapping: read each profile's
# "virtuals" file, validate atoms, stack the per-profile dicts, merge in
# installed-tree virtuals, then compile the final preference-ordered map.
# NOTE(review): extraction gaps — try/else/continue lines and several
# loop headers are missing from this chunk.
3507 def getvirtuals(self, myroot=None):
3508 """myroot is now ignored because, due to caching, it has always been
3509 broken for all but the first call."""
3510 myroot = self["ROOT"]
# Cached result returned here (the guarding 'if' line is missing).
3512 return self.virtuals
3515 for x in self.profiles:
3516 virtuals_file = os.path.join(x, "virtuals")
3517 virtuals_dict = grabdict(virtuals_file)
3519 for k, v in virtuals_dict.items():
3521 virt_atom = portage.dep.Atom(k)
3522 except portage.exception.InvalidAtom:
# A virtual key must be a plain cp atom: no blocker, no version operator.
3525 if virt_atom.blocker or \
3526 str(virt_atom) != str(virt_atom.cp):
3528 if virt_atom is None:
3529 writemsg(_("--- Invalid virtuals atom in %s: %s\n") % \
3530 (virtuals_file, k), noiselevel=-1)
3536 # allow incrementals
3539 atom = portage.dep.Atom(atom)
3540 except portage.exception.InvalidAtom:
3546 writemsg(_("--- Invalid atom in %s: %s\n") % \
3547 (virtuals_file, myatom), noiselevel=-1)
3549 if atom_orig == str(atom):
3550 # normal atom, so return as Atom instance
3551 providers.append(atom)
3553 # atom has special prefix, so return as string
3554 providers.append(atom_orig)
3556 atoms_dict[virt_atom] = providers
3558 virtuals_list.append(atoms_dict)
3560 self.dirVirtuals = stack_dictlist(virtuals_list, incremental=True)
3563 for virt in self.dirVirtuals:
3564 # Preference for virtuals decreases from left to right.
3565 self.dirVirtuals[virt].reverse()
3567 # Repoman does not use user or tree virtuals.
3568 if self.local_config and not self.treeVirtuals:
3569 temp_vartree = vartree(myroot, None,
3570 categories=self.categories, settings=self)
3571 self._populate_treeVirtuals(temp_vartree)
3573 self.virtuals = self.__getvirtuals_compile()
3574 return self.virtuals
# NOTE(review): extraction gaps — lines 3579 (presumably 'try:') and
# 3582 (presumably 'continue') are missing from this chunk.
3576 def _populate_treeVirtuals(self, vartree):
3577 """Reduce the provides into a list by CP."""
3578 for provide, cpv_list in vartree.get_all_provides().items():
3580 provide = dep.Atom(provide)
# Invalid PROVIDE atoms are skipped (handler body missing here).
3581 except exception.InvalidAtom:
# Map each virtual's cp to the cp of every installed package providing it.
3583 self.treeVirtuals[provide.cp] = \
3584 [dep.Atom(cpv_getkey(cpv)) for cpv in cpv_list]
# NOTE(review): extraction gaps — docstring tail, ptVirtuals initializer,
# 'continue' and the return line are missing from this chunk.
3586 def __getvirtuals_compile(self):
3587 """Stack installed and profile virtuals. Preference for virtuals
3588 decreases from left to right.
3589 Order of preference:
3590 1. installed and in profile
3595 # Virtuals by profile+tree preferences.
# Collect providers that are both installed and listed in the profile.
3598 for virt, installed_list in self.treeVirtuals.items():
3599 profile_list = self.dirVirtuals.get(virt, None)
3600 if not profile_list:
3602 for cp in installed_list:
3603 if cp in profile_list:
3604 ptVirtuals.setdefault(virt, [])
3605 ptVirtuals[virt].append(cp)
# Stack in decreasing preference: profile+tree, tree, profile, depgraph.
3607 virtuals = stack_dictlist([ptVirtuals, self.treeVirtuals,
3608 self.dirVirtuals, self._depgraphVirtuals])
# Delete mykey from every layered config dict (deletion body missing
# from this extraction — only the loop header survives).
3611 def __delitem__(self,mykey):
3613 for x in self.lookuplist:
# Layered lookup: search each dict in lookuplist (the hit/return lines
# are missing from this extraction), falling back to ''.
3618 def __getitem__(self,mykey):
3619 for d in self.lookuplist:
3622 return '' # for backward compat, don't raise KeyError
# dict.get() analogue over the layered lookuplist; returns x (default
# None) on a miss — the body lines are missing from this extraction.
3624 def get(self, k, x=None):
3625 for d in self.lookuplist:
# dict.pop() analogue; the arity check that raises with the message
# below, and the per-layer removal loop body, are partly missing from
# this extraction.
3630 def pop(self, key, *args):
3633 "pop expected at most 2 arguments, got " + \
3634 repr(1 + len(args))
# Iterate layers in reverse so the highest-priority entry is popped.
3636 for d in reversed(self.lookuplist):
# Deprecated dict.has_key() shim delegating to __contains__.
# NOTE(review): line 3647 (warning category/stacklevel args) is missing
# from this extraction.
3644 def has_key(self,mykey):
3645 warnings.warn("portage.config.has_key() is deprecated, "
3646 "use the in operator instead",
3648 return mykey in self
# Membership test across all layered dicts (the hit/return lines are
# missing from this extraction).
3650 def __contains__(self, mykey):
3651 """Called to implement membership test operators (in and not in)."""
3652 for d in self.lookuplist:
# dict.setdefault() analogue over the layered dicts; the body (lines
# 3658-3669 and 3671+) is missing from this extraction.
3657 def setdefault(self, k, x=None):
3670 for d in self.lookuplist:
# Python-2 style item iterator; its body is missing from this extraction.
3677 def iteritems(self):
# NOTE(review): this return most likely belongs to a separate items()
# method whose 'def' line (3681) is missing from this extraction.
3682 return list(self.iteritems())
# Store a key/value in the "env" layer; the value is discarded at the
# next reset(). Values must be strings, and both key and value are
# decoded up front to avoid later UnicodeDecodeError surprises.
# NOTE(review): lines 3688 and 3692-3693 are missing from this
# extraction — presumably blank lines, but unverified.
3684 def __setitem__(self,mykey,myvalue):
3685 "set a value; will be thrown away at reset() time"
3686 if not isinstance(myvalue, basestring):
3687 raise ValueError("Invalid type being used as a value: '%s': '%s'" % (str(mykey),str(myvalue)))
3689 # Avoid potential UnicodeDecodeError exceptions later.
3690 mykey = _unicode_decode(mykey)
3691 myvalue = _unicode_decode(myvalue)
# Track modified keys so reset() knows what to discard.
3694 self.modifiedkeys.append(mykey)
3695 self.configdict["env"][mykey]=myvalue
3698 "return our locally-maintained environment"
3700 environ_filter = self._environ_filter
3702 eapi = self.get('EAPI')
3703 phase = self.get('EBUILD_PHASE')
3704 filter_calling_env = False
3705 if phase not in ('clean', 'cleanrm', 'depend'):
3706 temp_dir = self.get('T')
3707 if temp_dir is not None and \
3708 os.path.exists(os.path.join(temp_dir, 'environment')):
3709 filter_calling_env = True
3711 environ_whitelist = self._environ_whitelist
3712 env_d = self.configdict["env.d"]
3714 if x in environ_filter:
3717 if not isinstance(myvalue, basestring):
3718 writemsg(_("!!! Non-string value in config: %s=%s\n") % \
3719 (x, myvalue), noiselevel=-1)
3721 if filter_calling_env and \
3722 x not in environ_whitelist and \
3723 not self._environ_whitelist_re.match(x):
3724 # Do not allow anything to leak into the ebuild
3725 # environment unless it is explicitly whitelisted.
3726 # This ensures that variables unset by the ebuild
3730 if "HOME" not in mydict and "BUILD_PREFIX" in mydict:
3731 writemsg("*** HOME not set. Setting to "+mydict["BUILD_PREFIX"]+"\n")
3732 mydict["HOME"]=mydict["BUILD_PREFIX"][:]
3734 if filter_calling_env:
3738 whitelist.append("RPMDIR")
3744 # Filtered by IUSE and implicit IUSE.
3745 mydict["USE"] = self.get("PORTAGE_USE", "")
3747 # Don't export AA to the ebuild environment in EAPIs that forbid it
3748 if eapi not in ("0", "1", "2"):
3749 mydict.pop("AA", None)
3751 # sandbox's bashrc sources /etc/profile which unsets ROOTPATH,
3752 # so we have to back it up and restore it.
3753 rootpath = mydict.get("ROOTPATH")
3755 mydict["PORTAGE_ROOTPATH"] = rootpath
def thirdpartymirrors(self):
    """Return the stacked third-party mirror mapping, caching it on
    first use.

    Mirror lists are read from the "thirdpartymirrors" file under each
    profile root; overlay profiles are placed before PORTDIR so their
    entries take precedence when the dicts are stacked incrementally.
    """
    if getattr(self, "_thirdpartymirrors", None) is None:
        profile_roots = [os.path.join(self["PORTDIR"], "profiles")]
        for overlay in self["PORTDIR_OVERLAY"].split():
            profile_roots.insert(0, os.path.join(overlay, "profiles"))
        mirror_dicts = []
        for root in profile_roots:
            mirror_dicts.append(
                grabdict(os.path.join(root, "thirdpartymirrors")))
        self._thirdpartymirrors = stack_dictlist(
            mirror_dicts, incremental=True)
    return self._thirdpartymirrors
3769 return flatten([[myarch, "~" + myarch] \
3770 for myarch in self["PORTAGE_ARCHLIST"].split()])
# Lazily compute and cache whether SELinux support is active: requires
# the "selinux" USE flag plus a working selinux module reporting enabled.
# NOTE(review): extraction gaps — the try/except/else lines around the
# selinux module usage (3776, 3779, 3781, 3783, 3785) are missing.
3772 def selinux_enabled(self):
3773 if getattr(self, "_selinux_enabled", None) is None:
3774 self._selinux_enabled = 0
3775 if "selinux" in self["USE"].split():
3777 if selinux.is_selinux_enabled() == 1:
3778 self._selinux_enabled = 1
3780 self._selinux_enabled = 0
# Reached when the selinux module import failed (handler line missing).
3782 writemsg(_("!!! SELinux module not found. Please verify that it was installed.\n"),
3784 self._selinux_enabled = 0
3786 return self._selinux_enabled
3788 if sys.hexversion >= 0x3000000:
3792 def _can_test_pty_eof():
3794 The _test_pty_eof() function seems to hang on most
3795 kernels other than Linux.
3797 @returns: True if _test_pty_eof() won't hang, False otherwise.
3799 return platform.system() in ("Linux",)
# Runtime probe for CPython bug http://bugs.python.org/issue5380: writes
# a known string through a pty and checks it is read back intact at EOF.
# NOTE(review): extraction gaps — docstring delimiters, the read loop
# header, EOF/exception handling and cleanup lines are missing from this
# chunk; comments describe only the visible lines.
3801 def _test_pty_eof():
3803 Returns True if this issues is fixed for the currently
3804 running version of python: http://bugs.python.org/issue5380
3805 Raises an EnvironmentError from openpty() if it fails.
3810 import array, fcntl, pty, select, termios
3811 test_string = 2 * "blah blah blah\n"
3812 test_string = _unicode_decode(test_string,
3813 encoding='utf_8', errors='strict')
3815 # may raise EnvironmentError
3816 master_fd, slave_fd = pty.openpty()
3818 # Non-blocking mode is required for Darwin kernel.
3819 fcntl.fcntl(master_fd, fcntl.F_SETFL,
3820 fcntl.fcntl(master_fd, fcntl.F_GETFL) | os.O_NONBLOCK)
3822 # Disable post-processing of output since otherwise weird
3823 # things like \n -> \r\n transformations may occur.
3824 mode = termios.tcgetattr(slave_fd)
3825 mode[1] &= ~termios.OPOST
3826 termios.tcsetattr(slave_fd, termios.TCSANOW, mode)
3828 # Simulate a subprocess writing some data to the
3829 # slave end of the pipe, and then exiting.
3832 pids = process.spawn_bash(_unicode_encode("echo -n '%s'" % test_string,
3833 encoding='utf_8', errors='strict'), env=os.environ,
3834 fd_pipes={0:sys.stdin.fileno(), 1:slave_fd, 2:slave_fd},
# spawn_bash returns an int on failure rather than a pid list.
3836 if isinstance(pids, int):
3839 raise EnvironmentError('spawn failed')
# Fallback path: write the test string directly to the slave end.
3842 os.write(slave_fd, _unicode_encode(test_string,
3843 encoding='utf_8', errors='strict'))
3846 # If using a fork, we must wait for the child here,
3847 # in order to avoid a race condition that would
3848 # lead to inconsistent results.
3852 master_file = os.fdopen(master_fd, 'rb')
3855 iwtd = [master_file]
3861 events = select.select(iwtd, owtd, ewtd)
3866 buf = array.array('B')
3868 buf.fromfile(master_file, 1024)
3872 # This is where data loss occurs.
3878 data.append(_unicode_decode(buf.tostring(),
3879 encoding='utf_8', errors='strict'))
# The bug manifests as data loss: the round-trip comparison fails.
3883 return test_string == ''.join(data)
3885 # If _test_pty_eof() can't be used for runtime detection of
3886 # http://bugs.python.org/issue5380, openpty can't safely be used
3887 # unless we can guarantee that the current version of python has
3888 # been fixed (affects all current versions of python3). When
3889 # this issue is fixed in python3, we can add another sys.hexversion
3890 # conditional to enable openpty support in the fixed versions.
3891 if sys.hexversion >= 0x3000000 and not _can_test_pty_eof():
3892 # Disable the use of openpty on Solaris as it seems Python's openpty
3893 # implementation doesn't play nice on Solaris with Portage's
3894 # behaviour causing hangs/deadlocks.
3895 # Disable on Darwin also, it used to work fine, but since the
3896 # introduction of _test_pty_eof Portage hangs (on the
3897 # slave_file.close()) indicating some other problems with openpty on
3899 # On AIX, haubi reported that the openpty code doesn't work any
3900 # longer since the introduction of _test_pty_eof either.
3901 # Looks like Python's openpty module is too fragile to use on UNIX,
3902 # so only use it on Linux
3903 _disable_openpty = True
3905 _disable_openpty = False
3908 if not _can_test_pty_eof():
3909 # Skip _test_pty_eof() on systems where it hangs.
# Allocate a pty if openpty is usable on this system, otherwise fall
# back to a plain os.pipe(); the one-time _test_pty_eof() probe decides.
# NOTE(review): extraction gaps — try/else lines, the _tested_pty
# assignment, got_pty assignments and some guards are missing here.
3912 def _create_pty_or_pipe(copy_term_size=None):
3914 Try to create a pty and if then fails then create a normal
3917 @param copy_term_size: If a tty file descriptor is given
3918 then the term size will be copied to the pty.
3919 @type copy_term_size: int
3921 @returns: A tuple of (is_pty, master_fd, slave_fd) where
3922 is_pty is True if a pty was successfully allocated, and
3923 False if a normal pipe was allocated.
3928 global _disable_openpty, _tested_pty
# Probe openpty exactly once per process; disable it permanently if the
# EOF test fails or raises.
3929 if not (_tested_pty or _disable_openpty):
3931 if not _test_pty_eof():
3932 _disable_openpty = True
3933 except EnvironmentError as e:
3934 _disable_openpty = True
3935 writemsg("openpty failed: '%s'\n" % str(e),
3940 if _disable_openpty:
3941 master_fd, slave_fd = os.pipe()
3943 from pty import openpty
3945 master_fd, slave_fd = openpty()
# openpty can also fail at allocation time; fall back to a pipe.
3947 except EnvironmentError as e:
3948 _disable_openpty = True
3949 writemsg("openpty failed: '%s'\n" % str(e),
3952 master_fd, slave_fd = os.pipe()
3955 # Disable post-processing of output since otherwise weird
3956 # things like \n -> \r\n transformations may occur.
3958 mode = termios.tcgetattr(slave_fd)
3959 mode[1] &= ~termios.OPOST
3960 termios.tcsetattr(slave_fd, termios.TCSANOW, mode)
# Mirror the caller's terminal dimensions onto the new pty.
3963 copy_term_size is not None and \
3964 os.isatty(copy_term_size):
3965 from portage.output import get_term_size, set_term_size
3966 rows, columns = get_term_size()
3967 set_term_size(rows, columns, slave_fd)
3969 return (got_pty, master_fd, slave_fd)
3971 # XXX This would be to replace getstatusoutput completely.
3972 # XXX Issue: cannot block execution. Deadlock condition.
# Portage's central subprocess launcher: picks a spawn function
# (bash/sandbox/fakeroot, optionally selinux-wrapped), wires up
# fd_pipes and optional pty-based log teeing, then waits for the child.
# NOTE(review): extraction gaps — many lines (else branches, try blocks,
# loop headers, the tee loop's EOF handling, cleanup) are missing from
# this chunk; comments describe only the visible lines.
3973 def spawn(mystring, mysettings, debug=0, free=0, droppriv=0, sesandbox=0, fakeroot=0, **keywords):
3975 Spawn a subprocess with extra portage-specific options.
3978 Sandbox: Sandbox means the spawned process will be limited in its ability t
3979 read and write files (normally this means it is restricted to ${IMAGE}/)
3980 SElinux Sandbox: Enables sandboxing on SElinux
3981 Reduced Privileges: Drops privilages such that the process runs as portage:portage
3984 Notes: os.system cannot be used because it messes with signal handling. Instead we
3985 use the portage.process spawn* family of functions.
3987 This function waits for the process to terminate.
3989 @param mystring: Command to run
3990 @type mystring: String
3991 @param mysettings: Either a Dict of Key,Value pairs or an instance of portage.config
3992 @type mysettings: Dictionary or config instance
3993 @param debug: Ignored
3994 @type debug: Boolean
3995 @param free: Enable sandboxing for this process
3997 @param droppriv: Drop to portage:portage when running this command
3998 @type droppriv: Boolean
3999 @param sesandbox: Enable SELinux Sandboxing (toggles a context switch)
4000 @type sesandbox: Boolean
4001 @param fakeroot: Run this command with faked root privileges
4002 @type fakeroot: Boolean
4003 @param keywords: Extra options encoded as a dict, to be passed to spawn
4004 @type keywords: Dictionary
4007 1. The return code of the spawned process.
# mysettings may be a plain dict or a config instance; derive env + name.
4010 if isinstance(mysettings, dict):
4012 keywords["opt_name"]="[ %s ]" % "portage"
4014 check_config_instance(mysettings)
4015 env=mysettings.environ()
4016 if mysettings.mycpv is not None:
4017 keywords["opt_name"] = "[%s]" % mysettings.mycpv
4019 keywords["opt_name"] = "[%s/%s]" % \
4020 (mysettings.get("CATEGORY",""), mysettings.get("PF",""))
4022 fd_pipes = keywords.get("fd_pipes")
4023 if fd_pipes is None:
4025 0:sys.stdin.fileno(),
4026 1:sys.stdout.fileno(),
4027 2:sys.stderr.fileno(),
4029 # In some cases the above print statements don't flush stdout, so
4030 # it needs to be flushed before allowing a child process to use it
4031 # so that output always shows in the correct order.
4032 stdout_filenos = (sys.stdout.fileno(), sys.stderr.fileno())
4033 for fd in fd_pipes.values():
4034 if fd in stdout_filenos:
4039 # The default policy for the sesandbox domain only allows entry (via exec)
4040 # from shells and from binaries that belong to portage (the number of entry
4041 # points is minimized). The "tee" binary is not among the allowed entry
4042 # points, so it is spawned outside of the sesandbox domain and reads from a
4043 # pseudo-terminal that connects two domains.
4044 logfile = keywords.get("logfile")
4048 fd_pipes_orig = None
# When logging, reroute the child's stdout/stderr through a pty/pipe so
# output can be teed to both the console and the log file.
4051 del keywords["logfile"]
4052 if 1 not in fd_pipes or 2 not in fd_pipes:
4053 raise ValueError(fd_pipes)
4055 fd_pipes.setdefault(0, sys.stdin.fileno())
4056 fd_pipes_orig = fd_pipes.copy()
4058 got_pty, master_fd, slave_fd = \
4059 _create_pty_or_pipe(copy_term_size=fd_pipes_orig[1])
4061 # We must set non-blocking mode before we close the slave_fd
4062 # since otherwise the fcntl call can fail on FreeBSD (the child
4063 # process might have already exited and closed slave_fd so we
4064 # have to keep it open in order to avoid FreeBSD potentially
4065 # generating an EAGAIN exception).
4067 fcntl.fcntl(master_fd, fcntl.F_SETFL,
4068 fcntl.fcntl(master_fd, fcntl.F_GETFL) | os.O_NONBLOCK)
4070 fd_pipes[0] = fd_pipes_orig[0]
4071 fd_pipes[1] = slave_fd
4072 fd_pipes[2] = slave_fd
4073 keywords["fd_pipes"] = fd_pipes
4075 features = mysettings.features
4076 # TODO: Enable fakeroot to be used together with droppriv. The
4077 # fake ownership/permissions will have to be converted to real
4078 # permissions in the merge phase.
4079 fakeroot = fakeroot and uid != 0 and portage.process.fakeroot_capable
# Drop to portage:portage only when actually running as root.
4080 if droppriv and not uid and portage_gid and portage_uid:
4081 keywords.update({"uid":portage_uid,"gid":portage_gid,
4082 "groups":userpriv_groups,"umask":0o02})
# "free" means no sandbox wrapper is needed at all.
4084 free=((droppriv and "usersandbox" not in features) or \
4085 (not droppriv and "sandbox" not in features and \
4086 "usersandbox" not in features and not fakeroot))
4088 if free or "SANDBOX_ACTIVE" in os.environ:
4089 keywords["opt_name"] += " bash"
4090 spawn_func = portage.process.spawn_bash
4092 keywords["opt_name"] += " fakeroot"
4093 keywords["fakeroot_state"] = os.path.join(mysettings["T"], "fakeroot.state")
4094 spawn_func = portage.process.spawn_fakeroot
4096 keywords["opt_name"] += " sandbox"
4097 spawn_func = portage.process.spawn_sandbox
4100 spawn_func = selinux.spawn_wrapper(spawn_func,
4101 mysettings["PORTAGE_SANDBOX_T"])
# Force returnpid so we control the wait ourselves; the caller's
# original returnpid preference is saved first.
4103 returnpid = keywords.get("returnpid")
4104 keywords["returnpid"] = True
4106 mypids.extend(spawn_func(mystring, env=env, **keywords))
# Tee loop: copy everything the child writes to both the original
# stdout and the log file, using select() on the pty/pipe master.
4115 log_file = open(_unicode_encode(logfile), mode='ab')
4116 stdout_file = os.fdopen(os.dup(fd_pipes_orig[1]), 'wb')
4117 master_file = os.fdopen(master_fd, 'rb')
4118 iwtd = [master_file]
4121 import array, select
4125 events = select.select(iwtd, owtd, ewtd)
4127 # Use non-blocking mode to prevent read
4128 # calls from blocking indefinitely.
4129 buf = array.array('B')
4131 buf.fromfile(f, buffsize)
4137 if f is master_file:
4138 buf.tofile(stdout_file)
4140 buf.tofile(log_file)
# Reap the child and translate its wait status into the shell-style
# return code ((retval & 0xff) << 8) on abnormal exit.
4146 retval = os.waitpid(pid, 0)[1]
4147 portage.process.spawned_pids.remove(pid)
4148 if retval != os.EX_OK:
4150 return (retval & 0xff) << 8
4154 _userpriv_spawn_kwargs = (
4155 ("uid", portage_uid),
4156 ("gid", portage_gid),
4157 ("groups", userpriv_groups),
# NOTE(review): extraction gaps — docstring delimiters, the fd_pipes
# closing brace, the userfetch guard's surrounding lines and the final
# return are missing from this chunk.
4161 def _spawn_fetch(settings, args, **kwargs):
4163 Spawn a process with appropriate settings for fetching, including
4164 userfetch and selinux support.
4167 global _userpriv_spawn_kwargs
4169 # Redirect all output to stdout since some fetchers like
4170 # wget pollute stderr (if portage detects a problem then it
4171 # can send it's own message to stderr).
4172 if "fd_pipes" not in kwargs:
4174 kwargs["fd_pipes"] = {
4175 0 : sys.stdin.fileno(),
4176 1 : sys.stdout.fileno(),
4177 2 : sys.stdout.fileno(),
# Drop root privileges for the fetch when FEATURES=userfetch is set.
4180 if "userfetch" in settings.features and \
4181 os.getuid() == 0 and portage_gid and portage_uid:
4182 kwargs.update(_userpriv_spawn_kwargs)
4184 spawn_func = portage.process.spawn
4186 if settings.selinux_enabled():
4187 spawn_func = selinux.spawn_wrapper(spawn_func,
4188 settings["PORTAGE_FETCH_T"])
4190 # bash is an allowed entrypoint, while most binaries are not
4191 if args[0] != BASH_BINARY:
4192 args = [BASH_BINARY, "-c", "exec \"$@\"", args[0]] + args
4194 rval = spawn_func(args, env=dict(iter(settings.items())), **kwargs)
4198 _userpriv_test_write_file_cache = {}
4199 _userpriv_test_write_cmd_script = "touch %(file_path)s 2>/dev/null ; rval=$? ; " + \
4200 "rm -f %(file_path)s ; exit $rval"
# NOTE(review): extraction gaps — docstring delimiters, the early
# return for a cache hit, and the final return are missing here.
4202 def _userpriv_test_write_file(settings, file_path):
4204 Drop privileges and try to open a file for writing. The file may or
4205 may not exist, and the parent directory is assumed to exist. The file
4206 is removed before returning.
4208 @param settings: A config instance which is passed to _spawn_fetch()
4209 @param file_path: A file path to open and write.
4210 @return: True if write succeeds, False otherwise.
4213 global _userpriv_test_write_file_cache, _userpriv_test_write_cmd_script
# Results are cached per path so the probe subprocess runs only once.
4214 rval = _userpriv_test_write_file_cache.get(file_path)
4215 if rval is not None:
# Run the touch-and-remove probe script via bash with dropped privileges.
4218 args = [BASH_BINARY, "-c", _userpriv_test_write_cmd_script % \
4219 {"file_path" : _shell_quote(file_path)}]
4221 returncode = _spawn_fetch(settings, args)
4223 rval = returncode == os.EX_OK
4224 _userpriv_test_write_file_cache[file_path] = rval
# NOTE(review): extraction gaps — docstring delimiters, 'continue'
# lines, the try around perform_md5, os.unlink of duplicates and the
# os.close(fd) after mkstemp are missing from this chunk.
4227 def _checksum_failure_temp_file(distdir, basename):
4229 First try to find a duplicate temp file with the same checksum and return
4230 that filename if available. Otherwise, use mkstemp to create a new unique
4231 filename._checksum_failure_.$RANDOM, rename the given file, and return the
4232 new filename. In any case, filename will be renamed or removed before this
4233 function returns a temp filename.
4236 filename = os.path.join(distdir, basename)
4237 size = os.stat(filename).st_size
# Look for an existing "<basename>._checksum_failure_.*" temp file that
# matches the failed file by size and md5, to avoid piling up copies.
4239 tempfile_re = re.compile(re.escape(basename) + r'\._checksum_failure_\..*')
4240 for temp_filename in os.listdir(distdir):
4241 if not tempfile_re.match(temp_filename):
4243 temp_filename = os.path.join(distdir, temp_filename)
# Cheap size check before computing a checksum.
4245 if size != os.stat(temp_filename).st_size:
4250 temp_checksum = portage.checksum.perform_md5(temp_filename)
4251 except portage.exception.FileNotFound:
4252 # Apparently the temp file disappeared. Let it go.
# md5 of the failed file is computed lazily, at most once.
4254 if checksum is None:
4255 checksum = portage.checksum.perform_md5(filename)
4256 if checksum == temp_checksum:
4258 return temp_filename
# No duplicate found: move the failed file to a fresh unique temp name.
4260 from tempfile import mkstemp
4261 fd, temp_filename = mkstemp("", basename + "._checksum_failure_.", distdir)
4263 os.rename(filename, temp_filename)
4264 return temp_filename
# NOTE(review): extraction gaps — docstring delimiters, the failure
# branch header (and its show_errors guard) and the return statements
# are missing from this chunk.
4266 def _check_digests(filename, digests, show_errors=1):
4268 Check digests and display a message if an error occurs.
4269 @return True if all digests match, False otherwise.
4271 verified_ok, reason = portage.checksum.verify_all(filename, digests)
# Failure reporting: reason is (message, got, expected).
4274 writemsg(_("!!! Previously fetched"
4275 " file: '%s'\n") % filename, noiselevel=-1)
4276 writemsg(_("!!! Reason: %s\n") % reason[0],
4278 writemsg(_("!!! Got: %s\n"
4279 "!!! Expected: %s\n") % \
4280 (reason[1], reason[2]), noiselevel=-1)
# NOTE(review): extraction gaps — docstring delimiters, the try/except
# around os.stat, and the return statements for most branches are
# missing from this chunk.
4284 def _check_distfile(filename, digests, eout, show_errors=1):
4286 @return a tuple of (match, stat_obj) where match is True if filename
4287 matches all given digests (if any) and stat_obj is a stat result, or
4288 None if the file does not exist.
# If "size" is the only digest available, a size match is sufficient.
4292 size = digests.get("size")
4293 if size is not None and len(digests) == 1:
4297 st = os.stat(filename)
# Missing file: report no match and no stat result.
4299 return (False, None)
4300 if size is not None and size != st.st_size:
4303 if size is not None:
4304 eout.ebegin(_("%s size ;-)") % os.path.basename(filename))
4306 elif st.st_size == 0:
4307 # Zero-byte distfiles are always invalid.
# Full digest verification path.
4310 if _check_digests(filename, digests, show_errors=show_errors):
4311 eout.ebegin("%s %s ;-)" % (os.path.basename(filename),
4312 " ".join(sorted(digests))))
4318 _fetch_resume_size_re = re.compile('(^[\d]+)([KMGTPEZY]?$)')
4320 _size_suffix_map = {
4332 def fetch(myuris, mysettings, listonly=0, fetchonly=0, locks_in_subdir=".locks",use_locks=1, try_mirrors=1):
4333 "fetch files. Will use digest file if available."
4338 features = mysettings.features
4339 restrict = mysettings.get("PORTAGE_RESTRICT","").split()
4341 from portage.data import secpass
4342 userfetch = secpass >= 2 and "userfetch" in features
4343 userpriv = secpass >= 2 and "userpriv" in features
4345 # 'nomirror' is bad/negative logic. You Restrict mirroring, not no-mirroring.
4346 if "mirror" in restrict or \
4347 "nomirror" in restrict:
4348 if ("mirror" in features) and ("lmirror" not in features):
4349 # lmirror should allow you to bypass mirror restrictions.
4350 # XXX: This is not a good thing, and is temporary at best.
4351 print(_(">>> \"mirror\" mode desired and \"mirror\" restriction found; skipping fetch."))
4354 # Generally, downloading the same file repeatedly from
4355 # every single available mirror is a waste of bandwidth
4356 # and time, so there needs to be a cap.
4357 checksum_failure_max_tries = 5
4358 v = checksum_failure_max_tries
4360 v = int(mysettings.get("PORTAGE_FETCH_CHECKSUM_TRY_MIRRORS",
4361 checksum_failure_max_tries))
4362 except (ValueError, OverflowError):
4363 writemsg(_("!!! Variable PORTAGE_FETCH_CHECKSUM_TRY_MIRRORS"
4364 " contains non-integer value: '%s'\n") % \
4365 mysettings["PORTAGE_FETCH_CHECKSUM_TRY_MIRRORS"], noiselevel=-1)
4366 writemsg(_("!!! Using PORTAGE_FETCH_CHECKSUM_TRY_MIRRORS "
4367 "default value: %s\n") % checksum_failure_max_tries,
4369 v = checksum_failure_max_tries
4371 writemsg(_("!!! Variable PORTAGE_FETCH_CHECKSUM_TRY_MIRRORS"
4372 " contains value less than 1: '%s'\n") % v, noiselevel=-1)
4373 writemsg(_("!!! Using PORTAGE_FETCH_CHECKSUM_TRY_MIRRORS "
4374 "default value: %s\n") % checksum_failure_max_tries,
4376 v = checksum_failure_max_tries
4377 checksum_failure_max_tries = v
4380 fetch_resume_size_default = "350K"
4381 fetch_resume_size = mysettings.get("PORTAGE_FETCH_RESUME_MIN_SIZE")
4382 if fetch_resume_size is not None:
4383 fetch_resume_size = "".join(fetch_resume_size.split())
4384 if not fetch_resume_size:
4385 # If it's undefined or empty, silently use the default.
4386 fetch_resume_size = fetch_resume_size_default
4387 match = _fetch_resume_size_re.match(fetch_resume_size)
4388 if match is None or \
4389 (match.group(2).upper() not in _size_suffix_map):
4390 writemsg(_("!!! Variable PORTAGE_FETCH_RESUME_MIN_SIZE"
4391 " contains an unrecognized format: '%s'\n") % \
4392 mysettings["PORTAGE_FETCH_RESUME_MIN_SIZE"], noiselevel=-1)
4393 writemsg(_("!!! Using PORTAGE_FETCH_RESUME_MIN_SIZE "
4394 "default value: %s\n") % fetch_resume_size_default,
4396 fetch_resume_size = None
4397 if fetch_resume_size is None:
4398 fetch_resume_size = fetch_resume_size_default
4399 match = _fetch_resume_size_re.match(fetch_resume_size)
4400 fetch_resume_size = int(match.group(1)) * \
4401 2 ** _size_suffix_map[match.group(2).upper()]
4403 # Behave like the package has RESTRICT="primaryuri" after a
4404 # couple of checksum failures, to increase the probablility
4405 # of success before checksum_failure_max_tries is reached.
4406 checksum_failure_primaryuri = 2
4407 thirdpartymirrors = mysettings.thirdpartymirrors()
4409 # In the background parallel-fetch process, it's safe to skip checksum
4410 # verification of pre-existing files in $DISTDIR that have the correct
4411 # file size. The parent process will verify their checksums prior to
4414 parallel_fetchonly = "PORTAGE_PARALLEL_FETCHONLY" in mysettings
4415 if parallel_fetchonly:
4418 check_config_instance(mysettings)
4420 custommirrors = grabdict(os.path.join(mysettings["PORTAGE_CONFIGROOT"],
4421 CUSTOM_MIRRORS_FILE), recursive=1)
4425 if listonly or ("distlocks" not in features):
4429 if "skiprocheck" in features:
4432 if not os.access(mysettings["DISTDIR"],os.W_OK) and fetch_to_ro:
4434 writemsg(colorize("BAD",
4435 _("!!! For fetching to a read-only filesystem, "
4436 "locking should be turned off.\n")), noiselevel=-1)
4437 writemsg(_("!!! This can be done by adding -distlocks to "
4438 "FEATURES in /etc/make.conf\n"), noiselevel=-1)
4441 # local mirrors are always added
4442 if "local" in custommirrors:
4443 mymirrors += custommirrors["local"]
4445 if "nomirror" in restrict or \
4446 "mirror" in restrict:
4447 # We don't add any mirrors.
4451 mymirrors += [x.rstrip("/") for x in mysettings["GENTOO_MIRRORS"].split() if x]
4453 skip_manifest = mysettings.get("EBUILD_SKIP_MANIFEST") == "1"
4454 pkgdir = mysettings.get("O")
4455 if not (pkgdir is None or skip_manifest):
4456 mydigests = Manifest(
4457 pkgdir, mysettings["DISTDIR"]).getTypeDigests("DIST")
4459 # no digests because fetch was not called for a specific package
4462 ro_distdirs = [x for x in \
4463 util.shlex_split(mysettings.get("PORTAGE_RO_DISTDIRS", "")) \
4464 if os.path.isdir(x)]
4467 for x in range(len(mymirrors)-1,-1,-1):
4468 if mymirrors[x] and mymirrors[x][0]=='/':
4469 fsmirrors += [mymirrors[x]]
4472 restrict_fetch = "fetch" in restrict
4473 custom_local_mirrors = custommirrors.get("local", [])
4475 # With fetch restriction, a normal uri may only be fetched from
4476 # custom local mirrors (if available). A mirror:// uri may also
4477 # be fetched from specific mirrors (effectively overriding fetch
4478 # restriction, but only for specific mirrors).
4479 locations = custom_local_mirrors
4481 locations = mymirrors
4483 file_uri_tuples = []
4484 if isinstance(myuris, dict):
4485 for myfile, uri_set in myuris.items():
4486 for myuri in uri_set:
4487 file_uri_tuples.append((myfile, myuri))
4489 for myuri in myuris:
4490 file_uri_tuples.append((os.path.basename(myuri), myuri))
4493 primaryuri_indexes={}
4494 primaryuri_dict = {}
4495 thirdpartymirror_uris = {}
4496 for myfile, myuri in file_uri_tuples:
4497 if myfile not in filedict:
4499 for y in range(0,len(locations)):
4500 filedict[myfile].append(locations[y]+"/distfiles/"+myfile)
4501 if myuri[:9]=="mirror://":
4502 eidx = myuri.find("/", 9)
4504 mirrorname = myuri[9:eidx]
4505 path = myuri[eidx+1:]
4507 # Try user-defined mirrors first
4508 if mirrorname in custommirrors:
4509 for cmirr in custommirrors[mirrorname]:
4510 filedict[myfile].append(
4511 cmirr.rstrip("/") + "/" + path)
4513 # now try the official mirrors
4514 if mirrorname in thirdpartymirrors:
4515 shuffle(thirdpartymirrors[mirrorname])
4517 uris = [locmirr.rstrip("/") + "/" + path \
4518 for locmirr in thirdpartymirrors[mirrorname]]
4519 filedict[myfile].extend(uris)
4520 thirdpartymirror_uris.setdefault(myfile, []).extend(uris)
4522 if not filedict[myfile]:
4523 writemsg(_("No known mirror by the name: %s\n") % (mirrorname))
4525 writemsg(_("Invalid mirror definition in SRC_URI:\n"), noiselevel=-1)
4526 writemsg(" %s\n" % (myuri), noiselevel=-1)
4529 # Only fetch from specific mirrors is allowed.
4531 if "primaryuri" in restrict:
4532 # Use the source site first.
4533 if myfile in primaryuri_indexes:
4534 primaryuri_indexes[myfile] += 1
4536 primaryuri_indexes[myfile] = 0
4537 filedict[myfile].insert(primaryuri_indexes[myfile], myuri)
4539 filedict[myfile].append(myuri)
4540 primaryuris = primaryuri_dict.get(myfile)
4541 if primaryuris is None:
4543 primaryuri_dict[myfile] = primaryuris
4544 primaryuris.append(myuri)
4546 # Prefer thirdpartymirrors over normal mirrors in cases when
4547 # the file does not yet exist on the normal mirrors.
4548 for myfile, uris in thirdpartymirror_uris.items():
4549 primaryuri_dict.setdefault(myfile, []).extend(uris)
4556 if can_fetch and not fetch_to_ro:
4557 global _userpriv_test_write_file_cache
4561 dir_gid = portage_gid
4562 if "FAKED_MODE" in mysettings:
4563 # When inside fakeroot, directories with portage's gid appear
4564 # to have root's gid. Therefore, use root's gid instead of
4565 # portage's gid to avoid spurrious permissions adjustments
4566 # when inside fakeroot.
4569 if "distlocks" in features:
4570 distdir_dirs.append(".locks")
4573 for x in distdir_dirs:
4574 mydir = os.path.join(mysettings["DISTDIR"], x)
4575 write_test_file = os.path.join(
4576 mydir, ".__portage_test_write__")
4583 if st is not None and stat.S_ISDIR(st.st_mode):
4584 if not (userfetch or userpriv):
4586 if _userpriv_test_write_file(mysettings, write_test_file):
4589 _userpriv_test_write_file_cache.pop(write_test_file, None)
4590 if portage.util.ensure_dirs(mydir, gid=dir_gid, mode=dirmode, mask=modemask):
4592 # The directory has just been created
4593 # and therefore it must be empty.
4595 writemsg(_("Adjusting permissions recursively: '%s'\n") % mydir,
4598 raise # bail out on the first error that occurs during recursion
4599 if not apply_recursive_permissions(mydir,
4600 gid=dir_gid, dirmode=dirmode, dirmask=modemask,
4601 filemode=filemode, filemask=modemask, onerror=onerror):
4602 raise portage.exception.OperationNotPermitted(
4603 _("Failed to apply recursive permissions for the portage group."))
4604 except portage.exception.PortageException as e:
4605 if not os.path.isdir(mysettings["DISTDIR"]):
4606 writemsg("!!! %s\n" % str(e), noiselevel=-1)
4607 writemsg(_("!!! Directory Not Found: DISTDIR='%s'\n") % mysettings["DISTDIR"], noiselevel=-1)
4608 writemsg(_("!!! Fetching will fail!\n"), noiselevel=-1)
4611 not fetch_to_ro and \
4612 not os.access(mysettings["DISTDIR"], os.W_OK):
4613 writemsg(_("!!! No write access to '%s'\n") % mysettings["DISTDIR"],
4617 if can_fetch and use_locks and locks_in_subdir:
4618 distlocks_subdir = os.path.join(mysettings["DISTDIR"], locks_in_subdir)
4619 if not os.access(distlocks_subdir, os.W_OK):
4620 writemsg(_("!!! No write access to write to %s. Aborting.\n") % distlocks_subdir,
4623 del distlocks_subdir
4625 distdir_writable = can_fetch and not fetch_to_ro
4626 failed_files = set()
4627 restrict_fetch_msg = False
4629 for myfile in filedict:
4633 1 partially downloaded
4634 2 completely downloaded
4638 orig_digests = mydigests.get(myfile, {})
4639 size = orig_digests.get("size")
4641 # Zero-byte distfiles are always invalid, so discard their digests.
4642 del mydigests[myfile]
4643 orig_digests.clear()
4645 pruned_digests = orig_digests
4646 if parallel_fetchonly:
4648 if size is not None:
4649 pruned_digests["size"] = size
4651 myfile_path = os.path.join(mysettings["DISTDIR"], myfile)
4653 has_space_superuser = True
4656 writemsg_stdout("\n", noiselevel=-1)
4658 # check if there is enough space in DISTDIR to completely store myfile
4659 # overestimate the filesize so we aren't bitten by FS overhead
4660 if size is not None and hasattr(os, "statvfs"):
4661 vfs_stat = os.statvfs(mysettings["DISTDIR"])
4663 mysize = os.stat(myfile_path).st_size
4664 except OSError as e:
4665 if e.errno not in (errno.ENOENT, errno.ESTALE):
4669 if (size - mysize + vfs_stat.f_bsize) >= \
4670 (vfs_stat.f_bsize * vfs_stat.f_bavail):
4672 if (size - mysize + vfs_stat.f_bsize) >= \
4673 (vfs_stat.f_bsize * vfs_stat.f_bfree):
4674 has_space_superuser = False
4676 if not has_space_superuser:
4684 writemsg(_("!!! Insufficient space to store %s in %s\n") % \
4685 (myfile, mysettings["DISTDIR"]), noiselevel=-1)
4687 if has_space_superuser:
4688 writemsg(_("!!! Insufficient privileges to use "
4689 "remaining space.\n"), noiselevel=-1)
4691 writemsg(_("!!! You may set FEATURES=\"-userfetch\""
4692 " in /etc/make.conf in order to fetch with\n"
4693 "!!! superuser privileges.\n"), noiselevel=-1)
4695 if distdir_writable and use_locks:
4698 lock_file = os.path.join(mysettings["DISTDIR"],
4699 locks_in_subdir, myfile)
4701 lock_file = myfile_path
4705 lock_kwargs["flags"] = os.O_NONBLOCK
4708 file_lock = portage.locks.lockfile(myfile_path,
4709 wantnewlockfile=1, **lock_kwargs)
4710 except portage.exception.TryAgain:
4711 writemsg(_(">>> File '%s' is already locked by "
4712 "another fetcher. Continuing...\n") % myfile,
4718 eout = portage.output.EOutput()
4719 eout.quiet = mysettings.get("PORTAGE_QUIET") == "1"
4720 match, mystat = _check_distfile(
4721 myfile_path, pruned_digests, eout)
4723 if distdir_writable:
4725 apply_secpass_permissions(myfile_path,
4726 gid=portage_gid, mode=0o664, mask=0o2,
4728 except portage.exception.PortageException as e:
4729 if not os.access(myfile_path, os.R_OK):
4730 writemsg(_("!!! Failed to adjust permissions:"
4731 " %s\n") % str(e), noiselevel=-1)
4735 if distdir_writable and mystat is None:
4736 # Remove broken symlinks if necessary.
4738 os.unlink(myfile_path)
4742 if mystat is not None:
4743 if stat.S_ISDIR(mystat.st_mode):
4744 portage.util.writemsg_level(
4745 _("!!! Unable to fetch file since "
4746 "a directory is in the way: \n"
4747 "!!! %s\n") % myfile_path,
4748 level=logging.ERROR, noiselevel=-1)
4751 if mystat.st_size == 0:
4752 if distdir_writable:
4754 os.unlink(myfile_path)
4757 elif distdir_writable:
4758 if mystat.st_size < fetch_resume_size and \
4759 mystat.st_size < size:
4760 # If the file already exists and the size does not
4761 # match the existing digests, it may be that the
4762 # user is attempting to update the digest. In this
4763 # case, the digestgen() function will advise the
4764 # user to use `ebuild --force foo.ebuild manifest`
4765 # in order to force the old digests to be replaced.
4766 # Since the user may want to keep this file, rename
4767 # it instead of deleting it.
4768 writemsg(_(">>> Renaming distfile with size "
4769 "%d (smaller than " "PORTAGE_FETCH_RESU"
4770 "ME_MIN_SIZE)\n") % mystat.st_size)
4772 _checksum_failure_temp_file(
4773 mysettings["DISTDIR"], myfile)
4774 writemsg_stdout(_("Refetching... "
4775 "File renamed to '%s'\n\n") % \
4776 temp_filename, noiselevel=-1)
4777 elif mystat.st_size >= size:
4779 _checksum_failure_temp_file(
4780 mysettings["DISTDIR"], myfile)
4781 writemsg_stdout(_("Refetching... "
4782 "File renamed to '%s'\n\n") % \
4783 temp_filename, noiselevel=-1)
4785 if distdir_writable and ro_distdirs:
4786 readonly_file = None
4787 for x in ro_distdirs:
4788 filename = os.path.join(x, myfile)
4789 match, mystat = _check_distfile(
4790 filename, pruned_digests, eout)
4792 readonly_file = filename
4794 if readonly_file is not None:
4796 os.unlink(myfile_path)
4797 except OSError as e:
4798 if e.errno not in (errno.ENOENT, errno.ESTALE):
4801 os.symlink(readonly_file, myfile_path)
4804 if fsmirrors and not os.path.exists(myfile_path) and has_space:
4805 for mydir in fsmirrors:
4806 mirror_file = os.path.join(mydir, myfile)
4808 shutil.copyfile(mirror_file, myfile_path)
4809 writemsg(_("Local mirror has file: %s\n") % myfile)
4811 except (IOError, OSError) as e:
4812 if e.errno not in (errno.ENOENT, errno.ESTALE):
4817 mystat = os.stat(myfile_path)
4818 except OSError as e:
4819 if e.errno not in (errno.ENOENT, errno.ESTALE):
4824 apply_secpass_permissions(
4825 myfile_path, gid=portage_gid, mode=0o664, mask=0o2,
4827 except portage.exception.PortageException as e:
4828 if not os.access(myfile_path, os.R_OK):
4829 writemsg(_("!!! Failed to adjust permissions:"
4830 " %s\n") % str(e), noiselevel=-1)
4832 # If the file is empty then it's obviously invalid. Remove
4833 # the empty file and try to download if possible.
4834 if mystat.st_size == 0:
4835 if distdir_writable:
4837 os.unlink(myfile_path)
4838 except EnvironmentError:
4840 elif myfile not in mydigests:
4841 # We don't have a digest, but the file exists. We must
4842 # assume that it is fully downloaded.
4845 if mystat.st_size < mydigests[myfile]["size"] and \
4847 fetched = 1 # Try to resume this download.
4848 elif parallel_fetchonly and \
4849 mystat.st_size == mydigests[myfile]["size"]:
4850 eout = portage.output.EOutput()
4852 mysettings.get("PORTAGE_QUIET") == "1"
4854 "%s size ;-)" % (myfile, ))
4858 verified_ok, reason = portage.checksum.verify_all(
4859 myfile_path, mydigests[myfile])
4861 writemsg(_("!!! Previously fetched"
4862 " file: '%s'\n") % myfile, noiselevel=-1)
4863 writemsg(_("!!! Reason: %s\n") % reason[0],
4865 writemsg(_("!!! Got: %s\n"
4866 "!!! Expected: %s\n") % \
4867 (reason[1], reason[2]), noiselevel=-1)
4868 if reason[0] == _("Insufficient data for checksum verification"):
4870 if distdir_writable:
4872 _checksum_failure_temp_file(
4873 mysettings["DISTDIR"], myfile)
4874 writemsg_stdout(_("Refetching... "
4875 "File renamed to '%s'\n\n") % \
4876 temp_filename, noiselevel=-1)
4878 eout = portage.output.EOutput()
4880 mysettings.get("PORTAGE_QUIET", None) == "1"
4881 digests = mydigests.get(myfile)
4883 digests = list(digests)
4886 "%s %s ;-)" % (myfile, " ".join(digests)))
4888 continue # fetch any remaining files
4890 # Create a reversed list since that is optimal for list.pop().
4891 uri_list = filedict[myfile][:]
4893 checksum_failure_count = 0
4894 tried_locations = set()
4896 loc = uri_list.pop()
4897 # Eliminate duplicates here in case we've switched to
4898 # "primaryuri" mode on the fly due to a checksum failure.
4899 if loc in tried_locations:
4901 tried_locations.add(loc)
4903 writemsg_stdout(loc+" ", noiselevel=-1)
4905 # allow different fetchcommands per protocol
4906 protocol = loc[0:loc.find("://")]
4908 missing_file_param = False
4909 fetchcommand_var = "FETCHCOMMAND_" + protocol.upper()
4910 fetchcommand = mysettings.get(fetchcommand_var)
4911 if fetchcommand is None:
4912 fetchcommand_var = "FETCHCOMMAND"
4913 fetchcommand = mysettings.get(fetchcommand_var)
4914 if fetchcommand is None:
4915 portage.util.writemsg_level(
4916 _("!!! %s is unset. It should "
4917 "have been defined in\n!!! %s/make.globals.\n") \
4918 % (fetchcommand_var,
4919 portage.const.GLOBAL_CONFIG_PATH),
4920 level=logging.ERROR, noiselevel=-1)
4922 if "${FILE}" not in fetchcommand:
4923 portage.util.writemsg_level(
4924 _("!!! %s does not contain the required ${FILE}"
4925 " parameter.\n") % fetchcommand_var,
4926 level=logging.ERROR, noiselevel=-1)
4927 missing_file_param = True
4929 resumecommand_var = "RESUMECOMMAND_" + protocol.upper()
4930 resumecommand = mysettings.get(resumecommand_var)
4931 if resumecommand is None:
4932 resumecommand_var = "RESUMECOMMAND"
4933 resumecommand = mysettings.get(resumecommand_var)
4934 if resumecommand is None:
4935 portage.util.writemsg_level(
4936 _("!!! %s is unset. It should "
4937 "have been defined in\n!!! %s/make.globals.\n") \
4938 % (resumecommand_var,
4939 portage.const.GLOBAL_CONFIG_PATH),
4940 level=logging.ERROR, noiselevel=-1)
4942 if "${FILE}" not in resumecommand:
4943 portage.util.writemsg_level(
4944 _("!!! %s does not contain the required ${FILE}"
4945 " parameter.\n") % resumecommand_var,
4946 level=logging.ERROR, noiselevel=-1)
4947 missing_file_param = True
4949 if missing_file_param:
4950 portage.util.writemsg_level(
4951 _("!!! Refer to the make.conf(5) man page for "
4952 "information about how to\n!!! correctly specify "
4953 "FETCHCOMMAND and RESUMECOMMAND.\n"),
4954 level=logging.ERROR, noiselevel=-1)
4955 if myfile != os.path.basename(loc):
4961 mysize = os.stat(myfile_path).st_size
4962 except OSError as e:
4963 if e.errno not in (errno.ENOENT, errno.ESTALE):
4969 writemsg(_("!!! File %s isn't fetched but unable to get it.\n") % myfile,
4971 elif size is None or size > mysize:
4972 writemsg(_("!!! File %s isn't fully fetched, but unable to complete it\n") % myfile,
4975 writemsg(_("!!! File %s is incorrect size, "
4976 "but unable to retry.\n") % myfile, noiselevel=-1)
4981 if fetched != 2 and has_space:
4982 #we either need to resume or start the download
4985 mystat = os.stat(myfile_path)
4986 except OSError as e:
4987 if e.errno not in (errno.ENOENT, errno.ESTALE):
4992 if mystat.st_size < fetch_resume_size:
4993 writemsg(_(">>> Deleting distfile with size "
4994 "%d (smaller than " "PORTAGE_FETCH_RESU"
4995 "ME_MIN_SIZE)\n") % mystat.st_size)
4997 os.unlink(myfile_path)
4998 except OSError as e:
5000 (errno.ENOENT, errno.ESTALE):
5006 writemsg(_(">>> Resuming download...\n"))
5007 locfetch=resumecommand
5008 command_var = resumecommand_var
5011 locfetch=fetchcommand
5012 command_var = fetchcommand_var
5013 writemsg_stdout(_(">>> Downloading '%s'\n") % \
5014 re.sub(r'//(.+):.+@(.+)/',r'//\1:*password*@\2/', loc))
5016 "DISTDIR": mysettings["DISTDIR"],
5021 myfetch = util.shlex_split(locfetch)
5022 myfetch = [varexpand(x, mydict=variables) for x in myfetch]
5026 myret = _spawn_fetch(mysettings, myfetch)
5030 apply_secpass_permissions(myfile_path,
5031 gid=portage_gid, mode=0o664, mask=0o2)
5032 except portage.exception.FileNotFound as e:
5034 except portage.exception.PortageException as e:
5035 if not os.access(myfile_path, os.R_OK):
5036 writemsg(_("!!! Failed to adjust permissions:"
5037 " %s\n") % str(e), noiselevel=-1)
5039 # If the file is empty then it's obviously invalid. Don't
5040 # trust the return value from the fetcher. Remove the
5041 # empty file and try to download again.
5043 if os.stat(myfile_path).st_size == 0:
5044 os.unlink(myfile_path)
5047 except EnvironmentError:
5050 if mydigests is not None and myfile in mydigests:
5052 mystat = os.stat(myfile_path)
5053 except OSError as e:
5054 if e.errno not in (errno.ENOENT, errno.ESTALE):
5060 if stat.S_ISDIR(mystat.st_mode):
5061 # This can happen if FETCHCOMMAND erroneously
5062 # contains wget's -P option where it should
5064 portage.util.writemsg_level(
5065 _("!!! The command specified in the "
5066 "%s variable appears to have\n!!! "
5067 "created a directory instead of a "
5068 "normal file.\n") % command_var,
5069 level=logging.ERROR, noiselevel=-1)
5070 portage.util.writemsg_level(
5071 _("!!! Refer to the make.conf(5) "
5072 "man page for information about how "
5073 "to\n!!! correctly specify "
5074 "FETCHCOMMAND and RESUMECOMMAND.\n"),
5075 level=logging.ERROR, noiselevel=-1)
5078 # no exception? file exists. let digestcheck() report
5079 # an appropriately for size or checksum errors
5081 # If the fetcher reported success and the file is
5082 # too small, it's probably because the digest is
5083 # bad (upstream changed the distfile). In this
5084 # case we don't want to attempt to resume. Show a
5085 # digest verification failure to that the user gets
5086 # a clue about what just happened.
5087 if myret != os.EX_OK and \
5088 mystat.st_size < mydigests[myfile]["size"]:
5089 # Fetch failed... Try the next one... Kill 404 files though.
5090 if (mystat[stat.ST_SIZE]<100000) and (len(myfile)>4) and not ((myfile[-5:]==".html") or (myfile[-4:]==".htm")):
5091 html404=re.compile("<title>.*(not found|404).*</title>",re.I|re.M)
5092 if html404.search(codecs.open(
5093 _unicode_encode(myfile_path,
5094 encoding=_encodings['fs'], errors='strict'),
5095 mode='r', encoding=_encodings['content'], errors='replace'
5098 os.unlink(mysettings["DISTDIR"]+"/"+myfile)
5099 writemsg(_(">>> Deleting invalid distfile. (Improper 404 redirect from server.)\n"))
5102 except (IOError, OSError):
5107 # File is the correct size--check the checksums for the fetched
5108 # file NOW, for those users who don't have a stable/continuous
5109 # net connection. This way we have a chance to try to download
5110 # from another mirror...
5111 verified_ok,reason = portage.checksum.verify_all(mysettings["DISTDIR"]+"/"+myfile, mydigests[myfile])
5114 writemsg(_("!!! Fetched file: %s VERIFY FAILED!\n") % myfile,
5116 writemsg(_("!!! Reason: %s\n") % reason[0],
5118 writemsg(_("!!! Got: %s\n!!! Expected: %s\n") % \
5119 (reason[1], reason[2]), noiselevel=-1)
5120 if reason[0] == _("Insufficient data for checksum verification"):
5123 _checksum_failure_temp_file(
5124 mysettings["DISTDIR"], myfile)
5125 writemsg_stdout(_("Refetching... "
5126 "File renamed to '%s'\n\n") % \
5127 temp_filename, noiselevel=-1)
5129 checksum_failure_count += 1
5130 if checksum_failure_count == \
5131 checksum_failure_primaryuri:
5132 # Switch to "primaryuri" mode in order
5133 # to increase the probablility of
5136 primaryuri_dict.get(myfile)
5139 reversed(primaryuris))
5140 if checksum_failure_count >= \
5141 checksum_failure_max_tries:
5144 eout = portage.output.EOutput()
5145 eout.quiet = mysettings.get("PORTAGE_QUIET", None) == "1"
5146 digests = mydigests.get(myfile)
5148 eout.ebegin("%s %s ;-)" % \
5149 (myfile, " ".join(sorted(digests))))
5157 elif mydigests!=None:
5158 writemsg(_("No digest file available and download failed.\n\n"),
5161 if use_locks and file_lock:
5162 portage.locks.unlockfile(file_lock)
5165 writemsg_stdout("\n", noiselevel=-1)
5167 if restrict_fetch and not restrict_fetch_msg:
5168 restrict_fetch_msg = True
5169 msg = _("\n!!! %s/%s"
5170 " has fetch restriction turned on.\n"
5171 "!!! This probably means that this "
5172 "ebuild's files must be downloaded\n"
5173 "!!! manually. See the comments in"
5174 " the ebuild for more information.\n\n") % \
5175 (mysettings["CATEGORY"], mysettings["PF"])
5176 portage.util.writemsg_level(msg,
5177 level=logging.ERROR, noiselevel=-1)
5178 have_builddir = "PORTAGE_BUILDDIR" in mysettings and \
5179 os.path.isdir(mysettings["PORTAGE_BUILDDIR"])
5181 global_tmpdir = mysettings["PORTAGE_TMPDIR"]
5182 private_tmpdir = None
5183 if not parallel_fetchonly and not have_builddir:
5184 # When called by digestgen(), it's normal that
5185 # PORTAGE_BUILDDIR doesn't exist. It's helpful
5186 # to show the pkg_nofetch output though, so go
5187 # ahead and create a temporary PORTAGE_BUILDDIR.
5188 # Use a temporary config instance to avoid altering
5189 # the state of the one that's been passed in.
5190 mysettings = config(clone=mysettings)
5191 from tempfile import mkdtemp
5193 private_tmpdir = mkdtemp("", "._portage_fetch_.",
5195 except OSError as e:
5196 if e.errno != portage.exception.PermissionDenied.errno:
5198 raise portage.exception.PermissionDenied(global_tmpdir)
5199 mysettings["PORTAGE_TMPDIR"] = private_tmpdir
5200 mysettings.backup_changes("PORTAGE_TMPDIR")
5201 debug = mysettings.get("PORTAGE_DEBUG") == "1"
5202 portage.doebuild_environment(mysettings["EBUILD"], "fetch",
5203 mysettings["ROOT"], mysettings, debug, 1, None)
5204 prepare_build_dirs(mysettings["ROOT"], mysettings, 0)
5205 have_builddir = True
5207 if not parallel_fetchonly and have_builddir:
5208 # To spawn pkg_nofetch requires PORTAGE_BUILDDIR for
5209 # ensuring sane $PWD (bug #239560) and storing elog
5210 # messages. Therefore, calling code needs to ensure that
5211 # PORTAGE_BUILDDIR is already clean and locked here.
5213 # All the pkg_nofetch goes to stderr since it's considered
5214 # to be an error message.
5216 0 : sys.stdin.fileno(),
5217 1 : sys.stderr.fileno(),
5218 2 : sys.stderr.fileno(),
5221 ebuild_phase = mysettings.get("EBUILD_PHASE")
5223 mysettings["EBUILD_PHASE"] = "nofetch"
5224 spawn(_shell_quote(EBUILD_SH_BINARY) + \
5225 " nofetch", mysettings, fd_pipes=fd_pipes)
5227 if ebuild_phase is None:
5228 mysettings.pop("EBUILD_PHASE", None)
5230 mysettings["EBUILD_PHASE"] = ebuild_phase
5231 if private_tmpdir is not None:
5232 shutil.rmtree(private_tmpdir)
5234 elif restrict_fetch:
5238 elif not filedict[myfile]:
5239 writemsg(_("Warning: No mirrors available for file"
5240 " '%s'\n") % (myfile), noiselevel=-1)
5242 writemsg(_("!!! Couldn't download '%s'. Aborting.\n") % myfile,
5248 failed_files.add(myfile)
# digestgen: generate (or refresh) a package Manifest.  Compatibility
# wrapper around portage.manifest.Manifest; per its own docstring,
# 'overwrite' and 'manifestonly' are ignored under manifest2.
# NOTE(review): this region of the file is a damaged paste — leading
# line-number residue and elided lines (note the gaps in the embedded
# numbering, e.g. a dangling `except` at 5275 whose `try` is missing).
# Comments below describe only what the visible lines show; the code
# text itself is left byte-identical pending recovery of the original.
5255 def digestgen(myarchives, mysettings, overwrite=1, manifestonly=0, myportdb=None):
5257 Generates a digest file if missing. Assumes all files are available.
5258 DEPRECATED: this now only is a compability wrapper for
5259 portage.manifest.Manifest()
5260 NOTE: manifestonly and overwrite are useless with manifest2 and
5261 are therefore ignored."""
5262 if myportdb is None:
5263 writemsg("Warning: myportdb not specified to digestgen\n")
# Temporarily exempt doebuild from manifest checks while we regenerate;
# balanced by the decrement at the end of the function.
5266 global _doebuild_manifest_exempt_depend
5268 _doebuild_manifest_exempt_depend += 1
# Build distfiles_map: distfile name -> list of cpvs that fetch it.
5270 fetchlist_dict = FetchlistDict(mysettings["O"], mysettings, myportdb)
5271 for cpv in fetchlist_dict:
5273 for myfile in fetchlist_dict[cpv]:
5274 distfiles_map.setdefault(myfile, []).append(cpv)
5275 except portage.exception.InvalidDependString as e:
5276 writemsg("!!! %s\n" % str(e), noiselevel=-1)
5279 mytree = os.path.dirname(os.path.dirname(mysettings["O"]))
5280 manifest1_compat = False
5281 mf = Manifest(mysettings["O"], mysettings["DISTDIR"],
5282 fetchlist_dict=fetchlist_dict, manifest1_compat=manifest1_compat)
5283 # Don't require all hashes since that can trigger excessive
5284 # fetches when sufficient digests already exist. To ease transition
5285 # while Manifest 1 is being removed, only require hashes that will
5286 # exist before and after the transition.
5287 required_hash_types = set()
5288 required_hash_types.add("size")
5289 required_hash_types.add(portage.const.MANIFEST2_REQUIRED_HASH)
5290 dist_hashes = mf.fhashdict.get("DIST", {})
5292 # To avoid accidental regeneration of digests with the incorrect
5293 # files (such as partially downloaded files), trigger the fetch
5294 # code if the file exists and it's size doesn't match the current
5295 # manifest entry. If there really is a legitimate reason for the
5296 # digest to change, `ebuild --force digest` can be used to avoid
5297 # triggering this code (or else the old digests can be manually
5298 # removed from the Manifest).
# Decide which distfiles still need fetching: missing/zero-size files,
# files lacking a required hash type, or files whose on-disk size
# disagrees with the recorded "size" entry.
5300 for myfile in distfiles_map:
5301 myhashes = dist_hashes.get(myfile)
5304 st = os.stat(os.path.join(mysettings["DISTDIR"], myfile))
5307 if st is None or st.st_size == 0:
5308 missing_files.append(myfile)
5310 size = myhashes.get("size")
5313 st = os.stat(os.path.join(mysettings["DISTDIR"], myfile))
5314 except OSError as e:
5315 if e.errno != errno.ENOENT:
5319 missing_files.append(myfile)
5321 if required_hash_types.difference(myhashes):
5322 missing_files.append(myfile)
5325 if st.st_size == 0 or size is not None and size != st.st_size:
5326 missing_files.append(myfile)
# Fetch each missing distfile, building its URI set from every cpv
# that references it; doebuild_environment applies per-ebuild
# RESTRICT settings before fetching.
5330 mytree = os.path.realpath(os.path.dirname(
5331 os.path.dirname(mysettings["O"])))
5332 fetch_settings = config(clone=mysettings)
5333 debug = mysettings.get("PORTAGE_DEBUG") == "1"
5334 for myfile in missing_files:
5336 for cpv in distfiles_map[myfile]:
5337 myebuild = os.path.join(mysettings["O"],
5338 catsplit(cpv)[1] + ".ebuild")
5339 # for RESTRICT=fetch, mirror, etc...
5340 doebuild_environment(myebuild, "fetch",
5341 mysettings["ROOT"], fetch_settings,
5343 uris.update(myportdb.getFetchMap(
5344 cpv, mytree=mytree)[myfile])
5346 fetch_settings["A"] = myfile # for use by pkg_nofetch()
5349 st = os.stat(os.path.join(
5350 mysettings["DISTDIR"],myfile))
5354 if not fetch({myfile : uris}, fetch_settings):
5355 writemsg(_("!!! Fetch failed for %s, can't update "
5356 "Manifest\n") % myfile, noiselevel=-1)
5357 if myfile in dist_hashes and \
5358 st is not None and st.st_size > 0:
5359 # stat result is obtained before calling fetch(),
5360 # since fetch may rename the existing file if the
5361 # digest does not match.
5362 writemsg(_("!!! If you would like to "
5363 "forcefully replace the existing "
5364 "Manifest entry\n!!! for %s, use "
5365 "the following command:\n") % myfile + \
5366 "!!! " + colorize("INFORM",
5367 "ebuild --force %s manifest" % \
5368 os.path.basename(myebuild)) + "\n",
# Write the regenerated Manifest; each failure mode is reported
# individually (missing file, package exception, permission denied).
5371 writemsg_stdout(_(">>> Creating Manifest for %s\n") % mysettings["O"])
5373 mf.create(requiredDistfiles=myarchives,
5374 assumeDistHashesSometimes=True,
5375 assumeDistHashesAlways=(
5376 "assume-digests" in mysettings.features))
5377 except portage.exception.FileNotFound as e:
5378 writemsg(_("!!! File %s doesn't exist, can't update "
5379 "Manifest\n") % e, noiselevel=-1)
5381 except portage.exception.PortagePackageException as e:
5382 writemsg(("!!! %s\n") % (e,), noiselevel=-1)
5385 mf.write(sign=False)
5386 except portage.exception.PermissionDenied as e:
5387 writemsg(_("!!! Permission Denied: %s\n") % (e,), noiselevel=-1)
# When assume-digests is off, report any DIST entries whose files are
# not present locally (digests that had to be assumed).
5389 if "assume-digests" not in mysettings.features:
5390 distlist = list(mf.fhashdict.get("DIST", {}))
5393 for filename in distlist:
5394 if not os.path.exists(
5395 os.path.join(mysettings["DISTDIR"], filename)):
5396 auto_assumed.append(filename)
5398 mytree = os.path.realpath(
5399 os.path.dirname(os.path.dirname(mysettings["O"])))
5400 cp = os.path.sep.join(mysettings["O"].split(os.path.sep)[-2:])
5401 pkgs = myportdb.cp_list(cp, mytree=mytree)
5403 writemsg_stdout(" digest.assumed" + portage.output.colorize("WARN",
5404 str(len(auto_assumed)).rjust(18)) + "\n")
5405 for pkg_key in pkgs:
5406 fetchlist = myportdb.getFetchMap(pkg_key, mytree=mytree)
5407 pv = pkg_key.split("/")[1]
5408 for filename in auto_assumed:
5409 if filename in fetchlist:
5411 " %s::%s\n" % (pv, filename))
# Re-enable manifest checks (paired with the increment above);
# presumably this sits in a finally: clause elided from this view.
5414 _doebuild_manifest_exempt_depend -= 1
# digestParseFile: map a digest/Manifest file path to its package dir and
# return that package's digest dict via portage.manifest.Manifest.
# NOTE(review): lines are elided in this view (embedded numbering gaps,
# e.g. 5430/5432/5434 missing) and carry line-number residue; code text
# is left byte-identical.
5416 def digestParseFile(myfilename, mysettings=None):
5417 """(filename) -- Parses a given file for entries matching:
5418 <checksumkey> <checksum_hex_string> <filename> <filesize>
5419 Ignores lines that don't start with a valid checksum identifier
5420 and returns a dict with the filenames as keys and {checksumkey:checksum}
5422 DEPRECATED: this function is now only a compability wrapper for
5423 portage.manifest.Manifest()."""
# Derive pkgdir from the path shape: either <pkgdir>/files/digest-* or
# <pkgdir>/Manifest.
5425 mysplit = myfilename.split(os.sep)
5426 if mysplit[-2] == "files" and mysplit[-1].startswith("digest-"):
5427 pkgdir = os.sep + os.sep.join(mysplit[:-2]).strip(os.sep)
5428 elif mysplit[-1] == "Manifest":
5429 pkgdir = os.sep + os.sep.join(mysplit[:-1]).strip(os.sep)
# Fall back to a clone of the global settings when none were passed.
5431 if mysettings is None:
5433 mysettings = config(clone=settings)
5435 return Manifest(pkgdir, mysettings["DISTDIR"]).getDigests()
# digestcheck: verify Manifest checksums for a package dir after download.
# Compatibility wrapper around portage.manifest.Manifest.
# NOTE(review): heavy elision in this view (try:/else:/return lines are
# missing per the embedded-numbering gaps) plus line-number residue;
# code text is left byte-identical.
5437 def digestcheck(myfiles, mysettings, strict=0, justmanifest=0):
5438 """Verifies checksums. Assumes all files have been downloaded.
5439 DEPRECATED: this is now only a compability wrapper for
5440 portage.manifest.Manifest()."""
5441 if mysettings.get("EBUILD_SKIP_MANIFEST") == "1":
5443 pkgdir = mysettings["O"]
5444 manifest_path = os.path.join(pkgdir, "Manifest")
5445 if not os.path.exists(manifest_path):
5446 writemsg(_("!!! Manifest file not found: '%s'\n") % manifest_path,
# Reject a Manifest that records no hashes at all.
5452 mf = Manifest(pkgdir, mysettings["DISTDIR"])
5453 manifest_empty = True
5454 for d in mf.fhashdict.values():
5456 manifest_empty = False
5459 writemsg(_("!!! Manifest is empty: '%s'\n") % manifest_path,
# Verify hashes per file type; EBUILD/AUX/MISC are only checked in
# strict mode outside of parallel-fetch.
5465 eout = portage.output.EOutput()
5466 eout.quiet = mysettings.get("PORTAGE_QUIET", None) == "1"
5468 if strict and "PORTAGE_PARALLEL_FETCHONLY" not in mysettings:
5469 eout.ebegin(_("checking ebuild checksums ;-)"))
5470 mf.checkTypeHashes("EBUILD")
5472 eout.ebegin(_("checking auxfile checksums ;-)"))
5473 mf.checkTypeHashes("AUX")
5475 eout.ebegin(_("checking miscfile checksums ;-)"))
5476 mf.checkTypeHashes("MISC", ignoreMissingFiles=True)
5479 eout.ebegin(_("checking %s ;-)") % f)
5480 ftype = mf.findFile(f)
5483 mf.checkFileHashes(ftype, f)
# Each verification failure mode gets its own diagnostic; e.value[0..3]
# carry the DigestException's file/reason/got/expected fields.
5485 except KeyError as e:
5487 writemsg(_("\n!!! Missing digest for %s\n") % str(e), noiselevel=-1)
5489 except portage.exception.FileNotFound as e:
5491 writemsg(_("\n!!! A file listed in the Manifest could not be found: %s\n") % str(e),
5494 except portage.exception.DigestException as e:
5496 writemsg(_("\n!!! Digest verification failed:\n"), noiselevel=-1)
5497 writemsg("!!! %s\n" % e.value[0], noiselevel=-1)
5498 writemsg(_("!!! Reason: %s\n") % e.value[1], noiselevel=-1)
5499 writemsg(_("!!! Got: %s\n") % e.value[2], noiselevel=-1)
5500 writemsg(_("!!! Expected: %s\n") % e.value[3], noiselevel=-1)
5502 # Make sure that all of the ebuilds are actually listed in the Manifest.
# GLEP 55 names embed the EAPI in the ebuild filename; supported only
# when the parse-eapi-glep-55 feature is enabled.
5503 glep55 = 'parse-eapi-glep-55' in mysettings.features
5504 for f in os.listdir(pkgdir):
5507 pf, eapi = _split_ebuild_name_glep55(f)
5508 elif f[-7:] == '.ebuild':
5510 if pf is not None and not mf.hasFile("EBUILD", f):
5511 writemsg(_("!!! A file is not listed in the Manifest: '%s'\n") % \
5512 os.path.join(pkgdir, f), noiselevel=-1)
5515 """ epatch will just grab all the patches out of a directory, so we have to
5516 make sure there aren't any foreign files that it might grab."""
# Walk files/ and reject paths that are undecodable in the filesystem
# encoding, hidden/CVS dirs, and files absent from the Manifest's AUX set.
5517 filesdir = os.path.join(pkgdir, "files")
5519 for parent, dirs, files in os.walk(filesdir):
5521 parent = _unicode_decode(parent,
5522 encoding=_encodings['fs'], errors='strict')
5523 except UnicodeDecodeError:
5524 parent = _unicode_decode(parent,
5525 encoding=_encodings['fs'], errors='replace')
5526 writemsg(_("!!! Path contains invalid "
5527 "character(s) for encoding '%s': '%s'") \
5528 % (_encodings['fs'], parent), noiselevel=-1)
5535 d = _unicode_decode(d,
5536 encoding=_encodings['fs'], errors='strict')
5537 except UnicodeDecodeError:
5538 d = _unicode_decode(d,
5539 encoding=_encodings['fs'], errors='replace')
5540 writemsg(_("!!! Path contains invalid "
5541 "character(s) for encoding '%s': '%s'") \
5542 % (_encodings['fs'], os.path.join(parent, d)),
5546 dirs.remove(d_bytes)
5548 if d.startswith(".") or d == "CVS":
5549 dirs.remove(d_bytes)
5552 f = _unicode_decode(f,
5553 encoding=_encodings['fs'], errors='strict')
5554 except UnicodeDecodeError:
5555 f = _unicode_decode(f,
5556 encoding=_encodings['fs'], errors='replace')
5557 if f.startswith("."):
5559 f = os.path.join(parent, f)[len(filesdir) + 1:]
5560 writemsg(_("!!! File name contains invalid "
5561 "character(s) for encoding '%s': '%s'") \
5562 % (_encodings['fs'], f), noiselevel=-1)
5566 if f.startswith("."):
5568 f = os.path.join(parent, f)[len(filesdir) + 1:]
5569 file_type = mf.findFile(f)
5570 if file_type != "AUX" and not f.startswith("digest-"):
5571 writemsg(_("!!! A file is not listed in the Manifest: '%s'\n") % \
5572 os.path.join(filesdir, f), noiselevel=-1)
5577 # parse actionmap to spawn ebuild with the appropriate args
# spawnebuild: run one ebuild phase (mydo) from actionmap, recursing into
# its "dep" phase first, then spawning the phase command and running
# post-phase fixups.  Returns the phase exit status (elided lines in this
# view hide the explicit returns — see embedded-numbering gaps).
# NOTE(review): code text left byte-identical; lines carry line-number
# residue from a damaged paste.
5578 def spawnebuild(mydo, actionmap, mysettings, debug, alwaysdep=0,
5579 logfile=None, fd_pipes=None, returnpid=False):
5580 if not returnpid and \
5581 (alwaysdep or "noauto" not in mysettings.features):
5582 # process dependency first
5583 if "dep" in actionmap[mydo]:
5584 retval = spawnebuild(actionmap[mydo]["dep"], actionmap,
5585 mysettings, debug, alwaysdep=alwaysdep, logfile=logfile,
5586 fd_pipes=fd_pipes, returnpid=returnpid)
# Phases that don't exist in older EAPIs are skipped (configure: EAPI
# 0/1, prepare: EAPI 0/1, pretend: EAPI 0/1/2).
5590 eapi = mysettings["EAPI"]
5592 if mydo == "configure" and eapi in ("0", "1"):
5595 if mydo == "prepare" and eapi in ("0", "1"):
5598 if mydo == "pretend" and eapi in ("0", "1", "2"):
# Spawn the phase command, clearing any stale exit-status file first.
5601 kwargs = actionmap[mydo]["args"]
5602 mysettings["EBUILD_PHASE"] = mydo
5603 _doebuild_exit_status_unlink(
5604 mysettings.get("EBUILD_EXIT_STATUS_FILE"))
5607 phase_retval = spawn(actionmap[mydo]["cmd"] % mydo,
5608 mysettings, debug=debug, logfile=logfile,
5609 fd_pipes=fd_pipes, returnpid=returnpid, **kwargs)
5611 mysettings["EBUILD_PHASE"] = ""
# Cross-check the exit-status file; discrepancies are surfaced through
# elog eerror messages, wrapped to 72 columns.
5616 msg = _doebuild_exit_status_check(mydo, mysettings)
5618 if phase_retval == os.EX_OK:
5620 from textwrap import wrap
5621 from portage.elog.messages import eerror
5622 for l in wrap(msg, 72):
5623 eerror(l, phase=mydo, key=mysettings.mycpv)
# Post-phase fixups; install additionally triggers build-log QA checks
# and src_install sanity checks.
5625 _post_phase_userpriv_perms(mysettings)
5626 if mydo == "install":
5627 _check_build_log(mysettings)
5628 if phase_retval == os.EX_OK:
5629 _post_src_install_chost_fix(mysettings)
5630 phase_retval = _post_src_install_checks(mysettings)
5632 if mydo == "test" and phase_retval != os.EX_OK and \
5633 "test-fail-continue" in mysettings.features:
5634 phase_retval = os.EX_OK
# Map of phase name -> list of misc-functions.sh helper function names that
# are executed after that phase completes (via _spawn_misc_sh).  Several
# entries are elided in this listing.
5638 _post_phase_cmds = {
5642 "install_symlink_html_docs"],
5647 "preinst_selinux_labels",
5648 "preinst_suid_scan",
5652 "postinst_bsdflags"]
def _post_phase_userpriv_perms(mysettings):
	"""Hand ${T} back to the portage user after a privileged phase.

	Privileged phases may have left files in the temporary directory
	that a less privileged user must still be able to write; recursively
	re-own and loosen them when FEATURES=userpriv is active and we have
	sufficient privileges (secpass >= 2) to do so.
	"""
	if secpass < 2 or "userpriv" not in mysettings.features:
		return
	apply_recursive_permissions(mysettings["T"],
		uid=portage_uid, gid=portage_gid, dirmode=0o70, dirmask=0,
		filemode=0o60, filemask=0)
# Run post-src_install fixups and the install-time QA checks from
# misc-functions.sh; the writemsg below reports a failed install_qa_check.
# (The tail of this function, including its return, is elided here.)
5663 def _post_src_install_checks(mysettings):
5664 _post_src_install_uid_fix(mysettings)
5665 global _post_phase_cmds
5666 retval = _spawn_misc_sh(mysettings, _post_phase_cmds["install"])
5667 if retval != os.EX_OK:
5668 writemsg(_("!!! install_qa_check failed; exiting.\n"),
5672 def _check_build_log(mysettings, out=None):
5674 Search the content of $PORTAGE_LOG_FILE if it exists
5675 and generate the following QA Notices when appropriate:
5677 * Automake "maintainer mode"
5679 * Unrecognized configure options
5681 logfile = mysettings.get("PORTAGE_LOG_FILE")
# Open the log with the filesystem encoding for the path and the
# content encoding for the text; undecodable bytes are replaced.
5685 f = codecs.open(_unicode_encode(logfile,
5686 encoding=_encodings['fs'], errors='strict'),
5687 mode='r', encoding=_encodings['content'], errors='replace')
5688 except EnvironmentError:
# One accumulator list + one matching regex per QA-notice category.
5691 am_maintainer_mode = []
5692 bash_command_not_found = []
5693 bash_command_not_found_re = re.compile(
5694 r'(.*): line (\d*): (.*): command not found$')
# "command not found" inside a configure script is expected noise.
5695 command_not_found_exclude_re = re.compile(r'/configure: line ')
5696 helper_missing_file = []
5697 helper_missing_file_re = re.compile(
5698 r'^!!! (do|new).*: .* does not exist$')
5700 configure_opts_warn = []
5701 configure_opts_warn_re = re.compile(
5702 r'^configure: WARNING: [Uu]nrecognized options: ')
5704 # Exclude output from dev-libs/yaz-3.0.47 which looks like this:
5707 # Automake: ${SHELL} /var/tmp/portage/dev-libs/yaz-3.0.47/work/yaz-3.0.47/config/missing --run automake-1.10
5708 am_maintainer_mode_re = re.compile(r'/missing --run ')
5709 am_maintainer_mode_exclude_re = \
5710 re.compile(r'(/missing --run (autoheader|makeinfo)|^\s*Automake:\s)')
5712 make_jobserver_re = \
5713 re.compile(r'g?make\[\d+\]: warning: jobserver unavailable:')
# Scan the log line by line, collecting each category of problem.
5718 if am_maintainer_mode_re.search(line) is not None and \
5719 am_maintainer_mode_exclude_re.search(line) is None:
5720 am_maintainer_mode.append(line.rstrip("\n"))
5722 if bash_command_not_found_re.match(line) is not None and \
5723 command_not_found_exclude_re.search(line) is None:
5724 bash_command_not_found.append(line.rstrip("\n"))
5726 if helper_missing_file_re.match(line) is not None:
5727 helper_missing_file.append(line.rstrip("\n"))
5729 if configure_opts_warn_re.match(line) is not None:
5730 configure_opts_warn.append(line.rstrip("\n"))
5732 if make_jobserver_re.match(line) is not None:
5733 make_jobserver.append(line.rstrip("\n"))
5738 from portage.elog.messages import eqawarn
# Local helper: emit each collected line as an install-phase QA warning.
5739 def _eqawarn(lines):
5741 eqawarn(line, phase="install", key=mysettings.mycpv, out=out)
5742 from textwrap import wrap
# Emit one wrapped QA notice per non-empty category.
5745 if am_maintainer_mode:
5746 msg = [_("QA Notice: Automake \"maintainer mode\" detected:")]
5748 msg.extend("\t" + line for line in am_maintainer_mode)
5751 "If you patch Makefile.am, "
5752 "configure.in, or configure.ac then you "
5753 "should use autotools.eclass and "
5754 "eautomake or eautoreconf. Exceptions "
5755 "are limited to system packages "
5756 "for which it is impossible to run "
5757 "autotools during stage building. "
5758 "See http://www.gentoo.org/p"
5759 "roj/en/qa/autofailure.xml for more information."),
5763 if bash_command_not_found:
5764 msg = [_("QA Notice: command not found:")]
5766 msg.extend("\t" + line for line in bash_command_not_found)
5769 if helper_missing_file:
5770 msg = [_("QA Notice: file does not exist:")]
# line[4:] strips the leading "!!! " marker from the logged message.
5772 msg.extend("\t" + line[4:] for line in helper_missing_file)
5775 if configure_opts_warn:
5776 msg = [_("QA Notice: Unrecognized configure options:")]
5778 msg.extend("\t" + line for line in configure_opts_warn)
5782 msg = [_("QA Notice: make jobserver unavailable:")]
5784 msg.extend("\t" + line for line in make_jobserver)
5787 def _post_src_install_chost_fix(settings):
5789 It's possible that the ebuild has changed the
5790 CHOST variable, so revert it to the initial
# Record the (reverted) CHOST value in build-info so the installed
# package's metadata reflects the configured CHOST, not an ebuild override.
5793 chost = settings.get('CHOST')
5795 write_atomic(os.path.join(settings['PORTAGE_BUILDDIR'],
5796 'build-info', 'CHOST'), chost + '\n')
5798 def _post_src_install_uid_fix(mysettings, out=None):
5800 Files in $D with user and group bits that match the "portage"
5801 user or group are automatically mapped to PORTAGE_INST_UID and
5802 PORTAGE_INST_GID if necessary. The chown system call may clear
5803 S_ISUID and S_ISGID bits, so those bits are restored if
5809 inst_uid = int(mysettings["PORTAGE_INST_UID"])
5810 inst_gid = int(mysettings["PORTAGE_INST_GID"])
# BSD file flags (chflags) would make chown/chmod fail with EPERM, so
# snapshot them with mtree, clear them, and restore them at the end.
5813 # Temporarily remove all of the flags in order to avoid EPERM errors.
5814 os.system("mtree -c -p %s -k flags > %s" % \
5815 (_shell_quote(mysettings["D"]),
5816 _shell_quote(os.path.join(mysettings["T"], "bsdflags.mtree"))))
5817 os.system("chflags -R noschg,nouchg,nosappnd,nouappnd %s" % \
5818 (_shell_quote(mysettings["D"]),))
5819 os.system("chflags -R nosunlnk,nouunlnk %s 2>/dev/null" % \
5820 (_shell_quote(mysettings["D"]),))
5822 destdir = mysettings["D"]
5827 unicode_error = False
# Hard-link-aware size accounting: count each inode only once.
5829 counted_inodes = set()
5831 for parent, dirs, files in os.walk(destdir):
5833 parent = _unicode_decode(parent,
5834 encoding=_encodings['merge'], errors='strict')
5835 except UnicodeDecodeError:
# Directory name is not valid in the merge encoding: rename it to a
# backslash-escaped form and remember it for the QA report.
5836 new_parent = _unicode_decode(parent,
5837 encoding=_encodings['merge'], errors='replace')
5838 new_parent = _unicode_encode(new_parent,
5839 encoding=_encodings['merge'], errors='backslashreplace')
5840 new_parent = _unicode_decode(new_parent,
5841 encoding=_encodings['merge'], errors='replace')
5842 os.rename(parent, new_parent)
5843 unicode_error = True
5844 unicode_errors.append(new_parent[len(destdir):])
5847 for fname in chain(dirs, files):
5849 fname = _unicode_decode(fname,
5850 encoding=_encodings['merge'], errors='strict')
5851 except UnicodeDecodeError:
# Same escaping treatment for an undecodable file/dir entry name.
5852 fpath = _os.path.join(
5853 parent.encode(_encodings['merge']), fname)
5854 new_fname = _unicode_decode(fname,
5855 encoding=_encodings['merge'], errors='replace')
5856 new_fname = _unicode_encode(new_fname,
5857 encoding=_encodings['merge'], errors='backslashreplace')
5858 new_fname = _unicode_decode(new_fname,
5859 encoding=_encodings['merge'], errors='replace')
5860 new_fpath = os.path.join(parent, new_fname)
5861 os.rename(fpath, new_fpath)
5862 unicode_error = True
5863 unicode_errors.append(new_fpath[len(destdir):])
5867 fpath = os.path.join(parent, fname)
# lstat: do not follow symlinks when examining installed entries.
5869 mystat = os.lstat(fpath)
5870 if stat.S_ISREG(mystat.st_mode) and \
5871 mystat.st_ino not in counted_inodes:
5872 counted_inodes.add(mystat.st_ino)
5873 size += mystat.st_size
5874 if mystat.st_uid != portage_uid and \
5875 mystat.st_gid != portage_gid:
# Map portage-owned uid/gid to the configured install ids; the mode is
# re-applied so chown cannot silently strip setuid/setgid bits.
5879 if mystat.st_uid == portage_uid:
5881 if mystat.st_gid == portage_gid:
5883 apply_secpass_permissions(
5884 _unicode_encode(fpath, encoding=_encodings['merge']),
5885 uid=myuid, gid=mygid,
5886 mode=mystat.st_mode, stat_cached=mystat,
# After a rename pass, the walk restarts; once clean, report any
# renamed paths as install-phase errors via elog.
5892 if not unicode_error:
5896 from portage.elog.messages import eerror
5897 for l in _merge_unicode_error(unicode_errors):
5898 eerror(l, phase='install', key=mysettings.mycpv, out=out)
# Persist the total installed size for the vardb build-info.
5900 open(_unicode_encode(os.path.join(mysettings['PORTAGE_BUILDDIR'],
5901 'build-info', 'SIZE')), 'w').write(str(size) + '\n')
5904 # Restore all of the flags saved above.
5905 os.system("mtree -e -p %s -U -k flags < %s > /dev/null" % \
5906 (_shell_quote(mysettings["D"]),
5907 _shell_quote(os.path.join(mysettings["T"], "bsdflags.mtree"))))
# Build the wrapped, human-readable elog message lines describing file
# names (relative paths in `errors`) that did not decode with the current
# merge encoding; recommends UTF-8 when the locale is not already UTF-8.
5909 def _merge_unicode_error(errors):
5910 from textwrap import wrap
5913 msg = _("This package installs one or more file names containing "
5914 "characters that do not match your current locale "
5915 "settings. The current setting for filesystem encoding is '%s'.") \
5916 % _encodings['merge']
5917 lines.extend(wrap(msg, 72))
5921 lines.extend("\t" + x for x in errors)
# Normalize the encoding name (case, '-'/'_') before comparing to utf8.
5924 if _encodings['merge'].lower().replace('_', '').replace('-', '') != 'utf8':
5925 msg = _("For best results, UTF-8 encoding is recommended. See "
5926 "the Gentoo Linux Localization Guide for instructions "
5927 "about how to configure your locale for UTF-8 encoding:")
5928 lines.extend(wrap(msg, 72))
5930 lines.append("\t" + \
5931 "http://www.gentoo.org/doc/en/guide-localization.xml")
5936 def _post_pkg_preinst_cmd(mysettings):
5938 Post phase logic and tasks that have been factored out of
5939 ebuild.sh. Call preinst_mask last so that INSTALL_MASK can
5940 can be used to wipe out any gmon.out files created during
5941 previous functions (in case any tools were built with -pg
# misc_sh_binary: path to misc-functions.sh inside PORTAGE_BIN_PATH,
# which may differ from the installed location during reinstall.
5945 portage_bin_path = mysettings["PORTAGE_BIN_PATH"]
5946 misc_sh_binary = os.path.join(portage_bin_path,
5947 os.path.basename(MISC_SH_BINARY))
5949 mysettings["EBUILD_PHASE"] = ""
5950 global _post_phase_cmds
# Build the argv for the preinst helper functions; execution happens
# in code elided from this listing.
5951 myargs = [_shell_quote(misc_sh_binary)] + _post_phase_cmds["preinst"]
5955 def _post_pkg_postinst_cmd(mysettings):
5957 Post phase logic and tasks that have been factored out of
# Same pattern as _post_pkg_preinst_cmd, but for the postinst helpers.
5961 portage_bin_path = mysettings["PORTAGE_BIN_PATH"]
5962 misc_sh_binary = os.path.join(portage_bin_path,
5963 os.path.basename(MISC_SH_BINARY))
5965 mysettings["EBUILD_PHASE"] = ""
5966 global _post_phase_cmds
5967 myargs = [_shell_quote(misc_sh_binary)] + _post_phase_cmds["postinst"]
5971 def _spawn_misc_sh(mysettings, commands, **kwargs):
5973 @param mysettings: the ebuild config
5974 @type mysettings: config
5975 @param commands: a list of function names to call in misc-functions.sh
5976 @type commands: list
5978 @returns: the return value from the spawn() call
5981 # Note: PORTAGE_BIN_PATH may differ from the global
5982 # constant when portage is reinstalling itself.
5983 portage_bin_path = mysettings["PORTAGE_BIN_PATH"]
5984 misc_sh_binary = os.path.join(portage_bin_path,
5985 os.path.basename(MISC_SH_BINARY))
# misc-functions.sh takes the helper function names as its arguments.
5986 mycommand = " ".join([_shell_quote(misc_sh_binary)] + commands)
# Clear any stale exit-status marker before spawning.
5987 _doebuild_exit_status_unlink(
5988 mysettings.get("EBUILD_EXIT_STATUS_FILE"))
5989 debug = mysettings.get("PORTAGE_DEBUG") == "1"
5990 logfile = mysettings.get("PORTAGE_LOG_FILE")
5991 mydo = mysettings["EBUILD_PHASE"]
5993 rval = spawn(mycommand, mysettings, debug=debug,
5994 logfile=logfile, **kwargs)
# Report an abnormal bash exit via elog even if rval looked OK.
5998 msg = _doebuild_exit_status_check(mydo, mysettings)
6000 if rval == os.EX_OK:
6002 from textwrap import wrap
6003 from portage.elog.messages import eerror
6004 for l in wrap(msg, 72):
6005 eerror(l, phase=mydo, key=mysettings.mycpv)
6009 _testing_eapis = frozenset()
6010 _deprecated_eapis = frozenset(["2_pre3", "2_pre2", "2_pre1"])
6012 def _eapi_is_deprecated(eapi):
6013 return eapi in _deprecated_eapis
# Public predicate: is the given EAPI usable by this portage version?
# Deprecated and testing EAPIs get special handling (bodies elided here);
# the final comparison checks the EAPI against portage.const.EAPI.
6015 def eapi_is_supported(eapi):
# Normalize: callers may pass ints or padded strings.
6016 eapi = str(eapi).strip()
6018 if _eapi_is_deprecated(eapi):
6021 if eapi in _testing_eapis:
# NOTE(review): elided lines presumably convert `eapi` to a comparable
# numeric form before this comparison — confirm against full source.
6030 return eapi <= portage.const.EAPI
6032 # Generally, it's best not to assume that cache entries for unsupported EAPIs
6033 # can be validated. However, the current package manager specification does not
6034 # guarantee that the EAPI can be parsed without sourcing the ebuild, so
6035 # it's too costly to discard existing cache entries for unsupported EAPIs.
6036 # Therefore, by default, assume that cache entries for unsupported EAPIs can be
6037 # validated. If FEATURES=parse-eapi-* is enabled, this assumption is discarded
6038 # since the EAPI can be determined without incurring the cost of sourcing
# See the comment block above: cache entries for unsupported EAPIs are
# assumed valid unless FEATURES=parse-eapi-* makes EAPI detection cheap.
6040 _validate_cache_for_unsupported_eapis = True
# Matches an EAPI assignment at the start of an ebuild line, capturing the
# value up to a quote or comment character.
6042 _parse_eapi_ebuild_head_re = re.compile(r'^EAPI=[\'"]?([^\'"#]*)')
# Only scan this many lines of an ebuild when looking for EAPI.
6043 _parse_eapi_ebuild_head_max_lines = 30
# Scan the open ebuild file object `f` for an EAPI assignment within the
# first _parse_eapi_ebuild_head_max_lines lines; return the stripped EAPI
# value when found.  (Loop scaffolding is elided in this listing.)
6045 def _parse_eapi_ebuild_head(f):
6048 m = _parse_eapi_ebuild_head_re.match(line)
6050 return m.group(1).strip()
6052 if count >= _parse_eapi_ebuild_head_max_lines:
6056 # True when FEATURES=parse-eapi-glep-55 is enabled.
6057 _glep_55_enabled = False
# GLEP 55 file names: "<pkg-ver-rev>.ebuild[-<eapi>]"; group 1 is the
# package part, optional group 3 is the EAPI suffix.
6059 _split_ebuild_name_glep55_re = re.compile(r'^(.*)\.ebuild(-([^.]+))?$')
6061 def _split_ebuild_name_glep55(name):
6063 @returns: (pkg-ver-rev, eapi)
# Group 3 is the optional GLEP 55 EAPI suffix (None when absent).  The
# non-matching branch is elided in this listing.
6065 m = _split_ebuild_name_glep55_re.match(name)
6068 return (m.group(1), m.group(3))
# Populate `mysettings` with all per-package build variables (CATEGORY,
# P/PN/PV/PR/PVR, directories, EAPI, logging paths, colors, KV) for the
# phase `mydo` of ebuild `myebuild`.  Mutates mysettings in place.
6070 def doebuild_environment(myebuild, mydo, myroot, mysettings, debug, use_cache, mydbapi):
6072 ebuild_path = os.path.abspath(myebuild)
6073 pkg_dir = os.path.dirname(ebuild_path)
# Prefer the already-set package category; otherwise derive it from the
# ebuild's grandparent directory name.
6075 if "CATEGORY" in mysettings.configdict["pkg"]:
6076 cat = mysettings.configdict["pkg"]["CATEGORY"]
6078 cat = os.path.basename(normalize_path(os.path.join(pkg_dir, "..")))
# GLEP 55 names encode the EAPI in the file suffix; otherwise strip the
# fixed ".ebuild" suffix (7 characters).
6081 if 'parse-eapi-glep-55' in mysettings.features:
6082 mypv, eapi = portage._split_ebuild_name_glep55(
6083 os.path.basename(myebuild))
6085 mypv = os.path.basename(ebuild_path)[:-7]
6087 mycpv = cat+"/"+mypv
6088 mysplit = versions._pkgsplit(mypv)
6090 raise portage.exception.IncorrectParameter(
6091 _("Invalid ebuild path: '%s'") % myebuild)
6093 # Make a backup of PORTAGE_TMPDIR prior to calling config.reset()
6094 # so that the caller can override it.
6095 tmpdir = mysettings["PORTAGE_TMPDIR"]
6097 if mydo == 'depend':
6098 if mycpv != mysettings.mycpv:
6099 # Don't pass in mydbapi here since the resulting aux_get
6100 # call would lead to infinite 'depend' phase recursion.
6101 mysettings.setcpv(mycpv)
6103 # If IUSE isn't in configdict['pkg'], it means that setcpv()
6104 # hasn't been called with the mydb argument, so we have to
6105 # call it here (portage code always calls setcpv properly,
6106 # but api consumers might not).
6107 if mycpv != mysettings.mycpv or \
6108 'IUSE' not in mysettings.configdict['pkg']:
6109 # Reload env.d variables and reset any previous settings.
6112 mysettings.setcpv(mycpv, mydb=mydbapi)
6114 # config.reset() might have reverted a change made by the caller,
6115 # so restore it to it's original value.
6116 mysettings["PORTAGE_TMPDIR"] = tmpdir
6118 mysettings.pop("EBUILD_PHASE", None) # remove from backupenv
6119 mysettings["EBUILD_PHASE"] = mydo
6121 mysettings["PORTAGE_MASTER_PID"] = str(os.getpid())
6123 # We are disabling user-specific bashrc files.
6124 mysettings["BASH_ENV"] = INVALID_ENV_FILE
6126 if debug: # Otherwise it overrides emerge's settings.
6127 # We have no other way to set debug... debug can't be passed in
6128 # due to how it's coded... Don't overwrite this so we can use it.
6129 mysettings["PORTAGE_DEBUG"] = "1"
6131 mysettings["ROOT"] = myroot
6132 mysettings["STARTDIR"] = getcwd()
6133 mysettings["EBUILD"] = ebuild_path
6134 mysettings["O"] = pkg_dir
6135 mysettings.configdict["pkg"]["CATEGORY"] = cat
6136 mysettings["FILESDIR"] = pkg_dir+"/files"
6137 mysettings["PF"] = mypv
# Per-repository overrides when the dbapi tracks repo info for trees.
6139 if hasattr(mydbapi, '_repo_info'):
6140 mytree = os.path.dirname(os.path.dirname(pkg_dir))
6141 repo_info = mydbapi._repo_info[mytree]
6142 mysettings['PORTDIR'] = repo_info.portdir
6143 mysettings['PORTDIR_OVERLAY'] = repo_info.portdir_overlay
# Canonicalize the main tree paths (symlinks confuse sandbox et al.).
6145 mysettings["PORTDIR"] = os.path.realpath(mysettings["PORTDIR"])
6146 mysettings["DISTDIR"] = os.path.realpath(mysettings["DISTDIR"])
6147 mysettings["RPMDIR"] = os.path.realpath(mysettings["RPMDIR"])
6149 mysettings["ECLASSDIR"] = mysettings["PORTDIR"]+"/eclass"
6150 mysettings["SANDBOX_LOG"] = mycpv.replace("/", "_-_")
6152 mysettings["PROFILE_PATHS"] = "\n".join(mysettings.profiles)
# Standard name components: P=pn-pv, PN, PV, PR (revision).
6153 mysettings["P"] = mysplit[0]+"-"+mysplit[1]
6154 mysettings["PN"] = mysplit[0]
6155 mysettings["PV"] = mysplit[1]
6156 mysettings["PR"] = mysplit[2]
6158 if portage.util.noiselimit < 0:
6159 mysettings["PORTAGE_QUIET"] = "1"
# For the 'depend' phase the EAPI may need to be determined up front,
# either from the GLEP 55 suffix or by scanning the ebuild head.
6161 if mydo == 'depend' and \
6162 'EAPI' not in mysettings.configdict['pkg']:
6164 if eapi is not None:
6165 # From parse-eapi-glep-55 above.
6167 elif 'parse-eapi-ebuild-head' in mysettings.features:
6168 eapi = _parse_eapi_ebuild_head(
6169 codecs.open(_unicode_encode(ebuild_path,
6170 encoding=_encodings['fs'], errors='strict'),
6171 mode='r', encoding=_encodings['content'], errors='replace'))
6173 if eapi is not None:
6174 if not eapi_is_supported(eapi):
6175 raise portage.exception.UnsupportedAPIException(mycpv, eapi)
6176 mysettings.configdict['pkg']['EAPI'] = eapi
6178 if mydo != "depend":
6179 # Metadata vars such as EAPI and RESTRICT are
6180 # set by the above config.setcpv() call.
6181 eapi = mysettings["EAPI"]
6182 if not eapi_is_supported(eapi):
6183 # can't do anything with this.
6184 raise portage.exception.UnsupportedAPIException(mycpv, eapi)
# PVR drops the "-r0" suffix for unrevised packages.
6186 if mysplit[2] == "r0":
6187 mysettings["PVR"]=mysplit[1]
6189 mysettings["PVR"]=mysplit[1]+"-"+mysplit[2]
6191 if "PATH" in mysettings:
6192 mysplit=mysettings["PATH"].split(":")
6195 # Note: PORTAGE_BIN_PATH may differ from the global constant
6196 # when portage is reinstalling itself.
6197 portage_bin_path = mysettings["PORTAGE_BIN_PATH"]
6198 if portage_bin_path not in mysplit:
6199 mysettings["PATH"] = portage_bin_path + ":" + mysettings["PATH"]
6201 # Sandbox needs canonical paths.
6202 mysettings["PORTAGE_TMPDIR"] = os.path.realpath(
6203 mysettings["PORTAGE_TMPDIR"])
6204 mysettings["BUILD_PREFIX"] = mysettings["PORTAGE_TMPDIR"]+"/portage"
6205 mysettings["PKG_TMPDIR"] = mysettings["PORTAGE_TMPDIR"]+"/binpkgs"
6207 # Package {pre,post}inst and {pre,post}rm may overlap, so they must have separate
6208 # locations in order to prevent interference.
6209 if mydo in ("unmerge", "prerm", "postrm", "cleanrm"):
6210 mysettings["PORTAGE_BUILDDIR"] = os.path.join(
6211 mysettings["PKG_TMPDIR"],
6212 mysettings["CATEGORY"], mysettings["PF"])
6214 mysettings["PORTAGE_BUILDDIR"] = os.path.join(
6215 mysettings["BUILD_PREFIX"],
6216 mysettings["CATEGORY"], mysettings["PF"])
# Standard build-directory layout used by ebuild.sh.
6218 mysettings["HOME"] = os.path.join(mysettings["PORTAGE_BUILDDIR"], "homedir")
6219 mysettings["WORKDIR"] = os.path.join(mysettings["PORTAGE_BUILDDIR"], "work")
6220 mysettings["D"] = os.path.join(mysettings["PORTAGE_BUILDDIR"], "image") + os.sep
6221 mysettings["T"] = os.path.join(mysettings["PORTAGE_BUILDDIR"], "temp")
6223 mysettings["PORTAGE_BASHRC"] = os.path.join(
6224 mysettings["PORTAGE_CONFIGROOT"], EBUILD_SH_ENV_FILE)
6225 mysettings["EBUILD_EXIT_STATUS_FILE"] = os.path.join(
6226 mysettings["PORTAGE_BUILDDIR"], ".exit_status")
6228 #set up KV variable -- DEP SPEEDUP :: Don't waste time. Keep var persistent.
6229 if eapi not in ('0', '1', '2'):
6230 # Discard KV for EAPIs that don't support it. Cache KV is restored
6231 # from the backupenv whenever config.reset() is called.
6232 mysettings.pop('KV', None)
6233 elif mydo != 'depend' and 'KV' not in mysettings and \
6234 mydo in ('compile', 'config', 'configure', 'info',
6235 'install', 'nofetch', 'postinst', 'postrm', 'preinst',
6236 'prepare', 'prerm', 'setup', 'test', 'unpack'):
6237 mykv,err1=ExtractKernelVersion(os.path.join(myroot, "usr/src/linux"))
6239 # Regular source tree
6240 mysettings["KV"]=mykv
6243 mysettings.backup_changes("KV")
6245 # Allow color.map to control colors associated with einfo, ewarn, etc...
6247 for c in ("GOOD", "WARN", "BAD", "HILITE", "BRACKET"):
6248 mycolors.append("%s=$'%s'" % \
6249 (c, portage.output.style_to_ansi_code(c)))
6250 mysettings["PORTAGE_COLORMAP"] = "\n".join(mycolors)
# Create (and optionally clean) the per-package build directory tree and
# apply portage-group permissions; finally prepare WORKDIR and any
# FEATURES-specific directories (ccache/distcc).
6252 def prepare_build_dirs(myroot, mysettings, cleanup):
6254 clean_dirs = [mysettings["HOME"]]
6256 # We enable cleanup when we want to make sure old cruft (such as the old
6257 # environment) doesn't interfere with the current phase.
6259 clean_dirs.append(mysettings["T"])
6261 for clean_dir in clean_dirs:
6263 shutil.rmtree(clean_dir)
6264 except OSError as oe:
# Missing directory is fine; EPERM is reported but tolerated here.
6265 if errno.ENOENT == oe.errno:
6267 elif errno.EPERM == oe.errno:
6268 writemsg("%s\n" % oe, noiselevel=-1)
6269 writemsg(_("Operation Not Permitted: rmtree('%s')\n") % \
6270 clean_dir, noiselevel=-1)
# Local helper mirroring the rmtree error handling for creation.
6275 def makedirs(dir_path):
6277 os.makedirs(dir_path)
6278 except OSError as oe:
6279 if errno.EEXIST == oe.errno:
6281 elif errno.EPERM == oe.errno:
6282 writemsg("%s\n" % oe, noiselevel=-1)
6283 writemsg(_("Operation Not Permitted: makedirs('%s')\n") % \
6284 dir_path, noiselevel=-1)
6290 mysettings["PKG_LOGDIR"] = os.path.join(mysettings["T"], "logging")
# Ensure category dir and build-prefix dir exist with portage-group perms.
6292 mydirs = [os.path.dirname(mysettings["PORTAGE_BUILDDIR"])]
6293 mydirs.append(os.path.dirname(mydirs[-1]))
6296 for mydir in mydirs:
6297 portage.util.ensure_dirs(mydir)
6298 portage.util.apply_secpass_permissions(mydir,
6299 gid=portage_gid, uid=portage_uid, mode=0o70, mask=0)
6300 for dir_key in ("PORTAGE_BUILDDIR", "HOME", "PKG_LOGDIR", "T"):
6301 """These directories don't necessarily need to be group writable.
6302 However, the setup phase is commonly run as a privileged user prior
6303 to the other phases being run by an unprivileged user. Currently,
6304 we use the portage group to ensure that the unprivleged user still
6305 has write access to these directories in any case."""
6306 portage.util.ensure_dirs(mysettings[dir_key], mode=0o775)
6307 portage.util.apply_secpass_permissions(mysettings[dir_key],
6308 uid=portage_uid, gid=portage_gid)
6309 except portage.exception.PermissionDenied as e:
6310 writemsg(_("Permission Denied: %s\n") % str(e), noiselevel=-1)
6312 except portage.exception.OperationNotPermitted as e:
6313 writemsg(_("Operation Not Permitted: %s\n") % str(e), noiselevel=-1)
6315 except portage.exception.FileNotFound as e:
6316 writemsg(_("File Not Found: '%s'\n") % str(e), noiselevel=-1)
6319 _prepare_workdir(mysettings)
6320 if mysettings.get('EBUILD_PHASE') != 'fetch':
6321 # Avoid spurious permissions adjustments when fetching with
6322 # a temporary PORTAGE_TMPDIR setting (for fetchonly).
6323 _prepare_features_dirs(mysettings)
# Emit a permissions-adjustment warning; when running in the background
# with a log file, append the message to the log instead of the console.
6325 def _adjust_perms_msg(settings, msg):
6328 writemsg(msg, noiselevel=-1)
6330 background = settings.get("PORTAGE_BACKGROUND") == "1"
6331 log_path = settings.get("PORTAGE_LOG_FILE")
6334 if background and log_path is not None:
6336 log_file = codecs.open(_unicode_encode(log_path,
6337 encoding=_encodings['fs'], errors='strict'),
6338 mode='a', encoding=_encodings['content'], errors='replace')
6344 log_file.write(_unicode_decode(msg))
# Always close the log handle once opened (cleanup path elided here).
6350 if log_file is not None:
# Create and fix up the directories required by optional FEATURES
# (ccache, distcc).  On any failure the feature is disabled rather than
# aborting the build.
6353 def _prepare_features_dirs(mysettings):
6357 "path_dir": "/usr/lib/ccache/bin",
6358 "basedir_var":"CCACHE_DIR",
6359 "default_dir":os.path.join(mysettings["PORTAGE_TMPDIR"], "ccache"),
6360 "always_recurse":False},
6362 "path_dir": "/usr/lib/distcc/bin",
6363 "basedir_var":"DISTCC_DIR",
6364 "default_dir":os.path.join(mysettings["BUILD_PREFIX"], ".distcc"),
6365 "subdirs":("lock", "state"),
6366 "always_recurse":True}
6371 restrict = mysettings.get("PORTAGE_RESTRICT","").split()
6372 from portage.data import secpass
# droppriv: build phases will run as the unprivileged portage user.
6373 droppriv = secpass >= 2 and \
6374 "userpriv" in mysettings.features and \
6375 "userpriv" not in restrict
6376 for myfeature, kwargs in features_dirs.items():
6377 if myfeature in mysettings.features:
# Fall back to the default dir when the basedir var is unset/blank.
6379 basedir = mysettings.get(kwargs["basedir_var"])
6380 if basedir is None or not basedir.strip():
6381 basedir = kwargs["default_dir"]
6382 mysettings[kwargs["basedir_var"]] = basedir
6384 path_dir = kwargs["path_dir"]
6385 if not os.path.isdir(path_dir):
6386 raise portage.exception.DirectoryNotFound(path_dir)
6388 mydirs = [mysettings[kwargs["basedir_var"]]]
6389 if "subdirs" in kwargs:
6390 for subdir in kwargs["subdirs"]:
6391 mydirs.append(os.path.join(basedir, subdir))
6392 for mydir in mydirs:
6393 modified = portage.util.ensure_dirs(mydir)
6394 # Generally, we only want to apply permissions for
6395 # initial creation. Otherwise, we don't know exactly what
6396 # permissions the user wants, so should leave them as-is.
6397 droppriv_fix = False
# Detect wrong group ownership or missing group bits on the dir
# itself, then (if needed) on its immediate children.
6400 if st.st_gid != portage_gid or \
6401 not dirmode == (stat.S_IMODE(st.st_mode) & dirmode):
6403 if not droppriv_fix:
6404 # Check permissions of files in the directory.
6405 for filename in os.listdir(mydir):
6407 subdir_st = os.lstat(
6408 os.path.join(mydir, filename))
6411 if subdir_st.st_gid != portage_gid or \
6412 ((stat.S_ISDIR(subdir_st.st_mode) and \
6413 not dirmode == (stat.S_IMODE(subdir_st.st_mode) & dirmode))):
6418 _adjust_perms_msg(mysettings,
6419 colorize("WARN", " * ") + \
6420 _("Adjusting permissions "
6421 "for FEATURES=userpriv: '%s'\n") % mydir)
6423 _adjust_perms_msg(mysettings,
6424 colorize("WARN", " * ") + \
6425 _("Adjusting permissions "
6426 "for FEATURES=%s: '%s'\n") % (myfeature, mydir))
6428 if modified or kwargs["always_recurse"] or droppriv_fix:
6430 raise # The feature is disabled if a single error
6431 # occurs during permissions adjustment.
6432 if not apply_recursive_permissions(mydir,
6433 gid=portage_gid, dirmode=dirmode, dirmask=modemask,
6434 filemode=filemode, filemask=modemask, onerror=onerror):
6435 raise portage.exception.OperationNotPermitted(
6436 _("Failed to apply recursive permissions for the portage group."))
# Any failure above disables the feature instead of aborting.
6438 except portage.exception.DirectoryNotFound as e:
6440 writemsg(_("\n!!! Directory does not exist: '%s'\n") % \
6441 (e,), noiselevel=-1)
6442 writemsg(_("!!! Disabled FEATURES='%s'\n") % myfeature,
6445 except portage.exception.PortageException as e:
6447 writemsg("\n!!! %s\n" % str(e), noiselevel=-1)
6448 writemsg(_("!!! Failed resetting perms on %s='%s'\n") % \
6449 (kwargs["basedir_var"], basedir), noiselevel=-1)
6450 writemsg(_("!!! Disabled FEATURES='%s'\n") % myfeature,
6454 mysettings.features.remove(myfeature)
6455 mysettings['FEATURES'] = ' '.join(sorted(mysettings.features))
# Apply PORTAGE_WORKDIR_MODE to WORKDIR and set up PORT_LOGDIR-based
# logging (PORTAGE_LOG_FILE), falling back to ${T}/build.log.
6458 def _prepare_workdir(mysettings):
6459 workdir_mode = 0o700
6461 mode = mysettings["PORTAGE_WORKDIR_MODE"]
# Interpret the user-supplied mode as octal and reject stray bits.
6463 parsed_mode = int(mode, 8)
6468 if parsed_mode & 0o7777 != parsed_mode:
6469 raise ValueError("Invalid file mode: %s" % mode)
6471 workdir_mode = parsed_mode
6472 except KeyError as e:
6473 writemsg(_("!!! PORTAGE_WORKDIR_MODE is unset, using %s.\n") % oct(workdir_mode))
6474 except ValueError as e:
6476 writemsg("%s\n" % e)
6477 writemsg(_("!!! Unable to parse PORTAGE_WORKDIR_MODE='%s', using %s.\n") % \
6478 (mysettings["PORTAGE_WORKDIR_MODE"], oct(workdir_mode)))
# Normalize back to the classic "0700"-style octal string (strip the
# Python 3 "0o" prefix).
6479 mysettings["PORTAGE_WORKDIR_MODE"] = oct(workdir_mode).replace('o', '')
6481 apply_secpass_permissions(mysettings["WORKDIR"],
6482 uid=portage_uid, gid=portage_gid, mode=workdir_mode)
6483 except portage.exception.FileNotFound:
6484 pass # ebuild.sh will create it
# An empty PORT_LOGDIR setting means "no log directory" — drop the key.
6486 if mysettings.get("PORT_LOGDIR", "") == "":
6487 while "PORT_LOGDIR" in mysettings:
6488 del mysettings["PORT_LOGDIR"]
6489 if "PORT_LOGDIR" in mysettings:
6491 modified = portage.util.ensure_dirs(mysettings["PORT_LOGDIR"])
6493 apply_secpass_permissions(mysettings["PORT_LOGDIR"],
6494 uid=portage_uid, gid=portage_gid, mode=0o2770)
6495 except portage.exception.PortageException as e:
6496 writemsg("!!! %s\n" % str(e), noiselevel=-1)
6497 writemsg(_("!!! Permission issues with PORT_LOGDIR='%s'\n") % \
6498 mysettings["PORT_LOGDIR"], noiselevel=-1)
6499 writemsg(_("!!! Disabling logging.\n"), noiselevel=-1)
6500 while "PORT_LOGDIR" in mysettings:
6501 del mysettings["PORT_LOGDIR"]
6502 if "PORT_LOGDIR" in mysettings and \
6503 os.access(mysettings["PORT_LOGDIR"], os.W_OK):
# The .logid marker's mtime gives every retry of this build the same
# timestamped log name.
6504 logid_path = os.path.join(mysettings["PORTAGE_BUILDDIR"], ".logid")
6505 if not os.path.exists(logid_path):
6506 open(_unicode_encode(logid_path), 'w')
6507 logid_time = _unicode_decode(time.strftime("%Y%m%d-%H%M%S",
6508 time.gmtime(os.stat(logid_path).st_mtime)),
6509 encoding=_encodings['content'], errors='replace')
# FEATURES=split-log nests logs under build/<category>/.
6511 if "split-log" in mysettings.features:
6512 mysettings["PORTAGE_LOG_FILE"] = os.path.join(
6513 mysettings["PORT_LOGDIR"], "build", "%s/%s:%s.log" % \
6514 (mysettings["CATEGORY"], mysettings["PF"], logid_time))
6516 mysettings["PORTAGE_LOG_FILE"] = os.path.join(
6517 mysettings["PORT_LOGDIR"], "%s:%s:%s.log" % \
6518 (mysettings["CATEGORY"], mysettings["PF"], logid_time))
6520 util.ensure_dirs(os.path.dirname(mysettings["PORTAGE_LOG_FILE"]))
6523 # NOTE: When sesandbox is enabled, the local SELinux security policies
6524 # may not allow output to be piped out of the sesandbox domain. The
6525 # current policy will allow it to work when a pty is available, but
6526 # not through a normal pipe. See bug #162404.
6527 mysettings["PORTAGE_LOG_FILE"] = os.path.join(
6528 mysettings["T"], "build.log")
6530 def _doebuild_exit_status_check(mydo, settings):
6532 Returns an error string if the shell appeared
6533 to exit unsuccessfully, None otherwise.
# ebuild.sh touches EBUILD_EXIT_STATUS_FILE on orderly exit; its absence
# after a phase therefore indicates an abnormal bash termination.
6535 exit_status_file = settings.get("EBUILD_EXIT_STATUS_FILE")
6536 if not exit_status_file or \
6537 os.path.exists(exit_status_file):
6539 msg = _("The ebuild phase '%s' has exited "
6540 "unexpectedly. This type of behavior "
6541 "is known to be triggered "
6542 "by things such as failed variable "
6543 "assignments (bug #190128) or bad substitution "
6544 "errors (bug #200313). Normally, before exiting, bash should "
6545 "have displayed an error message above. If bash did not "
6546 "produce an error message above, it's possible "
6547 "that the ebuild has called `exit` when it "
6548 "should have called `die` instead. This behavior may also "
6549 "be triggered by a corrupt bash binary or a hardware "
6550 "problem such as memory or cpu malfunction. If the problem is not "
6551 "reproducible or it appears to occur randomly, then it is likely "
6552 "to be triggered by a hardware problem. "
6553 "If you suspect a hardware problem then you should "
6554 "try some basic hardware diagnostics such as memtest. "
6555 "Please do not report this as a bug unless it is consistently "
6556 "reproducible and you are sure that your bash binary and hardware "
6557 "are functioning properly.") % mydo
# Wrapper around _doebuild_exit_status_check that reports any detected
# abnormal exit via elog, even when `retval` looked successful.
6560 def _doebuild_exit_status_check_and_log(settings, mydo, retval):
6561 msg = _doebuild_exit_status_check(mydo, settings)
6563 if retval == os.EX_OK:
6565 from textwrap import wrap
6566 from portage.elog.messages import eerror
6567 for l in wrap(msg, 72):
6568 eerror(l, phase=mydo, key=settings.mycpv)
6571 def _doebuild_exit_status_unlink(exit_status_file):
6573 Double check to make sure it really doesn't exist
6574 and raise an OSError if it still does (it shouldn't).
6575 OSError if necessary.
# No-op when no exit-status file is configured.
6577 if not exit_status_file:
6580 os.unlink(exit_status_file)
# Second unlink is the deliberate "double check": it raises if the
# file somehow survived the first attempt.
6583 if os.path.exists(exit_status_file):
6584 os.unlink(exit_status_file)
# Module-level state shared across doebuild() invocations:
# non-zero to skip Manifest checks during 'depend' phases.
6586 _doebuild_manifest_exempt_depend = 0
# last verified Manifest, cached to avoid re-verification per ebuild.
6587 _doebuild_manifest_cache = None
# ebuild paths that failed verification, to avoid repeated error spam.
6588 _doebuild_broken_ebuilds = set()
# Manifest paths already reported as broken.
6589 _doebuild_broken_manifests = set()
# Top-level driver that runs a single ebuild phase (setup/unpack/compile/
# install/merge/...). NOTE(review): this listing is line-numbered and
# heavily elided (the embedded original line numbers are non-contiguous),
# so many statements — returns, try/except framing, blank lines — are not
# visible here; comments below only describe what the visible lines show.
6591 def doebuild(myebuild, mydo, myroot, mysettings, debug=0, listonly=0,
6592 fetchonly=0, cleanup=0, dbkey=None, use_cache=1, fetchall=0, tree=None,
6593 mydbapi=None, vartree=None, prev_mtimes=None,
6594 fd_pipes=None, returnpid=False):
6597 Wrapper function that invokes specific ebuild phases through the spawning
6600 @param myebuild: name of the ebuild to invoke the phase on (CPV)
6601 @type myebuild: String
6602 @param mydo: Phase to run
6604 @param myroot: $ROOT (usually '/', see man make.conf)
6605 @type myroot: String
6606 @param mysettings: Portage Configuration
6607 @type mysettings: instance of portage.config
6608 @param debug: Turns on various debug information (eg, debug for spawn)
6609 @type debug: Boolean
6610 @param listonly: Used to wrap fetch(); passed such that fetch only lists files required.
6611 @type listonly: Boolean
6612 @param fetchonly: Used to wrap fetch(); passed such that files are only fetched (no other actions)
6613 @type fetchonly: Boolean
6614 @param cleanup: Passed to prepare_build_dirs (TODO: what does it do?)
6615 @type cleanup: Boolean
6616 @param dbkey: A dict (usually keys and values from the depend phase, such as KEYWORDS, USE, etc..)
6617 @type dbkey: Dict or String
6618 @param use_cache: Enables the cache
6619 @type use_cache: Boolean
6620 @param fetchall: Used to wrap fetch(), fetches all URIs (even ones invalid due to USE conditionals)
6621 @type fetchall: Boolean
6622 @param tree: Which tree to use ('vartree','porttree','bintree', etc..), defaults to 'porttree'
6624 @param mydbapi: a dbapi instance to pass to various functions; this should be a portdbapi instance.
6625 @type mydbapi: portdbapi instance
6626 @param vartree: A instance of vartree; used for aux_get calls, defaults to db[myroot]['vartree']
6627 @type vartree: vartree instance
6628 @param prev_mtimes: A dict of { filename:mtime } keys used by merge() to do config_protection
6629 @type prev_mtimes: dictionary
6630 @param fd_pipes: A dict of mapping for pipes, { '0': stdin, '1': stdout }
6632 @type fd_pipes: Dictionary
6633 @param returnpid: Return a list of process IDs for a successful spawn, or
6634 an integer value if spawn is unsuccessful. NOTE: This requires the
6635 caller clean up all returned PIDs.
6636 @type returnpid: Boolean
6642 Most errors have an accompanying error message.
6644 listonly and fetchonly are only really necessary for operations involving 'fetch'
6645 prev_mtimes are only necessary for merge operations.
6646 Other variables may not be strictly required, many have defaults that are set inside of doebuild.
# Warn when the caller omitted the tree argument (a default is
# presumably applied on an elided line).
6651 writemsg("Warning: tree not specified to doebuild\n")
6655 # chunked out deps for each phase, so that ebuild binary can use it
6656 # to collapse targets down.
# Phase dependency map: each phase lists the phase it depends on.
6659 "unpack": ["setup"],
6660 "prepare": ["unpack"],
6661 "configure": ["prepare"],
6662 "compile":["configure"],
6663 "test": ["compile"],
6666 "package":["install"],
# Resolve default dbapi/vartree from the global db when not supplied.
6670 mydbapi = db[myroot][tree].dbapi
6672 if vartree is None and mydo in ("merge", "qmerge", "unmerge"):
6673 vartree = db[myroot]["vartree"]
6675 features = mysettings.features
6676 noauto = "noauto" in features
6677 from portage.data import secpass
6679 clean_phases = ("clean", "cleanrm")
# Validate the requested phase name before doing anything else; on
# failure, print the sorted list of valid commands.
6680 validcommands = ["help","clean","prerm","postrm","cleanrm","preinst","postinst",
6681 "config", "info", "setup", "depend", "pretend",
6682 "fetch", "fetchall", "digest",
6683 "unpack", "prepare", "configure", "compile", "test",
6684 "install", "rpm", "qmerge", "merge",
6685 "package","unmerge", "manifest"]
6687 if mydo not in validcommands:
6688 validcommands.sort()
6689 writemsg("!!! doebuild: '%s' is not one of the following valid commands:" % mydo,
6691 for vcount in range(len(validcommands)):
6693 writemsg("\n!!! ", noiselevel=-1)
6694 writemsg(validcommands[vcount].ljust(11), noiselevel=-1)
6695 writemsg("\n", noiselevel=-1)
6698 if mydo == "fetchall":
6702 parallel_fetchonly = mydo in ("fetch", "fetchall") and \
6703 "PORTAGE_PARALLEL_FETCHONLY" in mysettings
# Non-clean phases require the ebuild file to actually exist.
6705 if mydo not in clean_phases and not os.path.exists(myebuild):
6706 writemsg("!!! doebuild: %s not found for %s\n" % (myebuild, mydo),
6710 global _doebuild_manifest_exempt_depend
# In FEATURES=strict mode (without FEATURES=digest), verify Manifest
# digests before executing anything, unless the phase is exempt or a
# depend-phase exemption is active.
6712 if "strict" in features and \
6713 "digest" not in features and \
6714 tree == "porttree" and \
6715 mydo not in ("digest", "manifest", "help") and \
6716 not _doebuild_manifest_exempt_depend:
6717 # Always verify the ebuild checksums before executing it.
# NOTE(review): the global statement below lists
# _doebuild_broken_ebuilds twice; the second occurrence was almost
# certainly meant to be _doebuild_broken_manifests (which is mutated
# at orig line 6785). Harmless at runtime since only .add() is used,
# but it looks like a typo worth fixing upstream.
6718 global _doebuild_manifest_cache, _doebuild_broken_ebuilds, \
6719 _doebuild_broken_ebuilds
# Short-circuit for ebuilds that already failed verification.
6721 if myebuild in _doebuild_broken_ebuilds:
6724 pkgdir = os.path.dirname(myebuild)
6725 manifest_path = os.path.join(pkgdir, "Manifest")
6727 # Avoid checking the same Manifest several times in a row during a
6728 # regen with an empty cache.
6729 if _doebuild_manifest_cache is None or \
6730 _doebuild_manifest_cache.getFullname() != manifest_path:
6731 _doebuild_manifest_cache = None
6732 if not os.path.exists(manifest_path):
6733 out = portage.output.EOutput()
6734 out.eerror(_("Manifest not found for '%s'") % (myebuild,))
6735 _doebuild_broken_ebuilds.add(myebuild)
6737 mf = Manifest(pkgdir, mysettings["DISTDIR"])
6740 mf = _doebuild_manifest_cache
# Verify this ebuild's recorded hashes; each failure mode records the
# ebuild in the broken set so later calls can skip it.
6743 mf.checkFileHashes("EBUILD", os.path.basename(myebuild))
6745 out = portage.output.EOutput()
6746 out.eerror(_("Missing digest for '%s'") % (myebuild,))
6747 _doebuild_broken_ebuilds.add(myebuild)
6749 except portage.exception.FileNotFound:
6750 out = portage.output.EOutput()
6751 out.eerror(_("A file listed in the Manifest "
6752 "could not be found: '%s'") % (myebuild,))
6753 _doebuild_broken_ebuilds.add(myebuild)
6755 except portage.exception.DigestException as e:
6756 out = portage.output.EOutput()
6757 out.eerror(_("Digest verification failed:"))
6758 out.eerror("%s" % e.value[0])
6759 out.eerror(_("Reason: %s") % e.value[1])
6760 out.eerror(_("Got: %s") % e.value[2])
6761 out.eerror(_("Expected: %s") % e.value[3])
6762 _doebuild_broken_ebuilds.add(myebuild)
6765 if mf.getFullname() in _doebuild_broken_manifests:
6768 if mf is not _doebuild_manifest_cache:
6770 # Make sure that all of the ebuilds are
6771 # actually listed in the Manifest.
# GLEP 55 support: ebuild names may carry the EAPI in the filename.
6772 glep55 = 'parse-eapi-glep-55' in mysettings.features
6773 for f in os.listdir(pkgdir):
6776 pf, eapi = _split_ebuild_name_glep55(f)
6777 elif f[-7:] == '.ebuild':
6779 if pf is not None and not mf.hasFile("EBUILD", f):
6780 f = os.path.join(pkgdir, f)
6781 if f not in _doebuild_broken_ebuilds:
6782 out = portage.output.EOutput()
6783 out.eerror(_("A file is not listed in the "
6784 "Manifest: '%s'") % (f,))
6785 _doebuild_broken_manifests.add(manifest_path)
6788 # Only cache it if the above stray files test succeeds.
6789 _doebuild_manifest_cache = mf
# Local helper mirroring module-level
# _doebuild_exit_status_check_and_log, bound to this call's phase and
# settings: log the exit-status check's message via elog.
6791 def exit_status_check(retval):
6792 msg = _doebuild_exit_status_check(mydo, mysettings)
6794 if retval == os.EX_OK:
6796 from textwrap import wrap
6797 from portage.elog.messages import eerror
6798 for l in wrap(msg, 72):
6799 eerror(l, phase=mydo, key=mysettings.mycpv)
6802 # Note: PORTAGE_BIN_PATH may differ from the global
6803 # constant when portage is reinstalling itself.
6804 portage_bin_path = mysettings["PORTAGE_BIN_PATH"]
6805 ebuild_sh_binary = os.path.join(portage_bin_path,
6806 os.path.basename(EBUILD_SH_BINARY))
6807 misc_sh_binary = os.path.join(portage_bin_path,
6808 os.path.basename(MISC_SH_BINARY))
6811 builddir_lock = None
6816 if mydo in ("digest", "manifest", "help"):
6817 # Temporarily exempt the depend phase from manifest checks, in case
6818 # aux_get calls trigger cache generation.
6819 _doebuild_manifest_exempt_depend += 1
6821 # If we don't need much space and we don't need a constant location,
6822 # we can temporarily override PORTAGE_TMPDIR with a random temp dir
6823 # so that there's no need for locking and it can be used even if the
6824 # user isn't in the portage group.
6825 if mydo in ("info",):
6826 from tempfile import mkdtemp
6828 tmpdir_orig = mysettings["PORTAGE_TMPDIR"]
6829 mysettings["PORTAGE_TMPDIR"] = tmpdir
6831 doebuild_environment(myebuild, mydo, myroot, mysettings, debug,
# clean/cleanrm are handled directly, outside the normal action map.
6834 if mydo in clean_phases:
6835 retval = spawn(_shell_quote(ebuild_sh_binary) + " clean",
6836 mysettings, debug=debug, fd_pipes=fd_pipes, free=1,
6837 logfile=None, returnpid=returnpid)
6840 restrict = set(mysettings.get('PORTAGE_RESTRICT', '').split())
6841 # get possible slot information from the deps file
6842 if mydo == "depend":
6843 writemsg("!!! DEBUG: dbkey: %s\n" % str(dbkey), 2)
6844 droppriv = "userpriv" in mysettings.features
6846 mypids = spawn(_shell_quote(ebuild_sh_binary) + " depend",
6847 mysettings, fd_pipes=fd_pipes, returnpid=True,
# When dbkey is a dict, collect the metadata over a pipe from the
# spawned "ebuild.sh depend" child instead of via a temp file.
6850 elif isinstance(dbkey, dict):
6851 mysettings["dbkey"] = ""
6854 0:sys.stdin.fileno(),
6855 1:sys.stdout.fileno(),
6856 2:sys.stderr.fileno(),
6858 mypids = spawn(_shell_quote(ebuild_sh_binary) + " depend",
6860 fd_pipes=fd_pipes, returnpid=True, droppriv=droppriv)
6861 os.close(pw) # belongs exclusively to the child process now
6862 f = os.fdopen(pr, 'rb')
# Pair each auxdb key with one decoded line of child output.
6863 for k, v in zip(auxdbkeys,
6864 (_unicode_decode(line).rstrip('\n') for line in f)):
6867 retval = os.waitpid(mypids[0], 0)[1]
6868 portage.process.spawned_pids.remove(mypids[0])
6869 # If it got a signal, return the signal that was sent, but
6870 # shift in order to distinguish it from a return value. (just
6871 # like portage.process.spawn() would do).
6873 retval = (retval & 0xff) << 8
6875 # Otherwise, return its exit code.
6876 retval = retval >> 8
6877 if retval == os.EX_OK and len(dbkey) != len(auxdbkeys):
6878 # Don't trust bash's returncode if the
6879 # number of lines is incorrect.
6883 mysettings["dbkey"] = dbkey
6885 mysettings["dbkey"] = \
6886 os.path.join(mysettings.depcachedir, "aux_db_key_temp")
6888 return spawn(_shell_quote(ebuild_sh_binary) + " depend",
6892 # Validate dependency metadata here to ensure that ebuilds with invalid
6893 # data are never installed via the ebuild command. Don't bother when
6894 # returnpid == True since there's no need to do this every time emerge
6897 rval = _validate_deps(mysettings, myroot, mydo, mydbapi)
6898 if rval != os.EX_OK:
# Sanity-check PORTAGE_TMPDIR: it must exist, be writable, and permit
# executing files (i.e. not mounted noexec/user/users).
6901 if "PORTAGE_TMPDIR" not in mysettings or \
6902 not os.path.isdir(mysettings["PORTAGE_TMPDIR"]):
6903 writemsg(_("The directory specified in your "
6904 "PORTAGE_TMPDIR variable, '%s',\n"
6905 "does not exist. Please create this directory or "
6906 "correct your PORTAGE_TMPDIR setting.\n") % mysettings.get("PORTAGE_TMPDIR", ""), noiselevel=-1)
6909 # as some people use a separate PORTAGE_TMPDIR mount
6910 # we prefer that as the checks below would otherwise be pointless
6912 if os.path.exists(os.path.join(mysettings["PORTAGE_TMPDIR"], "portage")):
6913 checkdir = os.path.join(mysettings["PORTAGE_TMPDIR"], "portage")
6915 checkdir = mysettings["PORTAGE_TMPDIR"]
6917 if not os.access(checkdir, os.W_OK):
6918 writemsg(_("%s is not writable.\n"
6919 "Likely cause is that you've mounted it as readonly.\n") % checkdir,
# Probe executability by creating a temp file, chmod 0755, and testing
# os.access(..., X_OK).
6923 from tempfile import NamedTemporaryFile
6924 fd = NamedTemporaryFile(prefix="exectest-", dir=checkdir)
6925 os.chmod(fd.name, 0o755)
6926 if not os.access(fd.name, os.X_OK):
6927 writemsg(_("Can not execute files in %s\n"
6928 "Likely cause is that you've mounted it with one of the\n"
6929 "following mount options: 'noexec', 'user', 'users'\n\n"
6930 "Please make sure that portage can execute files in this directory.\n") % checkdir,
6937 if mydo == "unmerge":
6938 return unmerge(mysettings["CATEGORY"],
6939 mysettings["PF"], myroot, mysettings, vartree=vartree)
6941 # Build directory creation isn't required for any of these.
6942 # In the fetch phase, the directory is needed only for RESTRICT=fetch
6943 # in order to satisfy the sane $PWD requirement (from bug #239560)
6944 # when pkg_nofetch is spawned.
6945 have_build_dirs = False
6946 if not parallel_fetchonly and \
6947 mydo not in ('digest', 'help', 'manifest') and \
6948 not (mydo == 'fetch' and 'fetch' not in restrict):
6949 mystatus = prepare_build_dirs(myroot, mysettings, cleanup)
6952 have_build_dirs = True
6954 # emerge handles logging externally
6956 # PORTAGE_LOG_FILE is set by the
6957 # above prepare_build_dirs() call.
6958 logfile = mysettings.get("PORTAGE_LOG_FILE")
# Setup-phase environment restoration: if $T/environment is absent,
# try to regenerate it from a saved environment.bz2 next to the ebuild.
6961 env_file = os.path.join(mysettings["T"], "environment")
6965 env_stat = os.stat(env_file)
6966 except OSError as e:
6967 if e.errno != errno.ENOENT:
6971 saved_env = os.path.join(
6972 os.path.dirname(myebuild), "environment.bz2")
6973 if not os.path.isfile(saved_env):
# NOTE(review): decompressing via a shell redirect — saved_env and
# env_file are both _shell_quote()d before interpolation.
6977 "bzip2 -dc %s > %s" % \
6978 (_shell_quote(saved_env),
6979 _shell_quote(env_file)))
6981 env_stat = os.stat(env_file)
6982 except OSError as e:
6983 if e.errno != errno.ENOENT:
6986 if os.WIFEXITED(retval) and \
6987 os.WEXITSTATUS(retval) == os.EX_OK and \
6988 env_stat and env_stat.st_size > 0:
6989 # This is a signal to ebuild.sh, so that it knows to filter
6990 # out things like SANDBOX_{DENY,PREDICT,READ,WRITE} that
6991 # would be preserved between normal phases.
6992 open(_unicode_encode(env_file + '.raw'), 'w')
6994 writemsg(_("!!! Error extracting saved "
6995 "environment: '%s'\n") % \
6996 saved_env, noiselevel=-1)
6999 except OSError as e:
7000 if e.errno != errno.ENOENT:
# Warn about an unset/empty ARCH, which the message attributes to a
# broken or missing make.profile symlink.
7007 for var in ("ARCH", ):
7008 value = mysettings.get(var)
7009 if value and value.strip():
7011 msg = _("%(var)s is not set... "
7012 "Are you missing the '%(configroot)setc/make.profile' symlink? "
7013 "Is the symlink correct? "
7014 "Is your portage tree complete?") % \
7015 {"var": var, "configroot": mysettings["PORTAGE_CONFIGROOT"]}
7016 from portage.elog.messages import eerror
7017 from textwrap import wrap
7018 for line in wrap(msg, 70):
7019 eerror(line, phase="setup", key=mysettings.mycpv)
7020 from portage.elog import elog_process
7021 elog_process(mysettings.mycpv, mysettings)
7023 del env_file, env_stat, saved_env
7024 _doebuild_exit_status_unlink(
7025 mysettings.get("EBUILD_EXIT_STATUS_FILE"))
7027 mysettings.pop("EBUILD_EXIT_STATUS_FILE", None)
7029 # if any of these are being called, handle them -- running them out of
7030 # the sandbox -- and stop now.
7032 return spawn(_shell_quote(ebuild_sh_binary) + " " + mydo,
7033 mysettings, debug=debug, free=1, logfile=logfile)
7034 elif mydo == "setup":
7036 _shell_quote(ebuild_sh_binary) + " " + mydo, mysettings,
7037 debug=debug, free=1, logfile=logfile, fd_pipes=fd_pipes,
7038 returnpid=returnpid)
7041 retval = exit_status_check(retval)
7043 """ Privileged phases may have left files that need to be made
7044 writable to a less privileged user."""
7045 apply_recursive_permissions(mysettings["T"],
7046 uid=portage_uid, gid=portage_gid, dirmode=0o70, dirmask=0,
7047 filemode=0o60, filemask=0)
# preinst: run the phase, then on success run the post-pkg-preinst
# helper command and check its status as well.
7049 elif mydo == "preinst":
7050 phase_retval = spawn(
7051 _shell_quote(ebuild_sh_binary) + " " + mydo,
7052 mysettings, debug=debug, free=1, logfile=logfile,
7053 fd_pipes=fd_pipes, returnpid=returnpid)
7058 phase_retval = exit_status_check(phase_retval)
7059 if phase_retval == os.EX_OK:
7060 _doebuild_exit_status_unlink(
7061 mysettings.get("EBUILD_EXIT_STATUS_FILE"))
7062 mysettings.pop("EBUILD_PHASE", None)
7063 phase_retval = spawn(
7064 " ".join(_post_pkg_preinst_cmd(mysettings)),
7065 mysettings, debug=debug, free=1, logfile=logfile)
7066 phase_retval = exit_status_check(phase_retval)
7067 if phase_retval != os.EX_OK:
7068 writemsg(_("!!! post preinst failed; exiting.\n"),
# postinst: same structure as preinst, with the post-pkg-postinst
# helper command.
7071 elif mydo == "postinst":
7072 phase_retval = spawn(
7073 _shell_quote(ebuild_sh_binary) + " " + mydo,
7074 mysettings, debug=debug, free=1, logfile=logfile,
7075 fd_pipes=fd_pipes, returnpid=returnpid)
7080 phase_retval = exit_status_check(phase_retval)
7081 if phase_retval == os.EX_OK:
7082 _doebuild_exit_status_unlink(
7083 mysettings.get("EBUILD_EXIT_STATUS_FILE"))
7084 mysettings.pop("EBUILD_PHASE", None)
7085 phase_retval = spawn(" ".join(_post_pkg_postinst_cmd(mysettings)),
7086 mysettings, debug=debug, free=1, logfile=logfile)
7087 phase_retval = exit_status_check(phase_retval)
7088 if phase_retval != os.EX_OK:
7089 writemsg(_("!!! post postinst failed; exiting.\n"),
7092 elif mydo in ("prerm", "postrm", "config", "info"):
7094 _shell_quote(ebuild_sh_binary) + " " + mydo,
7095 mysettings, debug=debug, free=1, logfile=logfile,
7096 fd_pipes=fd_pipes, returnpid=returnpid)
7101 retval = exit_status_check(retval)
7104 mycpv = "/".join((mysettings["CATEGORY"], mysettings["PF"]))
# When spawning for emerge (returnpid), distfile fetching and digest
# generation are handled externally, so skip them here.
7106 emerge_skip_distfiles = returnpid
7107 emerge_skip_digest = returnpid
7108 # Only try and fetch the files if we are going to need them ...
7109 # otherwise, if user has FEATURES=noauto and they run `ebuild clean
7110 # unpack compile install`, we will try and fetch 4 times :/
7111 need_distfiles = not emerge_skip_distfiles and \
7112 (mydo in ("fetch", "unpack") or \
7113 mydo not in ("digest", "manifest") and "noauto" not in features)
7114 alist = mysettings.configdict["pkg"].get("A")
7115 aalist = mysettings.configdict["pkg"].get("AA")
7116 if need_distfiles or alist is None or aalist is None:
7117 # Make sure we get the correct tree in case there are overlays.
7118 mytree = os.path.realpath(
7119 os.path.dirname(os.path.dirname(mysettings["O"])))
7120 useflags = mysettings["PORTAGE_USE"].split()
# A = USE-conditional fetch list; AA = unconditional (all URIs).
7122 alist = mydbapi.getFetchMap(mycpv, useflags=useflags,
7124 aalist = mydbapi.getFetchMap(mycpv, mytree=mytree)
7125 except portage.exception.InvalidDependString as e:
7126 writemsg("!!! %s\n" % str(e), noiselevel=-1)
7127 writemsg(_("!!! Invalid SRC_URI for '%s'.\n") % mycpv,
7131 mysettings.configdict["pkg"]["A"] = " ".join(alist)
7132 mysettings.configdict["pkg"]["AA"] = " ".join(aalist)
7134 alist = set(alist.split())
7135 aalist = set(aalist.split())
7136 if ("mirror" in features) or fetchall:
7144 # Files are already checked inside fetch(),
7145 # so do not check them again.
7148 if not emerge_skip_distfiles and \
7149 need_distfiles and not fetch(
7150 fetchme, mysettings, listonly=listonly, fetchonly=fetchonly):
7153 if mydo == "fetch" and listonly:
# digest/manifest phases regenerate digests and return; inverted
# because digestgen's truthy success must map to a zero exit status.
7157 if mydo == "manifest":
7158 return not digestgen(aalist, mysettings, overwrite=1,
7159 manifestonly=1, myportdb=mydbapi)
7160 elif mydo == "digest":
7161 return not digestgen(aalist, mysettings, overwrite=1,
7163 elif mydo != 'fetch' and not emerge_skip_digest and \
7164 "digest" in mysettings.features:
7165 # Don't do this when called by emerge or when called just
7166 # for fetch (especially parallel-fetch) since it's not needed
7167 # and it can interfere with parallel tasks.
7168 digestgen(aalist, mysettings, overwrite=0, myportdb=mydbapi)
7169 except portage.exception.PermissionDenied as e:
7170 writemsg(_("!!! Permission Denied: %s\n") % (e,), noiselevel=-1)
7171 if mydo in ("digest", "manifest"):
7174 # See above comment about fetching only when needed
7175 if not emerge_skip_distfiles and \
7176 not digestcheck(checkme, mysettings, "strict" in features):
7182 # remove PORTAGE_ACTUAL_DISTDIR once cvs/svn is supported via SRC_URI
# Redirect DISTDIR to a per-build "distdir" of symlinks into the real
# DISTDIR, so the build only sees its own distfiles.
7183 if (mydo != "setup" and "noauto" not in features) or mydo == "unpack":
7184 orig_distdir = mysettings["DISTDIR"]
7185 mysettings["PORTAGE_ACTUAL_DISTDIR"] = orig_distdir
7186 edpath = mysettings["DISTDIR"] = \
7187 os.path.join(mysettings["PORTAGE_BUILDDIR"], "distdir")
7188 portage.util.ensure_dirs(edpath, gid=portage_gid, mode=0o755)
7190 # Remove any unexpected files or directories.
7191 for x in os.listdir(edpath):
7192 symlink_path = os.path.join(edpath, x)
7193 st = os.lstat(symlink_path)
7194 if x in alist and stat.S_ISLNK(st.st_mode):
7196 if stat.S_ISDIR(st.st_mode):
7197 shutil.rmtree(symlink_path)
7199 os.unlink(symlink_path)
7201 # Check for existing symlinks and recreate if necessary.
7203 symlink_path = os.path.join(edpath, x)
7204 target = os.path.join(orig_distdir, x)
7206 link_target = os.readlink(symlink_path)
7208 os.symlink(target, symlink_path)
7210 if link_target != target:
7211 os.unlink(symlink_path)
7212 os.symlink(target, symlink_path)
7214 #initial dep checks complete; time to process main commands
# Decide sandbox/privilege policy for build phases from FEATURES and
# the per-package RESTRICT value.
7216 restrict = mysettings["PORTAGE_RESTRICT"].split()
7217 nosandbox = (("userpriv" in features) and \
7218 ("usersandbox" not in features) and \
7219 "userpriv" not in restrict and \
7220 "nouserpriv" not in restrict)
7221 if nosandbox and ("userpriv" not in features or \
7222 "userpriv" in restrict or \
7223 "nouserpriv" in restrict):
7224 nosandbox = ("sandbox" not in features and \
7225 "usersandbox" not in features)
7227 sesandbox = mysettings.selinux_enabled() and \
7228 "sesandbox" in mysettings.features
7230 droppriv = "userpriv" in mysettings.features and \
7231 "userpriv" not in restrict and \
7234 fakeroot = "fakeroot" in mysettings.features
# Command templates: ebuild.sh runs phase functions directly;
# misc-functions.sh runs its dyn_* wrappers.
7236 ebuild_sh = _shell_quote(ebuild_sh_binary) + " %s"
7237 misc_sh = _shell_quote(misc_sh_binary) + " dyn_%s"
7239 # args are for the to spawn function
7241 "pretend": {"cmd":ebuild_sh, "args":{"droppriv":0, "free":1, "sesandbox":0, "fakeroot":0}},
7242 "setup": {"cmd":ebuild_sh, "args":{"droppriv":0, "free":1, "sesandbox":0, "fakeroot":0}},
7243 "unpack": {"cmd":ebuild_sh, "args":{"droppriv":droppriv, "free":0, "sesandbox":sesandbox, "fakeroot":0}},
7244 "prepare": {"cmd":ebuild_sh, "args":{"droppriv":droppriv, "free":0, "sesandbox":sesandbox, "fakeroot":0}},
7245 "configure":{"cmd":ebuild_sh, "args":{"droppriv":droppriv, "free":nosandbox, "sesandbox":sesandbox, "fakeroot":0}},
7246 "compile": {"cmd":ebuild_sh, "args":{"droppriv":droppriv, "free":nosandbox, "sesandbox":sesandbox, "fakeroot":0}},
7247 "test": {"cmd":ebuild_sh, "args":{"droppriv":droppriv, "free":nosandbox, "sesandbox":sesandbox, "fakeroot":0}},
7248 "install": {"cmd":ebuild_sh, "args":{"droppriv":0, "free":0, "sesandbox":sesandbox, "fakeroot":fakeroot}},
7249 "rpm": {"cmd":misc_sh, "args":{"droppriv":0, "free":0, "sesandbox":0, "fakeroot":fakeroot}},
7250 "package": {"cmd":misc_sh, "args":{"droppriv":0, "free":0, "sesandbox":0, "fakeroot":fakeroot}},
7253 # merge the deps in so we have again a 'full' actionmap
7254 # be glad when this can die.
7256 if len(actionmap_deps.get(x, [])):
7257 actionmap[x]["dep"] = ' '.join(actionmap_deps[x])
7259 if mydo in actionmap:
7260 if mydo == "package":
7261 # Make sure the package directory exists before executing
7262 # this phase. This can raise PermissionDenied if
7263 # the current user doesn't have write access to $PKGDIR.
7264 parent_dir = os.path.join(mysettings["PKGDIR"],
7265 mysettings["CATEGORY"])
7266 portage.util.ensure_dirs(parent_dir)
7267 if not os.access(parent_dir, os.W_OK):
7268 raise portage.exception.PermissionDenied(
7269 "access('%s', os.W_OK)" % parent_dir)
7270 retval = spawnebuild(mydo,
7271 actionmap, mysettings, debug, logfile=logfile,
7272 fd_pipes=fd_pipes, returnpid=returnpid)
7273 elif mydo=="qmerge":
7274 # check to ensure install was run. this *only* pops up when users
7275 # forget it and are using ebuild
7276 if not os.path.exists(
7277 os.path.join(mysettings["PORTAGE_BUILDDIR"], ".installed")):
7278 writemsg(_("!!! mydo=qmerge, but the install phase has not been run\n"),
7281 # qmerge is a special phase that implies noclean.
7282 if "noclean" not in mysettings.features:
7283 mysettings.features.add("noclean")
7284 #qmerge is specifically not supposed to do a runtime dep check
7286 mysettings["CATEGORY"], mysettings["PF"], mysettings["D"],
7287 os.path.join(mysettings["PORTAGE_BUILDDIR"], "build-info"),
7288 myroot, mysettings, myebuild=mysettings["EBUILD"], mytree=tree,
7289 mydbapi=mydbapi, vartree=vartree, prev_mtimes=prev_mtimes)
# merge phase: run install first (alwaysdep), then merge to the live
# filesystem on success.
7291 retval = spawnebuild("install", actionmap, mysettings, debug,
7292 alwaysdep=1, logfile=logfile, fd_pipes=fd_pipes,
7293 returnpid=returnpid)
7294 retval = exit_status_check(retval)
7295 if retval != os.EX_OK:
7296 # The merge phase handles this already. Callers don't know how
7297 # far this function got, so we have to call elog_process() here
7298 # so that it's only called once.
7299 from portage.elog import elog_process
7300 elog_process(mysettings.mycpv, mysettings)
7301 if retval == os.EX_OK:
7302 retval = merge(mysettings["CATEGORY"], mysettings["PF"],
7303 mysettings["D"], os.path.join(mysettings["PORTAGE_BUILDDIR"],
7304 "build-info"), myroot, mysettings,
7305 myebuild=mysettings["EBUILD"], mytree=tree, mydbapi=mydbapi,
7306 vartree=vartree, prev_mtimes=prev_mtimes)
7308 print(_("!!! Unknown mydo: %s") % mydo)
# Cleanup (presumably a finally block — framing elided): restore the
# temporary PORTAGE_TMPDIR override, release the build-dir lock, and
# restore DISTDIR to its real value.
7316 mysettings["PORTAGE_TMPDIR"] = tmpdir_orig
7317 shutil.rmtree(tmpdir)
7319 portage.locks.unlockdir(builddir_lock)
7321 # Make sure that DISTDIR is restored to it's normal value before we return!
7322 if "PORTAGE_ACTUAL_DISTDIR" in mysettings:
7323 mysettings["DISTDIR"] = mysettings["PORTAGE_ACTUAL_DISTDIR"]
7324 del mysettings["PORTAGE_ACTUAL_DISTDIR"]
# Drop an empty logfile (size check visible; action elided).
7328 if os.stat(logfile).st_size == 0:
7333 if mydo in ("digest", "manifest", "help"):
7334 # If necessary, depend phase has been triggered by aux_get calls
7335 # and the exemption is no longer needed.
7336 _doebuild_manifest_exempt_depend -= 1
# Validate an ebuild's dependency-related metadata (DEPEND/RDEPEND/
# PDEPEND syntax, LICENSE/PROPERTIES/PROVIDE/RESTRICT/SRC_URI parse,
# SLOT presence), logging each problem; non-exempt phases presumably get
# a failing return code (return statements are elided from this view).
7338 def _validate_deps(mysettings, myroot, mydo, mydbapi):
# Phases allowed to proceed despite invalid dependency strings.
7340 invalid_dep_exempt_phases = \
7341 set(["clean", "cleanrm", "help", "prerm", "postrm"])
7342 dep_keys = ["DEPEND", "RDEPEND", "PDEPEND"]
7343 misc_keys = ["LICENSE", "PROPERTIES", "PROVIDE", "RESTRICT", "SRC_URI"]
7344 other_keys = ["SLOT"]
7345 all_keys = dep_keys + misc_keys + other_keys
7346 metadata = dict(zip(all_keys,
7347 mydbapi.aux_get(mysettings.mycpv, all_keys)))
# Minimal stand-in tree so dep_check() can run syntax-only validation
# against an empty fakedbapi (no real package matching intended).
7349 class FakeTree(object):
7350 def __init__(self, mydb):
7352 dep_check_trees = {myroot:{}}
7353 dep_check_trees[myroot]["porttree"] = \
7354 FakeTree(fakedbapi(settings=mysettings))
# Syntax-check each *DEPEND variable; collect failure messages.
7357 for dep_type in dep_keys:
7358 mycheck = dep_check(metadata[dep_type], None, mysettings,
7359 myuse="all", myroot=myroot, trees=dep_check_trees)
7361 msgs.append(" %s: %s\n %s\n" % (
7362 dep_type, metadata[dep_type], mycheck[1]))
# Parse-check the misc keys via paren_reduce/use_reduce.
7366 portage.dep.use_reduce(
7367 portage.dep.paren_reduce(metadata[k]), matchall=True)
7368 except portage.exception.InvalidDependString as e:
7369 msgs.append(" %s: %s\n %s\n" % (
7370 k, metadata[k], str(e)))
7372 if not metadata["SLOT"]:
7373 msgs.append(_(" SLOT is undefined\n"))
# Report all accumulated problems at ERROR level.
7376 portage.util.writemsg_level(_("Error(s) in metadata for '%s':\n") % \
7377 (mysettings.mycpv,), level=logging.ERROR, noiselevel=-1)
7379 portage.util.writemsg_level(x,
7380 level=logging.ERROR, noiselevel=-1)
7381 if mydo not in invalid_dep_exempt_phases:
def _movefile(src, dest, **kwargs):
	"""Move *src* to *dest* via movefile(), raising on failure.

	movefile() signals failure by returning None; translate that into a
	portage.exception.PortageException so callers need not inspect the
	return value themselves. Extra keyword arguments are forwarded to
	movefile() unchanged.
	"""
	result = movefile(src, dest, **kwargs)
	if result is None:
		raise portage.exception.PortageException(
			"mv '%s' '%s'" % (src, dest))
# NOTE(review): elided listing — try/except framing, raises, and several
# assignments are missing from view; comments describe only visible lines.
7394 def movefile(src, dest, newmtime=None, sstat=None, mysettings=None,
7395 hardlink_candidates=None, encoding=_encodings['fs']):
7396 """moves a file from src to dest, preserving all permissions and attributes; mtime will
7397 be preserved even when moving across filesystems. Returns true on success and false on
7398 failure. Move is atomic."""
7399 #print "movefile("+str(src)+","+str(dest)+","+str(newmtime)+","+str(sstat)+")"
7401 if mysettings is None:
7403 mysettings = settings
7405 selinux_enabled = mysettings.selinux_enabled()
7407 selinux = _unicode_module_wrapper(_selinux, encoding=encoding)
# Shadow os/shutil/lchown with unicode-aware wrappers for the chosen
# filesystem encoding.
7409 lchown = _unicode_func_wrapper(data.lchown, encoding=encoding)
7410 os = _unicode_module_wrapper(_os,
7411 encoding=encoding, overrides=_os_overrides)
7412 shutil = _unicode_module_wrapper(_shutil, encoding=encoding)
7418 except SystemExit as e:
7420 except Exception as e:
7421 print(_("!!! Stating source file failed... movefile()"))
# Stat the destination, falling back to its parent directory when the
# destination itself does not exist.
7427 dstat=os.lstat(dest)
7428 except (OSError, IOError):
7429 dstat=os.lstat(os.path.dirname(dest))
# BSD file flags would block the move; clear them and remember the
# parent's flags (pflags) so they can be restored afterwards.
7433 if destexists and dstat.st_flags != 0:
7434 bsd_chflags.lchflags(dest, 0)
7435 # Use normal stat/chflags for the parent since we want to
7436 # follow any symlinks to the real parent directory.
7437 pflags = os.stat(os.path.dirname(dest)).st_flags
7439 bsd_chflags.chflags(os.path.dirname(dest), 0)
7442 if stat.S_ISLNK(dstat[stat.ST_MODE]):
7446 except SystemExit as e:
7448 except Exception as e:
# Symlink source: recreate the link at dest (stripping the ${D} image
# prefix from its target), preserve ownership, and return the mtime.
7451 if stat.S_ISLNK(sstat[stat.ST_MODE]):
7453 target=os.readlink(src)
7454 if mysettings and mysettings["D"]:
7455 if target.find(mysettings["D"])==0:
7456 target=target[len(mysettings["D"]):]
7457 if destexists and not stat.S_ISDIR(dstat[stat.ST_MODE]):
7460 selinux.symlink(target, dest, src)
7462 os.symlink(target,dest)
7463 lchown(dest,sstat[stat.ST_UID],sstat[stat.ST_GID])
7464 # utime() only works on the target of a symlink, so it's not
7465 # possible to perserve mtime on symlinks.
7466 return os.lstat(dest)[stat.ST_MTIME]
7467 except SystemExit as e:
7469 except Exception as e:
7470 print(_("!!! failed to properly create symlink:"))
7471 print("!!!",dest,"->",target)
7476 # Since identical files might be merged to multiple filesystems,
7477 # so os.link() calls might fail for some paths, so try them all.
7478 # For atomic replacement, first create the link as a temp file
7479 # and them use os.rename() to replace the destination.
7480 if hardlink_candidates:
7481 head, tail = os.path.split(dest)
7482 hardlink_tmp = os.path.join(head, ".%s._portage_merge_.%s" % \
7483 (tail, os.getpid()))
7485 os.unlink(hardlink_tmp)
7486 except OSError as e:
7487 if e.errno != errno.ENOENT:
7488 writemsg(_("!!! Failed to remove hardlink temp file: %s\n") % \
7489 (hardlink_tmp,), noiselevel=-1)
7490 writemsg("!!! %s\n" % (e,), noiselevel=-1)
7493 for hardlink_src in hardlink_candidates:
7495 os.link(hardlink_src, hardlink_tmp)
7500 os.rename(hardlink_tmp, dest)
7501 except OSError as e:
7502 writemsg(_("!!! Failed to rename %s to %s\n") % \
7503 (hardlink_tmp, dest), noiselevel=-1)
7504 writemsg("!!! %s\n" % (e,), noiselevel=-1)
# Same-device (or SELinux) case: attempt an atomic rename first.
7511 renamefailed = False
7512 if not hardlinked and (selinux_enabled or sstat.st_dev == dstat.st_dev):
7515 ret = selinux.rename(src, dest)
7517 ret=os.rename(src,dest)
7519 except SystemExit as e:
7521 except Exception as e:
# NOTE(review): e[0] indexing of an exception is Python 2-only; under
# Python 3 this comparison would raise. EXDEV means cross-device, which
# falls through to the copy path below.
7522 if e[0]!=errno.EXDEV:
7523 # Some random error.
7524 print(_("!!! Failed to move %(src)s to %(dest)s") % {"src": src, "dest": dest})
7527 # Invalid cross-device-link 'bind' mounted or actually Cross-Device
# Cross-device fallback: copy to "dest#new" then rename over dest so
# the replacement stays atomic on the destination filesystem.
7530 if stat.S_ISREG(sstat[stat.ST_MODE]):
7531 try: # For safety copy then move it over.
7533 selinux.copyfile(src, dest + "#new")
7534 selinux.rename(dest + "#new", dest)
7536 shutil.copyfile(src,dest+"#new")
7537 os.rename(dest+"#new",dest)
7539 except SystemExit as e:
7541 except Exception as e:
7542 print(_('!!! copy %(src)s -> %(dest)s failed.') % {"src": src, "dest": dest})
7546 #we don't yet handle special, so we need to fall back to /bin/mv
7547 a = process.spawn([MOVE_BINARY, '-f', src, dest], env=os.environ)
7549 writemsg(_("!!! Failed to move special file:\n"), noiselevel=-1)
7550 writemsg(_("!!! '%(src)s' to '%(dest)s'\n") % \
7551 {"src": _unicode_decode(src, encoding=encoding),
7552 "dest": _unicode_decode(dest, encoding=encoding)}, noiselevel=-1)
7553 writemsg("!!! %s\n" % a, noiselevel=-1)
7554 return None # failure
# Restore ownership/mode on the copied file (rename preserves them, a
# fresh copy does not).
7557 if stat.S_ISLNK(sstat[stat.ST_MODE]):
7558 lchown(dest,sstat[stat.ST_UID],sstat[stat.ST_GID])
7560 os.chown(dest,sstat[stat.ST_UID],sstat[stat.ST_GID])
7561 os.chmod(dest, stat.S_IMODE(sstat[stat.ST_MODE])) # Sticky is reset on chown
7563 except SystemExit as e:
7565 except Exception as e:
7566 print(_("!!! Failed to chown/chmod/unlink in movefile()"))
# Preserve/record mtime. NOTE(review): long() is Python 2-only.
7573 newmtime = long(os.stat(dest).st_mtime)
7575 if newmtime is not None:
7576 os.utime(dest, (newmtime, newmtime))
7578 os.utime(dest, (sstat.st_atime, sstat.st_mtime))
7579 newmtime = long(sstat.st_mtime)
7581 # The utime can fail here with EPERM even though the move succeeded.
7582 # Instead of failing, use stat to return the mtime if possible.
7584 newmtime = long(os.stat(dest).st_mtime)
7585 except OSError as e:
7586 writemsg(_("!!! Failed to stat in movefile()\n"), noiselevel=-1)
7587 writemsg("!!! %s\n" % dest, noiselevel=-1)
7588 writemsg("!!! %s\n" % str(e), noiselevel=-1)
7592 # Restore the flags we saved before moving
7594 bsd_chflags.chflags(os.path.dirname(dest), pflags)
# Merge a built package image (pkgloc) plus its build-info (infloc) into
# the live filesystem rooted at myroot, via a dblink instance.
# NOTE(review): elided listing — the docstring/return framing around
# orig lines 7600, 7603-7604 is missing from this view.
7598 def merge(mycat, mypkg, pkgloc, infloc, myroot, mysettings, myebuild=None,
7599 mytree=None, mydbapi=None, vartree=None, prev_mtimes=None, blockers=None,
# Refuse to merge when the target root is not writable.
7601 if not os.access(myroot, os.W_OK):
7602 writemsg(_("Permission denied: access('%s', W_OK)\n") % myroot,
# Delegate the actual merge to dblink; prev_mtimes feeds its
# config-protection handling.
7605 mylink = dblink(mycat, mypkg, myroot, mysettings, treetype=mytree,
7606 vartree=vartree, blockers=blockers, scheduler=scheduler)
7607 return mylink.merge(pkgloc, infloc, myroot, myebuild,
7608 mydbapi=mydbapi, prev_mtimes=prev_mtimes)
# Unmerge an installed package (cat/pkg) from the root via a dblink
# against the vartree. NOTE(review): elided listing — orig lines
# 7615-7617 and 7621-7626 (presumably locking/return handling around
# the unmerge call) are missing from this view.
7610 def unmerge(cat, pkg, myroot, mysettings, mytrimworld=1, vartree=None,
7611 ldpath_mtimes=None, scheduler=None):
7612 mylink = dblink(cat, pkg, myroot, mysettings, treetype="vartree",
7613 vartree=vartree, scheduler=scheduler)
# Use the dblink's resolved vartree (covers the vartree=None default).
7614 vartree = mylink.vartree
7618 retval = mylink.unmerge(trimworld=mytrimworld, cleanup=1,
7619 ldpath_mtimes=ldpath_mtimes)
7620 if retval == os.EX_OK:
# NOTE(review): elided listing — loop header, append calls, and the
# return statement are missing from this view.
7627 def dep_virtual(mysplit, mysettings):
7628 "Does virtual dependency conversion"
7630 myvirtuals = mysettings.getvirtuals()
# Recurse into nested dependency groups (parenthesized sub-lists).
7632 if isinstance(x, list):
7633 newsplit.append(dep_virtual(x, mysettings))
7636 mychoices = myvirtuals.get(mykey, None)
# A virtual with exactly one provider is substituted in place.
7638 if len(mychoices) == 1:
7639 a = x.replace(mykey, dep_getkey(mychoices[0]), 1)
7642 # blocker needs "and" not "or(||)".
# Multiple providers: expand to one substituted atom per choice
# (collected into 'a'; "||"/blocker grouping handled on elided lines).
7647 a.append(x.replace(mykey, dep_getkey(y), 1))
# _expand_new_virtuals(): see the docstring below. Walks a parsed
# dependency list, validates each atom against the active EAPI, and
# replaces virtual/* atoms with their new-style (package) providers,
# recursing through each provider's RDEPEND via dep_check().
# NOTE(review): numerous interior lines are elided from this listing
# (e.g. 7663, 7669-7670, 7684-7687, 7692-7693, ...), including loop
# headers and several else-branches — code kept byte-identical.
7653 def _expand_new_virtuals(mysplit, edebug, mydbapi, mysettings, myroot="/",
7654 trees=None, use_mask=None, use_force=None, **kwargs):
7655 """Recursively expand new-style virtuals so as to collapse one or more
7656 levels of indirection. In dep_zapdeps, new-style virtuals will be assigned
7657 zero cost regardless of whether or not they are currently installed. Virtual
7658 blockers are supported but only when the virtual expands to a single
7659 atom because it wouldn't necessarily make sense to block all the components
7660 of a compound virtual. When more than one new-style virtual is matched,
7661 the matches are sorted from highest to lowest versions and the atom is
7662 expanded to || ( highest match ... lowest match )."""
7664 mytrees = trees[myroot]
7665 portdb = mytrees["porttree"].dbapi
7666 atom_graph = mytrees.get("atom_graph")
7667 parent = mytrees.get("parent")
7668 virt_parent = mytrees.get("virt_parent")
# The EAPI used for atom validation comes from the virtual parent when
# we are inside a virtual expansion, else from the direct parent.
7671 if parent is not None:
7672 if virt_parent is not None:
7673 graph_parent = virt_parent
7674 eapi = virt_parent[0].metadata['EAPI']
7676 graph_parent = parent
7677 eapi = parent.metadata["EAPI"]
# repoman mode is signalled by the absence of local config.
7678 repoman = not mysettings.local_config
7679 if kwargs["use_binaries"]:
7680 portdb = trees[myroot]["bintree"].dbapi
7681 myvirtuals = mysettings.getvirtuals()
7682 pprovideddict = mysettings.pprovideddict
7683 myuse = kwargs["myuse"]
# Recurse into parenthesized sub-lists.
7688 elif isinstance(x, list):
7689 newsplit.append(_expand_new_virtuals(x, edebug, mydbapi,
7690 mysettings, myroot=myroot, trees=trees, use_mask=use_mask,
7691 use_force=use_force, **kwargs))
7694 if not isinstance(x, portage.dep.Atom):
7696 x = portage.dep.Atom(x)
7697 except portage.exception.InvalidAtom:
7698 if portage.dep._dep_check_strict:
7699 raise portage.exception.ParseError(
7700 _("invalid atom: '%s'") % x)
7702 # Only real Atom instances are allowed past this point.
# EAPI 0/1 forbid strong blockers (!!) and USE deps; reject in strict mode.
7705 if x.blocker and x.blocker.overlap.forbid and \
7706 eapi in ("0", "1") and portage.dep._dep_check_strict:
7707 raise portage.exception.ParseError(
7708 _("invalid atom: '%s'") % (x,))
7709 if x.use and eapi in ("0", "1") and \
7710 portage.dep._dep_check_strict:
7711 raise portage.exception.ParseError(
7712 _("invalid atom: '%s'") % (x,))
# repoman: evaluate USE conditionals from mask/force for QA purposes.
7714 if repoman and x.use and x.use.conditional:
7715 evaluated_atom = portage.dep.remove_slot(x)
7717 evaluated_atom += ":%s" % x.slot
7718 evaluated_atom += str(x.use._eval_qa_conditionals(
7719 use_mask, use_force))
7720 x = portage.dep.Atom(evaluated_atom)
# Normal operation: evaluate USE conditionals against the parent's USE.
7722 if not repoman and \
7723 myuse is not None and isinstance(x, portage.dep.Atom) and x.use:
7724 if x.use.conditional:
7725 evaluated_atom = portage.dep.remove_slot(x)
7727 evaluated_atom += ":%s" % x.slot
7728 evaluated_atom += str(x.use.evaluate_conditionals(myuse))
7729 x = portage.dep.Atom(evaluated_atom)
# Non-virtual atoms pass through unchanged (just recorded in the graph).
7732 if not mykey.startswith("virtual/"):
7734 if atom_graph is not None:
7735 atom_graph.add(x, graph_parent)
7737 mychoices = myvirtuals.get(mykey, [])
7739 # Virtual blockers are no longer expanded here since
7740 # the un-expanded virtual atom is more useful for
7741 # maintaining a cache of blocker atoms.
7743 if atom_graph is not None:
7744 atom_graph.add(x, graph_parent)
# Fallback path when the dbapi lacks match_pkgs (e.g. repoman).
7747 if repoman or not hasattr(portdb, 'match_pkgs'):
7748 if portdb.cp_list(x.cp):
7751 # TODO: Add PROVIDE check for repoman.
7754 a.append(dep.Atom(x.replace(x.cp, y.cp, 1)))
7758 newsplit.append(a[0])
7760 newsplit.append(['||'] + a)
7764 # Ignore USE deps here, since otherwise we might not
7765 # get any matches. Choices with correct USE settings
7766 # will be preferred in dep_zapdeps().
7767 matches = portdb.match_pkgs(x.without_use)
7768 # Use descending order to prefer higher versions.
7771 # only use new-style matches
7772 if pkg.cp.startswith("virtual/"):
7774 if not (pkgs or mychoices):
7775 # This one couldn't be expanded as a new-style virtual. Old-style
7776 # virtuals have already been expanded by dep_virtual, so this one
7777 # is unavailable and dep_zapdeps will identify it as such. The
7778 # atom is not eliminated here since it may still represent a
7779 # dependency that needs to be satisfied.
7781 if atom_graph is not None:
7782 atom_graph.add(x, graph_parent)
# Build an exact (=cpv) atom for the matched provider, carrying over
# any USE deps from the original virtual atom.
7787 virt_atom = '=' + pkg.cpv
7789 virt_atom += str(x.use)
7790 virt_atom = dep.Atom(virt_atom)
7791 # According to GLEP 37, RDEPEND is the only dependency
7792 # type that is valid for new-style virtuals. Repoman
7793 # should enforce this.
7794 depstring = pkg.metadata['RDEPEND']
7795 pkg_kwargs = kwargs.copy()
7796 pkg_kwargs["myuse"] = pkg.use.enabled
7798 util.writemsg_level(_("Virtual Parent: %s\n") \
7799 % (pkg,), noiselevel=-1, level=logging.DEBUG)
7800 util.writemsg_level(_("Virtual Depstring: %s\n") \
7801 % (depstring,), noiselevel=-1, level=logging.DEBUG)
7803 # Set EAPI used for validation in dep_check() recursion.
7804 mytrees["virt_parent"] = (pkg, virt_atom)
7807 mycheck = dep_check(depstring, mydbapi, mysettings,
7808 myroot=myroot, trees=trees, **pkg_kwargs)
7810 # Restore previous EAPI after recursion.
7811 if virt_parent is not None:
7812 mytrees["virt_parent"] = virt_parent
7814 del mytrees["virt_parent"]
7817 raise portage.exception.ParseError(
7818 "%s: %s '%s'" % (y[0], mycheck[1], depstring))
7820 # pull in the new-style virtual
7821 mycheck[1].append(virt_atom)
7822 a.append(mycheck[1])
7823 if atom_graph is not None:
7824 atom_graph.add(virt_atom, graph_parent)
7825 # Plain old-style virtuals. New-style virtuals are preferred.
7828 new_atom = dep.Atom(x.replace(x.cp, y.cp, 1))
7829 matches = portdb.match(new_atom)
7830 # portdb is an instance of depgraph._dep_check_composite_db, so
7831 # USE conditionals are already evaluated.
7832 if matches and mykey in \
7833 portdb.aux_get(matches[-1], ['PROVIDE'])[0].split():
7835 if atom_graph is not None:
7836 atom_graph.add(new_atom, graph_parent)
7838 if not a and mychoices:
7839 # Check for a virtual package.provided match.
7841 new_atom = dep.Atom(x.replace(x.cp, y.cp, 1))
7842 if match_from_list(new_atom,
7843 pprovideddict.get(new_atom.cp, [])):
7845 if atom_graph is not None:
7846 atom_graph.add(new_atom, graph_parent)
7850 if atom_graph is not None:
7851 atom_graph.add(x, graph_parent)
# Single expansion: emit it directly; multiple: wrap in an || group.
7853 newsplit.append(a[0])
7855 newsplit.append(['||'] + a)
# dep_eval(): recursively evaluates a reduced dependency list to a
# boolean — an "||" list needs any one satisfied member.
# NOTE(review): several interior lines are elided from this listing
# (7860-7861, 7866-7869, 7874-7877, ...) — code kept byte-identical.
7859 def dep_eval(deplist):
7862 if deplist[0]=="||":
7863 #or list; we just need one "1"
7864 for x in deplist[1:]:
7865 if isinstance(x, list):
7870 #XXX: unless there's no available atoms in the list
7871 #in which case we need to assume that everything is
7872 #okay as some ebuilds are relying on an old bug.
7873 if len(deplist) == 1:
7878 if isinstance(x, list):
# dep_zapdeps(): see the docstring below. For || groups this function
# ranks the alternative choices by installed/in-graph/USE-satisfied
# status and returns the atoms of the best-ranked choice.
# NOTE(review): many interior lines are elided from this listing
# (7888-7890, 7893-7894, 7943-7949, 8036-8038, 8081-8082, ...),
# including several loop headers and returns — code kept byte-identical.
7885 def dep_zapdeps(unreduced, reduced, myroot, use_binaries=0, trees=None):
7886 """Takes an unreduced and reduced deplist and removes satisfied dependencies.
7887 Returned deplist contains steps that must be taken to satisfy dependencies."""
7891 writemsg("ZapDeps -- %s\n" % (use_binaries), 2)
# Already satisfied (or trivially empty): nothing to do.
7892 if not reduced or unreduced == ["||"] or dep_eval(reduced):
# Non-|| level: recurse into sub-lists and collect unsatisfied atoms.
7895 if unreduced[0] != "||":
7897 for x, satisfied in zip(unreduced, reduced):
7898 if isinstance(x, list):
7899 unresolved += dep_zapdeps(x, satisfied, myroot,
7900 use_binaries=use_binaries, trees=trees)
7902 unresolved.append(x)
7905 # We're at a ( || atom ... ) type level and need to make a choice
7906 deps = unreduced[1:]
7907 satisfieds = reduced[1:]
7909 # Our preference order is for the first item that:
7910 # a) contains all unmasked packages with the same key as installed packages
7911 # b) contains all unmasked packages
7912 # c) contains masked installed packages
7913 # d) is the first item
7915 preferred_installed = []
7916 preferred_in_graph = []
7917 preferred_any_slot = []
7918 preferred_non_installed = []
7919 unsat_use_in_graph = []
7920 unsat_use_installed = []
7921 unsat_use_non_installed = []
7924 # Alias the trees we'll be checking availability against
7925 parent = trees[myroot].get("parent")
7926 priority = trees[myroot].get("priority")
7927 graph_db = trees[myroot].get("graph_db")
7929 if "vartree" in trees[myroot]:
7930 vardb = trees[myroot]["vartree"].dbapi
7932 mydbapi = trees[myroot]["bintree"].dbapi
7934 mydbapi = trees[myroot]["porttree"].dbapi
7936 # Sort the deps into installed, not installed but already
7937 # in the graph and other, not installed and not in the graph
7938 # and other, with values of [[required_atom], availability]
7939 for x, satisfied in zip(deps, satisfieds):
7940 if isinstance(x, list):
7941 atoms = dep_zapdeps(x, satisfied, myroot,
7942 use_binaries=use_binaries, trees=trees)
7947 other.append((atoms, None, False))
7950 all_available = True
7951 all_use_satisfied = True
7956 # Ignore USE dependencies here since we don't want USE
7957 # settings to adversely affect || preference evaluation.
7958 avail_pkg = mydbapi.match(atom.without_use)
7960 avail_pkg = avail_pkg[-1] # highest (ascending order)
7961 avail_slot = dep.Atom("%s:%s" % (atom.cp,
7962 mydbapi.aux_get(avail_pkg, ["SLOT"])[0]))
7964 all_available = False
7965 all_use_satisfied = False
# Re-match with USE deps to see whether USE is also satisfiable.
7969 avail_pkg_use = mydbapi.match(atom)
7970 if not avail_pkg_use:
7971 all_use_satisfied = False
7973 # highest (ascending order)
7974 avail_pkg_use = avail_pkg_use[-1]
7975 if avail_pkg_use != avail_pkg:
7976 avail_pkg = avail_pkg_use
7977 avail_slot = dep.Atom("%s:%s" % (atom.cp,
7978 mydbapi.aux_get(avail_pkg, ["SLOT"])[0]))
7980 versions[avail_slot] = avail_pkg
7982 this_choice = (atoms, versions, all_available)
7984 # The "all installed" criterion is not version or slot specific.
7985 # If any version of a package is already in the graph then we
7986 # assume that it is preferred over other possible packages choices.
7987 all_installed = True
7988 for atom in set(dep.Atom(atom.cp) for atom in atoms \
7989 if not atom.blocker):
7990 # New-style virtuals have zero cost to install.
7991 if not vardb.match(atom) and not atom.startswith("virtual/"):
7992 all_installed = False
7994 all_installed_slots = False
7996 all_installed_slots = True
7997 for slot_atom in versions:
7998 # New-style virtuals have zero cost to install.
7999 if not vardb.match(slot_atom) and \
8000 not slot_atom.startswith("virtual/"):
8001 all_installed_slots = False
# Without a depgraph db, rank by installed/USE-satisfied status only.
8003 if graph_db is None:
8004 if all_use_satisfied:
8006 if all_installed_slots:
8007 preferred_installed.append(this_choice)
8009 preferred_any_slot.append(this_choice)
8011 preferred_non_installed.append(this_choice)
8013 if all_installed_slots:
8014 unsat_use_installed.append(this_choice)
8016 unsat_use_non_installed.append(this_choice)
8019 for slot_atom in versions:
8020 # New-style virtuals have zero cost to install.
8021 if not graph_db.match(slot_atom) and \
8022 not slot_atom.startswith("virtual/"):
8023 all_in_graph = False
8025 circular_atom = None
8027 if parent is None or priority is None:
8029 elif priority.buildtime:
8030 # Check if the atom would result in a direct circular
8031 # dependency and try to avoid that if it seems likely
8032 # to be unresolvable. This is only relevant for
8033 # buildtime deps that aren't already satisfied by an
8034 # installed package.
8035 cpv_slot_list = [parent]
8039 if vardb.match(atom):
8040 # If the atom is satisfied by an installed
8041 # version then it's not a circular dep.
8043 if atom.cp != parent.cp:
8045 if match_from_list(atom, cpv_slot_list):
8046 circular_atom = atom
8048 if circular_atom is not None:
8049 other.append(this_choice)
8051 if all_use_satisfied:
8053 preferred_in_graph.append(this_choice)
8055 if all_installed_slots:
8056 preferred_installed.append(this_choice)
8058 preferred_any_slot.append(this_choice)
8060 preferred_non_installed.append(this_choice)
8063 unsat_use_in_graph.append(this_choice)
8064 elif all_installed_slots:
8065 unsat_use_installed.append(this_choice)
8067 unsat_use_non_installed.append(this_choice)
8069 other.append(this_choice)
8071 # unsat_use_* must come after preferred_non_installed
8072 # for correct ordering in cases like || ( foo[a] foo[b] ).
8073 preferred = preferred_in_graph + preferred_installed + \
8074 preferred_any_slot + preferred_non_installed + \
8075 unsat_use_in_graph + unsat_use_installed + unsat_use_non_installed + \
# First pass only considers fully available choices; second pass
# accepts masked ones as a last resort.
8078 for allow_masked in (False, True):
8079 for atoms, versions, all_available in preferred:
8080 if all_available or allow_masked:
8083 assert(False) # This point should not be reachable
# dep_expand(): normalizes a possibly category-less dependency string
# into a full Atom, inserting a "null/" placeholder category and then
# resolving the real category via cpv_expand().
# NOTE(review): interior lines 8086-8093, 8095-8097, 8099, 8101, 8104,
# 8109, 8112, 8114-8116 are elided from this listing — code kept
# byte-identical.
8085 def dep_expand(mydep, mydb=None, use_cache=1, settings=None):
8094 if isinstance(orig_dep, dep.Atom):
8098 has_cat = '/' in orig_dep
# Insert the placeholder category in front of the first word character
# so that any operator prefix (>=, =, etc.) is preserved.
8100 alphanum = re.search(r'\w', orig_dep)
8102 mydep = orig_dep[:alphanum.start()] + "null/" + \
8103 orig_dep[alphanum.start():]
8105 mydep = dep.Atom(mydep)
8106 except exception.InvalidAtom:
8107 # Missing '=' prefix is allowed for backward compatibility.
8108 if not dep.isvalidatom("=" + mydep):
8110 mydep = dep.Atom('=' + mydep)
8111 orig_dep = '=' + orig_dep
8113 null_cat, pn = catsplit(mydep.cp)
8117 expanded = cpv_expand(mydep, mydb=mydb,
8118 use_cache=use_cache, settings=settings)
8119 return portage.dep.Atom(orig_dep.replace(mydep, expanded, 1))
# dep_check(): top-level dependency-string evaluator. Parses the string,
# reduces it against USE flags, expands virtuals, and selects atoms via
# dep_zapdeps(). Returns [1, selected_atoms] on success or [0, message].
# NOTE(review): interior lines are elided from this listing (8126,
# 8128-8130, 8132-8133, 8136, 8139, 8141, 8144-8145, 8147, 8150-8153,
# 8155, 8168, 8172-8173, 8176-8180, 8183, 8189-8191, 8195, 8199-8200,
# 8209) — code kept byte-identical.
8121 def dep_check(depstring, mydbapi, mysettings, use="yes", mode=None, myuse=None,
8122 use_cache=1, use_binaries=0, myroot="/", trees=None):
8123 """Takes a depend string and parses the condition."""
8124 edebug = mysettings.get("PORTAGE_DEBUG", None) == "1"
8125 #check_config_instance(mysettings)
8127 trees = globals()["db"]
8131 myusesplit = mysettings["PORTAGE_USE"].split()
8134 # We've been given useflags to use.
8135 #print "USE FLAGS PASSED IN."
8137 #if "bindist" in myusesplit:
8138 # print "BINDIST is set!"
8140 # print "BINDIST NOT set."
8142 #we are being run by autouse(), don't consult USE vars yet.
8143 # WE ALSO CANNOT USE SETTINGS
8146 #convert parenthesis to sublists
8148 mysplit = portage.dep.paren_reduce(depstring)
8149 except portage.exception.InvalidDependString as e:
8154 useforce.add(mysettings["ARCH"])
8156 # This masking/forcing is only for repoman. In other cases, relevant
8157 # masking/forcing should have already been applied via
8158 # config.regenerate(). Also, binary or installed packages may have
8159 # been built with flags that are now masked, and it would be
8160 # inconsistent to mask them now. Additionally, myuse may consist of
8161 # flags from a parent package that is being merged to a $ROOT that is
8162 # different from the one that mysettings represents.
8163 mymasks.update(mysettings.usemask)
8164 mymasks.update(mysettings.archlist())
8165 mymasks.discard(mysettings["ARCH"])
8166 useforce.update(mysettings.useforce)
8167 useforce.difference_update(mymasks)
8169 mysplit = portage.dep.use_reduce(mysplit, uselist=myusesplit,
8170 masklist=mymasks, matchall=(use=="all"), excludeall=useforce)
8171 except portage.exception.InvalidDependString as e:
8174 # Do the || conversions
8175 mysplit=portage.dep.dep_opconvert(mysplit)
8178 #dependencies were reduced to nothing
8181 # Recursively expand new-style virtuals so as to
8182 # collapse one or more levels of indirection.
8184 mysplit = _expand_new_virtuals(mysplit, edebug, mydbapi, mysettings,
8185 use=use, mode=mode, myuse=myuse,
8186 use_force=useforce, use_mask=mymasks, use_cache=use_cache,
8187 use_binaries=use_binaries, myroot=myroot, trees=trees)
8188 except portage.exception.ParseError as e:
8192 mysplit2=dep_wordreduce(mysplit2,mysettings,mydbapi,mode,use_cache=use_cache)
8193 if mysplit2 is None:
8194 return [0, _("Invalid token")]
8196 writemsg("\n\n\n", 1)
8197 writemsg("mysplit: %s\n" % (mysplit), 1)
8198 writemsg("mysplit2: %s\n" % (mysplit2), 1)
8201 selected_atoms = dep_zapdeps(mysplit, mysplit2, myroot,
8202 use_binaries=use_binaries, trees=trees)
8203 except portage.exception.InvalidAtom as e:
8204 if portage.dep._dep_check_strict:
8205 raise # This shouldn't happen.
8206 # dbapi.match() failed due to an invalid atom in
8207 # the dependencies of an installed package.
8208 return [0, _("Invalid atom: '%s'") % (e,)]
8210 return [1, selected_atoms]
# dep_wordreduce(): replaces each atom in a (copied) dependency list
# with True/False according to whether it is satisfied (package.provided
# or a dbapi match), recursing into sub-lists.
# NOTE(review): interior lines are elided from this listing (8217,
# 8220, 8223, 8227, 8233-8234, 8237-8242, 8244-8245, 8247-8249,
# 8251-8253) — code kept byte-identical.
8212 def dep_wordreduce(mydeplist,mysettings,mydbapi,mode,use_cache=1):
8213 "Reduces the deplist to ones and zeros"
8214 deplist=mydeplist[:]
8215 for mypos, token in enumerate(deplist):
8216 if isinstance(deplist[mypos], list):
8218 deplist[mypos]=dep_wordreduce(deplist[mypos],mysettings,mydbapi,mode,use_cache=use_cache)
8219 elif deplist[mypos]=="||":
# Blockers are treated as unsatisfied at this stage.
8221 elif token[:1] == "!":
8222 deplist[mypos] = False
8224 mykey = deplist[mypos].cp
8225 if mysettings and mykey in mysettings.pprovideddict and \
8226 match_from_list(deplist[mypos], mysettings.pprovideddict[mykey]):
8228 elif mydbapi is None:
8229 # Assume nothing is satisfied. This forces dep_zapdeps to
8230 # return all of the deps that have been selected
8231 # (excluding those satisfied by package.provided).
8232 deplist[mypos] = False
8235 x = mydbapi.xmatch(mode, deplist[mypos])
8236 if mode.startswith("minimum-"):
8243 mydep=mydbapi.match(deplist[mypos],use_cache=use_cache)
8246 if deplist[mypos][0]=="!":
8250 #encountered invalid string
# Pre-compiled pattern matching a full category/package-version string.
8254 _cpv_key_re = re.compile('^' + versions._cpv + '$', re.VERBOSE)
# cpv_getkey(): extracts the cat/pkg ("cp") part of a cpv string.
# Invalid input triggers a DeprecationWarning and a legacy fallback split.
# NOTE(review): lines 8258-8260, 8263, 8266-8269 are elided from this
# listing — code kept byte-identical.
8255 def cpv_getkey(mycpv):
8256 """Calls pkgsplit on a cpv and returns only the cp."""
8257 m = _cpv_key_re.match(mycpv)
8261 warnings.warn("portage.cpv_getkey() called with invalid cpv: '%s'" \
8262 % (mycpv,), DeprecationWarning)
8264 myslash = mycpv.split("/", 1)
8265 mysplit = versions._pkgsplit(myslash[-1])
8270 return myslash[0]+"/"+mysplit[0]
# Backward-compatibility alias for cpv_getkey().
8274 getCPFromCPV = cpv_getkey
# key_expand(): deprecated category-expansion helper (see its docstring);
# resolves a bare package name or cp against the dbapi categories and the
# virtuals maps, always returning the first candidate.
# NOTE(review): lines 8285, 8293, 8300-8301 are elided from this
# listing — code kept byte-identical.
8276 def key_expand(mykey, mydb=None, use_cache=1, settings=None):
8277 """This is deprecated because it just returns the first match instead of
8278 raising AmbiguousPackageName like cpv_expand does."""
8279 warnings.warn("portage.key_expand() is deprecated", DeprecationWarning)
8280 mysplit=mykey.split("/")
8281 if settings is None:
8282 settings = globals()["settings"]
8283 virts = settings.getvirtuals("/")
8284 virts_p = settings.get_virts_p("/")
# No category given: search every category, then virtuals, then "null/".
8286 if hasattr(mydb, "cp_list"):
8287 for x in mydb.categories:
8288 if mydb.cp_list(x+"/"+mykey,use_cache=use_cache):
8289 return dep.Atom(x + "/" + mykey)
8290 if mykey in virts_p:
8291 return(virts_p[mykey][0])
8292 return dep.Atom("null/" + mykey)
8294 if hasattr(mydb, "cp_list"):
8295 if not mydb.cp_list(mykey, use_cache=use_cache) and \
8296 virts and mykey in virts:
8297 return virts[mykey][0]
8298 if not isinstance(mykey, dep.Atom):
8299 mykey = dep.Atom(mykey)
# cpv_expand(): see the docstring below. Unlike key_expand(), ambiguous
# names raise AmbiguousPackageName instead of silently picking one.
# NOTE(review): interior lines are elided from this listing (8313,
# 8315-8316, 8318, 8320-8321, 8332, 8334, 8336, 8341, 8343-8349,
# 8357, 8362-8364, 8371-8373, 8375, 8378-8380, 8383, 8385-8387) —
# code kept byte-identical.
8302 def cpv_expand(mycpv, mydb=None, use_cache=1, settings=None):
8303 """Given a string (packagename or virtual) expand it into a valid
8304 cat/package string. Virtuals use the mydb to determine which provided
8305 virtual is a valid choice and defaults to the first element when there
8306 are no installed/available candidates."""
8307 myslash=mycpv.split("/")
8308 mysplit = versions._pkgsplit(myslash[-1])
8309 if settings is None:
8310 settings = globals()["settings"]
8311 virts = settings.getvirtuals("/")
8312 virts_p = settings.get_virts_p("/")
8314 # this is illegal case.
8317 elif len(myslash)==2:
8319 mykey=myslash[0]+"/"+mysplit[0]
# Category given and it is a known virtual: pick the first provider
# that actually has ebuilds, else default to the first listed provider.
8322 if mydb and virts and mykey in virts:
8323 writemsg("mydb.__class__: %s\n" % (mydb.__class__), 1)
8324 if hasattr(mydb, "cp_list"):
8325 if not mydb.cp_list(mykey, use_cache=use_cache):
8326 writemsg("virts[%s]: %s\n" % (str(mykey),virts[mykey]), 1)
8327 mykey_orig = mykey[:]
8328 for vkey in virts[mykey]:
8329 # The virtuals file can contain a versioned atom, so
8330 # it may be necessary to remove the operator and
8331 # version from the atom before it is passed into
8333 if mydb.cp_list(dep_getkey(vkey), use_cache=use_cache):
8335 writemsg(_("virts chosen: %s\n") % (mykey), 1)
8337 if mykey == mykey_orig:
8338 mykey = str(virts[mykey][0])
8339 writemsg(_("virts defaulted: %s\n") % (mykey), 1)
8340 #we only perform virtual expansion if we are passed a dbapi
8342 #specific cpv, no category, ie. "foo-1.0"
# No category: collect every category that carries this package name.
8350 if mydb and hasattr(mydb, "categories"):
8351 for x in mydb.categories:
8352 if mydb.cp_list(x+"/"+myp,use_cache=use_cache):
8353 matches.append(x+"/"+myp)
8354 if len(matches) > 1:
8355 virtual_name_collision = False
8356 if len(matches) == 2:
8358 if not x.startswith("virtual/"):
8359 # Assume that the non-virtual is desired. This helps
8360 # avoid the ValueError for invalid deps that come from
8361 # installed packages (during reverse blocker detection,
8365 virtual_name_collision = True
8366 if not virtual_name_collision:
8367 # AmbiguousPackageName inherits from ValueError,
8368 # for backward compatibility with calling code
8369 # that already handles ValueError.
8370 raise portage.exception.AmbiguousPackageName(matches)
8374 if not mykey and not isinstance(mydb, list):
8376 mykey=virts_p[myp][0]
8377 #again, we only perform virtual expansion if we have a dbapi (not a list)
# Reassemble cpv; the "-r0" revision suffix is dropped by convention.
8381 if mysplit[2]=="r0":
8382 return mykey+"-"+mysplit[1]
8384 return mykey+"-"+mysplit[1]+"-"+mysplit[2]
# getmaskingreason(): looks up the package.mask comment that applies to
# mycpv by scanning package.mask files in all profile/overlay locations;
# returns the comment (and optionally the file location).
# NOTE(review): interior lines are elided from this listing (8392,
# 8395, 8399, 8401, 8403, 8407, 8410, 8419-8420, 8422, 8426-8428,
# 8433-8437, 8439, 8441-8442, 8444-8445, 8449-8454) — code kept
# byte-identical.
8388 def getmaskingreason(mycpv, metadata=None, settings=None, portdb=None, return_location=False):
8389 from portage.util import grablines
8390 if settings is None:
8391 settings = globals()["settings"]
8393 portdb = globals()["portdb"]
8394 mysplit = catpkgsplit(mycpv)
8396 raise ValueError(_("invalid CPV: %s") % mycpv)
8397 if metadata is None:
8398 db_keys = list(portdb._aux_cache_keys)
8400 metadata = dict(zip(db_keys, portdb.aux_get(mycpv, db_keys)))
8402 if not portdb.cpv_exists(mycpv):
8404 if metadata is None:
8405 # Can't access SLOT due to corruption.
8406 cpv_slot_list = [mycpv]
8408 cpv_slot_list = ["%s:%s" % (mycpv, metadata["SLOT"])]
8409 mycp=mysplit[0]+"/"+mysplit[1]
8411 # XXX- This is a temporary duplicate of code from the config constructor.
8412 locations = [os.path.join(settings["PORTDIR"], "profiles")]
8413 locations.extend(settings.profiles)
8414 for ov in settings["PORTDIR_OVERLAY"].split():
8415 profdir = os.path.join(normalize_path(ov), "profiles")
8416 if os.path.isdir(profdir):
8417 locations.append(profdir)
8418 locations.append(os.path.join(settings["PORTAGE_CONFIGROOT"],
8421 pmasklists = [(x, grablines(os.path.join(x, "package.mask"), recursive=1)) for x in locations]
8423 if mycp in settings.pmaskdict:
8424 for x in settings.pmaskdict[mycp]:
8425 if match_from_list(x, cpv_slot_list):
# Scan each package.mask looking for the comment block that precedes
# the matching mask atom.
8429 for pmask in pmasklists:
8430 pmask_filename = os.path.join(pmask[0], "package.mask")
8431 for i in range(len(pmask[1])):
8432 l = pmask[1][i].strip()
8438 comment_valid = i + 1
8440 if comment_valid != i:
8443 return (comment, pmask_filename)
8446 elif comment_valid != -1:
8447 # Apparently this comment applies to multiple masks, so
8448 # it remains valid until a blank line is encountered.
# getmaskingstatus(): returns a list of human-readable reasons why mycpv
# is masked (profile, package.mask, EAPI, keywords, LICENSE, PROPERTIES);
# an empty list presumably means unmasked.
# NOTE(review): interior lines are elided from this listing (8458,
# 8460-8462, 8465-8466, 8469, 8471, 8475, 8477, 8479, 8484, 8487-8490,
# 8493, 8497-8498, 8505, 8516, 8518, 8521-8522, 8526, 8530-8531,
# 8533-8535, 8537-8538, 8540-8546, 8549-8557, 8559-8560, 8562-8565,
# 8578-8579, 8592, 8597-8598) — code kept byte-identical.
8455 def getmaskingstatus(mycpv, settings=None, portdb=None):
8456 if settings is None:
8457 settings = config(clone=globals()["settings"])
8459 portdb = globals()["portdb"]
8463 if not isinstance(mycpv, basestring):
8464 # emerge passed in a Package instance
8467 metadata = pkg.metadata
8468 installed = pkg.installed
8470 mysplit = catpkgsplit(mycpv)
8472 raise ValueError(_("invalid CPV: %s") % mycpv)
8473 if metadata is None:
8474 db_keys = list(portdb._aux_cache_keys)
8476 metadata = dict(zip(db_keys, portdb.aux_get(mycpv, db_keys)))
8478 if not portdb.cpv_exists(mycpv):
8480 return ["corruption"]
# LICENSE with USE-conditionals needs PORTAGE_USE to be evaluated.
8481 if "?" in metadata["LICENSE"]:
8482 settings.setcpv(mycpv, mydb=metadata)
8483 metadata["USE"] = settings["PORTAGE_USE"]
8485 metadata["USE"] = ""
8486 mycp=mysplit[0]+"/"+mysplit[1]
8491 if settings._getProfileMaskAtom(mycpv, metadata):
8492 rValue.append("profile")
8494 # package.mask checking
8495 if settings._getMaskAtom(mycpv, metadata):
8496 rValue.append("package.mask")
8499 eapi = metadata["EAPI"]
8500 mygroups = settings._getKeywords(mycpv, metadata)
8501 licenses = metadata["LICENSE"]
8502 properties = metadata["PROPERTIES"]
8503 slot = metadata["SLOT"]
8504 if eapi.startswith("-"):
8506 if not eapi_is_supported(eapi):
8507 return ["EAPI %s" % eapi]
8508 elif _eapi_is_deprecated(eapi) and not installed:
8509 return ["EAPI %s" % eapi]
8510 egroups = settings.configdict["backupenv"].get(
8511 "ACCEPT_KEYWORDS", "").split()
8512 pgroups = settings["ACCEPT_KEYWORDS"].split()
8513 myarch = settings["ARCH"]
8514 if pgroups and myarch not in pgroups:
8515 """For operating systems other than Linux, ARCH is not necessarily a
8517 myarch = pgroups[0].lstrip("~")
8519 cp = dep_getkey(mycpv)
8520 pkgdict = settings.pkeywordsdict.get(cp)
8523 cpv_slot_list = ["%s:%s" % (mycpv, metadata["SLOT"])]
8524 for atom, pkgkeywords in pkgdict.items():
8525 if match_from_list(atom, cpv_slot_list):
8527 pgroups.extend(pkgkeywords)
8528 if matches or egroups:
8529 pgroups.extend(egroups)
# "-kw" entries remove a previously accepted keyword.
8532 if x.startswith("-"):
8536 inc_pgroups.discard(x[1:])
8539 pgroups = inc_pgroups
8547 for keyword in pgroups:
8548 if keyword in mygroups:
8558 elif gp=="-"+myarch and myarch in pgroups:
8561 elif gp=="~"+myarch and myarch in pgroups:
# Report only the license/property tokens that are actually missing,
# keeping the ||/() structure for readability.
8566 missing_licenses = settings._getMissingLicenses(mycpv, metadata)
8567 if missing_licenses:
8568 allowed_tokens = set(["||", "(", ")"])
8569 allowed_tokens.update(missing_licenses)
8570 license_split = licenses.split()
8571 license_split = [x for x in license_split \
8572 if x in allowed_tokens]
8573 msg = license_split[:]
8574 msg.append("license(s)")
8575 rValue.append(" ".join(msg))
8576 except portage.exception.InvalidDependString as e:
8577 rValue.append("LICENSE: "+str(e))
8580 missing_properties = settings._getMissingProperties(mycpv, metadata)
8581 if missing_properties:
8582 allowed_tokens = set(["||", "(", ")"])
8583 allowed_tokens.update(missing_properties)
8584 properties_split = properties.split()
8585 properties_split = [x for x in properties_split \
8586 if x in allowed_tokens]
8587 msg = properties_split[:]
8588 msg.append("properties")
8589 rValue.append(" ".join(msg))
8590 except portage.exception.InvalidDependString as e:
8591 rValue.append("PROPERTIES: "+str(e))
8593 # Only show KEYWORDS masks for installed packages
8594 # if they're not masked for any other reason.
8595 if kmask and (not installed or not rValue):
8596 rValue.append(kmask+" keyword")
8601 'DEPEND', 'RDEPEND', 'SLOT', 'SRC_URI',
8602 'RESTRICT', 'HOMEPAGE', 'LICENSE', 'DESCRIPTION',
8603 'KEYWORDS', 'INHERITED', 'IUSE', 'UNUSED_00',
8604 'PDEPEND', 'PROVIDE', 'EAPI',
8605 'PROPERTIES', 'DEFINED_PHASES', 'UNUSED_05', 'UNUSED_04',
8606 'UNUSED_03', 'UNUSED_02', 'UNUSED_01',
8608 auxdbkeylen=len(auxdbkeys)
8610 from portage.dbapi import dbapi
8611 from portage.dbapi.virtual import fakedbapi
8612 from portage.dbapi.bintree import bindbapi, binarytree
8613 from portage.dbapi.vartree import vardbapi, vartree, dblink
8614 from portage.dbapi.porttree import close_portdbapi_caches, portdbapi, portagetree
# FetchlistDict: read-only Mapping over the fetch lists of all packages
# in one ebuild directory (see class docstring below).
# NOTE(review): lines 8636-8637, 8639-8640, 8644-8645, 8648 are elided
# from this listing (presumably the __iter__/__len__/keys def lines) —
# code kept byte-identical.
8616 class FetchlistDict(portage.cache.mappings.Mapping):
8617 """This provide a mapping interface to retrieve fetch lists. It's used
8618 to allow portage.manifest.Manifest to access fetch lists via a standard
8619 mapping interface rather than use the dbapi directly."""
8620 def __init__(self, pkgdir, settings, mydbapi):
8621 """pkgdir is a directory containing ebuilds and settings is passed into
8622 portdbapi.getfetchlist for __getitem__ calls."""
8623 self.pkgdir = pkgdir
# cp is the trailing "category/package" of the ebuild directory path.
8624 self.cp = os.sep.join(pkgdir.split(os.sep)[-2:])
8625 self.settings = settings
8626 self.mytree = os.path.realpath(os.path.dirname(os.path.dirname(pkgdir)))
8627 self.portdb = mydbapi
8628 def __getitem__(self, pkg_key):
8629 """Returns the complete fetch list for a given package."""
8630 return list(self.portdb.getFetchMap(pkg_key, mytree=self.mytree))
8631 def __contains__(self, cpv):
8632 return cpv in self.__iter__()
8633 def has_key(self, pkg_key):
8634 """Returns true if the given package exists within pkgdir."""
8635 return pkg_key in self
8638 return iter(self.portdb.cp_list(self.cp, mytree=self.mytree))
8641 """This needs to be implemented in order to avoid
8642 infinite recursion in some cases."""
8643 return len(self.portdb.cp_list(self.cp, mytree=self.mytree))
8646 """Returns keys for all packages within pkgdir"""
8647 return self.portdb.cp_list(self.cp, mytree=self.mytree)
8649 if sys.hexversion >= 0x3000000:
# pkgmerge(): merges a binary .tbz2 package — extracts metadata and
# image into a temp build dir, runs the "setup" ebuild phase, and
# delegates the install to dblink.merge(); cleans up afterwards.
# NOTE(review): interior lines are elided from this listing (8657-8658,
# 8660, 8664-8668, 8670-8671, 8675, 8680, 8682-8683, 8687, 8701,
# 8705-8706, 8716-8717, 8723, 8729-8730, 8737, 8739-8740, 8747-8748,
# 8750, 8752, 8759-8760, 8764-8766), including the try/finally
# structure — code kept byte-identical.
8652 def pkgmerge(mytbz2, myroot, mysettings, mydbapi=None,
8653 vartree=None, prev_mtimes=None, blockers=None):
8654 """will merge a .tbz2 file, returning a list of runtime dependencies
8655 that must be satisfied, or None if there was a merge error. This
8656 code assumes the package exists."""
8659 mydbapi = db[myroot]["bintree"].dbapi
8661 vartree = db[myroot]["vartree"]
8662 if mytbz2[-5:]!=".tbz2":
8663 print(_("!!! Not a .tbz2 file"))
8669 did_merge_phase = False
8672 """ Don't lock the tbz2 file because the filesystem could be readonly or
8673 shared by a cluster."""
8674 #tbz2_lock = portage.locks.lockfile(mytbz2, wantnewlockfile=1)
8676 mypkg = os.path.basename(mytbz2)[:-5]
8677 xptbz2 = portage.xpak.tbz2(mytbz2)
8678 mycat = xptbz2.getfile(_unicode_encode("CATEGORY",
8679 encoding=_encodings['repo.content']))
8681 writemsg(_("!!! CATEGORY info missing from info chunk, aborting...\n"),
8684 mycat = _unicode_decode(mycat,
8685 encoding=_encodings['repo.content'], errors='replace')
8686 mycat = mycat.strip()
8688 # These are the same directories that would be used at build time.
8689 builddir = os.path.join(
8690 mysettings["PORTAGE_TMPDIR"], "portage", mycat, mypkg)
8691 catdir = os.path.dirname(builddir)
8692 pkgloc = os.path.join(builddir, "image")
8693 infloc = os.path.join(builddir, "build-info")
8694 myebuild = os.path.join(
8695 infloc, os.path.basename(mytbz2)[:-4] + "ebuild")
8696 portage.util.ensure_dirs(os.path.dirname(catdir),
8697 uid=portage_uid, gid=portage_gid, mode=0o70, mask=0)
8698 catdir_lock = portage.locks.lockdir(catdir)
8699 portage.util.ensure_dirs(catdir,
8700 uid=portage_uid, gid=portage_gid, mode=0o70, mask=0)
# Clear any stale build dir from a previous run.
8702 shutil.rmtree(builddir)
8703 except (IOError, OSError) as e:
8704 if e.errno != errno.ENOENT:
8707 for mydir in (builddir, pkgloc, infloc):
8708 portage.util.ensure_dirs(mydir, uid=portage_uid,
8709 gid=portage_gid, mode=0o755)
8710 writemsg_stdout(_(">>> Extracting info\n"))
8711 xptbz2.unpackinfo(infloc)
8712 mysettings.setcpv(mycat + "/" + mypkg, mydb=mydbapi)
8713 # Store the md5sum in the vdb.
8714 fp = open(_unicode_encode(os.path.join(infloc, 'BINPKGMD5')), 'w')
8715 fp.write(str(portage.checksum.perform_md5(mytbz2))+"\n")
8718 # This gives bashrc users an opportunity to do various things
8719 # such as remove binary packages after they're installed.
8720 mysettings["PORTAGE_BINPKG_FILE"] = mytbz2
8721 mysettings.backup_changes("PORTAGE_BINPKG_FILE")
8722 debug = mysettings.get("PORTAGE_DEBUG", "") == "1"
8724 # Eventually we'd like to pass in the saved ebuild env here.
8725 retval = doebuild(myebuild, "setup", myroot, mysettings, debug=debug,
8726 tree="bintree", mydbapi=mydbapi, vartree=vartree)
8727 if retval != os.EX_OK:
8728 writemsg(_("!!! Setup failed: %s\n") % retval, noiselevel=-1)
8731 writemsg_stdout(_(">>> Extracting %s\n") % mypkg)
8732 retval = portage.process.spawn_bash(
8733 "bzip2 -dqc -- '%s' | tar -xp -C '%s' -f -" % (mytbz2, pkgloc),
8734 env=mysettings.environ())
8735 if retval != os.EX_OK:
8736 writemsg(_("!!! Error Extracting '%s'\n") % mytbz2, noiselevel=-1)
8738 #portage.locks.unlockfile(tbz2_lock)
8741 mylink = dblink(mycat, mypkg, myroot, mysettings, vartree=vartree,
8742 treetype="bintree", blockers=blockers)
8743 retval = mylink.merge(pkgloc, infloc, myroot, myebuild, cleanup=0,
8744 mydbapi=mydbapi, prev_mtimes=prev_mtimes)
8745 did_merge_phase = True
8746 success = retval == os.EX_OK
8749 mysettings.pop("PORTAGE_BINPKG_FILE", None)
8751 portage.locks.unlockfile(tbz2_lock)
8753 if not did_merge_phase:
8754 # The merge phase handles this already. Callers don't know how
8755 # far this function got, so we have to call elog_process() here
8756 # so that it's only called once.
8757 from portage.elog import elog_process
8758 elog_process(mycat + "/" + mypkg, mysettings)
# Final cleanup of the temporary build dir.
8761 shutil.rmtree(builddir)
8762 except (IOError, OSError) as e:
8763 if e.errno != errno.ENOENT:
# deprecated_profile_check(): if the profile's "deprecated" marker file
# exists, prints upgrade instructions (first line is the suggested new
# profile, remaining lines are upgrade steps).
# NOTE(review): lines 8768, 8774, 8782, 8785, 8795-8796 are elided from
# this listing — code kept byte-identical.
8767 def deprecated_profile_check(settings=None):
8769 if settings is not None:
8770 config_root = settings["PORTAGE_CONFIGROOT"]
8771 deprecated_profile_file = os.path.join(config_root,
8772 DEPRECATED_PROFILE_FILE)
# No readable marker file means the profile is not deprecated.
8773 if not os.access(deprecated_profile_file, os.R_OK):
8775 dcontent = codecs.open(_unicode_encode(deprecated_profile_file,
8776 encoding=_encodings['fs'], errors='strict'),
8777 mode='r', encoding=_encodings['content'], errors='replace').readlines()
8778 writemsg(colorize("BAD", _("\n!!! Your current profile is "
8779 "deprecated and not supported anymore.")) + "\n", noiselevel=-1)
8780 writemsg(colorize("BAD", _("!!! Use eselect profile to update your "
8781 "profile.")) + "\n", noiselevel=-1)
8783 writemsg(colorize("BAD", _("!!! Please refer to the "
8784 "Gentoo Upgrading Guide.")) + "\n", noiselevel=-1)
8786 newprofile = dcontent[0]
8787 writemsg(colorize("BAD", _("!!! Please upgrade to the "
8788 "following profile if possible:")) + "\n", noiselevel=-1)
8789 writemsg(8*" " + colorize("GOOD", newprofile) + "\n", noiselevel=-1)
8790 if len(dcontent) > 1:
8791 writemsg(_("To upgrade do the following steps:\n"), noiselevel=-1)
8792 for myline in dcontent[1:]:
8793 writemsg(myline, noiselevel=-1)
8794 writemsg("\n\n", noiselevel=-1)
def getvirtuals(myroot):
	"""
	Return virtual package settings for the given root.

	Deprecated: callers should use settings.getvirtuals() directly.
	"""
	writemsg("--- DEPRECATED call to getvirtual\n")
	return settings.getvirtuals(myroot)
def commit_mtimedb(mydict=None, filename=None):
	"""
	Persist an mtimedb dictionary to disk as a pickled plain dict.

	@param mydict: the data to persist; when None, the global mtimedb
		instance (if any) is committed instead
	@param filename: target path; defaults to the global mtimedbfile
	"""
	if mydict is None:
		# No dict supplied: commit the global mtimedb instance, if one
		# has been created.
		global mtimedb
		if "mtimedb" not in globals() or mtimedb is None:
			return
		mtimedb.commit()
		return
	if filename is None:
		global mtimedbfile
		filename = mtimedbfile
	mydict["version"] = VERSION
	d = {} # for full backward compat, pickle it as a plain dict object.
	d.update(mydict)
	try:
		f = atomic_ofstream(filename, mode='wb')
		pickle.dump(d, f, protocol=2)
		f.close()
		portage.util.apply_secpass_permissions(filename,
			uid=uid, gid=portage_gid, mode=0o644)
	except (IOError, OSError) as e:
		# Best-effort persistence: failure to write the mtimedb is
		# not fatal.
		pass
# NOTE(review): the "def portageexit():" header line was lost in this
# mangled source; reconstructed here so the atexit registration below
# has a callable to register.
def portageexit():
	"""atexit hook: flush portdbapi caches before interpreter shutdown."""
	global uid, portage_gid, portdb, db
	# Skip when unprivileged or when running inside the sandbox, where
	# cache writes would be rejected or pollute the sandbox log.
	if secpass and os.environ.get("SANDBOX_ON") != "1":
		close_portdbapi_caches()

atexit_register(portageexit)
def _global_updates(trees, prev_mtimes):
	"""
	Perform new global updates if they exist in $PORTDIR/profiles/updates/.

	@param trees: A dictionary containing portage trees.
	@type trees: dict
	@param prev_mtimes: A dictionary containing mtimes of files located in
		$PORTDIR/profiles/updates/.
	@type prev_mtimes: dict
	@rtype: None or List
	@return: None if there were no updates, otherwise a list of update
		commands that have been performed.
	"""
	# only do this if we're root and not running repoman/ebuild digest
	if secpass < 2 or "SANDBOX_ACTIVE" in os.environ:
		return
	root = "/"
	mysettings = trees["/"]["vartree"].settings
	updpath = os.path.join(mysettings["PORTDIR"], "profiles", "updates")

	try:
		if mysettings["PORTAGE_CALLER"] == "fixpackages":
			# fixpackages reprocesses every update file, ignoring mtimes.
			update_data = grab_updates(updpath)
		else:
			update_data = grab_updates(updpath, prev_mtimes)
	except portage.exception.DirectoryNotFound:
		writemsg(_("--- 'profiles/updates' is empty or "
			"not available. Empty portage tree?\n"), noiselevel=1)
		return
	myupd = None
	if len(update_data) > 0:
		do_upgrade_packagesmessage = 0
		myupd = []
		timestamps = {}
		for mykey, mystat, mycontent in update_data:
			writemsg_stdout("\n\n")
			writemsg_stdout(colorize("GOOD",
				_("Performing Global Updates: "))+bold(mykey)+"\n")
			writemsg_stdout(_("(Could take a couple of minutes if you have a lot of binary packages.)\n"))
			writemsg_stdout(_("  %s='update pass'  %s='binary update'  "
				"%s='/var/db update'  %s='/var/db move'\n"
				"  %s='/var/db SLOT move'  %s='binary move'  "
				"%s='binary SLOT move'\n  %s='update /etc/portage/package.*'\n") % \
				(bold("."), bold("*"), bold("#"), bold("@"), bold("s"), bold("%"), bold("S"), bold("p")))
			valid_updates, errors = parse_updates(mycontent)
			myupd.extend(valid_updates)
			writemsg_stdout(len(valid_updates) * "." + "\n")
			if len(errors) == 0:
				# Update our internal mtime since we
				# processed all of our directives.
				# long() kept for py2 compatibility of this module.
				timestamps[mykey] = long(mystat.st_mtime)
			else:
				for msg in errors:
					writemsg("%s\n" % msg, noiselevel=-1)

		# Apply the accumulated package moves to the world file.
		world_file = os.path.join(root, WORLD_FILE)
		world_list = grabfile(world_file)
		world_modified = False
		for update_cmd in myupd:
			for pos, atom in enumerate(world_list):
				new_atom = update_dbentry(update_cmd, atom)
				if atom != new_atom:
					world_list[pos] = new_atom
					world_modified = True
		if world_modified:
			world_list.sort()
			write_atomic(world_file,
				"".join("%s\n" % (x,) for x in world_list))

		update_config_files("/",
			mysettings.get("CONFIG_PROTECT","").split(),
			mysettings.get("CONFIG_PROTECT_MASK","").split(),
			myupd)

		# Recreate the bintree so that it picks up any changes on disk.
		trees["/"]["bintree"] = binarytree("/", mysettings["PKGDIR"],
			settings=mysettings)
		vardb = trees["/"]["vartree"].dbapi
		bindb = trees["/"]["bintree"].dbapi
		if not os.access(bindb.bintree.pkgdir, os.W_OK):
			# Binary packages cannot be updated without write access.
			bindb = None
		for update_cmd in myupd:
			if update_cmd[0] == "move":
				moves = vardb.move_ent(update_cmd)
				if moves:
					writemsg_stdout(moves * "@")
				if bindb:
					moves = bindb.move_ent(update_cmd)
					if moves:
						writemsg_stdout(moves * "%")
			elif update_cmd[0] == "slotmove":
				moves = vardb.move_slot_ent(update_cmd)
				if moves:
					writemsg_stdout(moves * "s")
				if bindb:
					moves = bindb.move_slot_ent(update_cmd)
					if moves:
						writemsg_stdout(moves * "S")

		# The above global updates proceed quickly, so they
		# are considered a single mtimedb transaction.
		if len(timestamps) > 0:
			# We do not update the mtime in the mtimedb
			# until after _all_ of the above updates have
			# been processed because the mtimedb will
			# automatically commit when killed by ctrl C.
			for mykey, mtime in timestamps.items():
				prev_mtimes[mykey] = mtime

		# We gotta do the brute force updates for these now.
		if mysettings["PORTAGE_CALLER"] == "fixpackages" or \
		"fixpackages" in mysettings.features:
			def onUpdate(maxval, curval):
				if curval > 0:
					writemsg_stdout("#")
			vardb.update_ents(myupd, onUpdate=onUpdate)
			if bindb:
				def onUpdate(maxval, curval):
					if curval > 0:
						writemsg_stdout("*")
				bindb.update_ents(myupd, onUpdate=onUpdate)
		else:
			do_upgrade_packagesmessage = 1

		# Update progress above is indicated by characters written to stdout so
		# we print a couple new lines here to separate the progress output from
		# what follows.
		print()
		print()

		if do_upgrade_packagesmessage and bindb and \
			bindb.cpv_all():
			writemsg_stdout(_(" ** Skipping packages. Run 'fixpackages' or set it in FEATURES to fix the tbz2's in the packages directory.\n"))
			writemsg_stdout(bold(_("Note: This can take a very long time.")))
			writemsg_stdout("\n")

	return myupd
8971 #continue setting up other trees
class MtimeDB(dict):
	"""A dict subclass backed by the on-disk pickled mtimedb file.

	Contents are loaded from *filename* at construction time and written
	back (via commit_mtimedb) only when the data has actually changed.
	"""

	def __init__(self, filename):
		dict.__init__(self)
		# Path of the backing pickle file; commit() is a no-op when falsy.
		self.filename = filename
		self._load(filename)

	def _load(self, filename):
		"""Populate self from the pickle at filename, sanitizing keys."""
		try:
			f = open(_unicode_encode(filename), 'rb')
			mypickle = pickle.Unpickler(f)
			try:
				# Security: disable resolution of arbitrary globals while
				# unpickling, since pickle can otherwise execute code.
				mypickle.find_global = None
			except AttributeError:
				# TODO: If py3k, override Unpickler.find_class().
				pass
			d = mypickle.load()
			f.close()
			del f
		except (IOError, OSError, EOFError, ValueError, pickle.UnpicklingError) as e:
			if isinstance(e, pickle.UnpicklingError):
				writemsg(_("!!! Error loading '%s': %s\n") % \
					(filename, str(e)), noiselevel=-1)
			del e
			# A missing or corrupt mtimedb is treated as empty.
			d = {}

		# Migrate the legacy "old"/"cur" layout to "updates".
		if "old" in d:
			d["updates"] = d["old"]
			del d["old"]
		if "cur" in d:
			del d["cur"]

		d.setdefault("starttime", 0)
		d.setdefault("version", "")
		for k in ("info", "ldpath", "updates"):
			d.setdefault(k, {})

		mtimedbkeys = set(("info", "ldpath", "resume", "resume_backup",
			"starttime", "updates", "version"))

		# Drop anything that is not a recognized mtimedb key.
		for k in list(d):
			if k not in mtimedbkeys:
				writemsg(_("Deleting invalid mtimedb key: %s\n") % str(k))
				del d[k]
		self.update(d)
		# Snapshot of last-loaded/committed state for change detection.
		self._clean_data = copy.deepcopy(d)

	def commit(self):
		"""Write the current state to disk if it has changed."""
		if not self.filename:
			return
		d = {}
		d.update(self)
		# Only commit if the internal state has changed.
		if d != self._clean_data:
			commit_mtimedb(mydict=d, filename=self.filename)
			self._clean_data = copy.deepcopy(d)
def create_trees(config_root=None, target_root=None, trees=None):
	"""
	Build the {root: {"vartree"/"porttree"/"bintree"/"virtuals": ...}}
	mapping of lazily-instantiated portage trees.

	@param config_root: PORTAGE_CONFIGROOT override, or None
	@param target_root: ROOT override, or None
	@param trees: an existing trees dict to recycle; its portdbapi
		instances are closed and discarded first
	@return: the (possibly recycled) trees dict
	"""
	if trees is None:
		trees = {}
	else:
		# clean up any existing portdbapi instances
		for myroot in trees:
			portdb = trees[myroot]["porttree"].dbapi
			portdb.close_caches()
			portdbapi.portdbapi_instances.remove(portdb)
			del trees[myroot]["porttree"], myroot, portdb

	settings = config(config_root=config_root, target_root=target_root,
		config_incrementals=portage.const.INCREMENTALS)
	settings.lock()

	myroots = [(settings["ROOT"], settings)]
	if settings["ROOT"] != "/":
		# When ROOT != "/" we only want overrides from the calling
		# environment to apply to the config that's associated
		# with ROOT != "/", so pass an empty dict for the env parameter.
		settings = config(config_root=None, target_root="/", env={})
		settings.lock()
		myroots.append((settings["ROOT"], settings))

	for myroot, mysettings in myroots:
		# Trees are constructed lazily, on first access.
		trees[myroot] = portage.util.LazyItemsDict(trees.get(myroot, {}))
		trees[myroot].addLazySingleton("virtuals", mysettings.getvirtuals, myroot)
		trees[myroot].addLazySingleton(
			"vartree", vartree, myroot, categories=mysettings.categories,
				settings=mysettings)
		trees[myroot].addLazySingleton("porttree",
			portagetree, myroot, settings=mysettings)
		trees[myroot].addLazySingleton("bintree",
			binarytree, myroot, mysettings["PKGDIR"], settings=mysettings)
	return trees
class _LegacyGlobalProxy(proxy.objectproxy.ObjectProxy):
	"""
	Instances of these serve as proxies to global variables
	that are initialized on demand.
	"""

	__slots__ = ('_name',)

	def __init__(self, name):
		proxy.objectproxy.ObjectProxy.__init__(self)
		# Bypass ObjectProxy attribute interception when storing the
		# proxied variable's name on the proxy itself.
		object.__setattr__(self, '_name', name)

	def _get_target(self):
		# Trigger lazy initialization, then resolve the proxied name
		# from this module's globals.
		init_legacy_globals()
		name = object.__getattribute__(self, '_name')
		return globals()[name]
class _PortdbProxy(proxy.objectproxy.ObjectProxy):
	"""
	The portdb is initialized separately from the rest
	of the variables, since sometimes the other variables
	are needed while the portdb is not.
	"""

	__slots__ = ()

	def _get_target(self):
		# Make sure the other legacy globals (db, root) exist first.
		init_legacy_globals()
		global db, portdb, root, _portdb_initialized
		if not _portdb_initialized:
			portdb = db[root]["porttree"].dbapi
			_portdb_initialized = True
		# Fix: the mangled source lost this return; without it the
		# proxy would resolve to None.
		return portdb
class _MtimedbProxy(proxy.objectproxy.ObjectProxy):
	"""
	The mtimedb is independent from the portdb and other globals.
	"""

	__slots__ = ('_name',)

	def __init__(self, name):
		proxy.objectproxy.ObjectProxy.__init__(self)
		# Bypass ObjectProxy attribute interception when storing the
		# proxied variable's name on the proxy itself.
		object.__setattr__(self, '_name', name)

	def _get_target(self):
		global mtimedb, mtimedbfile, _mtimedb_initialized
		if not _mtimedb_initialized:
			# Construct the MtimeDB lazily, on first attribute access.
			mtimedbfile = os.path.join(os.path.sep,
				CACHE_PATH, "mtimedb")
			mtimedb = MtimeDB(mtimedbfile)
			_mtimedb_initialized = True
		name = object.__getattribute__(self, '_name')
		return globals()[name]
# Names of the module-level legacy globals that are exposed through the
# lazy proxy classes above and torn down by _disable_legacy_globals().
_legacy_global_var_names = ("archlist", "db", "features",
	"groups", "mtimedb", "mtimedbfile", "pkglines",
	"portdb", "profiledir", "root", "selinux_enabled",
	"settings", "thirdpartymirrors", "usedefaults")
def _disable_legacy_globals():
	"""
	This deletes the ObjectProxy instances that are used
	for lazy initialization of legacy global variables.
	The purpose of deleting them is to prevent new code
	from referencing these deprecated variables.
	"""
	global _legacy_global_var_names
	module_globals = globals()
	for var_name in _legacy_global_var_names:
		if var_name in module_globals:
			del module_globals[var_name]
9137 # Initialization of legacy globals. No functions/classes below this point
9138 # please! When the above functions and classes become independent of the
9139 # below global variables, it will be possible to make the below code
9140 # conditional on a backward compatibility flag (backward compatibility could
9141 # be disabled via an environment variable, for example). This will enable new
9142 # code that is aware of this flag to import portage without the unnecessary
9143 # overhead (and other issues!) of initializing the legacy globals.
def init_legacy_globals():
	"""
	Initialize the deprecated module-level globals (db, settings, root,
	archlist, ...) that the lazy proxies resolve to. Runs at most once.
	"""
	global _globals_initialized
	if _globals_initialized:
		return
	_globals_initialized = True

	global db, settings, root, portdb, selinux_enabled, mtimedbfile, mtimedb, \
	archlist, features, groups, pkglines, thirdpartymirrors, usedefaults, \
	profiledir, flushmtimedb

	# Portage needs to ensure a sane umask for the files it creates.
	os.umask(0o22)

	kwargs = {}
	for k, envvar in (("config_root", "PORTAGE_CONFIGROOT"), ("target_root", "ROOT")):
		kwargs[k] = os.environ.get(envvar, "/")

	# _initializing_globals guards against recursive initialization
	# while the trees are being constructed.
	global _initializing_globals
	_initializing_globals = True
	db = create_trees(**kwargs)
	del _initializing_globals

	settings = db["/"]["vartree"].settings

	# Prefer the settings of a non-"/" root when one exists.
	for myroot in db:
		if myroot != "/":
			settings = db[myroot]["vartree"].settings
			break

	root = settings["ROOT"]
	output._init(config_root=settings['PORTAGE_CONFIGROOT'])

	# ========================================================================
	# COMPATIBILITY
	# These attributes should not be used
	# within Portage under any circumstances.
	# ========================================================================
	archlist = settings.archlist()
	features = settings.features
	groups = settings["ACCEPT_KEYWORDS"].split()
	pkglines = settings.packages
	selinux_enabled = settings.selinux_enabled()
	thirdpartymirrors = settings.thirdpartymirrors()
	usedefaults = settings.use_defs
	profiledir = os.path.join(settings["PORTAGE_CONFIGROOT"], PROFILE_PATH)
	if not os.path.isdir(profiledir):
		profiledir = None
	def flushmtimedb(record):
		writemsg("portage.flushmtimedb() is DEPRECATED\n")
	# ========================================================================
	# COMPATIBILITY
	# These attributes should not be used
	# within Portage under any circumstances.
	# ========================================================================
# ========================================================================
# COMPATIBILITY
# These attributes should not be used
# within Portage under any circumstances.
# ========================================================================

# Lazy mtimedb/mtimedbfile globals: constructed on first access.
_mtimedb_initialized = False
mtimedb = _MtimedbProxy("mtimedb")
mtimedbfile = _MtimedbProxy("mtimedbfile")

# Lazy portdb global: initialized from db[root] on first access.
_portdb_initialized = False
portdb = _PortdbProxy()

_globals_initialized = False

# The remaining legacy globals resolve through init_legacy_globals().
# Fix: the mangled source lost the '"flushmtimedb"):' line closing this
# tuple; restored here.
for k in ("db", "settings", "root", "selinux_enabled",
	"archlist", "features", "groups",
	"pkglines", "thirdpartymirrors", "usedefaults", "profiledir",
	"flushmtimedb"):
	globals()[k] = _LegacyGlobalProxy(k)
9220 # ============================================================================
9221 # ============================================================================