33e1338ffb2cc5ff61c65d0e161202541314727c
[portage.git] / pym / portage / __init__.py
1 # portage.py -- core Portage functionality
2 # Copyright 1998-2009 Gentoo Foundation
3 # Distributed under the terms of the GNU General Public License v2
4 # $Id$
5
6 from __future__ import print_function
7
8 VERSION="$Rev$"[6:-2] + "-svn"
9
10 # ===========================================================================
11 # START OF IMPORTS -- START OF IMPORTS -- START OF IMPORTS -- START OF IMPORT
12 # ===========================================================================
13
14 try:
15         import sys
16         import codecs
17         import copy
18         import errno
19         if not hasattr(errno, 'ESTALE'):
20                 # ESTALE may not be defined on some systems, such as interix.
21                 errno.ESTALE = -1
22         import logging
23         import re
24         import time
25         import types
26         try:
27                 import cPickle as pickle
28         except ImportError:
29                 import pickle
30
31         import stat
32         try:
33                 from subprocess import getstatusoutput as subprocess_getstatusoutput
34         except ImportError:
35                 from commands import getstatusoutput as subprocess_getstatusoutput
36         from time import sleep
37         from random import shuffle
38         from itertools import chain
39         import platform
40         import warnings
41
42         # Temporarily delete these imports, to ensure that only the
43         # wrapped versions are imported by portage internals.
44         import os
45         del os
46         import shutil
47         del shutil
48
49 except ImportError as e:
50         sys.stderr.write("\n\n")
51         sys.stderr.write("!!! Failed to complete python imports. These are internal modules for\n")
52         sys.stderr.write("!!! python and failure here indicates that you have a problem with python\n")
53         sys.stderr.write("!!! itself and thus portage is not able to continue processing.\n\n")
54
55         sys.stderr.write("!!! You might consider starting python with verbose flags to see what has\n")
56         sys.stderr.write("!!! gone wrong. Here is the information we got for this exception:\n")
57         sys.stderr.write("    "+str(e)+"\n\n");
58         raise
59
60 try:
61         from portage.cache.cache_errors import CacheError
62         import portage.proxy.lazyimport
63         import portage.proxy as proxy
64         proxy.lazyimport.lazyimport(globals(),
65                 'portage.checksum',
66                 'portage.checksum:perform_checksum,perform_md5,prelink_capable',
67                 'portage.cvstree',
68                 'portage.data',
69                 'portage.data:lchown,ostype,portage_gid,portage_uid,secpass,' + \
70                         'uid,userland,userpriv_groups,wheelgid',
71                 'portage.dep',
72                 'portage.dep:best_match_to_list,dep_getcpv,dep_getkey,' + \
73                         'get_operator,isjustname,isspecific,isvalidatom,' + \
74                         'match_from_list,match_to_list',
75                 'portage.eclass_cache',
76                 'portage.env.loaders',
77                 'portage.exception',
78                 'portage.getbinpkg',
79                 'portage.locks',
80                 'portage.locks:lockdir,lockfile,unlockdir,unlockfile',
81                 'portage.mail',
82                 'portage.output',
83                 'portage.output:bold,colorize',
84                 'portage.process',
85                 'portage.process:atexit_register,run_exitfuncs',
86                 'portage.update:dep_transform,fixdbentries,grab_updates,' + \
87                         'parse_updates,update_config_files,update_dbentries,' + \
88                         'update_dbentry',
89                 'portage.util',
90                 'portage.util:atomic_ofstream,apply_secpass_permissions,' + \
91                         'apply_recursive_permissions,dump_traceback,getconfig,' + \
92                         'grabdict,grabdict_package,grabfile,grabfile_package,' + \
93                         'map_dictlist_vals,new_protect_filename,normalize_path,' + \
94                         'pickle_read,pickle_write,stack_dictlist,stack_dicts,' + \
95                         'stack_lists,unique_array,varexpand,writedict,writemsg,' + \
96                         'writemsg_stdout,write_atomic',
97                 'portage.versions',
98                 'portage.versions:best,catpkgsplit,catsplit,endversion_keys,' + \
99                         'suffix_value@endversion,pkgcmp,pkgsplit,vercmp,ververify',
100                 'portage.xpak',
101         )
102
103         import portage.const
104         from portage.const import VDB_PATH, PRIVATE_PATH, CACHE_PATH, DEPCACHE_PATH, \
105                 USER_CONFIG_PATH, MODULES_FILE_PATH, CUSTOM_PROFILE_PATH, PORTAGE_BASE_PATH, \
106                 PORTAGE_BIN_PATH, PORTAGE_PYM_PATH, PROFILE_PATH, LOCALE_DATA_PATH, \
107                 EBUILD_SH_BINARY, SANDBOX_BINARY, BASH_BINARY, \
108                 MOVE_BINARY, PRELINK_BINARY, WORLD_FILE, MAKE_CONF_FILE, MAKE_DEFAULTS_FILE, \
109                 DEPRECATED_PROFILE_FILE, USER_VIRTUALS_FILE, EBUILD_SH_ENV_FILE, \
110                 INVALID_ENV_FILE, CUSTOM_MIRRORS_FILE, CONFIG_MEMORY_FILE,\
111                 INCREMENTALS, EAPI, MISC_SH_BINARY, REPO_NAME_LOC, REPO_NAME_FILE
112
113         from portage.localization import _
114
115 except ImportError as e:
116         sys.stderr.write("\n\n")
117         sys.stderr.write("!!! Failed to complete portage imports. There are internal modules for\n")
118         sys.stderr.write("!!! portage and failure here indicates that you have a problem with your\n")
119         sys.stderr.write("!!! installation of portage. Please try a rescue portage located in the\n")
120         sys.stderr.write("!!! portage tree under '/usr/portage/sys-apps/portage/files/' (default).\n")
121         sys.stderr.write("!!! There is a README.RESCUE file that details the steps required to perform\n")
122         sys.stderr.write("!!! a recovery of portage.\n")
123         sys.stderr.write("    "+str(e)+"\n\n")
124         raise
125
126 if sys.hexversion >= 0x3000000:
127         basestring = str
128         long = int
129
130 # Assume utf_8 fs encoding everywhere except in merge code, where the
131 # user's locale is respected.
132 _encodings = {
133         'content'                : 'utf_8',
134         'fs'                     : 'utf_8',
135         'merge'                  : sys.getfilesystemencoding(),
136         'repo.content'           : 'utf_8',
137         'stdio'                  : 'utf_8',
138 }
139
140 # This can happen if python is built with USE=build (stage 1).
141 if _encodings['merge'] is None:
142         _encodings['merge'] = 'ascii'
143
144 if sys.hexversion >= 0x3000000:
145         def _unicode_encode(s, encoding=_encodings['content'], errors='backslashreplace'):
146                 if isinstance(s, str):
147                         s = s.encode(encoding, errors)
148                 return s
149
150         def _unicode_decode(s, encoding=_encodings['content'], errors='replace'):
151                 if isinstance(s, bytes):
152                         s = str(s, encoding=encoding, errors=errors)
153                 return s
154 else:
155         def _unicode_encode(s, encoding=_encodings['content'], errors='backslashreplace'):
156                 if isinstance(s, unicode):
157                         s = s.encode(encoding, errors)
158                 return s
159
160         def _unicode_decode(s, encoding=_encodings['content'], errors='replace'):
161                 if isinstance(s, bytes):
162                         s = unicode(s, encoding=encoding, errors=errors)
163                 return s
164
165 class _unicode_func_wrapper(object):
166         """
167         Wraps a function, converts arguments from unicode to bytes,
168         and return values to unicode from bytes. Function calls
169         will raise UnicodeEncodeError if an argument fails to be
170         encoded with the required encoding. Return values that
171         are single strings are decoded with errors='replace'. Return 
172         values that are lists of strings are decoded with errors='strict'
173         and elements that fail to be decoded are omitted from the returned
174         list.
175         """
176         __slots__ = ('_func', '_encoding')
177
178         def __init__(self, func, encoding=_encodings['fs']):
179                 self._func = func
180                 self._encoding = encoding
181
182         def __call__(self, *args, **kwargs):
183
184                 encoding = self._encoding
185                 wrapped_args = [_unicode_encode(x, encoding=encoding, errors='strict')
186                         for x in args]
187                 if kwargs:
188                         wrapped_kwargs = dict(
189                                 (k, _unicode_encode(v, encoding=encoding, errors='strict'))
190                                 for k, v in kwargs.items())
191                 else:
192                         wrapped_kwargs = {}
193
194                 rval = self._func(*wrapped_args, **wrapped_kwargs)
195
196                 if isinstance(rval, (list, tuple)):
197                         decoded_rval = []
198                         for x in rval:
199                                 try:
200                                         x = _unicode_decode(x, encoding=encoding, errors='strict')
201                                 except UnicodeDecodeError:
202                                         pass
203                                 else:
204                                         decoded_rval.append(x)
205
206                         if isinstance(rval, tuple):
207                                 rval = tuple(decoded_rval)
208                         else:
209                                 rval = decoded_rval
210                 else:
211                         rval = _unicode_decode(rval, encoding=encoding, errors='replace')
212
213                 return rval
214
215 class _unicode_module_wrapper(object):
216         """
217         Wraps a module and wraps all functions with _unicode_func_wrapper.
218         """
219         __slots__ = ('_mod', '_encoding', '_overrides', '_cache')
220
221         def __init__(self, mod, encoding=_encodings['fs'], overrides=None, cache=True):
222                 object.__setattr__(self, '_mod', mod)
223                 object.__setattr__(self, '_encoding', encoding)
224                 object.__setattr__(self, '_overrides', overrides)
225                 if cache:
226                         cache = {}
227                 else:
228                         cache = None
229                 object.__setattr__(self, '_cache', cache)
230
231         def __getattribute__(self, attr):
232                 cache = object.__getattribute__(self, '_cache')
233                 if cache is not None:
234                         result = cache.get(attr)
235                         if result is not None:
236                                 return result
237                 result = getattr(object.__getattribute__(self, '_mod'), attr)
238                 encoding = object.__getattribute__(self, '_encoding')
239                 overrides = object.__getattribute__(self, '_overrides')
240                 override = None
241                 if overrides is not None:
242                         override = overrides.get(id(result))
243                 if override is not None:
244                         result = override
245                 elif isinstance(result, type):
246                         pass
247                 elif type(result) is types.ModuleType:
248                         result = _unicode_module_wrapper(result,
249                                 encoding=encoding, overrides=overrides)
250                 elif hasattr(result, '__call__'):
251                         result = _unicode_func_wrapper(result, encoding=encoding)
252                 if cache is not None:
253                         cache[attr] = result
254                 return result
255
256 import os as _os
257 _os_overrides = {
258         id(_os.fdopen)        : _os.fdopen,
259         id(_os.popen)         : _os.popen,
260         id(_os.read)          : _os.read,
261         id(_os.statvfs)       : _os.statvfs,
262         id(_os.system)        : _os.system,
263 }
264
265 os = _unicode_module_wrapper(_os, overrides=_os_overrides,
266         encoding=_encodings['fs'])
267 _os_merge = _unicode_module_wrapper(_os,
268         encoding=_encodings['merge'], overrides=_os_overrides)
269
270 import shutil as _shutil
271 shutil = _unicode_module_wrapper(_shutil, encoding=_encodings['fs'])
272
273 # Imports below this point rely on the above unicode wrapper definitions.
274 _selinux = None
275 selinux = None
276 _selinux_merge = None
277 try:
278         import portage._selinux
279         selinux = _unicode_module_wrapper(_selinux,
280                 encoding=_encodings['fs'])
281         _selinux_merge = _unicode_module_wrapper(_selinux,
282                 encoding=_encodings['merge'])
283 except OSError as e:
284         sys.stderr.write("!!! SELinux not loaded: %s\n" % str(e))
285         del e
286 except ImportError:
287         pass
288
289 from portage.manifest import Manifest
290
291 # ===========================================================================
292 # END OF IMPORTS -- END OF IMPORTS -- END OF IMPORTS -- END OF IMPORTS -- END
293 # ===========================================================================
294
295 def _gen_missing_encodings(missing_encodings):
296
297         encodings = {}
298
299         if 'ascii' in missing_encodings:
300
301                 class AsciiIncrementalEncoder(codecs.IncrementalEncoder):
302                         def encode(self, input, final=False):
303                                 return codecs.ascii_encode(input, self.errors)[0]
304
305                 class AsciiIncrementalDecoder(codecs.IncrementalDecoder):
306                         def decode(self, input, final=False):
307                                 return codecs.ascii_decode(input, self.errors)[0]
308
309                 class AsciiStreamWriter(codecs.StreamWriter):
310                         encode = codecs.ascii_encode
311
312                 class AsciiStreamReader(codecs.StreamReader):
313                         decode = codecs.ascii_decode
314
315                 codec_info =  codecs.CodecInfo(
316                         name='ascii',
317                         encode=codecs.ascii_encode,
318                         decode=codecs.ascii_decode,
319                         incrementalencoder=AsciiIncrementalEncoder,
320                         incrementaldecoder=AsciiIncrementalDecoder,
321                         streamwriter=AsciiStreamWriter,
322                         streamreader=AsciiStreamReader,
323                 )
324
325                 for alias in ('ascii', '646', 'ansi_x3.4_1968', 'ansi_x3_4_1968',
326                         'ansi_x3.4_1986', 'cp367', 'csascii', 'ibm367', 'iso646_us',
327                         'iso_646.irv_1991', 'iso_ir_6', 'us', 'us_ascii'):
328                         encodings[alias] = codec_info
329
330         if 'utf_8' in missing_encodings:
331
332                 def utf8decode(input, errors='strict'):
333                         return codecs.utf_8_decode(input, errors, True)
334
335                 class Utf8IncrementalEncoder(codecs.IncrementalEncoder):
336                         def encode(self, input, final=False):
337                                 return codecs.utf_8_encode(input, self.errors)[0]
338
339                 class Utf8IncrementalDecoder(codecs.BufferedIncrementalDecoder):
340                         _buffer_decode = codecs.utf_8_decode
341
342                 class Utf8StreamWriter(codecs.StreamWriter):
343                         encode = codecs.utf_8_encode
344
345                 class Utf8StreamReader(codecs.StreamReader):
346                         decode = codecs.utf_8_decode
347
348                 codec_info = codecs.CodecInfo(
349                         name='utf-8',
350                         encode=codecs.utf_8_encode,
351                         decode=utf8decode,
352                         incrementalencoder=Utf8IncrementalEncoder,
353                         incrementaldecoder=Utf8IncrementalDecoder,
354                         streamreader=Utf8StreamReader,
355                         streamwriter=Utf8StreamWriter,
356                 )
357
358                 for alias in ('utf_8', 'u8', 'utf', 'utf8', 'utf8_ucs2', 'utf8_ucs4'):
359                         encodings[alias] = codec_info
360
361         return encodings
362
363 def _ensure_default_encoding():
364         """
365         The python that's inside stage 1 or 2 is built with a minimal
366         configuration which does not include the /usr/lib/pythonX.Y/encodings
367         directory. This results in error like the following:
368
369           LookupError: no codec search functions registered: can't find encoding
370
371         In order to solve this problem, detect it early and manually register
372         a search function for the ascii and utf_8 codecs. Starting with python-3.0
373         this problem is more noticeable because of stricter handling of encoding
374         and decoding between strings of characters and bytes.
375         """
376
377         default_fallback = 'utf_8'
378         default_encoding = sys.getdefaultencoding().lower().replace('-', '_')
379         filesystem_encoding = _encodings['merge'].lower().replace('-', '_')
380         required_encodings = set(['ascii', 'utf_8'])
381         required_encodings.add(default_encoding)
382         required_encodings.add(filesystem_encoding)
383         missing_encodings = set()
384         for codec_name in required_encodings:
385                 try:
386                         codecs.lookup(codec_name)
387                 except LookupError:
388                         missing_encodings.add(codec_name)
389
390         if not missing_encodings:
391                 return
392
393         encodings = _gen_missing_encodings(missing_encodings)
394
395         if default_encoding in missing_encodings and \
396                 default_encoding not in encodings:
397                 # Make the fallback codec correspond to whatever name happens
398                 # to be returned by sys.getfilesystemencoding().
399
400                 try:
401                         encodings[default_encoding] = codecs.lookup(default_fallback)
402                 except LookupError:
403                         encodings[default_encoding] = encodings[default_fallback]
404
405         if filesystem_encoding in missing_encodings and \
406                 filesystem_encoding not in encodings:
407                 # Make the fallback codec correspond to whatever name happens
408                 # to be returned by sys.getdefaultencoding().
409
410                 try:
411                         encodings[filesystem_encoding] = codecs.lookup(default_fallback)
412                 except LookupError:
413                         encodings[filesystem_encoding] = encodings[default_fallback]
414
415         def search_function(name):
416                 name = name.lower()
417                 name = name.replace('-', '_')
418                 codec_info = encodings.get(name)
419                 if codec_info is not None:
420                         return codecs.CodecInfo(
421                                 name=codec_info.name,
422                                 encode=codec_info.encode,
423                                 decode=codec_info.decode,
424                                 incrementalencoder=codec_info.incrementalencoder,
425                                 incrementaldecoder=codec_info.incrementaldecoder,
426                                 streamreader=codec_info.streamreader,
427                                 streamwriter=codec_info.streamwriter,
428                         )
429                 return None
430
431         codecs.register(search_function)
432
433         del codec_name, default_encoding, default_fallback, \
434                 filesystem_encoding, missing_encodings, \
435                 required_encodings, search_function
436
437 # Do this ASAP since writemsg() might not work without it.
438 _ensure_default_encoding()
439
440 def _shell_quote(s):
441         """
442         Quote a string in double-quotes and use backslashes to
443         escape any backslashes, double-quotes, dollar signs, or
444         backquotes in the string.
445         """
446         for letter in "\\\"$`":
447                 if letter in s:
448                         s = s.replace(letter, "\\" + letter)
449         return "\"%s\"" % s
450
451 bsd_chflags = None
452
453 if platform.system() in ('FreeBSD',):
454
455         class bsd_chflags(object):
456
457                 @classmethod
458                 def chflags(cls, path, flags, opts=""):
459                         cmd = 'chflags %s %o %s' % (opts, flags, _shell_quote(path))
460                         status, output = subprocess_getstatusoutput(cmd)
461                         if os.WIFEXITED(status) and os.WEXITSTATUS(status) == os.EX_OK:
462                                 return
463                         # Try to generate an ENOENT error if appropriate.
464                         if 'h' in opts:
465                                 _os_merge.lstat(path)
466                         else:
467                                 _os_merge.stat(path)
468                         # Make sure the binary exists.
469                         if not portage.process.find_binary('chflags'):
470                                 raise portage.exception.CommandNotFound('chflags')
471                         # Now we're not sure exactly why it failed or what
472                         # the real errno was, so just report EPERM.
473                         e = OSError(errno.EPERM, output)
474                         e.errno = errno.EPERM
475                         e.filename = path
476                         e.message = output
477                         raise e
478
479                 @classmethod
480                 def lchflags(cls, path, flags):
481                         return cls.chflags(path, flags, opts='-h')
482
483 def load_mod(name):
484         modname = ".".join(name.split(".")[:-1])
485         mod = __import__(modname)
486         components = name.split('.')
487         for comp in components[1:]:
488                 mod = getattr(mod, comp)
489         return mod
490
491 def best_from_dict(key, top_dict, key_order, EmptyOnError=1, FullCopy=1, AllowEmpty=1):
492         for x in key_order:
493                 if x in top_dict and key in top_dict[x]:
494                         if FullCopy:
495                                 return copy.deepcopy(top_dict[x][key])
496                         else:
497                                 return top_dict[x][key]
498         if EmptyOnError:
499                 return ""
500         else:
501                 raise KeyError("Key not found in list; '%s'" % key)
502
503 def getcwd():
504         "this fixes situations where the current directory doesn't exist"
505         try:
506                 return os.getcwd()
507         except OSError: #dir doesn't exist
508                 os.chdir("/")
509                 return "/"
510 getcwd()
511
512 def abssymlink(symlink):
513         "This reads symlinks, resolving the relative symlinks, and returning the absolute."
514         mylink=os.readlink(symlink)
515         if mylink[0] != '/':
516                 mydir=os.path.dirname(symlink)
517                 mylink=mydir+"/"+mylink
518         return os.path.normpath(mylink)
519
520 dircache = {}
521 cacheHit=0
522 cacheMiss=0
523 cacheStale=0
524 def cacheddir(my_original_path, ignorecvs, ignorelist, EmptyOnError, followSymlinks=True):
525         global cacheHit,cacheMiss,cacheStale
526         mypath = normalize_path(my_original_path)
527         if mypath in dircache:
528                 cacheHit += 1
529                 cached_mtime, list, ftype = dircache[mypath]
530         else:
531                 cacheMiss += 1
532                 cached_mtime, list, ftype = -1, [], []
533         try:
534                 pathstat = os.stat(mypath)
535                 if stat.S_ISDIR(pathstat[stat.ST_MODE]):
536                         mtime = pathstat.st_mtime
537                 else:
538                         raise portage.exception.DirectoryNotFound(mypath)
539         except EnvironmentError as e:
540                 if e.errno == portage.exception.PermissionDenied.errno:
541                         raise portage.exception.PermissionDenied(mypath)
542                 del e
543                 if EmptyOnError:
544                         return [], []
545                 return None, None
546         except portage.exception.PortageException:
547                 if EmptyOnError:
548                         return [], []
549                 return None, None
550         # Python retuns mtime in seconds, so if it was changed in the last few seconds, it could be invalid
551         if mtime != cached_mtime or time.time() - mtime < 4:
552                 if mypath in dircache:
553                         cacheStale += 1
554                 try:
555                         list = os.listdir(mypath)
556                 except EnvironmentError as e:
557                         if e.errno != errno.EACCES:
558                                 raise
559                         del e
560                         raise portage.exception.PermissionDenied(mypath)
561                 ftype = []
562                 for x in list:
563                         try:
564                                 if followSymlinks:
565                                         pathstat = os.stat(mypath+"/"+x)
566                                 else:
567                                         pathstat = os.lstat(mypath+"/"+x)
568
569                                 if stat.S_ISREG(pathstat[stat.ST_MODE]):
570                                         ftype.append(0)
571                                 elif stat.S_ISDIR(pathstat[stat.ST_MODE]):
572                                         ftype.append(1)
573                                 elif stat.S_ISLNK(pathstat[stat.ST_MODE]):
574                                         ftype.append(2)
575                                 else:
576                                         ftype.append(3)
577                         except (IOError, OSError):
578                                 ftype.append(3)
579                 dircache[mypath] = mtime, list, ftype
580
581         ret_list = []
582         ret_ftype = []
583         for x in range(0, len(list)):
584                 if list[x] in ignorelist:
585                         pass
586                 elif ignorecvs:
587                         if list[x][:2] != ".#":
588                                 ret_list.append(list[x])
589                                 ret_ftype.append(ftype[x])
590                 else:
591                         ret_list.append(list[x])
592                         ret_ftype.append(ftype[x])
593
594         writemsg("cacheddirStats: H:%d/M:%d/S:%d\n" % (cacheHit, cacheMiss, cacheStale),10)
595         return ret_list, ret_ftype
596
597 _ignorecvs_dirs = ('CVS', 'SCCS', '.svn', '.git')
598
599 def listdir(mypath, recursive=False, filesonly=False, ignorecvs=False, ignorelist=[], followSymlinks=True,
600         EmptyOnError=False, dirsonly=False):
601         """
602         Portage-specific implementation of os.listdir
603
604         @param mypath: Path whose contents you wish to list
605         @type mypath: String
606         @param recursive: Recursively scan directories contained within mypath
607         @type recursive: Boolean
608         @param filesonly; Only return files, not more directories
609         @type filesonly: Boolean
610         @param ignorecvs: Ignore CVS directories ('CVS','SCCS','.svn','.git')
611         @type ignorecvs: Boolean
612         @param ignorelist: List of filenames/directories to exclude
613         @type ignorelist: List
614         @param followSymlinks: Follow Symlink'd files and directories
615         @type followSymlinks: Boolean
616         @param EmptyOnError: Return [] if an error occurs.
617         @type EmptyOnError: Boolean
618         @param dirsonly: Only return directories.
619         @type dirsonly: Boolean
620         @rtype: List
621         @returns: A list of files and directories (or just files or just directories) or an empty list.
622         """
623
624         list, ftype = cacheddir(mypath, ignorecvs, ignorelist, EmptyOnError, followSymlinks)
625
626         if list is None:
627                 list=[]
628         if ftype is None:
629                 ftype=[]
630
631         if not (filesonly or dirsonly or recursive):
632                 return list
633
634         if recursive:
635                 x=0
636                 while x<len(ftype):
637                         if ftype[x] == 1 and not \
638                                 (ignorecvs and os.path.basename(list[x]) in _ignorecvs_dirs):
639                                 l,f = cacheddir(mypath+"/"+list[x], ignorecvs, ignorelist, EmptyOnError,
640                                         followSymlinks)
641
642                                 l=l[:]
643                                 for y in range(0,len(l)):
644                                         l[y]=list[x]+"/"+l[y]
645                                 list=list+l
646                                 ftype=ftype+f
647                         x+=1
648         if filesonly:
649                 rlist=[]
650                 for x in range(0,len(ftype)):
651                         if ftype[x]==0:
652                                 rlist=rlist+[list[x]]
653         elif dirsonly:
654                 rlist = []
655                 for x in range(0, len(ftype)):
656                         if ftype[x] == 1:
657                                 rlist = rlist + [list[x]]       
658         else:
659                 rlist=list
660
661         return rlist
662
663 def flatten(mytokens):
664         """this function now turns a [1,[2,3]] list into
665         a [1,2,3] list and returns it."""
666         newlist=[]
667         for x in mytokens:
668                 if isinstance(x, list):
669                         newlist.extend(flatten(x))
670                 else:
671                         newlist.append(x)
672         return newlist
673
674 #beautiful directed graph object
675
676 class digraph(object):
677         def __init__(self):
678                 """Create an empty digraph"""
679                 
680                 # { node : ( { child : priority } , { parent : priority } ) }
681                 self.nodes = {}
682                 self.order = []
683
684         def add(self, node, parent, priority=0):
685                 """Adds the specified node with the specified parent.
686                 
687                 If the dep is a soft-dep and the node already has a hard
688                 relationship to the parent, the relationship is left as hard."""
689                 
690                 if node not in self.nodes:
691                         self.nodes[node] = ({}, {}, node)
692                         self.order.append(node)
693                 
694                 if not parent:
695                         return
696                 
697                 if parent not in self.nodes:
698                         self.nodes[parent] = ({}, {}, parent)
699                         self.order.append(parent)
700
701                 priorities = self.nodes[node][1].get(parent)
702                 if priorities is None:
703                         priorities = []
704                         self.nodes[node][1][parent] = priorities
705                         self.nodes[parent][0][node] = priorities
706                 priorities.append(priority)
707                 priorities.sort()
708
709         def remove(self, node):
710                 """Removes the specified node from the digraph, also removing
711                 and ties to other nodes in the digraph. Raises KeyError if the
712                 node doesn't exist."""
713                 
714                 if node not in self.nodes:
715                         raise KeyError(node)
716                 
717                 for parent in self.nodes[node][1]:
718                         del self.nodes[parent][0][node]
719                 for child in self.nodes[node][0]:
720                         del self.nodes[child][1][node]
721                 
722                 del self.nodes[node]
723                 self.order.remove(node)
724
725         def difference_update(self, t):
726                 """
727                 Remove all given nodes from node_set. This is more efficient
728                 than multiple calls to the remove() method.
729                 """
730                 if isinstance(t, (list, tuple)) or \
731                         not hasattr(t, "__contains__"):
732                         t = frozenset(t)
733                 order = []
734                 for node in self.order:
735                         if node not in t:
736                                 order.append(node)
737                                 continue
738                         for parent in self.nodes[node][1]:
739                                 del self.nodes[parent][0][node]
740                         for child in self.nodes[node][0]:
741                                 del self.nodes[child][1][node]
742                         del self.nodes[node]
743                 self.order = order
744
745         def remove_edge(self, child, parent):
746                 """
747                 Remove edge in the direction from child to parent. Note that it is
748                 possible for a remaining edge to exist in the opposite direction.
749                 Any endpoint vertices that become isolated will remain in the graph.
750                 """
751
752                 # Nothing should be modified when a KeyError is raised.
753                 for k in parent, child:
754                         if k not in self.nodes:
755                                 raise KeyError(k)
756
757                 # Make sure the edge exists.
758                 if child not in self.nodes[parent][0]:
759                         raise KeyError(child)
760                 if parent not in self.nodes[child][1]:
761                         raise KeyError(parent)
762
763                 # Remove the edge.
764                 del self.nodes[child][1][parent]
765                 del self.nodes[parent][0][child]
766
767         def __iter__(self):
768                 return iter(self.order)
769
770         def contains(self, node):
771                 """Checks if the digraph contains mynode"""
772                 return node in self.nodes
773
774         def get(self, key, default=None):
775                 node_data = self.nodes.get(key, self)
776                 if node_data is self:
777                         return default
778                 return node_data[2]
779
780         def all_nodes(self):
781                 """Return a list of all nodes in the graph"""
782                 return self.order[:]
783
784         def child_nodes(self, node, ignore_priority=None):
785                 """Return all children of the specified node"""
786                 if ignore_priority is None:
787                         return list(self.nodes[node][0])
788                 children = []
789                 if hasattr(ignore_priority, '__call__'):
790                         for child, priorities in self.nodes[node][0].items():
791                                 for priority in priorities:
792                                         if not ignore_priority(priority):
793                                                 children.append(child)
794                                                 break
795                 else:
796                         for child, priorities in self.nodes[node][0].items():
797                                 if ignore_priority < priorities[-1]:
798                                         children.append(child)
799                 return children
800
801         def parent_nodes(self, node, ignore_priority=None):
802                 """Return all parents of the specified node"""
803                 if ignore_priority is None:
804                         return list(self.nodes[node][1])
805                 parents = []
806                 if hasattr(ignore_priority, '__call__'):
807                         for parent, priorities in self.nodes[node][1].items():
808                                 for priority in priorities:
809                                         if not ignore_priority(priority):
810                                                 parents.append(parent)
811                                                 break
812                 else:
813                         for parent, priorities in self.nodes[node][1].items():
814                                 if ignore_priority < priorities[-1]:
815                                         parents.append(parent)
816                 return parents
817
818         def leaf_nodes(self, ignore_priority=None):
819                 """Return all nodes that have no children
820                 
821                 If ignore_soft_deps is True, soft deps are not counted as
822                 children in calculations."""
823                 
824                 leaf_nodes = []
825                 if ignore_priority is None:
826                         for node in self.order:
827                                 if not self.nodes[node][0]:
828                                         leaf_nodes.append(node)
829                 elif hasattr(ignore_priority, '__call__'):
830                         for node in self.order:
831                                 is_leaf_node = True
832                                 for child, priorities in self.nodes[node][0].items():
833                                         for priority in priorities:
834                                                 if not ignore_priority(priority):
835                                                         is_leaf_node = False
836                                                         break
837                                         if not is_leaf_node:
838                                                 break
839                                 if is_leaf_node:
840                                         leaf_nodes.append(node)
841                 else:
842                         for node in self.order:
843                                 is_leaf_node = True
844                                 for child, priorities in self.nodes[node][0].items():
845                                         if ignore_priority < priorities[-1]:
846                                                 is_leaf_node = False
847                                                 break
848                                 if is_leaf_node:
849                                         leaf_nodes.append(node)
850                 return leaf_nodes
851
852         def root_nodes(self, ignore_priority=None):
853                 """Return all nodes that have no parents.
854                 
855                 If ignore_soft_deps is True, soft deps are not counted as
856                 parents in calculations."""
857                 
858                 root_nodes = []
859                 if ignore_priority is None:
860                         for node in self.order:
861                                 if not self.nodes[node][1]:
862                                         root_nodes.append(node)
863                 elif hasattr(ignore_priority, '__call__'):
864                         for node in self.order:
865                                 is_root_node = True
866                                 for parent, priorities in self.nodes[node][1].items():
867                                         for priority in priorities:
868                                                 if not ignore_priority(priority):
869                                                         is_root_node = False
870                                                         break
871                                         if not is_root_node:
872                                                 break
873                                 if is_root_node:
874                                         root_nodes.append(node)
875                 else:
876                         for node in self.order:
877                                 is_root_node = True
878                                 for parent, priorities in self.nodes[node][1].items():
879                                         if ignore_priority < priorities[-1]:
880                                                 is_root_node = False
881                                                 break
882                                 if is_root_node:
883                                         root_nodes.append(node)
884                 return root_nodes
885
886         def is_empty(self):
887                 """Checks if the digraph is empty"""
888                 return len(self.nodes) == 0
889
890         def clone(self):
891                 clone = digraph()
892                 clone.nodes = {}
893                 memo = {}
894                 for children, parents, node in self.nodes.values():
895                         children_clone = {}
896                         for child, priorities in children.items():
897                                 priorities_clone = memo.get(id(priorities))
898                                 if priorities_clone is None:
899                                         priorities_clone = priorities[:]
900                                         memo[id(priorities)] = priorities_clone
901                                 children_clone[child] = priorities_clone
902                         parents_clone = {}
903                         for parent, priorities in parents.items():
904                                 priorities_clone = memo.get(id(priorities))
905                                 if priorities_clone is None:
906                                         priorities_clone = priorities[:]
907                                         memo[id(priorities)] = priorities_clone
908                                 parents_clone[parent] = priorities_clone
909                         clone.nodes[node] = (children_clone, parents_clone, node)
910                 clone.order = self.order[:]
911                 return clone
912
913         # Backward compatibility
914         addnode = add
915         allnodes = all_nodes
916         allzeros = leaf_nodes
917         hasnode = contains
918         __contains__ = contains
919         empty = is_empty
920         copy = clone
921
922         def delnode(self, node):
923                 try:
924                         self.remove(node)
925                 except KeyError:
926                         pass
927
928         def firstzero(self):
929                 leaf_nodes = self.leaf_nodes()
930                 if leaf_nodes:
931                         return leaf_nodes[0]
932                 return None
933
934         def hasallzeros(self, ignore_priority=None):
935                 return len(self.leaf_nodes(ignore_priority=ignore_priority)) == \
936                         len(self.order)
937
938         def debug_print(self):
939                 def output(s):
940                         writemsg(s, noiselevel=-1)
941                 for node in self.nodes:
942                         output("%s " % (node,))
943                         if self.nodes[node][0]:
944                                 output("depends on\n")
945                         else:
946                                 output("(no children)\n")
947                         for child, priorities in self.nodes[node][0].items():
948                                 output("  %s (%s)\n" % (child, priorities[-1],))
949
950 #parse /etc/env.d and generate /etc/profile.env
951
952 def env_update(makelinks=1, target_root=None, prev_mtimes=None, contents=None,
953         env=None, writemsg_level=None):
954         if writemsg_level is None:
955                 writemsg_level = portage.util.writemsg_level
956         if target_root is None:
957                 global settings
958                 target_root = settings["ROOT"]
959         if prev_mtimes is None:
960                 global mtimedb
961                 prev_mtimes = mtimedb["ldpath"]
962         if env is None:
963                 env = os.environ
964         envd_dir = os.path.join(target_root, "etc", "env.d")
965         portage.util.ensure_dirs(envd_dir, mode=0o755)
966         fns = listdir(envd_dir, EmptyOnError=1)
967         fns.sort()
968         templist = []
969         for x in fns:
970                 if len(x) < 3:
971                         continue
972                 if not x[0].isdigit() or not x[1].isdigit():
973                         continue
974                 if x.startswith(".") or x.endswith("~") or x.endswith(".bak"):
975                         continue
976                 templist.append(x)
977         fns = templist
978         del templist
979
980         space_separated = set(["CONFIG_PROTECT", "CONFIG_PROTECT_MASK"])
981         colon_separated = set(["ADA_INCLUDE_PATH", "ADA_OBJECTS_PATH",
982                 "CLASSPATH", "INFODIR", "INFOPATH", "KDEDIRS", "LDPATH", "MANPATH",
983                   "PATH", "PKG_CONFIG_PATH", "PRELINK_PATH", "PRELINK_PATH_MASK",
984                   "PYTHONPATH", "ROOTPATH"])
985
986         config_list = []
987
988         for x in fns:
989                 file_path = os.path.join(envd_dir, x)
990                 try:
991                         myconfig = getconfig(file_path, expand=False)
992                 except portage.exception.ParseError as e:
993                         writemsg("!!! '%s'\n" % str(e), noiselevel=-1)
994                         del e
995                         continue
996                 if myconfig is None:
997                         # broken symlink or file removed by a concurrent process
998                         writemsg("!!! File Not Found: '%s'\n" % file_path, noiselevel=-1)
999                         continue
1000
1001                 config_list.append(myconfig)
1002                 if "SPACE_SEPARATED" in myconfig:
1003                         space_separated.update(myconfig["SPACE_SEPARATED"].split())
1004                         del myconfig["SPACE_SEPARATED"]
1005                 if "COLON_SEPARATED" in myconfig:
1006                         colon_separated.update(myconfig["COLON_SEPARATED"].split())
1007                         del myconfig["COLON_SEPARATED"]
1008
1009         env = {}
1010         specials = {}
1011         for var in space_separated:
1012                 mylist = []
1013                 for myconfig in config_list:
1014                         if var in myconfig:
1015                                 for item in myconfig[var].split():
1016                                         if item and not item in mylist:
1017                                                 mylist.append(item)
1018                                 del myconfig[var] # prepare for env.update(myconfig)
1019                 if mylist:
1020                         env[var] = " ".join(mylist)
1021                 specials[var] = mylist
1022
1023         for var in colon_separated:
1024                 mylist = []
1025                 for myconfig in config_list:
1026                         if var in myconfig:
1027                                 for item in myconfig[var].split(":"):
1028                                         if item and not item in mylist:
1029                                                 mylist.append(item)
1030                                 del myconfig[var] # prepare for env.update(myconfig)
1031                 if mylist:
1032                         env[var] = ":".join(mylist)
1033                 specials[var] = mylist
1034
1035         for myconfig in config_list:
1036                 """Cumulative variables have already been deleted from myconfig so that
1037                 they won't be overwritten by this dict.update call."""
1038                 env.update(myconfig)
1039
1040         ldsoconf_path = os.path.join(target_root, "etc", "ld.so.conf")
1041         try:
1042                 myld = codecs.open(_unicode_encode(ldsoconf_path,
1043                         encoding=_encodings['fs'], errors='strict'),
1044                         mode='r', encoding=_encodings['content'], errors='replace')
1045                 myldlines=myld.readlines()
1046                 myld.close()
1047                 oldld=[]
1048                 for x in myldlines:
1049                         #each line has at least one char (a newline)
1050                         if x[0]=="#":
1051                                 continue
1052                         oldld.append(x[:-1])
1053         except (IOError, OSError) as e:
1054                 if e.errno != errno.ENOENT:
1055                         raise
1056                 oldld = None
1057
1058         ld_cache_update=False
1059
1060         newld = specials["LDPATH"]
1061         if (oldld!=newld):
1062                 #ld.so.conf needs updating and ldconfig needs to be run
1063                 myfd = atomic_ofstream(ldsoconf_path)
1064                 myfd.write("# ld.so.conf autogenerated by env-update; make all changes to\n")
1065                 myfd.write("# contents of /etc/env.d directory\n")
1066                 for x in specials["LDPATH"]:
1067                         myfd.write(x+"\n")
1068                 myfd.close()
1069                 ld_cache_update=True
1070
1071         # Update prelink.conf if we are prelink-enabled
1072         if prelink_capable:
1073                 newprelink = atomic_ofstream(
1074                         os.path.join(target_root, "etc", "prelink.conf"))
1075                 newprelink.write("# prelink.conf autogenerated by env-update; make all changes to\n")
1076                 newprelink.write("# contents of /etc/env.d directory\n")
1077
1078                 for x in ["/bin","/sbin","/usr/bin","/usr/sbin","/lib","/usr/lib"]:
1079                         newprelink.write("-l "+x+"\n");
1080                 for x in specials["LDPATH"]+specials["PATH"]+specials["PRELINK_PATH"]:
1081                         if not x:
1082                                 continue
1083                         if x[-1]!='/':
1084                                 x=x+"/"
1085                         plmasked=0
1086                         for y in specials["PRELINK_PATH_MASK"]:
1087                                 if not y:
1088                                         continue
1089                                 if y[-1]!='/':
1090                                         y=y+"/"
1091                                 if y==x[0:len(y)]:
1092                                         plmasked=1
1093                                         break
1094                         if not plmasked:
1095                                 newprelink.write("-h "+x+"\n")
1096                 for x in specials["PRELINK_PATH_MASK"]:
1097                         newprelink.write("-b "+x+"\n")
1098                 newprelink.close()
1099
1100         # Portage stores mtimes with 1 second granularity but in >=python-2.5 finer
1101         # granularity is possible.  In order to avoid the potential ambiguity of
1102         # mtimes that differ by less than 1 second, sleep here if any of the
1103         # directories have been modified during the current second.
1104         sleep_for_mtime_granularity = False
1105         current_time = long(time.time())
1106         mtime_changed = False
1107         lib_dirs = set()
1108         for lib_dir in portage.util.unique_array(specials["LDPATH"]+['usr/lib','usr/lib64','usr/lib32','lib','lib64','lib32']):
1109                 x = os.path.join(target_root, lib_dir.lstrip(os.sep))
1110                 try:
1111                         newldpathtime = long(os.stat(x).st_mtime)
1112                         lib_dirs.add(normalize_path(x))
1113                 except OSError as oe:
1114                         if oe.errno == errno.ENOENT:
1115                                 try:
1116                                         del prev_mtimes[x]
1117                                 except KeyError:
1118                                         pass
1119                                 # ignore this path because it doesn't exist
1120                                 continue
1121                         raise
1122                 if newldpathtime == current_time:
1123                         sleep_for_mtime_granularity = True
1124                 if x in prev_mtimes:
1125                         if prev_mtimes[x] == newldpathtime:
1126                                 pass
1127                         else:
1128                                 prev_mtimes[x] = newldpathtime
1129                                 mtime_changed = True
1130                 else:
1131                         prev_mtimes[x] = newldpathtime
1132                         mtime_changed = True
1133
1134         if mtime_changed:
1135                 ld_cache_update = True
1136
1137         if makelinks and \
1138                 not ld_cache_update and \
1139                 contents is not None:
1140                 libdir_contents_changed = False
1141                 for mypath, mydata in contents.items():
1142                         if mydata[0] not in ("obj","sym"):
1143                                 continue
1144                         head, tail = os.path.split(mypath)
1145                         if head in lib_dirs:
1146                                 libdir_contents_changed = True
1147                                 break
1148                 if not libdir_contents_changed:
1149                         makelinks = False
1150
1151         ldconfig = "/sbin/ldconfig"
1152         if "CHOST" in env and "CBUILD" in env and \
1153                 env["CHOST"] != env["CBUILD"]:
1154                 from portage.process import find_binary
1155                 ldconfig = find_binary("%s-ldconfig" % env["CHOST"])
1156
1157         # Only run ldconfig as needed
1158         if (ld_cache_update or makelinks) and ldconfig:
1159                 # ldconfig has very different behaviour between FreeBSD and Linux
1160                 if ostype=="Linux" or ostype.lower().endswith("gnu"):
1161                         # We can't update links if we haven't cleaned other versions first, as
1162                         # an older package installed ON TOP of a newer version will cause ldconfig
1163                         # to overwrite the symlinks we just made. -X means no links. After 'clean'
1164                         # we can safely create links.
1165                         writemsg_level(_(">>> Regenerating %setc/ld.so.cache...\n") % \
1166                                 (target_root,))
1167                         if makelinks:
1168                                 os.system("cd / ; %s -r '%s'" % (ldconfig, target_root))
1169                         else:
1170                                 os.system("cd / ; %s -X -r '%s'" % (ldconfig, target_root))
1171                 elif ostype in ("FreeBSD","DragonFly"):
1172                         writemsg_level(_(">>> Regenerating %svar/run/ld-elf.so.hints...\n") % \
1173                                 target_root)
1174                         os.system(("cd / ; %s -elf -i " + \
1175                                 "-f '%svar/run/ld-elf.so.hints' '%setc/ld.so.conf'") % \
1176                                 (ldconfig, target_root, target_root))
1177
1178         del specials["LDPATH"]
1179
1180         penvnotice  = "# THIS FILE IS AUTOMATICALLY GENERATED BY env-update.\n"
1181         penvnotice += "# DO NOT EDIT THIS FILE. CHANGES TO STARTUP PROFILES\n"
1182         cenvnotice  = penvnotice[:]
1183         penvnotice += "# GO INTO /etc/profile NOT /etc/profile.env\n\n"
1184         cenvnotice += "# GO INTO /etc/csh.cshrc NOT /etc/csh.env\n\n"
1185
1186         #create /etc/profile.env for bash support
1187         outfile = atomic_ofstream(os.path.join(target_root, "etc", "profile.env"))
1188         outfile.write(penvnotice)
1189
1190         env_keys = [ x for x in env if x != "LDPATH" ]
1191         env_keys.sort()
1192         for k in env_keys:
1193                 v = env[k]
1194                 if v.startswith('$') and not v.startswith('${'):
1195                         outfile.write("export %s=$'%s'\n" % (k, v[1:]))
1196                 else:
1197                         outfile.write("export %s='%s'\n" % (k, v))
1198         outfile.close()
1199
1200         #create /etc/csh.env for (t)csh support
1201         outfile = atomic_ofstream(os.path.join(target_root, "etc", "csh.env"))
1202         outfile.write(cenvnotice)
1203         for x in env_keys:
1204                 outfile.write("setenv %s '%s'\n" % (x, env[x]))
1205         outfile.close()
1206
1207         if sleep_for_mtime_granularity:
1208                 while current_time == long(time.time()):
1209                         sleep(1)
1210
1211 def ExtractKernelVersion(base_dir):
1212         """
1213         Try to figure out what kernel version we are running
1214         @param base_dir: Path to sources (usually /usr/src/linux)
1215         @type base_dir: string
1216         @rtype: tuple( version[string], error[string])
1217         @returns:
1218         1. tuple( version[string], error[string])
1219         Either version or error is populated (but never both)
1220
1221         """
1222         lines = []
1223         pathname = os.path.join(base_dir, 'Makefile')
1224         try:
1225                 f = codecs.open(_unicode_encode(pathname,
1226                         encoding=_encodings['fs'], errors='strict'), mode='r',
1227                         encoding=_encodings['content'], errors='replace')
1228         except OSError as details:
1229                 return (None, str(details))
1230         except IOError as details:
1231                 return (None, str(details))
1232
1233         try:
1234                 for i in range(4):
1235                         lines.append(f.readline())
1236         except OSError as details:
1237                 return (None, str(details))
1238         except IOError as details:
1239                 return (None, str(details))
1240
1241         lines = [l.strip() for l in lines]
1242
1243         version = ''
1244
1245         #XXX: The following code relies on the ordering of vars within the Makefile
1246         for line in lines:
1247                 # split on the '=' then remove annoying whitespace
1248                 items = line.split("=")
1249                 items = [i.strip() for i in items]
1250                 if items[0] == 'VERSION' or \
1251                         items[0] == 'PATCHLEVEL':
1252                         version += items[1]
1253                         version += "."
1254                 elif items[0] == 'SUBLEVEL':
1255                         version += items[1]
1256                 elif items[0] == 'EXTRAVERSION' and \
1257                         items[-1] != items[0]:
1258                         version += items[1]
1259
1260         # Grab a list of files named localversion* and sort them
1261         localversions = os.listdir(base_dir)
1262         for x in range(len(localversions)-1,-1,-1):
1263                 if localversions[x][:12] != "localversion":
1264                         del localversions[x]
1265         localversions.sort()
1266
1267         # Append the contents of each to the version string, stripping ALL whitespace
1268         for lv in localversions:
1269                 version += "".join( " ".join( grabfile( base_dir+ "/" + lv ) ).split() )
1270
1271         # Check the .config for a CONFIG_LOCALVERSION and append that too, also stripping whitespace
1272         kernelconfig = getconfig(base_dir+"/.config")
1273         if kernelconfig and "CONFIG_LOCALVERSION" in kernelconfig:
1274                 version += "".join(kernelconfig["CONFIG_LOCALVERSION"].split())
1275
1276         return (version,None)
1277
1278 def autouse(myvartree, use_cache=1, mysettings=None):
1279         """
1280         autuse returns a list of USE variables auto-enabled to packages being installed
1281
1282         @param myvartree: Instance of the vartree class (from /var/db/pkg...)
1283         @type myvartree: vartree
1284         @param use_cache: read values from cache
1285         @type use_cache: Boolean
1286         @param mysettings: Instance of config
1287         @type mysettings: config
1288         @rtype: string
1289         @returns: A string containing a list of USE variables that are enabled via use.defaults
1290         """
1291         if mysettings is None:
1292                 global settings
1293                 mysettings = settings
1294         if mysettings.profile_path is None:
1295                 return ""
1296         myusevars=""
1297         usedefaults = mysettings.use_defs
1298         for myuse in usedefaults:
1299                 dep_met = True
1300                 for mydep in usedefaults[myuse]:
1301                         if not myvartree.dep_match(mydep,use_cache=True):
1302                                 dep_met = False
1303                                 break
1304                 if dep_met:
1305                         myusevars += " "+myuse
1306         return myusevars
1307
1308 def check_config_instance(test):
1309         if not isinstance(test, config):
1310                 raise TypeError("Invalid type for config object: %s (should be %s)" % (test.__class__, config))
1311
1312 def _lazy_iuse_regex(iuse_implicit):
1313         """
1314         The PORTAGE_IUSE value is lazily evaluated since re.escape() is slow
1315         and the value is only used when an ebuild phase needs to be executed
1316         (it's used only to generate QA notices).
1317         """
1318         # Escape anything except ".*" which is supposed to pass through from
1319         # _get_implicit_iuse().
1320         regex = sorted(re.escape(x) for x in iuse_implicit)
1321         regex = "^(%s)$" % "|".join(regex)
1322         regex = regex.replace("\\.\\*", ".*")
1323         return regex
1324
1325 class _local_repo_config(object):
1326         __slots__ = ('aliases', 'eclass_overrides', 'masters', 'name',)
1327         def __init__(self, name, repo_opts):
1328                 self.name = name
1329
1330                 aliases = repo_opts.get('aliases')
1331                 if aliases is not None:
1332                         aliases = tuple(aliases.split())
1333                 self.aliases = aliases
1334
1335                 eclass_overrides = repo_opts.get('eclass-overrides')
1336                 if eclass_overrides is not None:
1337                         eclass_overrides = tuple(eclass_overrides.split())
1338                 self.eclass_overrides = eclass_overrides
1339
1340                 masters = repo_opts.get('masters')
1341                 if masters is not None:
1342                         masters = tuple(masters.split())
1343                 self.masters = masters
1344
1345 class config(object):
1346         """
1347         This class encompasses the main portage configuration.  Data is pulled from
1348         ROOT/PORTDIR/profiles/, from ROOT/etc/make.profile incrementally through all 
1349         parent profiles as well as from ROOT/PORTAGE_CONFIGROOT/* for user specified
1350         overrides.
1351         
1352         Generally if you need data like USE flags, FEATURES, environment variables,
1353         virtuals ...etc you look in here.
1354         """
1355
1356         # Don't include anything that could be extremely long here (like SRC_URI)
1357         # since that could cause execve() calls to fail with E2BIG errors. For
1358         # example, see bug #262647.
1359         _setcpv_aux_keys = ('SLOT', 'RESTRICT', 'LICENSE',
1360                 'KEYWORDS',  'INHERITED', 'IUSE', 'PROVIDE', 'EAPI',
1361                 'PROPERTIES', 'DEFINED_PHASES', 'repository')
1362
1363         _env_blacklist = [
1364                 "A", "AA", "CATEGORY", "DEPEND", "DESCRIPTION", "EAPI",
1365                 "EBUILD_PHASE", "EMERGE_FROM", "HOMEPAGE", "INHERITED", "IUSE",
1366                 "KEYWORDS", "LICENSE", "PDEPEND", "PF", "PKGUSE",
1367                 "PORTAGE_CONFIGROOT", "PORTAGE_IUSE",
1368                 "PORTAGE_NONFATAL", "PORTAGE_REPO_NAME",
1369                 "PORTAGE_USE", "PROPERTIES", "PROVIDE", "RDEPEND", "RESTRICT",
1370                 "ROOT", "SLOT", "SRC_URI"
1371         ]
1372
1373         _environ_whitelist = []
1374
1375         # Whitelisted variables are always allowed to enter the ebuild
1376         # environment. Generally, this only includes special portage
1377         # variables. Ebuilds can unset variables that are not whitelisted
1378         # and rely on them remaining unset for future phases, without them
1379         # leaking back in from various locations (bug #189417). It's very
1380         # important to set our special BASH_ENV variable in the ebuild
1381         # environment in order to prevent sandbox from sourcing /etc/profile
1382         # in it's bashrc (causing major leakage).
1383         _environ_whitelist += [
1384                 "ACCEPT_LICENSE", "BASH_ENV", "BUILD_PREFIX", "D",
1385                 "DISTDIR", "DOC_SYMLINKS_DIR", "EBUILD",
1386                 "EBUILD_EXIT_STATUS_FILE", "EBUILD_FORCE_TEST",
1387                 "EBUILD_PHASE", "ECLASSDIR", "ECLASS_DEPTH", "EMERGE_FROM",
1388                 "FEATURES", "FILESDIR", "HOME", "NOCOLOR", "PATH",
1389                 "PKGDIR",
1390                 "PKGUSE", "PKG_LOGDIR", "PKG_TMPDIR",
1391                 "PORTAGE_ACTUAL_DISTDIR", "PORTAGE_ARCHLIST",
1392                 "PORTAGE_BASHRC",
1393                 "PORTAGE_BINPKG_FILE", "PORTAGE_BINPKG_TAR_OPTS",
1394                 "PORTAGE_BINPKG_TMPFILE",
1395                 "PORTAGE_BIN_PATH",
1396                 "PORTAGE_BUILDDIR", "PORTAGE_COLORMAP",
1397                 "PORTAGE_CONFIGROOT", "PORTAGE_DEBUG", "PORTAGE_DEPCACHEDIR",
1398                 "PORTAGE_GID", "PORTAGE_INST_GID", "PORTAGE_INST_UID",
1399                 "PORTAGE_IUSE",
1400                 "PORTAGE_LOG_FILE", "PORTAGE_MASTER_PID",
1401                 "PORTAGE_PYM_PATH", "PORTAGE_QUIET",
1402                 "PORTAGE_REPO_NAME", "PORTAGE_RESTRICT",
1403                 "PORTAGE_TMPDIR", "PORTAGE_UPDATE_ENV",
1404                 "PORTAGE_VERBOSE", "PORTAGE_WORKDIR_MODE",
1405                 "PORTDIR", "PORTDIR_OVERLAY", "PREROOTPATH", "PROFILE_PATHS",
1406                 "ROOT", "ROOTPATH", "STARTDIR", "T", "TMP", "TMPDIR",
1407                 "USE_EXPAND", "USE_ORDER", "WORKDIR",
1408                 "XARGS",
1409         ]
1410
1411         # user config variables
1412         _environ_whitelist += [
1413                 "DOC_SYMLINKS_DIR", "INSTALL_MASK", "PKG_INSTALL_MASK"
1414         ]
1415
1416         _environ_whitelist += [
1417                 "A", "AA", "CATEGORY", "P", "PF", "PN", "PR", "PV", "PVR"
1418         ]
1419
1420         # misc variables inherited from the calling environment
1421         _environ_whitelist += [
1422                 "COLORTERM", "DISPLAY", "EDITOR", "LESS",
1423                 "LESSOPEN", "LOGNAME", "LS_COLORS", "PAGER",
1424                 "TERM", "TERMCAP", "USER",
1425         ]
1426
1427         # other variables inherited from the calling environment
1428         _environ_whitelist += [
1429                 "CVS_RSH", "ECHANGELOG_USER",
1430                 "GPG_AGENT_INFO",
1431                 "SSH_AGENT_PID", "SSH_AUTH_SOCK",
1432                 "STY", "WINDOW", "XAUTHORITY",
1433         ]
1434
1435         _environ_whitelist = frozenset(_environ_whitelist)
1436
1437         _environ_whitelist_re = re.compile(r'^(CCACHE_|DISTCC_).*')
1438
1439         # Filter selected variables in the config.environ() method so that
1440         # they don't needlessly propagate down into the ebuild environment.
1441         _environ_filter = []
1442
1443         # misc variables inherited from the calling environment
1444         _environ_filter += [
1445                 "INFOPATH", "MANPATH",
1446         ]
1447
1448         # variables that break bash
1449         _environ_filter += [
1450                 "HISTFILE", "POSIXLY_CORRECT",
1451         ]
1452
1453         # portage config variables and variables set directly by portage
1454         _environ_filter += [
1455                 "ACCEPT_KEYWORDS", "ACCEPT_PROPERTIES", "AUTOCLEAN",
1456                 "CLEAN_DELAY", "COLLISION_IGNORE", "CONFIG_PROTECT",
1457                 "CONFIG_PROTECT_MASK", "EGENCACHE_DEFAULT_OPTS", "EMERGE_DEFAULT_OPTS",
1458                 "EMERGE_LOG_DIR",
1459                 "EMERGE_WARNING_DELAY", "FETCHCOMMAND", "FETCHCOMMAND_FTP",
1460                 "FETCHCOMMAND_HTTP", "FETCHCOMMAND_SFTP",
1461                 "GENTOO_MIRRORS", "NOCONFMEM", "O",
1462                 "PORTAGE_BACKGROUND",
1463                 "PORTAGE_BINHOST_CHUNKSIZE", "PORTAGE_CALLER",
1464                 "PORTAGE_ELOG_CLASSES",
1465                 "PORTAGE_ELOG_MAILFROM", "PORTAGE_ELOG_MAILSUBJECT",
1466                 "PORTAGE_ELOG_MAILURI", "PORTAGE_ELOG_SYSTEM",
1467                 "PORTAGE_FETCH_CHECKSUM_TRY_MIRRORS", "PORTAGE_FETCH_RESUME_MIN_SIZE",
1468                 "PORTAGE_GPG_DIR",
1469                 "PORTAGE_GPG_KEY", "PORTAGE_IONICE_COMMAND",
1470                 "PORTAGE_PACKAGE_EMPTY_ABORT",
1471                 "PORTAGE_REPO_DUPLICATE_WARN",
1472                 "PORTAGE_RO_DISTDIRS",
1473                 "PORTAGE_RSYNC_EXTRA_OPTS", "PORTAGE_RSYNC_OPTS",
1474                 "PORTAGE_RSYNC_RETRIES", "PORTAGE_USE", "PORT_LOGDIR",
1475                 "QUICKPKG_DEFAULT_OPTS",
1476                 "RESUMECOMMAND", "RESUMECOMMAND_HTTP", "RESUMECOMMAND_HTTP",
1477                 "RESUMECOMMAND_SFTP", "SYNC", "USE_EXPAND_HIDDEN", "USE_ORDER",
1478         ]
1479
1480         _environ_filter = frozenset(_environ_filter)
1481
1482         def __init__(self, clone=None, mycpv=None, config_profile_path=None,
1483                 config_incrementals=None, config_root=None, target_root=None,
1484                 local_config=True, env=None):
1485                 """
1486                 @param clone: If provided, init will use deepcopy to copy by value the instance.
1487                 @type clone: Instance of config class.
1488                 @param mycpv: CPV to load up (see setcpv), this is the same as calling init with mycpv=None
1489                 and then calling instance.setcpv(mycpv).
1490                 @type mycpv: String
1491                 @param config_profile_path: Configurable path to the profile (usually PROFILE_PATH from portage.const)
1492                 @type config_profile_path: String
1493                 @param config_incrementals: List of incremental variables
1494                         (defaults to portage.const.INCREMENTALS)
1495                 @type config_incrementals: List
1496                 @param config_root: path to read local config from (defaults to "/", see PORTAGE_CONFIGROOT)
1497                 @type config_root: String
1498                 @param target_root: __init__ override of $ROOT env variable.
1499                 @type target_root: String
1500                 @param local_config: Enables loading of local config (/etc/portage); used most by repoman to
1501                 ignore local config (keywording and unmasking)
1502                 @type local_config: Boolean
1503                 @param env: The calling environment which is used to override settings.
1504                         Defaults to os.environ if unspecified.
1505                 @type env: dict
1506                 """
1507
1508                 # When initializing the global portage.settings instance, avoid
1509                 # raising exceptions whenever possible since exceptions thrown
1510                 # from 'import portage' or 'import portage.exceptions' statements
1511                 # can practically render the api unusable for api consumers.
1512                 tolerant = "_initializing_globals" in globals()
1513
1514                 self.already_in_regenerate = 0
1515
1516                 self.locked   = 0
1517                 self.mycpv    = None
1518                 self._setcpv_args_hash = None
1519                 self.puse     = []
1520                 self.modifiedkeys = []
1521                 self.uvlist = []
1522                 self._accept_chost_re = None
1523                 self._accept_license = None
1524                 self._accept_license_str = None
1525                 self._license_groups = {}
1526                 self._accept_properties = None
1527
1528                 self.virtuals = {}
1529                 self.virts_p = {}
1530                 self.dirVirtuals = None
1531                 self.v_count  = 0
1532
1533                 # Virtuals obtained from the vartree
1534                 self.treeVirtuals = {}
1535                 # Virtuals by user specification. Includes negatives.
1536                 self.userVirtuals = {}
1537                 # Virtual negatives from user specifications.
1538                 self.negVirtuals  = {}
1539                 # Virtuals added by the depgraph via self.setinst().
1540                 self._depgraphVirtuals = {}
1541
1542                 self.user_profile_dir = None
1543                 self.local_config = local_config
1544                 self._local_repo_configs = None
1545                 self._local_repo_conf_path = None
1546
1547                 if clone:
1548                         self.incrementals = copy.deepcopy(clone.incrementals)
1549                         self.profile_path = copy.deepcopy(clone.profile_path)
1550                         self.user_profile_dir = copy.deepcopy(clone.user_profile_dir)
1551                         self.local_config = copy.deepcopy(clone.local_config)
1552                         self._local_repo_configs = \
1553                                 copy.deepcopy(clone._local_repo_configs)
1554                         self._local_repo_conf_path = \
1555                                 copy.deepcopy(clone._local_repo_conf_path)
1556
1557                         self.module_priority = copy.deepcopy(clone.module_priority)
1558                         self.modules         = copy.deepcopy(clone.modules)
1559
1560                         self.depcachedir = copy.deepcopy(clone.depcachedir)
1561
1562                         self.packages = copy.deepcopy(clone.packages)
1563                         self.virtuals = copy.deepcopy(clone.virtuals)
1564
1565                         self.dirVirtuals = copy.deepcopy(clone.dirVirtuals)
1566                         self.treeVirtuals = copy.deepcopy(clone.treeVirtuals)
1567                         self.userVirtuals = copy.deepcopy(clone.userVirtuals)
1568                         self.negVirtuals  = copy.deepcopy(clone.negVirtuals)
1569                         self._depgraphVirtuals = copy.deepcopy(clone._depgraphVirtuals)
1570
1571                         self.use_defs = copy.deepcopy(clone.use_defs)
1572                         self.usemask  = copy.deepcopy(clone.usemask)
1573                         self.usemask_list = copy.deepcopy(clone.usemask_list)
1574                         self.pusemask_list = copy.deepcopy(clone.pusemask_list)
1575                         self.useforce      = copy.deepcopy(clone.useforce)
1576                         self.useforce_list = copy.deepcopy(clone.useforce_list)
1577                         self.puseforce_list = copy.deepcopy(clone.puseforce_list)
1578                         self.puse     = copy.deepcopy(clone.puse)
1579                         self.make_defaults_use = copy.deepcopy(clone.make_defaults_use)
1580                         self.pkgprofileuse = copy.deepcopy(clone.pkgprofileuse)
1581                         self.mycpv    = copy.deepcopy(clone.mycpv)
1582                         self._setcpv_args_hash = copy.deepcopy(clone._setcpv_args_hash)
1583
1584                         self.configdict = copy.deepcopy(clone.configdict)
1585                         self.configlist = [
1586                                 self.configdict['env.d'],
1587                                 self.configdict['pkginternal'],
1588                                 self.configdict['globals'],
1589                                 self.configdict['defaults'],
1590                                 self.configdict['conf'],
1591                                 self.configdict['pkg'],
1592                                 self.configdict['auto'],
1593                                 self.configdict['env'],
1594                         ]
1595                         self.lookuplist = self.configlist[:]
1596                         self.lookuplist.reverse()
1597                         self._use_expand_dict = copy.deepcopy(clone._use_expand_dict)
1598                         self.profiles = copy.deepcopy(clone.profiles)
1599                         self.backupenv  = self.configdict["backupenv"]
1600                         self.pusedict   = copy.deepcopy(clone.pusedict)
1601                         self.categories = copy.deepcopy(clone.categories)
1602                         self.pkeywordsdict = copy.deepcopy(clone.pkeywordsdict)
1603                         self._pkeywords_list = copy.deepcopy(clone._pkeywords_list)
1604                         self.pmaskdict = copy.deepcopy(clone.pmaskdict)
1605                         self.punmaskdict = copy.deepcopy(clone.punmaskdict)
1606                         self.prevmaskdict = copy.deepcopy(clone.prevmaskdict)
1607                         self.pprovideddict = copy.deepcopy(clone.pprovideddict)
1608                         self.features = copy.deepcopy(clone.features)
1609
1610                         self._accept_license = copy.deepcopy(clone._accept_license)
1611                         self._plicensedict = copy.deepcopy(clone._plicensedict)
1612                         self._license_groups = copy.deepcopy(clone._license_groups)
1613                         self._accept_properties = copy.deepcopy(clone._accept_properties)
1614                         self._ppropertiesdict = copy.deepcopy(clone._ppropertiesdict)
1615                 else:
1616
1617                         def check_var_directory(varname, var):
1618                                 if not os.path.isdir(var):
1619                                         writemsg(_("!!! Error: %s='%s' is not a directory. "
1620                                                 "Please correct this.\n") % (varname, var),
1621                                                 noiselevel=-1)
1622                                         raise portage.exception.DirectoryNotFound(var)
1623
1624                         if config_root is None:
1625                                 config_root = "/"
1626
1627                         config_root = normalize_path(os.path.abspath(
1628                                 config_root)).rstrip(os.path.sep) + os.path.sep
1629
1630                         check_var_directory("PORTAGE_CONFIGROOT", config_root)
1631
1632                         self.depcachedir = DEPCACHE_PATH
1633
1634                         if not config_profile_path:
1635                                 config_profile_path = \
1636                                         os.path.join(config_root, PROFILE_PATH)
1637                                 if os.path.isdir(config_profile_path):
1638                                         self.profile_path = config_profile_path
1639                                 else:
1640                                         self.profile_path = None
1641                         else:
1642                                 self.profile_path = config_profile_path[:]
1643
1644                         if config_incrementals is None:
1645                                 self.incrementals = copy.deepcopy(portage.const.INCREMENTALS)
1646                         else:
1647                                 self.incrementals = copy.deepcopy(config_incrementals)
1648
1649                         self.module_priority    = ["user","default"]
1650                         self.modules            = {}
1651                         modules_loader = portage.env.loaders.KeyValuePairFileLoader(
1652                                 os.path.join(config_root, MODULES_FILE_PATH), None, None)
1653                         modules_dict, modules_errors = modules_loader.load()
1654                         self.modules["user"] = modules_dict
1655                         if self.modules["user"] is None:
1656                                 self.modules["user"] = {}
1657                         self.modules["default"] = {
1658                                 "portdbapi.metadbmodule": "portage.cache.metadata.database",
1659                                 "portdbapi.auxdbmodule":  "portage.cache.flat_hash.database",
1660                         }
1661
1662                         self.usemask=[]
1663                         self.configlist=[]
1664
1665                         # back up our incremental variables:
1666                         self.configdict={}
1667                         self._use_expand_dict = {}
1668                         # configlist will contain: [ env.d, globals, defaults, conf, pkg, auto, backupenv, env ]
1669                         self.configlist.append({})
1670                         self.configdict["env.d"] = self.configlist[-1]
1671
1672                         self.configlist.append({})
1673                         self.configdict["pkginternal"] = self.configlist[-1]
1674
1675                         # The symlink might not exist or might not be a symlink.
1676                         if self.profile_path is None:
1677                                 self.profiles = []
1678                         else:
1679                                 self.profiles = []
1680                                 def addProfile(currentPath):
1681                                         parentsFile = os.path.join(currentPath, "parent")
1682                                         eapi_file = os.path.join(currentPath, "eapi")
1683                                         try:
1684                                                 eapi = codecs.open(_unicode_encode(eapi_file,
1685                                                         encoding=_encodings['fs'], errors='strict'),
1686                                                         mode='r', encoding=_encodings['content'], errors='replace'
1687                                                         ).readline().strip()
1688                                         except IOError:
1689                                                 pass
1690                                         else:
1691                                                 if not eapi_is_supported(eapi):
1692                                                         raise portage.exception.ParseError(_(
1693                                                                 "Profile contains unsupported "
1694                                                                 "EAPI '%s': '%s'") % \
1695                                                                 (eapi, os.path.realpath(eapi_file),))
1696                                         if os.path.exists(parentsFile):
1697                                                 parents = grabfile(parentsFile)
1698                                                 if not parents:
1699                                                         raise portage.exception.ParseError(
1700                                                                 _("Empty parent file: '%s'") % parentsFile)
1701                                                 for parentPath in parents:
1702                                                         parentPath = normalize_path(os.path.join(
1703                                                                 currentPath, parentPath))
1704                                                         if os.path.exists(parentPath):
1705                                                                 addProfile(parentPath)
1706                                                         else:
1707                                                                 raise portage.exception.ParseError(
1708                                                                         _("Parent '%s' not found: '%s'") %  \
1709                                                                         (parentPath, parentsFile))
1710                                         self.profiles.append(currentPath)
1711                                 try:
1712                                         addProfile(os.path.realpath(self.profile_path))
1713                                 except portage.exception.ParseError as e:
1714                                         writemsg(_("!!! Unable to parse profile: '%s'\n") % \
1715                                                 self.profile_path, noiselevel=-1)
1716                                         writemsg("!!! ParseError: %s\n" % str(e), noiselevel=-1)
1717                                         del e
1718                                         self.profiles = []
1719                         if local_config and self.profiles:
1720                                 custom_prof = os.path.join(
1721                                         config_root, CUSTOM_PROFILE_PATH)
1722                                 if os.path.exists(custom_prof):
1723                                         self.user_profile_dir = custom_prof
1724                                         self.profiles.append(custom_prof)
1725                                 del custom_prof
1726
1727                         self.packages_list = [grabfile_package(os.path.join(x, "packages")) for x in self.profiles]
1728                         self.packages      = stack_lists(self.packages_list, incremental=1)
1729                         del self.packages_list
1730                         #self.packages = grab_stacked("packages", self.profiles, grabfile, incremental_lines=1)
1731
1732                         # revmaskdict
1733                         self.prevmaskdict={}
1734                         for x in self.packages:
1735                                 # Negative atoms are filtered by the above stack_lists() call.
1736                                 if not isinstance(x, dep.Atom):
1737                                         x = dep.Atom(x.lstrip('*'))
1738                                 self.prevmaskdict.setdefault(x.cp, []).append(x)
1739
1740                         self._pkeywords_list = []
1741                         rawpkeywords = [grabdict_package(
1742                                 os.path.join(x, "package.keywords"), recursive=1) \
1743                                 for x in self.profiles]
1744                         for pkeyworddict in rawpkeywords:
1745                                 cpdict = {}
1746                                 for k, v in pkeyworddict.items():
1747                                         cpdict.setdefault(k.cp, {})[k] = v
1748                                 self._pkeywords_list.append(cpdict)
1749
1750                         # get profile-masked use flags -- INCREMENTAL Child over parent
1751                         self.usemask_list = [grabfile(os.path.join(x, "use.mask"),
1752                                 recursive=1) for x in self.profiles]
1753                         self.usemask  = set(stack_lists(
1754                                 self.usemask_list, incremental=True))
1755                         use_defs_lists = [grabdict(os.path.join(x, "use.defaults")) for x in self.profiles]
1756                         self.use_defs  = stack_dictlist(use_defs_lists, incremental=True)
1757                         del use_defs_lists
1758
1759                         self.pusemask_list = []
1760                         rawpusemask = [grabdict_package(os.path.join(x, "package.use.mask"),
1761                                 recursive=1) for x in self.profiles]
1762                         for pusemaskdict in rawpusemask:
1763                                 cpdict = {}
1764                                 for k, v in pusemaskdict.items():
1765                                         cpdict.setdefault(k.cp, {})[k] = v
1766                                 self.pusemask_list.append(cpdict)
1767                         del rawpusemask
1768
1769                         self.pkgprofileuse = []
1770                         rawprofileuse = [grabdict_package(os.path.join(x, "package.use"),
1771                                 juststrings=True, recursive=1) for x in self.profiles]
1772                         for rawpusedict in rawprofileuse:
1773                                 cpdict = {}
1774                                 for k, v in rawpusedict.items():
1775                                         cpdict.setdefault(k.cp, {})[k] = v
1776                                 self.pkgprofileuse.append(cpdict)
1777                         del rawprofileuse
1778
1779                         self.useforce_list = [grabfile(os.path.join(x, "use.force"),
1780                                 recursive=1) for x in self.profiles]
1781                         self.useforce  = set(stack_lists(
1782                                 self.useforce_list, incremental=True))
1783
1784                         self.puseforce_list = []
1785                         rawpuseforce = [grabdict_package(
1786                                 os.path.join(x, "package.use.force"), recursive=1) \
1787                                 for x in self.profiles]
1788                         for rawpusefdict in rawpuseforce:
1789                                 cpdict = {}
1790                                 for k, v in rawpusefdict.items():
1791                                         cpdict.setdefault(k.cp, {})[k] = v
1792                                 self.puseforce_list.append(cpdict)
1793                         del rawpuseforce
1794
1795                         make_conf = getconfig(
1796                                 os.path.join(config_root, MAKE_CONF_FILE),
1797                                 tolerant=tolerant, allow_sourcing=True)
1798                         if make_conf is None:
1799                                 make_conf = {}
1800
1801                         # Allow ROOT setting to come from make.conf if it's not overridden
1802                         # by the constructor argument (from the calling environment).
1803                         if target_root is None and "ROOT" in make_conf:
1804                                 target_root = make_conf["ROOT"]
1805                                 if not target_root.strip():
1806                                         target_root = None
1807                         if target_root is None:
1808                                 target_root = "/"
1809
1810                         target_root = normalize_path(os.path.abspath(
1811                                 target_root)).rstrip(os.path.sep) + os.path.sep
1812
1813                         portage.util.ensure_dirs(target_root)
1814                         check_var_directory("ROOT", target_root)
1815
1816                         # The expand_map is used for variable substitution
1817                         # in getconfig() calls, and the getconfig() calls
1818                         # update expand_map with the value of each variable
1819                         # assignment that occurs. Variable substitution occurs
1820                         # in the following order, which corresponds to the
1821                         # order of appearance in self.lookuplist:
1822                         #
1823                         #   * env.d
1824                         #   * make.globals
1825                         #   * make.defaults
1826                         #   * make.conf
1827                         #
1828                         # Notably absent is "env", since we want to avoid any
1829                         # interaction with the calling environment that might
1830                         # lead to unexpected results.
1831                         expand_map = {}
1832
1833                         env_d = getconfig(os.path.join(target_root, "etc", "profile.env"),
1834                                 expand=expand_map)
1835                         # env_d will be None if profile.env doesn't exist.
1836                         if env_d:
1837                                 self.configdict["env.d"].update(env_d)
1838                                 expand_map.update(env_d)
1839
1840                         # backupenv is used for calculating incremental variables.
1841                         if env is None:
1842                                 env = os.environ
1843
1844                         # Avoid potential UnicodeDecodeError exceptions later.
1845                         env_unicode = dict((_unicode_decode(k), _unicode_decode(v))
1846                                 for k, v in env.items())
1847
1848                         self.backupenv = env_unicode
1849
1850                         if env_d:
1851                                 # Remove duplicate values so they don't override updated
1852                                 # profile.env values later (profile.env is reloaded in each
1853                                 # call to self.regenerate).
1854                                 for k, v in env_d.items():
1855                                         try:
1856                                                 if self.backupenv[k] == v:
1857                                                         del self.backupenv[k]
1858                                         except KeyError:
1859                                                 pass
1860                                 del k, v
1861
1862                         self.configdict["env"] = util.LazyItemsDict(self.backupenv)
1863
1864                         # make.globals should not be relative to config_root
1865                         # because it only contains constants.
1866                         for x in (portage.const.GLOBAL_CONFIG_PATH, "/etc"):
1867                                 self.mygcfg = getconfig(os.path.join(x, "make.globals"),
1868                                         expand=expand_map)
1869                                 if self.mygcfg:
1870                                         break
1871
1872                         if self.mygcfg is None:
1873                                 self.mygcfg = {}
1874
1875                         self.configlist.append(self.mygcfg)
1876                         self.configdict["globals"]=self.configlist[-1]
1877
1878                         self.make_defaults_use = []
1879                         self.mygcfg = {}
1880                         if self.profiles:
1881                                 mygcfg_dlists = [getconfig(os.path.join(x, "make.defaults"),
1882                                         expand=expand_map) for x in self.profiles]
1883
1884                                 for cfg in mygcfg_dlists:
1885                                         if cfg:
1886                                                 self.make_defaults_use.append(cfg.get("USE", ""))
1887                                         else:
1888                                                 self.make_defaults_use.append("")
1889                                 self.mygcfg = stack_dicts(mygcfg_dlists,
1890                                         incrementals=portage.const.INCREMENTALS)
1891                                 if self.mygcfg is None:
1892                                         self.mygcfg = {}
1893                         self.configlist.append(self.mygcfg)
1894                         self.configdict["defaults"]=self.configlist[-1]
1895
1896                         self.mygcfg = getconfig(
1897                                 os.path.join(config_root, MAKE_CONF_FILE),
1898                                 tolerant=tolerant, allow_sourcing=True, expand=expand_map)
1899                         if self.mygcfg is None:
1900                                 self.mygcfg = {}
1901
1902                         # Don't allow the user to override certain variables in make.conf
1903                         profile_only_variables = self.configdict["defaults"].get(
1904                                 "PROFILE_ONLY_VARIABLES", "").split()
1905                         for k in profile_only_variables:
1906                                 self.mygcfg.pop(k, None)
1907
1908                         self.configlist.append(self.mygcfg)
1909                         self.configdict["conf"]=self.configlist[-1]
1910
1911                         self.configlist.append(util.LazyItemsDict())
1912                         self.configdict["pkg"]=self.configlist[-1]
1913
1914                         #auto-use:
1915                         self.configlist.append({})
1916                         self.configdict["auto"]=self.configlist[-1]
1917
1918                         self.configdict["backupenv"] = self.backupenv
1919
1920                         # Don't allow the user to override certain variables in the env
1921                         for k in profile_only_variables:
1922                                 self.backupenv.pop(k, None)
1923
1924                         self.configlist.append(self.configdict["env"])
1925
1926                         # make lookuplist for loading package.*
1927                         self.lookuplist=self.configlist[:]
1928                         self.lookuplist.reverse()
1929
1930                         # Blacklist vars that could interfere with portage internals.
1931                         for blacklisted in self._env_blacklist:
1932                                 for cfg in self.lookuplist:
1933                                         cfg.pop(blacklisted, None)
1934                                 self.backupenv.pop(blacklisted, None)
1935                         del blacklisted, cfg
1936
1937                         self["PORTAGE_CONFIGROOT"] = config_root
1938                         self.backup_changes("PORTAGE_CONFIGROOT")
1939                         self["ROOT"] = target_root
1940                         self.backup_changes("ROOT")
1941
1942                         self.pusedict = {}
1943                         self.pkeywordsdict = {}
1944                         self._plicensedict = {}
1945                         self._ppropertiesdict = {}
1946                         self.punmaskdict = {}
1947                         abs_user_config = os.path.join(config_root, USER_CONFIG_PATH)
1948
1949                         # locations for "categories" and "arch.list" files
1950                         locations = [os.path.join(self["PORTDIR"], "profiles")]
1951                         pmask_locations = [os.path.join(self["PORTDIR"], "profiles")]
1952                         pmask_locations.extend(self.profiles)
1953
1954                         """ repoman controls PORTDIR_OVERLAY via the environment, so no
1955                         special cases are needed here."""
1956                         overlay_profiles = []
1957                         for ov in self["PORTDIR_OVERLAY"].split():
1958                                 ov = normalize_path(ov)
1959                                 profiles_dir = os.path.join(ov, "profiles")
1960                                 if os.path.isdir(profiles_dir):
1961                                         overlay_profiles.append(profiles_dir)
1962                         locations += overlay_profiles
1963                         
1964                         pmask_locations.extend(overlay_profiles)
1965
1966                         if local_config:
1967                                 locations.append(abs_user_config)
1968                                 pmask_locations.append(abs_user_config)
1969                                 pusedict = grabdict_package(
1970                                         os.path.join(abs_user_config, "package.use"), recursive=1)
1971                                 for k, v in pusedict.items():
1972                                         self.pusedict.setdefault(k.cp, {})[k] = v
1973
1974                                 #package.keywords
1975                                 pkgdict = grabdict_package(
1976                                         os.path.join(abs_user_config, "package.keywords"),
1977                                         recursive=1)
1978                                 for k, v in pkgdict.items():
1979                                         # default to ~arch if no specific keyword is given
1980                                         if not v:
1981                                                 mykeywordlist = []
1982                                                 if self.configdict["defaults"] and \
1983                                                         "ACCEPT_KEYWORDS" in self.configdict["defaults"]:
1984                                                         groups = self.configdict["defaults"]["ACCEPT_KEYWORDS"].split()
1985                                                 else:
1986                                                         groups = []
1987                                                 for keyword in groups:
1988                                                         if not keyword[0] in "~-":
1989                                                                 mykeywordlist.append("~"+keyword)
1990                                                 v = mykeywordlist
1991                                         self.pkeywordsdict.setdefault(k.cp, {})[k] = v
1992
1993                                 #package.license
1994                                 licdict = grabdict_package(os.path.join(
1995                                         abs_user_config, "package.license"), recursive=1)
1996                                 for k, v in licdict.items():
1997                                         cp = k.cp
1998                                         cp_dict = self._plicensedict.get(cp)
1999                                         if not cp_dict:
2000                                                 cp_dict = {}
2001                                                 self._plicensedict[cp] = cp_dict
2002                                         cp_dict[k] = self.expandLicenseTokens(v)
2003
2004                                 #package.properties
2005                                 propdict = grabdict_package(os.path.join(
2006                                         abs_user_config, "package.properties"), recursive=1)
2007                                 for k, v in propdict.items():
2008                                         cp = k.cp
2009                                         cp_dict = self._ppropertiesdict.get(cp)
2010                                         if not cp_dict:
2011                                                 cp_dict = {}
2012                                                 self._ppropertiesdict[cp] = cp_dict
2013                                         cp_dict[k] = v
2014
2015                                 self._local_repo_configs = {}
2016                                 self._local_repo_conf_path = \
2017                                         os.path.join(abs_user_config, 'repos.conf')
2018                                 try:
2019                                         from configparser import SafeConfigParser, ParsingError
2020                                 except ImportError:
2021                                         from ConfigParser import SafeConfigParser, ParsingError
2022                                 repo_conf_parser = SafeConfigParser()
2023                                 try:
2024                                         repo_conf_parser.readfp(
2025                                                 codecs.open(
2026                                                 _unicode_encode(self._local_repo_conf_path,
2027                                                 encoding=_encodings['fs'], errors='strict'),
2028                                                 mode='r', encoding=_encodings['content'], errors='replace')
2029                                         )
2030                                 except EnvironmentError as e:
2031                                         if e.errno != errno.ENOENT:
2032                                                 raise
2033                                         del e
2034                                 except ParsingError as e:
2035                                         portage.util.writemsg_level(
2036                                                 _("!!! Error parsing '%s': %s\n")  % \
2037                                                 (self._local_repo_conf_path, e),
2038                                                 level=logging.ERROR, noiselevel=-1)
2039                                         del e
2040                                 else:
2041                                         repo_defaults = repo_conf_parser.defaults()
2042                                         if repo_defaults:
2043                                                 self._local_repo_configs['DEFAULT'] = \
2044                                                         _local_repo_config('DEFAULT', repo_defaults)
2045                                         for repo_name in repo_conf_parser.sections():
2046                                                 repo_opts = repo_defaults.copy()
2047                                                 for opt_name in repo_conf_parser.options(repo_name):
2048                                                         repo_opts[opt_name] = \
2049                                                                 repo_conf_parser.get(repo_name, opt_name)
2050                                                 self._local_repo_configs[repo_name] = \
2051                                                         _local_repo_config(repo_name, repo_opts)
2052
2053                         #getting categories from an external file now
2054                         categories = [grabfile(os.path.join(x, "categories")) for x in locations]
2055                         self.categories = tuple(sorted(
2056                                 stack_lists(categories, incremental=1)))
2057                         del categories
2058
2059                         archlist = [grabfile(os.path.join(x, "arch.list")) for x in locations]
2060                         archlist = stack_lists(archlist, incremental=1)
2061                         self.configdict["conf"]["PORTAGE_ARCHLIST"] = " ".join(archlist)
2062
2063                         # package.mask and package.unmask
2064                         pkgmasklines = []
2065                         pkgunmasklines = []
2066                         for x in pmask_locations:
2067                                 pkgmasklines.append(grabfile_package(
2068                                         os.path.join(x, "package.mask"), recursive=1))
2069                                 pkgunmasklines.append(grabfile_package(
2070                                         os.path.join(x, "package.unmask"), recursive=1))
2071                         pkgmasklines = stack_lists(pkgmasklines, incremental=1)
2072                         pkgunmasklines = stack_lists(pkgunmasklines, incremental=1)
2073
2074                         self.pmaskdict = {}
2075                         for x in pkgmasklines:
2076                                 self.pmaskdict.setdefault(x.cp, []).append(x)
2077
2078                         for x in pkgunmasklines:
2079                                 self.punmaskdict.setdefault(x.cp, []).append(x)
2080
2081                         pkgprovidedlines = [grabfile(os.path.join(x, "package.provided"), recursive=1) for x in self.profiles]
2082                         pkgprovidedlines = stack_lists(pkgprovidedlines, incremental=1)
2083                         has_invalid_data = False
2084                         for x in range(len(pkgprovidedlines)-1, -1, -1):
2085                                 myline = pkgprovidedlines[x]
2086                                 if not isvalidatom("=" + myline):
2087                                         writemsg(_("Invalid package name in package.provided: %s\n") % \
2088                                                 myline, noiselevel=-1)
2089                                         has_invalid_data = True
2090                                         del pkgprovidedlines[x]
2091                                         continue
2092                                 cpvr = catpkgsplit(pkgprovidedlines[x])
2093                                 if not cpvr or cpvr[0] == "null":
2094                                         writemsg(_("Invalid package name in package.provided: ")+pkgprovidedlines[x]+"\n",
2095                                                 noiselevel=-1)
2096                                         has_invalid_data = True
2097                                         del pkgprovidedlines[x]
2098                                         continue
2099                                 if cpvr[0] == "virtual":
2100                                         writemsg(_("Virtual package in package.provided: %s\n") % \
2101                                                 myline, noiselevel=-1)
2102                                         has_invalid_data = True
2103                                         del pkgprovidedlines[x]
2104                                         continue
2105                         if has_invalid_data:
2106                                 writemsg(_("See portage(5) for correct package.provided usage.\n"),
2107                                         noiselevel=-1)
2108                         self.pprovideddict = {}
2109                         for x in pkgprovidedlines:
2110                                 cpv=catpkgsplit(x)
2111                                 if not x:
2112                                         continue
2113                                 mycatpkg = cpv_getkey(x)
2114                                 if mycatpkg in self.pprovideddict:
2115                                         self.pprovideddict[mycatpkg].append(x)
2116                                 else:
2117                                         self.pprovideddict[mycatpkg]=[x]
2118
2119                         # parse licensegroups
2120                         for x in locations:
2121                                 self._license_groups.update(
2122                                         grabdict(os.path.join(x, "license_groups")))
2123
2124                         # reasonable defaults; this is important as without USE_ORDER,
2125                         # USE will always be "" (nothing set)!
2126                         if "USE_ORDER" not in self:
2127                                 self.backupenv["USE_ORDER"] = "env:pkg:conf:defaults:pkginternal:env.d"
2128
2129                         self["PORTAGE_GID"] = str(portage_gid)
2130                         self.backup_changes("PORTAGE_GID")
2131
2132                         if self.get("PORTAGE_DEPCACHEDIR", None):
2133                                 self.depcachedir = self["PORTAGE_DEPCACHEDIR"]
2134                         self["PORTAGE_DEPCACHEDIR"] = self.depcachedir
2135                         self.backup_changes("PORTAGE_DEPCACHEDIR")
2136
2137                         overlays = self.get("PORTDIR_OVERLAY","").split()
2138                         if overlays:
2139                                 new_ov = []
2140                                 for ov in overlays:
2141                                         ov = normalize_path(ov)
2142                                         if os.path.isdir(ov):
2143                                                 new_ov.append(ov)
2144                                         else:
2145                                                 writemsg(_("!!! Invalid PORTDIR_OVERLAY"
2146                                                         " (not a dir): '%s'\n") % ov, noiselevel=-1)
2147                                 self["PORTDIR_OVERLAY"] = " ".join(new_ov)
2148                                 self.backup_changes("PORTDIR_OVERLAY")
2149
2150                         if "CBUILD" not in self and "CHOST" in self:
2151                                 self["CBUILD"] = self["CHOST"]
2152                                 self.backup_changes("CBUILD")
2153
2154                         self["PORTAGE_BIN_PATH"] = PORTAGE_BIN_PATH
2155                         self.backup_changes("PORTAGE_BIN_PATH")
2156                         self["PORTAGE_PYM_PATH"] = PORTAGE_PYM_PATH
2157                         self.backup_changes("PORTAGE_PYM_PATH")
2158
2159                         for var in ("PORTAGE_INST_UID", "PORTAGE_INST_GID"):
2160                                 try:
2161                                         self[var] = str(int(self.get(var, "0")))
2162                                 except ValueError:
2163                                         writemsg(_("!!! %s='%s' is not a valid integer.  "
2164                                                 "Falling back to '0'.\n") % (var, self[var]),
2165                                                 noiselevel=-1)
2166                                         self[var] = "0"
2167                                 self.backup_changes(var)
2168
2169                         # initialize self.features
2170                         self.regenerate()
2171
2172                         if not portage.process.sandbox_capable and \
2173                                 ("sandbox" in self.features or "usersandbox" in self.features):
2174                                 if self.profile_path is not None and \
2175                                         os.path.realpath(self.profile_path) == \
2176                                         os.path.realpath(os.path.join(config_root, PROFILE_PATH)):
2177                                         """ Don't show this warning when running repoman and the
2178                                         sandbox feature came from a profile that doesn't belong to
2179                                         the user."""
2180                                         writemsg(colorize("BAD", _("!!! Problem with sandbox"
2181                                                 " binary. Disabling...\n\n")), noiselevel=-1)
2182                                 if "sandbox" in self.features:
2183                                         self.features.remove("sandbox")
2184                                 if "usersandbox" in self.features:
2185                                         self.features.remove("usersandbox")
2186
2187                         if bsd_chflags:
2188                                 self.features.add('chflags')
2189
2190                         self["FEATURES"] = " ".join(sorted(self.features))
2191                         self.backup_changes("FEATURES")
2192                         global _glep_55_enabled, _validate_cache_for_unsupported_eapis
2193                         if 'parse-eapi-ebuild-head' in self.features:
2194                                 _validate_cache_for_unsupported_eapis = False
2195                         if 'parse-eapi-glep-55' in self.features:
2196                                 _validate_cache_for_unsupported_eapis = False
2197                                 _glep_55_enabled = True
2198
2199                         self._init_dirs()
2200
2201                 if mycpv:
2202                         self.setcpv(mycpv)
2203
2204         def _init_dirs(self):
2205                 """
2206                 Create a few directories that are critical to portage operation
2207                 """
2208                 if not os.access(self["ROOT"], os.W_OK):
2209                         return
2210
2211                 #                                gid, mode, mask, preserve_perms
2212                 dir_mode_map = {
2213                         "tmp"             : (         -1, 0o1777,  0,  True),
2214                         "var/tmp"         : (         -1, 0o1777,  0,  True),
2215                         PRIVATE_PATH      : (portage_gid, 0o2750, 0o2, False),
2216                         CACHE_PATH        : (portage_gid,  0o755, 0o2, False)
2217                 }
2218
2219                 for mypath, (gid, mode, modemask, preserve_perms) \
2220                         in dir_mode_map.items():
2221                         mydir = os.path.join(self["ROOT"], mypath)
2222                         if preserve_perms and os.path.isdir(mydir):
2223                                 # Only adjust permissions on some directories if
2224                                 # they don't exist yet. This gives freedom to the
2225                                 # user to adjust permissions to suit their taste.
2226                                 continue
2227                         try:
2228                                 portage.util.ensure_dirs(mydir, gid=gid, mode=mode, mask=modemask)
2229                         except portage.exception.PortageException as e:
2230                                 writemsg(_("!!! Directory initialization failed: '%s'\n") % mydir,
2231                                         noiselevel=-1)
2232                                 writemsg("!!! %s\n" % str(e),
2233                                         noiselevel=-1)
2234
2235         def expandLicenseTokens(self, tokens):
2236                 """ Take a token from ACCEPT_LICENSE or package.license and expand it
2237                 if it's a group token (indicated by @) or just return it if it's not a
2238                 group.  If a group is negated then negate all group elements."""
2239                 expanded_tokens = []
2240                 for x in tokens:
2241                         expanded_tokens.extend(self._expandLicenseToken(x, None))
2242                 return expanded_tokens
2243
2244         def _expandLicenseToken(self, token, traversed_groups):
2245                 negate = False
2246                 rValue = []
2247                 if token.startswith("-"):
2248                         negate = True
2249                         license_name = token[1:]
2250                 else:
2251                         license_name = token
2252                 if not license_name.startswith("@"):
2253                         rValue.append(token)
2254                         return rValue
2255                 group_name = license_name[1:]
2256                 if not traversed_groups:
2257                         traversed_groups = set()
2258                 license_group = self._license_groups.get(group_name)
2259                 if group_name in traversed_groups:
2260                         writemsg(_("Circular license group reference"
2261                                 " detected in '%s'\n") % group_name, noiselevel=-1)
2262                         rValue.append("@"+group_name)
2263                 elif license_group:
2264                         traversed_groups.add(group_name)
2265                         for l in license_group:
2266                                 if l.startswith("-"):
2267                                         writemsg(_("Skipping invalid element %s"
2268                                                 " in license group '%s'\n") % (l, group_name),
2269                                                 noiselevel=-1)
2270                                 else:
2271                                         rValue.extend(self._expandLicenseToken(l, traversed_groups))
2272                 else:
2273                         writemsg(_("Undefined license group '%s'\n") % group_name,
2274                                 noiselevel=-1)
2275                         rValue.append("@"+group_name)
2276                 if negate:
2277                         rValue = ["-" + token for token in rValue]
2278                 return rValue
2279
2280         def validate(self):
2281                 """Validate miscellaneous settings and display warnings if necessary.
2282                 (This code was previously in the global scope of portage.py)"""
2283
2284                 groups = self["ACCEPT_KEYWORDS"].split()
2285                 archlist = self.archlist()
2286                 if not archlist:
2287                         writemsg(_("--- 'profiles/arch.list' is empty or "
2288                                 "not available. Empty portage tree?\n"), noiselevel=1)
2289                 else:
2290                         for group in groups:
2291                                 if group not in archlist and \
2292                                         not (group.startswith("-") and group[1:] in archlist) and \
2293                                         group not in ("*", "~*", "**"):
2294                                         writemsg(_("!!! INVALID ACCEPT_KEYWORDS: %s\n") % str(group),
2295                                                 noiselevel=-1)
2296
2297                 abs_profile_path = os.path.join(self["PORTAGE_CONFIGROOT"],
2298                         PROFILE_PATH)
2299                 if not self.profile_path or (not os.path.islink(abs_profile_path) and \
2300                         not os.path.exists(os.path.join(abs_profile_path, "parent")) and \
2301                         os.path.exists(os.path.join(self["PORTDIR"], "profiles"))):
2302                         writemsg(_("\a\n\n!!! %s is not a symlink and will probably prevent most merges.\n") % abs_profile_path,
2303                                 noiselevel=-1)
2304                         writemsg(_("!!! It should point into a profile within %s/profiles/\n") % self["PORTDIR"])
2305                         writemsg(_("!!! (You can safely ignore this message when syncing. It's harmless.)\n\n\n"))
2306
2307                 abs_user_virtuals = os.path.join(self["PORTAGE_CONFIGROOT"],
2308                         USER_VIRTUALS_FILE)
2309                 if os.path.exists(abs_user_virtuals):
2310                         writemsg("\n!!! /etc/portage/virtuals is deprecated in favor of\n")
2311                         writemsg("!!! /etc/portage/profile/virtuals. Please move it to\n")
2312                         writemsg("!!! this new location.\n\n")
2313
2314                 if "fakeroot" in self.features and \
2315                         not portage.process.fakeroot_capable:
2316                         writemsg(_("!!! FEATURES=fakeroot is enabled, but the "
2317                                 "fakeroot binary is not installed.\n"), noiselevel=-1)
2318
2319         def loadVirtuals(self,root):
2320                 """Not currently used by portage."""
2321                 writemsg("DEPRECATED: portage.config.loadVirtuals\n")
2322                 self.getvirtuals(root)
2323
2324         def load_best_module(self,property_string):
2325                 best_mod = best_from_dict(property_string,self.modules,self.module_priority)
2326                 mod = None
2327                 try:
2328                         mod = load_mod(best_mod)
2329                 except ImportError:
2330                         if best_mod.startswith("cache."):
2331                                 best_mod = "portage." + best_mod
2332                                 try:
2333                                         mod = load_mod(best_mod)
2334                                 except ImportError:
2335                                         pass
2336                 if mod is None:
2337                         raise
2338                 return mod
2339
2340         def lock(self):
2341                 self.locked = 1
2342
2343         def unlock(self):
2344                 self.locked = 0
2345
2346         def modifying(self):
2347                 if self.locked:
2348                         raise Exception(_("Configuration is locked."))
2349
2350         def backup_changes(self,key=None):
2351                 self.modifying()
2352                 if key and key in self.configdict["env"]:
2353                         self.backupenv[key] = copy.deepcopy(self.configdict["env"][key])
2354                 else:
2355                         raise KeyError(_("No such key defined in environment: %s") % key)
2356
2357         def reset(self,keeping_pkg=0,use_cache=1):
2358                 """
2359                 Restore environment from self.backupenv, call self.regenerate()
2360                 @param keeping_pkg: Should we keep the set_cpv() data or delete it.
2361                 @type keeping_pkg: Boolean
2362                 @param use_cache: Should self.regenerate use the cache or not
2363                 @type use_cache: Boolean
2364                 @rype: None
2365                 """
2366                 self.modifying()
2367                 self.configdict["env"].clear()
2368                 self.configdict["env"].update(self.backupenv)
2369
2370                 self.modifiedkeys = []
2371                 if not keeping_pkg:
2372                         self.mycpv = None
2373                         self.puse = ""
2374                         self.configdict["pkg"].clear()
2375                         self.configdict["pkginternal"].clear()
2376                         self.configdict["defaults"]["USE"] = \
2377                                 " ".join(self.make_defaults_use)
2378                         self.usemask  = set(stack_lists(
2379                                 self.usemask_list, incremental=True))
2380                         self.useforce  = set(stack_lists(
2381                                 self.useforce_list, incremental=True))
2382                 self.regenerate(use_cache=use_cache)
2383
2384         def load_infodir(self,infodir):
2385                 warnings.warn("portage.config.load_infodir() is deprecated",
2386                         DeprecationWarning)
2387                 return 1
2388
2389         class _lazy_vars(object):
2390
2391                 __slots__ = ('built_use', 'settings', 'values')
2392
2393                 def __init__(self, built_use, settings):
2394                         self.built_use = built_use
2395                         self.settings = settings
2396                         self.values = None
2397
2398                 def __getitem__(self, k):
2399                         if self.values is None:
2400                                 self.values = self._init_values()
2401                         return self.values[k]
2402
2403                 def _init_values(self):
2404                         values = {}
2405                         settings = self.settings
2406                         use = self.built_use
2407                         if use is None:
2408                                 use = frozenset(settings['PORTAGE_USE'].split())
2409                         values['ACCEPT_LICENSE'] = self._accept_license(use, settings)
2410                         values['PORTAGE_RESTRICT'] = self._restrict(use, settings)
2411                         return values
2412
2413                 def _accept_license(self, use, settings):
2414                         """
2415                         Generate a pruned version of ACCEPT_LICENSE, by intersection with
2416                         LICENSE. This is required since otherwise ACCEPT_LICENSE might be
2417                         too big (bigger than ARG_MAX), causing execve() calls to fail with
2418                         E2BIG errors as in bug #262647.
2419                         """
2420                         try:
2421                                 licenses = set(flatten(
2422                                         dep.use_reduce(dep.paren_reduce(
2423                                         settings['LICENSE']),
2424                                         uselist=use)))
2425                         except exception.InvalidDependString:
2426                                 licenses = set()
2427                         licenses.discard('||')
2428                         if settings._accept_license:
2429                                 acceptable_licenses = set()
2430                                 for x in settings._accept_license:
2431                                         if x == '*':
2432                                                 acceptable_licenses.update(licenses)
2433                                         elif x == '-*':
2434                                                 acceptable_licenses.clear()
2435                                         elif x[:1] == '-':
2436                                                 acceptable_licenses.discard(x[1:])
2437                                         elif x in licenses:
2438                                                 acceptable_licenses.add(x)
2439
2440                                 licenses = acceptable_licenses
2441                         return ' '.join(sorted(licenses))
2442
2443                 def _restrict(self, use, settings):
2444                         try:
2445                                 restrict = set(flatten(
2446                                         dep.use_reduce(dep.paren_reduce(
2447                                         settings['RESTRICT']),
2448                                         uselist=use)))
2449                         except exception.InvalidDependString:
2450                                 restrict = set()
2451                         return ' '.join(sorted(restrict))
2452
2453         class _lazy_use_expand(object):
2454                 """
2455                 Lazily evaluate USE_EXPAND variables since they are only needed when
2456                 an ebuild shell is spawned. Variables values are made consistent with
2457                 the previously calculated USE settings.
2458                 """
2459
2460                 def __init__(self, use, usemask, iuse_implicit,
2461                         use_expand_split, use_expand_dict):
2462                         self._use = use
2463                         self._usemask = usemask
2464                         self._iuse_implicit = iuse_implicit
2465                         self._use_expand_split = use_expand_split
2466                         self._use_expand_dict = use_expand_dict
2467
2468                 def __getitem__(self, key):
2469                         prefix = key.lower() + '_'
2470                         prefix_len = len(prefix)
2471                         expand_flags = set( x[prefix_len:] for x in self._use \
2472                                 if x[:prefix_len] == prefix )
2473                         var_split = self._use_expand_dict.get(key, '').split()
2474                         # Preserve the order of var_split because it can matter for things
2475                         # like LINGUAS.
2476                         var_split = [ x for x in var_split if x in expand_flags ]
2477                         var_split.extend(expand_flags.difference(var_split))
2478                         has_wildcard = '*' in expand_flags
2479                         if has_wildcard:
2480                                 var_split = [ x for x in var_split if x != "*" ]
2481                         has_iuse = set()
2482                         for x in self._iuse_implicit:
2483                                 if x[:prefix_len] == prefix:
2484                                         has_iuse.add(x[prefix_len:])
2485                         if has_wildcard:
2486                                 # * means to enable everything in IUSE that's not masked
2487                                 if has_iuse:
2488                                         usemask = self._usemask
2489                                         for suffix in has_iuse:
2490                                                 x = prefix + suffix
2491                                                 if x not in usemask:
2492                                                         if suffix not in expand_flags:
2493                                                                 var_split.append(suffix)
2494                                 else:
2495                                         # If there is a wildcard and no matching flags in IUSE then
2496                                         # LINGUAS should be unset so that all .mo files are
2497                                         # installed.
2498                                         var_split = []
2499                         # Make the flags unique and filter them according to IUSE.
2500                         # Also, continue to preserve order for things like LINGUAS
2501                         # and filter any duplicates that variable may contain.
2502                         filtered_var_split = []
2503                         remaining = has_iuse.intersection(var_split)
2504                         for x in var_split:
2505                                 if x in remaining:
2506                                         remaining.remove(x)
2507                                         filtered_var_split.append(x)
2508                         var_split = filtered_var_split
2509
2510                         if var_split:
2511                                 value = ' '.join(var_split)
2512                         else:
2513                                 # Don't export empty USE_EXPAND vars unless the user config
2514                                 # exports them as empty.  This is required for vars such as
2515                                 # LINGUAS, where unset and empty have different meanings.
2516                                 if has_wildcard:
2517                                         # ebuild.sh will see this and unset the variable so
2518                                         # that things like LINGUAS work properly
2519                                         value = '*'
2520                                 else:
2521                                         if has_iuse:
2522                                                 value = ''
2523                                         else:
2524                                                 # It's not in IUSE, so just allow the variable content
2525                                                 # to pass through if it is defined somewhere.  This
2526                                                 # allows packages that support LINGUAS but don't
2527                                                 # declare it in IUSE to use the variable outside of the
2528                                                 # USE_EXPAND context.
2529                                                 value = None
2530
2531                         return value
2532
2533         def setcpv(self, mycpv, use_cache=1, mydb=None):
2534                 """
2535                 Load a particular CPV into the config, this lets us see the
2536                 Default USE flags for a particular ebuild as well as the USE
2537                 flags from package.use.
2538
2539                 @param mycpv: A cpv to load
2540                 @type mycpv: string
2541                 @param use_cache: Enables caching
2542                 @type use_cache: Boolean
2543                 @param mydb: a dbapi instance that supports aux_get with the IUSE key.
2544                 @type mydb: dbapi or derivative.
2545                 @rtype: None
2546                 """
2547
2548                 self.modifying()
2549
2550                 pkg = None
2551                 built_use = None
2552                 if not isinstance(mycpv, basestring):
2553                         pkg = mycpv
2554                         mycpv = pkg.cpv
2555                         mydb = pkg.metadata
2556                         args_hash = (mycpv, id(pkg))
2557                         if pkg.built:
2558                                 built_use = pkg.use.enabled
2559                 else:
2560                         args_hash = (mycpv, id(mydb))
2561
2562                 if args_hash == self._setcpv_args_hash:
2563                         return
2564                 self._setcpv_args_hash = args_hash
2565
2566                 has_changed = False
2567                 self.mycpv = mycpv
2568                 cat, pf = catsplit(mycpv)
2569                 cp = dep_getkey(mycpv)
2570                 cpv_slot = self.mycpv
2571                 pkginternaluse = ""
2572                 iuse = ""
2573                 pkg_configdict = self.configdict["pkg"]
2574                 previous_iuse = pkg_configdict.get("IUSE")
2575
2576                 aux_keys = self._setcpv_aux_keys
2577
2578                 # Discard any existing metadata from the previous package, but
2579                 # preserve things like USE_EXPAND values and PORTAGE_USE which
2580                 # might be reused.
2581                 for k in aux_keys:
2582                         pkg_configdict.pop(k, None)
2583
2584                 pkg_configdict["CATEGORY"] = cat
2585                 pkg_configdict["PF"] = pf
2586                 if mydb:
2587                         if not hasattr(mydb, "aux_get"):
2588                                 for k in aux_keys:
2589                                         if k in mydb:
2590                                                 # Make these lazy, since __getitem__ triggers
2591                                                 # evaluation of USE conditionals which can't
2592                                                 # occur until PORTAGE_USE is calculated below.
2593                                                 pkg_configdict.addLazySingleton(k,
2594                                                         mydb.__getitem__, k)
2595                         else:
2596                                 for k, v in zip(aux_keys, mydb.aux_get(self.mycpv, aux_keys)):
2597                                         pkg_configdict[k] = v
2598                         repository = pkg_configdict.pop("repository", None)
2599                         if repository is not None:
2600                                 pkg_configdict["PORTAGE_REPO_NAME"] = repository
2601                         slot = pkg_configdict["SLOT"]
2602                         iuse = pkg_configdict["IUSE"]
2603                         if pkg is None:
2604                                 cpv_slot = "%s:%s" % (self.mycpv, slot)
2605                         else:
2606                                 cpv_slot = pkg
2607                         pkginternaluse = []
2608                         for x in iuse.split():
2609                                 if x.startswith("+"):
2610                                         pkginternaluse.append(x[1:])
2611                                 elif x.startswith("-"):
2612                                         pkginternaluse.append(x)
2613                         pkginternaluse = " ".join(pkginternaluse)
2614                 if pkginternaluse != self.configdict["pkginternal"].get("USE", ""):
2615                         self.configdict["pkginternal"]["USE"] = pkginternaluse
2616                         has_changed = True
2617
2618                 defaults = []
2619                 pos = 0
2620                 for i, pkgprofileuse_dict in enumerate(self.pkgprofileuse):
2621                         cpdict = pkgprofileuse_dict.get(cp)
2622                         if cpdict:
2623                                 keys = list(cpdict)
2624                                 while keys:
2625                                         bestmatch = best_match_to_list(cpv_slot, keys)
2626                                         if bestmatch:
2627                                                 keys.remove(bestmatch)
2628                                                 defaults.insert(pos, cpdict[bestmatch])
2629                                         else:
2630                                                 break
2631                                 del keys
2632                         if self.make_defaults_use[i]:
2633                                 defaults.insert(pos, self.make_defaults_use[i])
2634                         pos = len(defaults)
2635                 defaults = " ".join(defaults)
2636                 if defaults != self.configdict["defaults"].get("USE",""):
2637                         self.configdict["defaults"]["USE"] = defaults
2638                         has_changed = True
2639
2640                 useforce = self._getUseForce(cpv_slot)
2641                 if useforce != self.useforce:
2642                         self.useforce = useforce
2643                         has_changed = True
2644
2645                 usemask = self._getUseMask(cpv_slot)
2646                 if usemask != self.usemask:
2647                         self.usemask = usemask
2648                         has_changed = True
2649                 oldpuse = self.puse
2650                 self.puse = ""
2651                 cpdict = self.pusedict.get(cp)
2652                 if cpdict:
2653                         keys = list(cpdict)
2654                         while keys:
2655                                 self.pusekey = best_match_to_list(cpv_slot, keys)
2656                                 if self.pusekey:
2657                                         keys.remove(self.pusekey)
2658                                         self.puse = (" ".join(cpdict[self.pusekey])) + " " + self.puse
2659                                 else:
2660                                         break
2661                         del keys
2662                 if oldpuse != self.puse:
2663                         has_changed = True
2664                 self.configdict["pkg"]["PKGUSE"] = self.puse[:] # For saving to PUSE file
2665                 self.configdict["pkg"]["USE"]    = self.puse[:] # this gets appended to USE
2666
2667                 if has_changed:
2668                         self.reset(keeping_pkg=1,use_cache=use_cache)
2669
2670                 # Ensure that "pkg" values are always preferred over "env" values.
2671                 # This must occur _after_ the above reset() call, since reset()
2672                 # copies values from self.backupenv.
2673                 env_configdict = self.configdict['env']
2674                 for k in pkg_configdict:
2675                         if k != 'USE':
2676                                 env_configdict.pop(k, None)
2677
2678                 lazy_vars = self._lazy_vars(built_use, self)
2679                 env_configdict.addLazySingleton('ACCEPT_LICENSE',
2680                         lazy_vars.__getitem__, 'ACCEPT_LICENSE')
2681                 env_configdict.addLazySingleton('PORTAGE_RESTRICT',
2682                         lazy_vars.__getitem__, 'PORTAGE_RESTRICT')
2683
2684                 # If reset() has not been called, it's safe to return
2685                 # early if IUSE has not changed.
2686                 if not has_changed and previous_iuse == iuse:
2687                         return
2688
2689                 # Filter out USE flags that aren't part of IUSE. This has to
2690                 # be done for every setcpv() call since practically every
2691                 # package has different IUSE.
2692                 use = set(self["USE"].split())
2693                 iuse_implicit = self._get_implicit_iuse()
2694                 iuse_implicit.update(x.lstrip("+-") for x in iuse.split())
2695
2696                 # PORTAGE_IUSE is not always needed so it's lazily evaluated.
2697                 self.configdict["pkg"].addLazySingleton(
2698                         "PORTAGE_IUSE", _lazy_iuse_regex, iuse_implicit)
2699
2700                 ebuild_force_test = self.get("EBUILD_FORCE_TEST") == "1"
2701                 if ebuild_force_test and \
2702                         not hasattr(self, "_ebuild_force_test_msg_shown"):
2703                                 self._ebuild_force_test_msg_shown = True
2704                                 writemsg(_("Forcing test.\n"), noiselevel=-1)
2705                 if "test" in self.features and "test" in iuse_implicit:
2706                         if "test" in self.usemask and not ebuild_force_test:
2707                                 # "test" is in IUSE and USE=test is masked, so execution
2708                                 # of src_test() probably is not reliable. Therefore,
2709                                 # temporarily disable FEATURES=test just for this package.
2710                                 self["FEATURES"] = " ".join(x for x in self.features \
2711                                         if x != "test")
2712                                 use.discard("test")
2713                         else:
2714                                 use.add("test")
2715                                 if ebuild_force_test:
2716                                         self.usemask.discard("test")
2717
2718                 # Allow _* flags from USE_EXPAND wildcards to pass through here.
2719                 use.difference_update([x for x in use \
2720                         if x not in iuse_implicit and x[-2:] != '_*'])
2721
2722                 # Use the calculated USE flags to regenerate the USE_EXPAND flags so
2723                 # that they are consistent. For optimal performance, use slice
2724                 # comparison instead of startswith().
2725                 use_expand_split = set(x.lower() for \
2726                         x in self.get('USE_EXPAND', '').split())
2727                 lazy_use_expand = self._lazy_use_expand(use, self.usemask,
2728                         iuse_implicit, use_expand_split, self._use_expand_dict)
2729
2730                 use_expand_iuses = {}
2731                 for x in iuse_implicit:
2732                         x_split = x.split('_')
2733                         if len(x_split) == 1:
2734                                 continue
2735                         for i in range(len(x_split) - 1):
2736                                 k = '_'.join(x_split[:i+1])
2737                                 if k in use_expand_split:
2738                                         v = use_expand_iuses.get(k)
2739                                         if v is None:
2740                                                 v = set()
2741                                                 use_expand_iuses[k] = v
2742                                         v.add(x)
2743                                         break
2744
2745                 # If it's not in IUSE, variable content is allowed
2746                 # to pass through if it is defined somewhere.  This
2747                 # allows packages that support LINGUAS but don't
2748                 # declare it in IUSE to use the variable outside of the
2749                 # USE_EXPAND context.
2750                 for k, use_expand_iuse in use_expand_iuses.items():
2751                         if k + '_*' in use:
2752                                 use.update( x for x in use_expand_iuse if x not in usemask )
2753                         k = k.upper()
2754                         self.configdict['env'].addLazySingleton(k,
2755                                 lazy_use_expand.__getitem__, k)
2756
2757                 # Filtered for the ebuild environment. Store this in a separate
2758                 # attribute since we still want to be able to see global USE
2759                 # settings for things like emerge --info.
2760
2761                 self.configdict["pkg"]["PORTAGE_USE"] = \
2762                         " ".join(sorted(x for x in use if x[-2:] != '_*'))
2763
2764         def _get_implicit_iuse(self):
2765                 """
2766                 Some flags are considered to
2767                 be implicit members of IUSE:
2768                   * Flags derived from ARCH
2769                   * Flags derived from USE_EXPAND_HIDDEN variables
2770                   * Masked flags, such as those from {,package}use.mask
2771                   * Forced flags, such as those from {,package}use.force
2772                   * build and bootstrap flags used by bootstrap.sh
2773                 """
2774                 iuse_implicit = set()
2775                 # Flags derived from ARCH.
2776                 arch = self.configdict["defaults"].get("ARCH")
2777                 if arch:
2778                         iuse_implicit.add(arch)
2779                 iuse_implicit.update(self.get("PORTAGE_ARCHLIST", "").split())
2780
2781                 # Flags derived from USE_EXPAND_HIDDEN variables
2782                 # such as ELIBC, KERNEL, and USERLAND.
2783                 use_expand_hidden = self.get("USE_EXPAND_HIDDEN", "").split()
2784                 for x in use_expand_hidden:
2785                         iuse_implicit.add(x.lower() + "_.*")
2786
2787                 # Flags that have been masked or forced.
2788                 iuse_implicit.update(self.usemask)
2789                 iuse_implicit.update(self.useforce)
2790
2791                 # build and bootstrap flags used by bootstrap.sh
2792                 iuse_implicit.add("build")
2793                 iuse_implicit.add("bootstrap")
2794                 return iuse_implicit
2795
2796         def _getUseMask(self, pkg):
2797                 cp = getattr(pkg, "cp", None)
2798                 if cp is None:
2799                         cp = dep_getkey(pkg)
2800                 usemask = []
2801                 pos = 0
2802                 for i, pusemask_dict in enumerate(self.pusemask_list):
2803                         cpdict = pusemask_dict.get(cp)
2804                         if cpdict:
2805                                 keys = list(cpdict)
2806                                 while keys:
2807                                         best_match = best_match_to_list(pkg, keys)
2808                                         if best_match:
2809                                                 keys.remove(best_match)
2810                                                 usemask.insert(pos, cpdict[best_match])
2811                                         else:
2812                                                 break
2813                                 del keys
2814                         if self.usemask_list[i]:
2815                                 usemask.insert(pos, self.usemask_list[i])
2816                         pos = len(usemask)
2817                 return set(stack_lists(usemask, incremental=True))
2818
2819         def _getUseForce(self, pkg):
2820                 cp = getattr(pkg, "cp", None)
2821                 if cp is None:
2822                         cp = dep_getkey(pkg)
2823                 useforce = []
2824                 pos = 0
2825                 for i, puseforce_dict in enumerate(self.puseforce_list):
2826                         cpdict = puseforce_dict.get(cp)
2827                         if cpdict:
2828                                 keys = list(cpdict)
2829                                 while keys:
2830                                         best_match = best_match_to_list(pkg, keys)
2831                                         if best_match:
2832                                                 keys.remove(best_match)
2833                                                 useforce.insert(pos, cpdict[best_match])
2834                                         else:
2835                                                 break
2836                                 del keys
2837                         if self.useforce_list[i]:
2838                                 useforce.insert(pos, self.useforce_list[i])
2839                         pos = len(useforce)
2840                 return set(stack_lists(useforce, incremental=True))
2841
2842         def _getMaskAtom(self, cpv, metadata):
2843                 """
2844                 Take a package and return a matching package.mask atom, or None if no
2845                 such atom exists or it has been cancelled by package.unmask. PROVIDE
2846                 is not checked, so atoms will not be found for old-style virtuals.
2847
2848                 @param cpv: The package name
2849                 @type cpv: String
2850                 @param metadata: A dictionary of raw package metadata
2851                 @type metadata: dict
2852                 @rtype: String
2853                 @return: An matching atom string or None if one is not found.
2854                 """
2855
2856                 cp = cpv_getkey(cpv)
2857                 mask_atoms = self.pmaskdict.get(cp)
2858                 if mask_atoms:
2859                         pkg_list = ["%s:%s" % (cpv, metadata["SLOT"])]
2860                         unmask_atoms = self.punmaskdict.get(cp)
2861                         for x in mask_atoms:
2862                                 if not match_from_list(x, pkg_list):
2863                                         continue
2864                                 if unmask_atoms:
2865                                         for y in unmask_atoms:
2866                                                 if match_from_list(y, pkg_list):
2867                                                         return None
2868                                 return x
2869                 return None
2870
2871         def _getProfileMaskAtom(self, cpv, metadata):
2872                 """
2873                 Take a package and return a matching profile atom, or None if no
2874                 such atom exists. Note that a profile atom may or may not have a "*"
2875                 prefix. PROVIDE is not checked, so atoms will not be found for
2876                 old-style virtuals.
2877
2878                 @param cpv: The package name
2879                 @type cpv: String
2880                 @param metadata: A dictionary of raw package metadata
2881                 @type metadata: dict
2882                 @rtype: String
2883                 @return: An matching profile atom string or None if one is not found.
2884                 """
2885
2886                 cp = cpv_getkey(cpv)
2887                 profile_atoms = self.prevmaskdict.get(cp)
2888                 if profile_atoms:
2889                         pkg_list = ["%s:%s" % (cpv, metadata["SLOT"])]
2890                         for x in profile_atoms:
2891                                 if match_from_list(x, pkg_list):
2892                                         continue
2893                                 return x
2894                 return None
2895
2896         def _getKeywords(self, cpv, metadata):
2897                 cp = cpv_getkey(cpv)
2898                 pkg = "%s:%s" % (cpv, metadata["SLOT"])
2899                 keywords = [[x for x in metadata["KEYWORDS"].split() if x != "-*"]]
2900                 pos = len(keywords)
2901                 for pkeywords_dict in self._pkeywords_list:
2902                         cpdict = pkeywords_dict.get(cp)
2903                         if cpdict:
2904                                 keys = list(cpdict)
2905                                 while keys:
2906                                         best_match = best_match_to_list(pkg, keys)
2907                                         if best_match:
2908                                                 keys.remove(best_match)
2909                                                 keywords.insert(pos, cpdict[best_match])
2910                                         else:
2911                                                 break
2912                         pos = len(keywords)
2913                 return stack_lists(keywords, incremental=True)
2914
2915         def _getMissingKeywords(self, cpv, metadata):
2916                 """
2917                 Take a package and return a list of any KEYWORDS that the user may
2918                 may need to accept for the given package. If the KEYWORDS are empty
2919                 and the the ** keyword has not been accepted, the returned list will
2920                 contain ** alone (in order to distiguish from the case of "none
2921                 missing").
2922
2923                 @param cpv: The package name (for package.keywords support)
2924                 @type cpv: String
2925                 @param metadata: A dictionary of raw package metadata
2926                 @type metadata: dict
2927                 @rtype: List
2928                 @return: A list of KEYWORDS that have not been accepted.
2929                 """
2930
2931                 # Hack: Need to check the env directly here as otherwise stacking 
2932                 # doesn't work properly as negative values are lost in the config
2933                 # object (bug #139600)
2934                 egroups = self.configdict["backupenv"].get(
2935                         "ACCEPT_KEYWORDS", "").split()
2936                 mygroups = self._getKeywords(cpv, metadata)
2937                 # Repoman may modify this attribute as necessary.
2938                 pgroups = self["ACCEPT_KEYWORDS"].split()
2939                 match=0
2940                 cp = cpv_getkey(cpv)
2941                 pkgdict = self.pkeywordsdict.get(cp)
2942                 matches = False
2943                 if pkgdict:
2944                         cpv_slot_list = ["%s:%s" % (cpv, metadata["SLOT"])]
2945                         for atom, pkgkeywords in pkgdict.items():
2946                                 if match_from_list(atom, cpv_slot_list):
2947                                         matches = True
2948                                         pgroups.extend(pkgkeywords)
2949                 if matches or egroups:
2950                         pgroups.extend(egroups)
2951                         inc_pgroups = set()
2952                         for x in pgroups:
2953                                 if x.startswith("-"):
2954                                         if x == "-*":
2955                                                 inc_pgroups.clear()
2956                                         else:
2957                                                 inc_pgroups.discard(x[1:])
2958                                 else:
2959                                         inc_pgroups.add(x)
2960                         pgroups = inc_pgroups
2961                         del inc_pgroups
2962                 hasstable = False
2963                 hastesting = False
2964                 for gp in mygroups:
2965                         if gp == "*" or (gp == "-*" and len(mygroups) == 1):
2966                                 writemsg(_("--- WARNING: Package '%(cpv)s' uses"
2967                                         " '%(keyword)s' keyword.\n") % {"cpv": cpv, "keyword": gp}, noiselevel=-1)
2968                                 if gp == "*":
2969                                         match = 1
2970                                         break
2971                         elif gp in pgroups:
2972                                 match=1
2973                                 break
2974                         elif gp.startswith("~"):
2975                                 hastesting = True
2976                         elif not gp.startswith("-"):
2977                                 hasstable = True
2978                 if not match and \
2979                         ((hastesting and "~*" in pgroups) or \
2980                         (hasstable and "*" in pgroups) or "**" in pgroups):
2981                         match=1
2982                 if match:
2983                         missing = []
2984                 else:
2985                         if not mygroups:
2986                                 # If KEYWORDS is empty then we still have to return something
2987                                 # in order to distiguish from the case of "none missing".
2988                                 mygroups.append("**")
2989                         missing = mygroups
2990                 return missing
2991
2992         def _getMissingLicenses(self, cpv, metadata):
2993                 """
2994                 Take a LICENSE string and return a list any licenses that the user may
2995                 may need to accept for the given package.  The returned list will not
2996                 contain any licenses that have already been accepted.  This method
2997                 can throw an InvalidDependString exception.
2998
2999                 @param cpv: The package name (for package.license support)
3000                 @type cpv: String
3001                 @param metadata: A dictionary of raw package metadata
3002                 @type metadata: dict
3003                 @rtype: List
3004                 @return: A list of licenses that have not been accepted.
3005                 """
3006                 if not self._accept_license:
3007                         return []
3008                 accept_license = self._accept_license
3009                 cpdict = self._plicensedict.get(dep_getkey(cpv), None)
3010                 if cpdict:
3011                         accept_license = list(self._accept_license)
3012                         cpv_slot = "%s:%s" % (cpv, metadata["SLOT"])
3013                         for atom in match_to_list(cpv_slot, list(cpdict)):
3014                                 accept_license.extend(cpdict[atom])
3015
3016                 licenses = set(flatten(dep.use_reduce(dep.paren_reduce(
3017                         metadata["LICENSE"]), matchall=1)))
3018                 licenses.discard('||')
3019
3020                 acceptable_licenses = set()
3021                 for x in accept_license:
3022                         if x == '*':
3023                                 acceptable_licenses.update(licenses)
3024                         elif x == '-*':
3025                                 acceptable_licenses.clear()
3026                         elif x[:1] == '-':
3027                                 acceptable_licenses.discard(x[1:])
3028                         else:
3029                                 acceptable_licenses.add(x)
3030
3031                 license_str = metadata["LICENSE"]
3032                 if "?" in license_str:
3033                         use = metadata["USE"].split()
3034                 else:
3035                         use = []
3036
3037                 license_struct = portage.dep.use_reduce(
3038                         portage.dep.paren_reduce(license_str), uselist=use)
3039                 license_struct = portage.dep.dep_opconvert(license_struct)
3040                 return self._getMaskedLicenses(license_struct, acceptable_licenses)
3041
3042         def _getMaskedLicenses(self, license_struct, acceptable_licenses):
3043                 if not license_struct:
3044                         return []
3045                 if license_struct[0] == "||":
3046                         ret = []
3047                         for element in license_struct[1:]:
3048                                 if isinstance(element, list):
3049                                         if element:
3050                                                 ret.append(self._getMaskedLicenses(
3051                                                         element, acceptable_licenses))
3052                                                 if not ret[-1]:
3053                                                         return []
3054                                 else:
3055                                         if element in acceptable_licenses:
3056                                                 return []
3057                                         ret.append(element)
3058                         # Return all masked licenses, since we don't know which combination
3059                         # (if any) the user will decide to unmask.
3060                         return flatten(ret)
3061
3062                 ret = []
3063                 for element in license_struct:
3064                         if isinstance(element, list):
3065                                 if element:
3066                                         ret.extend(self._getMaskedLicenses(element,
3067                                                 acceptable_licenses))
3068                         else:
3069                                 if element not in acceptable_licenses:
3070                                         ret.append(element)
3071                 return ret
3072
3073         def _getMissingProperties(self, cpv, metadata):
3074                 """
3075                 Take a PROPERTIES string and return a list of any properties the user may
3076                 may need to accept for the given package.  The returned list will not
3077                 contain any properties that have already been accepted.  This method
3078                 can throw an InvalidDependString exception.
3079
3080                 @param cpv: The package name (for package.properties support)
3081                 @type cpv: String
3082                 @param metadata: A dictionary of raw package metadata
3083                 @type metadata: dict
3084                 @rtype: List
3085                 @return: A list of properties that have not been accepted.
3086                 """
3087                 if not self._accept_properties:
3088                         return []
3089                 accept_properties = self._accept_properties
3090                 cpdict = self._ppropertiesdict.get(dep_getkey(cpv), None)
3091                 if cpdict:
3092                         accept_properties = list(self._accept_properties)
3093                         cpv_slot = "%s:%s" % (cpv, metadata["SLOT"])
3094                         for atom in match_to_list(cpv_slot, list(cpdict)):
3095                                 accept_properties.extend(cpdict[atom])
3096
3097                 properties = set(flatten(dep.use_reduce(dep.paren_reduce(
3098                         metadata["PROPERTIES"]), matchall=1)))
3099                 properties.discard('||')
3100
3101                 acceptable_properties = set()
3102                 for x in accept_properties:
3103                         if x == '*':
3104                                 acceptable_properties.update(properties)
3105                         elif x == '-*':
3106                                 acceptable_properties.clear()
3107                         elif x[:1] == '-':
3108                                 acceptable_properties.discard(x[1:])
3109                         else:
3110                                 acceptable_properties.add(x)
3111
3112                 properties_str = metadata["PROPERTIES"]
3113                 if "?" in properties_str:
3114                         use = metadata["USE"].split()
3115                 else:
3116                         use = []
3117
3118                 properties_struct = portage.dep.use_reduce(
3119                         portage.dep.paren_reduce(properties_str), uselist=use)
3120                 properties_struct = portage.dep.dep_opconvert(properties_struct)
3121                 return self._getMaskedProperties(properties_struct, acceptable_properties)
3122
3123         def _getMaskedProperties(self, properties_struct, acceptable_properties):
3124                 if not properties_struct:
3125                         return []
3126                 if properties_struct[0] == "||":
3127                         ret = []
3128                         for element in properties_struct[1:]:
3129                                 if isinstance(element, list):
3130                                         if element:
3131                                                 ret.append(self._getMaskedProperties(
3132                                                         element, acceptable_properties))
3133                                                 if not ret[-1]:
3134                                                         return []
3135                                 else:
3136                                         if element in acceptable_properties:
3137                                                 return[]
3138                                         ret.append(element)
3139                         # Return all masked properties, since we don't know which combination
3140                         # (if any) the user will decide to unmask
3141                         return flatten(ret)
3142
3143                 ret = []
3144                 for element in properties_struct:
3145                         if isinstance(element, list):
3146                                 if element:
3147                                         ret.extend(self._getMaskedProperties(element,
3148                                                 acceptable_properties))
3149                         else:
3150                                 if element not in acceptable_properties:
3151                                         ret.append(element)
3152                 return ret
3153
3154         def _accept_chost(self, cpv, metadata):
3155                 """
3156                 @return True if pkg CHOST is accepted, False otherwise.
3157                 """
3158                 if self._accept_chost_re is None:
3159                         accept_chost = self.get("ACCEPT_CHOSTS", "").split()
3160                         if not accept_chost:
3161                                 chost = self.get("CHOST")
3162                                 if chost:
3163                                         accept_chost.append(chost)
3164                         if not accept_chost:
3165                                 self._accept_chost_re = re.compile(".*")
3166                         elif len(accept_chost) == 1:
3167                                 try:
3168                                         self._accept_chost_re = re.compile(r'^%s$' % accept_chost[0])
3169                                 except re.error as e:
3170                                         writemsg(_("!!! Invalid ACCEPT_CHOSTS value: '%s': %s\n") % \
3171                                                 (accept_chost[0], e), noiselevel=-1)
3172                                         self._accept_chost_re = re.compile("^$")
3173                         else:
3174                                 try:
3175                                         self._accept_chost_re = re.compile(
3176                                                 r'^(%s)$' % "|".join(accept_chost))
3177                                 except re.error as e:
3178                                         writemsg(_("!!! Invalid ACCEPT_CHOSTS value: '%s': %s\n") % \
3179                                                 (" ".join(accept_chost), e), noiselevel=-1)
3180                                         self._accept_chost_re = re.compile("^$")
3181
3182                 return self._accept_chost_re.match(
3183                         metadata.get('CHOST', '')) is not None
3184
3185         def setinst(self,mycpv,mydbapi):
3186                 """This updates the preferences for old-style virtuals,
3187                 affecting the behavior of dep_expand() and dep_check()
3188                 calls. It can change dbapi.match() behavior since that
3189                 calls dep_expand(). However, dbapi instances have
3190                 internal match caches that are not invalidated when
3191                 preferences are updated here. This can potentially
3192                 lead to some inconsistency (relevant to bug #1343)."""
3193                 self.modifying()
3194                 if len(self.virtuals) == 0:
3195                         self.getvirtuals()
3196                 # Grab the virtuals this package provides and add them into the tree virtuals.
3197                 if not hasattr(mydbapi, "aux_get"):
3198                         provides = mydbapi["PROVIDE"]
3199                 else:
3200                         provides = mydbapi.aux_get(mycpv, ["PROVIDE"])[0]
3201                 if not provides:
3202                         return
3203                 if isinstance(mydbapi, portdbapi):
3204                         self.setcpv(mycpv, mydb=mydbapi)
3205                         myuse = self["PORTAGE_USE"]
3206                 elif not hasattr(mydbapi, "aux_get"):
3207                         myuse = mydbapi["USE"]
3208                 else:
3209                         myuse = mydbapi.aux_get(mycpv, ["USE"])[0]
3210                 virts = flatten(portage.dep.use_reduce(portage.dep.paren_reduce(provides), uselist=myuse.split()))
3211
3212                 modified = False
3213                 cp = dep.Atom(cpv_getkey(mycpv))
3214                 for virt in virts:
3215                         virt = dep_getkey(virt)
3216                         providers = self.virtuals.get(virt)
3217                         if providers and cp in providers:
3218                                 continue
3219                         providers = self._depgraphVirtuals.get(virt)
3220                         if providers is None:
3221                                 providers = []
3222                                 self._depgraphVirtuals[virt] = providers
3223                         if cp not in providers:
3224                                 providers.append(cp)
3225                                 modified = True
3226
3227                 if modified:
3228                         self.virtuals = self.__getvirtuals_compile()
3229
3230         def reload(self):
3231                 """Reload things like /etc/profile.env that can change during runtime."""
3232                 env_d_filename = os.path.join(self["ROOT"], "etc", "profile.env")
3233                 self.configdict["env.d"].clear()
3234                 env_d = getconfig(env_d_filename, expand=False)
3235                 if env_d:
3236                         # env_d will be None if profile.env doesn't exist.
3237                         self.configdict["env.d"].update(env_d)
3238
3239         def regenerate(self,useonly=0,use_cache=1):
3240                 """
3241                 Regenerate settings
3242                 This involves regenerating valid USE flags, re-expanding USE_EXPAND flags
3243                 re-stacking USE flags (-flag and -*), as well as any other INCREMENTAL
3244                 variables.  This also updates the env.d configdict; useful in case an ebuild
3245                 changes the environment.
3246
3247                 If FEATURES has already stacked, it is not stacked twice.
3248
3249                 @param useonly: Only regenerate USE flags (not any other incrementals)
3250                 @type useonly: Boolean
3251                 @param use_cache: Enable Caching (only for autouse)
3252                 @type use_cache: Boolean
3253                 @rtype: None
3254                 """
3255
3256                 self.modifying()
3257                 if self.already_in_regenerate:
3258                         # XXX: THIS REALLY NEEDS TO GET FIXED. autouse() loops.
3259                         writemsg("!!! Looping in regenerate.\n",1)
3260                         return
3261                 else:
3262                         self.already_in_regenerate = 1
3263
3264                 if useonly:
3265                         myincrementals=["USE"]
3266                 else:
3267                         myincrementals = self.incrementals
3268                 myincrementals = set(myincrementals)
3269                 # If self.features exists, it has already been stacked and may have
3270                 # been mutated, so don't stack it again or else any mutations will be
3271                 # reverted.
3272                 if "FEATURES" in myincrementals and hasattr(self, "features"):
3273                         myincrementals.remove("FEATURES")
3274
3275                 if "USE" in myincrementals:
3276                         # Process USE last because it depends on USE_EXPAND which is also
3277                         # an incremental!
3278                         myincrementals.remove("USE")
3279
3280                 mydbs = self.configlist[:-1]
3281                 mydbs.append(self.backupenv)
3282
3283                 # ACCEPT_LICENSE is a lazily evaluated incremental, so that * can be
3284                 # used to match all licenses without every having to explicitly expand
3285                 # it to all licenses.
3286                 if self.local_config:
3287                         mysplit = []
3288                         for curdb in mydbs:
3289                                 mysplit.extend(curdb.get('ACCEPT_LICENSE', '').split())
3290                         accept_license_str = ' '.join(mysplit)
3291                         if accept_license_str:
3292                                 self.configlist[-1]['ACCEPT_LICENSE'] = accept_license_str
3293                         if accept_license_str != self._accept_license_str:
3294                                 self._accept_license_str = accept_license_str
3295                                 self._accept_license = tuple(self.expandLicenseTokens(mysplit))
3296                 else:
3297                         # repoman will accept any license
3298                         self._accept_license = ()
3299
3300                 # ACCEPT_PROPERTIES works like ACCEPT_LICENSE, without groups
3301                 if self.local_config:
3302                         mysplit = []
3303                         for curdb in mydbs:
3304                                 mysplit.extend(curdb.get('ACCEPT_PROPERTIES', '').split())
3305                         if mysplit:
3306                                 self.configlist[-1]['ACCEPT_PROPERTIES'] = ' '.join(mysplit)
3307                         if tuple(mysplit) != self._accept_properties:
3308                                 self._accept_properties = tuple(mysplit)
3309                 else:
3310                         # repoman will accept any property
3311                         self._accept_properties = ()
3312
3313                 for mykey in myincrementals:
3314
3315                         myflags=[]
3316                         for curdb in mydbs:
3317                                 if mykey not in curdb:
3318                                         continue
3319                                 #variables are already expanded
3320                                 mysplit = curdb[mykey].split()
3321
3322                                 for x in mysplit:
3323                                         if x=="-*":
3324                                                 # "-*" is a special "minus" var that means "unset all settings".
3325                                                 # so USE="-* gnome" will have *just* gnome enabled.
3326                                                 myflags = []
3327                                                 continue
3328
3329                                         if x[0]=="+":
3330                                                 # Not legal. People assume too much. Complain.
3331                                                 writemsg(colorize("BAD",
3332                                                         _("USE flags should not start with a '+': %s") % x) \
3333                                                         + "\n", noiselevel=-1)
3334                                                 x=x[1:]
3335                                                 if not x:
3336                                                         continue
3337
3338                                         if (x[0]=="-"):
3339                                                 if (x[1:] in myflags):
3340                                                         # Unset/Remove it.
3341                                                         del myflags[myflags.index(x[1:])]
3342                                                 continue
3343
3344                                         # We got here, so add it now.
3345                                         if x not in myflags:
3346                                                 myflags.append(x)
3347
3348                         myflags.sort()
3349                         #store setting in last element of configlist, the original environment:
3350                         if myflags or mykey in self:
3351                                 self.configlist[-1][mykey] = " ".join(myflags)
3352                         del myflags
3353
3354                 # Do the USE calculation last because it depends on USE_EXPAND.
3355                 if "auto" in self["USE_ORDER"].split(":"):
3356                         self.configdict["auto"]["USE"] = autouse(
3357                                 vartree(root=self["ROOT"], categories=self.categories,
3358                                         settings=self),
3359                                 use_cache=use_cache, mysettings=self)
3360                 else:
3361                         self.configdict["auto"]["USE"] = ""
3362
3363                 use_expand = self.get("USE_EXPAND", "").split()
3364                 use_expand_dict = self._use_expand_dict
3365                 use_expand_dict.clear()
3366                 for k in use_expand:
3367                         v = self.get(k)
3368                         if v is not None:
3369                                 use_expand_dict[k] = v
3370
3371                 if not self.uvlist:
3372                         for x in self["USE_ORDER"].split(":"):
3373                                 if x in self.configdict:
3374                                         self.uvlist.append(self.configdict[x])
3375                         self.uvlist.reverse()
3376
3377                 # For optimal performance, use slice
3378                 # comparison instead of startswith().
3379                 myflags = set()
3380                 for curdb in self.uvlist:
3381                         cur_use_expand = [x for x in use_expand if x in curdb]
3382                         mysplit = curdb.get("USE", "").split()
3383                         if not mysplit and not cur_use_expand:
3384                                 continue
3385                         for x in mysplit:
3386                                 if x == "-*":
3387                                         myflags.clear()
3388                                         continue
3389
3390                                 if x[0] == "+":
3391                                         writemsg(colorize("BAD", _("USE flags should not start "
3392                                                 "with a '+': %s\n") % x), noiselevel=-1)
3393                                         x = x[1:]
3394                                         if not x:
3395                                                 continue
3396
3397                                 if x[0] == "-":
3398                                         myflags.discard(x[1:])
3399                                         continue
3400
3401                                 myflags.add(x)
3402
3403                         for var in cur_use_expand:
3404                                 var_lower = var.lower()
3405                                 is_not_incremental = var not in myincrementals
3406                                 if is_not_incremental:
3407                                         prefix = var_lower + "_"
3408                                         prefix_len = len(prefix)
3409                                         for x in list(myflags):
3410                                                 if x[:prefix_len] == prefix:
3411                                                         myflags.remove(x)
3412                                 for x in curdb[var].split():
3413                                         if x[0] == "+":
3414                                                 if is_not_incremental:
3415                                                         writemsg(colorize("BAD", _("Invalid '+' "
3416                                                                 "operator in non-incremental variable "
3417                                                                  "'%s': '%s'\n") % (var, x)), noiselevel=-1)
3418                                                         continue
3419                                                 else:
3420                                                         writemsg(colorize("BAD", _("Invalid '+' "
3421                                                                 "operator in incremental variable "
3422                                                                  "'%s': '%s'\n") % (var, x)), noiselevel=-1)
3423                                                 x = x[1:]
3424                                         if x[0] == "-":
3425                                                 if is_not_incremental:
3426                                                         writemsg(colorize("BAD", _("Invalid '-' "
3427                                                                 "operator in non-incremental variable "
3428                                                                  "'%s': '%s'\n") % (var, x)), noiselevel=-1)
3429                                                         continue
3430                                                 myflags.discard(var_lower + "_" + x[1:])
3431                                                 continue
3432                                         myflags.add(var_lower + "_" + x)
3433
3434                 if hasattr(self, "features"):
3435                         self.features.clear()
3436                 else:
3437                         self.features = set()
3438                 self.features.update(self.configlist[-1].get('FEATURES', '').split())
3439                 self['FEATURES'] = ' '.join(sorted(self.features))
3440
3441                 myflags.update(self.useforce)
3442                 arch = self.configdict["defaults"].get("ARCH")
3443                 if arch:
3444                         myflags.add(arch)
3445
3446                 myflags.difference_update(self.usemask)
3447                 self.configlist[-1]["USE"]= " ".join(sorted(myflags))
3448
3449                 self.already_in_regenerate = 0
3450
3451         def get_virts_p(self, myroot=None):
3452                 if self.virts_p:
3453                         return self.virts_p
3454                 virts = self.getvirtuals()
3455                 if virts:
3456                         for x in virts:
3457                                 vkeysplit = x.split("/")
3458                                 if vkeysplit[1] not in self.virts_p:
3459                                         self.virts_p[vkeysplit[1]] = virts[x]
3460                 return self.virts_p
3461
3462         def getvirtuals(self, myroot=None):
3463                 """myroot is now ignored because, due to caching, it has always been
3464                 broken for all but the first call."""
3465                 myroot = self["ROOT"]
3466                 if self.virtuals:
3467                         return self.virtuals
3468
3469                 virtuals_list = []
3470                 for x in self.profiles:
3471                         virtuals_file = os.path.join(x, "virtuals")
3472                         virtuals_dict = grabdict(virtuals_file)
3473                         atoms_dict = {}
3474                         for k, v in virtuals_dict.items():
3475                                 try:
3476                                         virt_atom = portage.dep.Atom(k)
3477                                 except portage.exception.InvalidAtom:
3478                                         virt_atom = None
3479                                 else:
3480                                         if virt_atom.blocker or \
3481                                                 str(virt_atom) != str(virt_atom.cp):
3482                                                 virt_atom = None
3483                                 if virt_atom is None:
3484                                         writemsg(_("--- Invalid virtuals atom in %s: %s\n") % \
3485                                                 (virtuals_file, k), noiselevel=-1)
3486                                         continue
3487                                 providers = []
3488                                 for atom in v:
3489                                         atom_orig = atom
3490                                         if atom[:1] == '-':
3491                                                 # allow incrementals
3492                                                 atom = atom[1:]
3493                                         try:
3494                                                 atom = portage.dep.Atom(atom)
3495                                         except portage.exception.InvalidAtom:
3496                                                 atom = None
3497                                         else:
3498                                                 if atom.blocker:
3499                                                         atom = None
3500                                         if atom is None:
3501                                                 writemsg(_("--- Invalid atom in %s: %s\n") % \
3502                                                         (virtuals_file, myatom), noiselevel=-1)
3503                                         else:
3504                                                 if atom_orig == str(atom):
3505                                                         # normal atom, so return as Atom instance
3506                                                         providers.append(atom)
3507                                                 else:
3508                                                         # atom has special prefix, so return as string
3509                                                         providers.append(atom_orig)
3510                                 if providers:
3511                                         atoms_dict[virt_atom] = providers
3512                         if atoms_dict:
3513                                 virtuals_list.append(atoms_dict)
3514
3515                 self.dirVirtuals = stack_dictlist(virtuals_list, incremental=True)
3516                 del virtuals_list
3517
3518                 for virt in self.dirVirtuals:
3519                         # Preference for virtuals decreases from left to right.
3520                         self.dirVirtuals[virt].reverse()
3521
3522                 # Repoman does not use user or tree virtuals.
3523                 if self.local_config and not self.treeVirtuals:
3524                         temp_vartree = vartree(myroot, None,
3525                                 categories=self.categories, settings=self)
3526                         self._populate_treeVirtuals(temp_vartree)
3527
3528                 self.virtuals = self.__getvirtuals_compile()
3529                 return self.virtuals
3530
3531         def _populate_treeVirtuals(self, vartree):
3532                 """Reduce the provides into a list by CP."""
3533                 for provide, cpv_list in vartree.get_all_provides().items():
3534                         try:
3535                                 provide = dep.Atom(provide)
3536                         except exception.InvalidAtom:
3537                                 continue
3538                         self.treeVirtuals[provide.cp] = \
3539                                 [dep.Atom(cpv_getkey(cpv)) for cpv in cpv_list]
3540
3541         def __getvirtuals_compile(self):
3542                 """Stack installed and profile virtuals.  Preference for virtuals
3543                 decreases from left to right.
3544                 Order of preference:
3545                 1. installed and in profile
3546                 2. installed only
3547                 3. profile only
3548                 """
3549
3550                 # Virtuals by profile+tree preferences.
3551                 ptVirtuals   = {}
3552
3553                 for virt, installed_list in self.treeVirtuals.items():
3554                         profile_list = self.dirVirtuals.get(virt, None)
3555                         if not profile_list:
3556                                 continue
3557                         for cp in installed_list:
3558                                 if cp in profile_list:
3559                                         ptVirtuals.setdefault(virt, [])
3560                                         ptVirtuals[virt].append(cp)
3561
3562                 virtuals = stack_dictlist([ptVirtuals, self.treeVirtuals,
3563                         self.dirVirtuals, self._depgraphVirtuals])
3564                 return virtuals
3565
3566         def __delitem__(self,mykey):
3567                 self.modifying()
3568                 for x in self.lookuplist:
3569                         if x != None:
3570                                 if mykey in x:
3571                                         del x[mykey]
3572
3573         def __getitem__(self,mykey):
3574                 for d in self.lookuplist:
3575                         if mykey in d:
3576                                 return d[mykey]
3577                 return '' # for backward compat, don't raise KeyError
3578
3579         def get(self, k, x=None):
3580                 for d in self.lookuplist:
3581                         if k in d:
3582                                 return d[k]
3583                 return x
3584
3585         def pop(self, key, *args):
3586                 if len(args) > 1:
3587                         raise TypeError(
3588                                 "pop expected at most 2 arguments, got " + \
3589                                 repr(1 + len(args)))
3590                 v = self
3591                 for d in reversed(self.lookuplist):
3592                         v = d.pop(key, v)
3593                 if v is self:
3594                         if args:
3595                                 return args[0]
3596                         raise KeyError(key)
3597                 return v
3598
3599         def has_key(self,mykey):
3600                 warnings.warn("portage.config.has_key() is deprecated, "
3601                         "use the in operator instead",
3602                         DeprecationWarning)
3603                 return mykey in self
3604
3605         def __contains__(self, mykey):
3606                 """Called to implement membership test operators (in and not in)."""
3607                 for d in self.lookuplist:
3608                         if mykey in d:
3609                                 return True
3610                 return False
3611
3612         def setdefault(self, k, x=None):
3613                 v = self.get(k)
3614                 if v is not None:
3615                         return v
3616                 else:
3617                         self[k] = x
3618                         return x
3619
3620         def keys(self):
3621                 return list(self)
3622
3623         def __iter__(self):
3624                 keys = set()
3625                 for d in self.lookuplist:
3626                         keys.update(d)
3627                 return iter(keys)
3628
3629         def iterkeys(self):
3630                 return iter(self)
3631
3632         def iteritems(self):
3633                 for k in self:
3634                         yield (k, self[k])
3635
3636         def items(self):
3637                 return list(self.iteritems())
3638
3639         def __setitem__(self,mykey,myvalue):
3640                 "set a value; will be thrown away at reset() time"
3641                 if not isinstance(myvalue, basestring):
3642                         raise ValueError("Invalid type being used as a value: '%s': '%s'" % (str(mykey),str(myvalue)))
3643
3644                 # Avoid potential UnicodeDecodeError exceptions later.
3645                 mykey = _unicode_decode(mykey)
3646                 myvalue = _unicode_decode(myvalue)
3647
3648                 self.modifying()
3649                 self.modifiedkeys.append(mykey)
3650                 self.configdict["env"][mykey]=myvalue
3651
3652         def environ(self):
3653                 "return our locally-maintained environment"
3654                 mydict={}
3655                 environ_filter = self._environ_filter
3656
3657                 phase = self.get('EBUILD_PHASE')
3658                 filter_calling_env = False
3659                 if phase not in ('clean', 'cleanrm', 'depend'):
3660                         temp_dir = self.get('T')
3661                         if temp_dir is not None and \
3662                                 os.path.exists(os.path.join(temp_dir, 'environment')):
3663                                 filter_calling_env = True
3664
3665                 environ_whitelist = self._environ_whitelist
3666                 env_d = self.configdict["env.d"]
3667                 for x in self:
3668                         if x in environ_filter:
3669                                 continue
3670                         myvalue = self[x]
3671                         if not isinstance(myvalue, basestring):
3672                                 writemsg(_("!!! Non-string value in config: %s=%s\n") % \
3673                                         (x, myvalue), noiselevel=-1)
3674                                 continue
3675                         if filter_calling_env and \
3676                                 x not in environ_whitelist and \
3677                                 not self._environ_whitelist_re.match(x):
3678                                 # Do not allow anything to leak into the ebuild
3679                                 # environment unless it is explicitly whitelisted.
3680                                 # This ensures that variables unset by the ebuild
3681                                 # remain unset.
3682                                 continue
3683                         mydict[x] = myvalue
3684                 if "HOME" not in mydict and "BUILD_PREFIX" in mydict:
3685                         writemsg("*** HOME not set. Setting to "+mydict["BUILD_PREFIX"]+"\n")
3686                         mydict["HOME"]=mydict["BUILD_PREFIX"][:]
3687
3688                 if filter_calling_env:
3689                         if phase:
3690                                 whitelist = []
3691                                 if "rpm" == phase:
3692                                         whitelist.append("RPMDIR")
3693                                 for k in whitelist:
3694                                         v = self.get(k)
3695                                         if v is not None:
3696                                                 mydict[k] = v
3697
3698                 # Filtered by IUSE and implicit IUSE.
3699                 mydict["USE"] = self.get("PORTAGE_USE", "")
3700
3701                 # sandbox's bashrc sources /etc/profile which unsets ROOTPATH,
3702                 # so we have to back it up and restore it.
3703                 rootpath = mydict.get("ROOTPATH")
3704                 if rootpath:
3705                         mydict["PORTAGE_ROOTPATH"] = rootpath
3706
3707                 return mydict
3708
3709         def thirdpartymirrors(self):
3710                 if getattr(self, "_thirdpartymirrors", None) is None:
3711                         profileroots = [os.path.join(self["PORTDIR"], "profiles")]
3712                         for x in self["PORTDIR_OVERLAY"].split():
3713                                 profileroots.insert(0, os.path.join(x, "profiles"))
3714                         thirdparty_lists = [grabdict(os.path.join(x, "thirdpartymirrors")) for x in profileroots]
3715                         self._thirdpartymirrors = stack_dictlist(thirdparty_lists, incremental=True)
3716                 return self._thirdpartymirrors
3717
3718         def archlist(self):
3719                 return flatten([[myarch, "~" + myarch] \
3720                         for myarch in self["PORTAGE_ARCHLIST"].split()])
3721
3722         def selinux_enabled(self):
3723                 if getattr(self, "_selinux_enabled", None) is None:
3724                         self._selinux_enabled = 0
3725                         if "selinux" in self["USE"].split():
3726                                 if selinux:
3727                                         if selinux.is_selinux_enabled() == 1:
3728                                                 self._selinux_enabled = 1
3729                                         else:
3730                                                 self._selinux_enabled = 0
3731                                 else:
3732                                         writemsg(_("!!! SELinux module not found. Please verify that it was installed.\n"),
3733                                                 noiselevel=-1)
3734                                         self._selinux_enabled = 0
3735
3736                 return self._selinux_enabled
3737
3738         if sys.hexversion >= 0x3000000:
3739                 keys = __iter__
3740                 items = iteritems
3741
3742 # In some cases, openpty can be slow when it fails. Therefore,
3743 # stop trying to use it after the first failure.
3744 _disable_openpty = False
3745
3746 if sys.hexversion >= 0x3000000:
3747         # This is a temporary workaround for http://bugs.python.org/issue5380.
3748         _disable_openpty = True
3749
3750 def _create_pty_or_pipe(copy_term_size=None):
3751         """
3752         Try to create a pty and if then fails then create a normal
3753         pipe instead.
3754
3755         @param copy_term_size: If a tty file descriptor is given
3756                 then the term size will be copied to the pty.
3757         @type copy_term_size: int
3758         @rtype: tuple
3759         @returns: A tuple of (is_pty, master_fd, slave_fd) where
3760                 is_pty is True if a pty was successfully allocated, and
3761                 False if a normal pipe was allocated.
3762         """
3763
3764         got_pty = False
3765
3766         global _disable_openpty
3767         if _disable_openpty:
3768                 master_fd, slave_fd = os.pipe()
3769         else:
3770                 from pty import openpty
3771                 try:
3772                         master_fd, slave_fd = openpty()
3773                         got_pty = True
3774                 except EnvironmentError as e:
3775                         _disable_openpty = True
3776                         writemsg("openpty failed: '%s'\n" % str(e),
3777                                 noiselevel=-1)
3778                         del e
3779                         master_fd, slave_fd = os.pipe()
3780
3781         if got_pty:
3782                 # Disable post-processing of output since otherwise weird
3783                 # things like \n -> \r\n transformations may occur.
3784                 import termios
3785                 mode = termios.tcgetattr(slave_fd)
3786                 mode[1] &= ~termios.OPOST
3787                 termios.tcsetattr(slave_fd, termios.TCSANOW, mode)
3788
3789         if got_pty and \
3790                 copy_term_size is not None and \
3791                 os.isatty(copy_term_size):
3792                 from portage.output import get_term_size, set_term_size
3793                 rows, columns = get_term_size()
3794                 set_term_size(rows, columns, slave_fd)
3795
3796         return (got_pty, master_fd, slave_fd)
3797
3798 # XXX This would be to replace getstatusoutput completely.
3799 # XXX Issue: cannot block execution. Deadlock condition.
3800 def spawn(mystring, mysettings, debug=0, free=0, droppriv=0, sesandbox=0, fakeroot=0, **keywords):
3801         """
3802         Spawn a subprocess with extra portage-specific options.
3803         Optiosn include:
3804
3805         Sandbox: Sandbox means the spawned process will be limited in its ability t
3806         read and write files (normally this means it is restricted to ${IMAGE}/)
3807         SElinux Sandbox: Enables sandboxing on SElinux
3808         Reduced Privileges: Drops privilages such that the process runs as portage:portage
3809         instead of as root.
3810
3811         Notes: os.system cannot be used because it messes with signal handling.  Instead we
3812         use the portage.process spawn* family of functions.
3813
3814         This function waits for the process to terminate.
3815
3816         @param mystring: Command to run
3817         @type mystring: String
3818         @param mysettings: Either a Dict of Key,Value pairs or an instance of portage.config
3819         @type mysettings: Dictionary or config instance
3820         @param debug: Ignored
3821         @type debug: Boolean
3822         @param free: Enable sandboxing for this process
3823         @type free: Boolean
3824         @param droppriv: Drop to portage:portage when running this command
3825         @type droppriv: Boolean
3826         @param sesandbox: Enable SELinux Sandboxing (toggles a context switch)
3827         @type sesandbox: Boolean
3828         @param fakeroot: Run this command with faked root privileges
3829         @type fakeroot: Boolean
3830         @param keywords: Extra options encoded as a dict, to be passed to spawn
3831         @type keywords: Dictionary
3832         @rtype: Integer
3833         @returns:
3834         1. The return code of the spawned process.
3835         """
3836
3837         if isinstance(mysettings, dict):
3838                 env=mysettings
3839                 keywords["opt_name"]="[ %s ]" % "portage"
3840         else:
3841                 check_config_instance(mysettings)
3842                 env=mysettings.environ()
3843                 if mysettings.mycpv is not None:
3844                         keywords["opt_name"] = "[%s]" % mysettings.mycpv
3845                 else:
3846                         keywords["opt_name"] = "[%s/%s]" % \
3847                                 (mysettings.get("CATEGORY",""), mysettings.get("PF",""))
3848
3849         fd_pipes = keywords.get("fd_pipes")
3850         if fd_pipes is None:
3851                 fd_pipes = {
3852                         0:sys.stdin.fileno(),
3853                         1:sys.stdout.fileno(),
3854                         2:sys.stderr.fileno(),
3855                 }
3856         # In some cases the above print statements don't flush stdout, so
3857         # it needs to be flushed before allowing a child process to use it
3858         # so that output always shows in the correct order.
3859         stdout_filenos = (sys.stdout.fileno(), sys.stderr.fileno())
3860         for fd in fd_pipes.values():
3861                 if fd in stdout_filenos:
3862                         sys.stdout.flush()
3863                         sys.stderr.flush()
3864                         break
3865
3866         # The default policy for the sesandbox domain only allows entry (via exec)
3867         # from shells and from binaries that belong to portage (the number of entry
3868         # points is minimized).  The "tee" binary is not among the allowed entry
3869         # points, so it is spawned outside of the sesandbox domain and reads from a
3870         # pseudo-terminal that connects two domains.
3871         logfile = keywords.get("logfile")
3872         mypids = []
3873         master_fd = None
3874         slave_fd = None
3875         fd_pipes_orig = None
3876         got_pty = False
3877         if logfile:
3878                 del keywords["logfile"]
3879                 if 1 not in fd_pipes or 2 not in fd_pipes:
3880                         raise ValueError(fd_pipes)
3881
3882                 fd_pipes.setdefault(0, sys.stdin.fileno())
3883                 fd_pipes_orig = fd_pipes.copy()
3884
3885                 got_pty, master_fd, slave_fd = \
3886                         _create_pty_or_pipe(copy_term_size=fd_pipes_orig[1])
3887
3888                 # We must set non-blocking mode before we close the slave_fd
3889                 # since otherwise the fcntl call can fail on FreeBSD (the child
3890                 # process might have already exited and closed slave_fd so we
3891                 # have to keep it open in order to avoid FreeBSD potentially
3892                 # generating an EAGAIN exception).
3893                 import fcntl
3894                 fcntl.fcntl(master_fd, fcntl.F_SETFL,
3895                         fcntl.fcntl(master_fd, fcntl.F_GETFL) | os.O_NONBLOCK)
3896
3897                 fd_pipes[0] = fd_pipes_orig[0]
3898                 fd_pipes[1] = slave_fd
3899                 fd_pipes[2] = slave_fd
3900                 keywords["fd_pipes"] = fd_pipes
3901
3902         features = mysettings.features
3903         # TODO: Enable fakeroot to be used together with droppriv.  The
3904         # fake ownership/permissions will have to be converted to real
3905         # permissions in the merge phase.
3906         fakeroot = fakeroot and uid != 0 and portage.process.fakeroot_capable
3907         if droppriv and not uid and portage_gid and portage_uid:
3908                 keywords.update({"uid":portage_uid,"gid":portage_gid,
3909                         "groups":userpriv_groups,"umask":0o02})
3910         if not free:
3911                 free=((droppriv and "usersandbox" not in features) or \
3912                         (not droppriv and "sandbox" not in features and \
3913                         "usersandbox" not in features and not fakeroot))
3914
3915         if free or "SANDBOX_ACTIVE" in os.environ:
3916                 keywords["opt_name"] += " bash"
3917                 spawn_func = portage.process.spawn_bash
3918         elif fakeroot:
3919                 keywords["opt_name"] += " fakeroot"
3920                 keywords["fakeroot_state"] = os.path.join(mysettings["T"], "fakeroot.state")
3921                 spawn_func = portage.process.spawn_fakeroot
3922         else:
3923                 keywords["opt_name"] += " sandbox"
3924                 spawn_func = portage.process.spawn_sandbox
3925
3926         if sesandbox:
3927                 spawn_func = selinux.spawn_wrapper(spawn_func,
3928                         mysettings["PORTAGE_SANDBOX_T"])
3929
3930         returnpid = keywords.get("returnpid")
3931         keywords["returnpid"] = True
3932         try:
3933                 mypids.extend(spawn_func(mystring, env=env, **keywords))
3934         finally:
3935                 if logfile:
3936                         os.close(slave_fd)
3937
3938         if returnpid:
3939                 return mypids
3940
3941         if logfile:
3942                 log_file = open(_unicode_encode(logfile), mode='ab')
3943                 stdout_file = os.fdopen(os.dup(fd_pipes_orig[1]), 'wb')
3944                 master_file = os.fdopen(master_fd, 'rb')
3945                 iwtd = [master_file]
3946                 owtd = []
3947                 ewtd = []
3948                 import array, select
3949                 buffsize = 65536
3950                 eof = False
3951                 while not eof:
3952                         events = select.select(iwtd, owtd, ewtd)
3953                         for f in events[0]:
3954                                 # Use non-blocking mode to prevent read
3955                                 # calls from blocking indefinitely.
3956                                 buf = array.array('B')
3957                                 try:
3958                                         buf.fromfile(f, buffsize)
3959                                 except EOFError:
3960                                         pass
3961                                 if not buf:
3962                                         eof = True
3963                                         break
3964                                 if f is master_file:
3965                                         buf.tofile(stdout_file)
3966                                         stdout_file.flush()
3967                                         buf.tofile(log_file)
3968                                         log_file.flush()
3969                 log_file.close()
3970                 stdout_file.close()
3971                 master_file.close()
3972         pid = mypids[-1]
3973         retval = os.waitpid(pid, 0)[1]
3974         portage.process.spawned_pids.remove(pid)
3975         if retval != os.EX_OK:
3976                 if retval & 0xff:
3977                         return (retval & 0xff) << 8
3978                 return retval >> 8
3979         return retval
3980
3981 _userpriv_spawn_kwargs = (
3982         ("uid",    portage_uid),
3983         ("gid",    portage_gid),
3984         ("groups", userpriv_groups),
3985         ("umask",  0o02),
3986 )
3987
3988 def _spawn_fetch(settings, args, **kwargs):
3989         """
3990         Spawn a process with appropriate settings for fetching, including
3991         userfetch and selinux support.
3992         """
3993
3994         global _userpriv_spawn_kwargs
3995
3996         # Redirect all output to stdout since some fetchers like
3997         # wget pollute stderr (if portage detects a problem then it
3998         # can send it's own message to stderr).
3999         if "fd_pipes" not in kwargs:
4000
4001                 kwargs["fd_pipes"] = {
4002                         0 : sys.stdin.fileno(),
4003                         1 : sys.stdout.fileno(),
4004                         2 : sys.stdout.fileno(),
4005                 }
4006
4007         if "userfetch" in settings.features and \
4008                 os.getuid() == 0 and portage_gid and portage_uid:
4009                 kwargs.update(_userpriv_spawn_kwargs)
4010
4011         spawn_func = portage.process.spawn
4012
4013         if settings.selinux_enabled():
4014                 spawn_func = selinux.spawn_wrapper(spawn_func,
4015                         settings["PORTAGE_FETCH_T"])
4016
4017                 # bash is an allowed entrypoint, while most binaries are not
4018                 if args[0] != BASH_BINARY:
4019                         args = [BASH_BINARY, "-c", "exec \"$@\"", args[0]] + args
4020
4021         rval = spawn_func(args, env=dict(iter(settings.items())), **kwargs)
4022
4023         return rval
4024
4025 _userpriv_test_write_file_cache = {}
4026 _userpriv_test_write_cmd_script = "touch %(file_path)s 2>/dev/null ; rval=$? ; " + \
4027         "rm -f  %(file_path)s ; exit $rval"
4028
4029 def _userpriv_test_write_file(settings, file_path):
4030         """
4031         Drop privileges and try to open a file for writing. The file may or
4032         may not exist, and the parent directory is assumed to exist. The file
4033         is removed before returning.
4034
4035         @param settings: A config instance which is passed to _spawn_fetch()
4036         @param file_path: A file path to open and write.
4037         @return: True if write succeeds, False otherwise.
4038         """
4039
4040         global _userpriv_test_write_file_cache, _userpriv_test_write_cmd_script
4041         rval = _userpriv_test_write_file_cache.get(file_path)
4042         if rval is not None:
4043                 return rval
4044
4045         args = [BASH_BINARY, "-c", _userpriv_test_write_cmd_script % \
4046                 {"file_path" : _shell_quote(file_path)}]
4047
4048         returncode = _spawn_fetch(settings, args)
4049
4050         rval = returncode == os.EX_OK
4051         _userpriv_test_write_file_cache[file_path] = rval
4052         return rval
4053
4054 def _checksum_failure_temp_file(distdir, basename):
4055         """
4056         First try to find a duplicate temp file with the same checksum and return
4057         that filename if available. Otherwise, use mkstemp to create a new unique
4058         filename._checksum_failure_.$RANDOM, rename the given file, and return the
4059         new filename. In any case, filename will be renamed or removed before this
4060         function returns a temp filename.
4061         """
4062
4063         filename = os.path.join(distdir, basename)
4064         size = os.stat(filename).st_size
4065         checksum = None
4066         tempfile_re = re.compile(re.escape(basename) + r'\._checksum_failure_\..*')
4067         for temp_filename in os.listdir(distdir):
4068                 if not tempfile_re.match(temp_filename):
4069                         continue
4070                 temp_filename = os.path.join(distdir, temp_filename)
4071                 try:
4072                         if size != os.stat(temp_filename).st_size:
4073                                 continue
4074                 except OSError:
4075                         continue
4076                 try:
4077                         temp_checksum = portage.checksum.perform_md5(temp_filename)
4078                 except portage.exception.FileNotFound:
4079                         # Apparently the temp file disappeared. Let it go.
4080                         continue
4081                 if checksum is None:
4082                         checksum = portage.checksum.perform_md5(filename)
4083                 if checksum == temp_checksum:
4084                         os.unlink(filename)
4085                         return temp_filename
4086
4087         from tempfile import mkstemp
4088         fd, temp_filename = mkstemp("", basename + "._checksum_failure_.", distdir)
4089         os.close(fd)
4090         os.rename(filename, temp_filename)
4091         return temp_filename
4092
4093 def _check_digests(filename, digests, show_errors=1):
4094         """
4095         Check digests and display a message if an error occurs.
4096         @return True if all digests match, False otherwise.
4097         """
4098         verified_ok, reason = portage.checksum.verify_all(filename, digests)
4099         if not verified_ok:
4100                 if show_errors:
4101                         writemsg(_("!!! Previously fetched"
4102                                 " file: '%s'\n") % filename, noiselevel=-1)
4103                         writemsg(_("!!! Reason: %s\n") % reason[0],
4104                                 noiselevel=-1)
4105                         writemsg(_("!!! Got:      %s\n"
4106                                 "!!! Expected: %s\n") % \
4107                                 (reason[1], reason[2]), noiselevel=-1)
4108                 return False
4109         return True
4110
4111 def _check_distfile(filename, digests, eout, show_errors=1):
4112         """
4113         @return a tuple of (match, stat_obj) where match is True if filename
4114         matches all given digests (if any) and stat_obj is a stat result, or
4115         None if the file does not exist.
4116         """
4117         if digests is None:
4118                 digests = {}
4119         size = digests.get("size")
4120         if size is not None and len(digests) == 1:
4121                 digests = None
4122
4123         try:
4124                 st = os.stat(filename)
4125         except OSError:
4126                 return (False, None)
4127         if size is not None and size != st.st_size:
4128                 return (False, st)
4129         if not digests:
4130                 if size is not None:
4131                         eout.ebegin(_("%s size ;-)") % os.path.basename(filename))
4132                         eout.eend(0)
4133                 elif st.st_size == 0:
4134                         # Zero-byte distfiles are always invalid.
4135                         return (False, st)
4136         else:
4137                 if _check_digests(filename, digests, show_errors=show_errors):
4138                         eout.ebegin("%s %s ;-)" % (os.path.basename(filename),
4139                                 " ".join(sorted(digests))))
4140                         eout.eend(0)
4141                 else:
4142                         return (False, st)
4143         return (True, st)
4144
4145 _fetch_resume_size_re = re.compile('(^[\d]+)([KMGTPEZY]?$)')
4146
4147 _size_suffix_map = {
4148         ''  : 0,
4149         'K' : 10,
4150         'M' : 20,
4151         'G' : 30,
4152         'T' : 40,
4153         'P' : 50,
4154         'E' : 60,
4155         'Z' : 70,
4156         'Y' : 80,
4157 }
4158
4159 def fetch(myuris, mysettings, listonly=0, fetchonly=0, locks_in_subdir=".locks",use_locks=1, try_mirrors=1):
4160         "fetch files.  Will use digest file if available."
4161
4162         if not myuris:
4163                 return 1
4164
4165         features = mysettings.features
4166         restrict = mysettings.get("PORTAGE_RESTRICT","").split()
4167
4168         from portage.data import secpass
4169         userfetch = secpass >= 2 and "userfetch" in features
4170         userpriv = secpass >= 2 and "userpriv" in features
4171
4172         # 'nomirror' is bad/negative logic. You Restrict mirroring, not no-mirroring.
4173         if "mirror" in restrict or \
4174            "nomirror" in restrict:
4175                 if ("mirror" in features) and ("lmirror" not in features):
4176                         # lmirror should allow you to bypass mirror restrictions.
4177                         # XXX: This is not a good thing, and is temporary at best.
4178                         print(_(">>> \"mirror\" mode desired and \"mirror\" restriction found; skipping fetch."))
4179                         return 1
4180
4181         # Generally, downloading the same file repeatedly from
4182         # every single available mirror is a waste of bandwidth
4183         # and time, so there needs to be a cap.
4184         checksum_failure_max_tries = 5
4185         v = checksum_failure_max_tries
4186         try:
4187                 v = int(mysettings.get("PORTAGE_FETCH_CHECKSUM_TRY_MIRRORS",
4188                         checksum_failure_max_tries))
4189         except (ValueError, OverflowError):
4190                 writemsg(_("!!! Variable PORTAGE_FETCH_CHECKSUM_TRY_MIRRORS"
4191                         " contains non-integer value: '%s'\n") % \
4192                         mysettings["PORTAGE_FETCH_CHECKSUM_TRY_MIRRORS"], noiselevel=-1)
4193                 writemsg(_("!!! Using PORTAGE_FETCH_CHECKSUM_TRY_MIRRORS "
4194                         "default value: %s\n") % checksum_failure_max_tries,
4195                         noiselevel=-1)
4196                 v = checksum_failure_max_tries
4197         if v < 1:
4198                 writemsg(_("!!! Variable PORTAGE_FETCH_CHECKSUM_TRY_MIRRORS"
4199                         " contains value less than 1: '%s'\n") % v, noiselevel=-1)
4200                 writemsg(_("!!! Using PORTAGE_FETCH_CHECKSUM_TRY_MIRRORS "
4201                         "default value: %s\n") % checksum_failure_max_tries,
4202                         noiselevel=-1)
4203                 v = checksum_failure_max_tries
4204         checksum_failure_max_tries = v
4205         del v
4206
4207         fetch_resume_size_default = "350K"
4208         fetch_resume_size = mysettings.get("PORTAGE_FETCH_RESUME_MIN_SIZE")
4209         if fetch_resume_size is not None:
4210                 fetch_resume_size = "".join(fetch_resume_size.split())
4211                 if not fetch_resume_size:
4212                         # If it's undefined or empty, silently use the default.
4213                         fetch_resume_size = fetch_resume_size_default
4214                 match = _fetch_resume_size_re.match(fetch_resume_size)
4215                 if match is None or \
4216                         (match.group(2).upper() not in _size_suffix_map):
4217                         writemsg(_("!!! Variable PORTAGE_FETCH_RESUME_MIN_SIZE"
4218                                 " contains an unrecognized format: '%s'\n") % \
4219                                 mysettings["PORTAGE_FETCH_RESUME_MIN_SIZE"], noiselevel=-1)
4220                         writemsg(_("!!! Using PORTAGE_FETCH_RESUME_MIN_SIZE "
4221                                 "default value: %s\n") % fetch_resume_size_default,
4222                                 noiselevel=-1)
4223                         fetch_resume_size = None
4224         if fetch_resume_size is None:
4225                 fetch_resume_size = fetch_resume_size_default
4226                 match = _fetch_resume_size_re.match(fetch_resume_size)
4227         fetch_resume_size = int(match.group(1)) * \
4228                 2 ** _size_suffix_map[match.group(2).upper()]
4229
4230         # Behave like the package has RESTRICT="primaryuri" after a
4231         # couple of checksum failures, to increase the probablility
4232         # of success before checksum_failure_max_tries is reached.
4233         checksum_failure_primaryuri = 2
4234         thirdpartymirrors = mysettings.thirdpartymirrors()
4235
4236         # In the background parallel-fetch process, it's safe to skip checksum
4237         # verification of pre-existing files in $DISTDIR that have the correct
4238         # file size. The parent process will verify their checksums prior to
4239         # the unpack phase.
4240
4241         parallel_fetchonly = "PORTAGE_PARALLEL_FETCHONLY" in mysettings
4242         if parallel_fetchonly:
4243                 fetchonly = 1
4244
4245         check_config_instance(mysettings)
4246
4247         custommirrors = grabdict(os.path.join(mysettings["PORTAGE_CONFIGROOT"],
4248                 CUSTOM_MIRRORS_FILE), recursive=1)
4249
4250         mymirrors=[]
4251
4252         if listonly or ("distlocks" not in features):
4253                 use_locks = 0
4254
4255         fetch_to_ro = 0
4256         if "skiprocheck" in features:
4257                 fetch_to_ro = 1
4258
4259         if not os.access(mysettings["DISTDIR"],os.W_OK) and fetch_to_ro:
4260                 if use_locks:
4261                         writemsg(colorize("BAD",
4262                                 _("!!! For fetching to a read-only filesystem, "
4263                                 "locking should be turned off.\n")), noiselevel=-1)
4264                         writemsg(_("!!! This can be done by adding -distlocks to "
4265                                 "FEATURES in /etc/make.conf\n"), noiselevel=-1)
4266 #                       use_locks = 0
4267
4268         # local mirrors are always added
4269         if "local" in custommirrors:
4270                 mymirrors += custommirrors["local"]
4271
4272         if "nomirror" in restrict or \
4273            "mirror" in restrict:
4274                 # We don't add any mirrors.
4275                 pass
4276         else:
4277                 if try_mirrors:
4278                         mymirrors += [x.rstrip("/") for x in mysettings["GENTOO_MIRRORS"].split() if x]
4279
4280         skip_manifest = mysettings.get("EBUILD_SKIP_MANIFEST") == "1"
4281         pkgdir = mysettings.get("O")
4282         if not (pkgdir is None or skip_manifest):
4283                 mydigests = Manifest(
4284                         pkgdir, mysettings["DISTDIR"]).getTypeDigests("DIST")
4285         else:
4286                 # no digests because fetch was not called for a specific package
4287                 mydigests = {}
4288
4289         ro_distdirs = [x for x in \
4290                 util.shlex_split(mysettings.get("PORTAGE_RO_DISTDIRS", "")) \
4291                 if os.path.isdir(x)]
4292
4293         fsmirrors = []
4294         for x in range(len(mymirrors)-1,-1,-1):
4295                 if mymirrors[x] and mymirrors[x][0]=='/':
4296                         fsmirrors += [mymirrors[x]]
4297                         del mymirrors[x]
4298
4299         restrict_fetch = "fetch" in restrict
4300         custom_local_mirrors = custommirrors.get("local", [])
4301         if restrict_fetch:
4302                 # With fetch restriction, a normal uri may only be fetched from
4303                 # custom local mirrors (if available).  A mirror:// uri may also
4304                 # be fetched from specific mirrors (effectively overriding fetch
4305                 # restriction, but only for specific mirrors).
4306                 locations = custom_local_mirrors
4307         else:
4308                 locations = mymirrors
4309
4310         file_uri_tuples = []
4311         if isinstance(myuris, dict):
4312                 for myfile, uri_set in myuris.items():
4313                         for myuri in uri_set:
4314                                 file_uri_tuples.append((myfile, myuri))
4315         else:
4316                 for myuri in myuris:
4317                         file_uri_tuples.append((os.path.basename(myuri), myuri))
4318
4319         filedict={}
4320         primaryuri_indexes={}
4321         primaryuri_dict = {}
4322         thirdpartymirror_uris = {}
4323         for myfile, myuri in file_uri_tuples:
4324                 if myfile not in filedict:
4325                         filedict[myfile]=[]
4326                         for y in range(0,len(locations)):
4327                                 filedict[myfile].append(locations[y]+"/distfiles/"+myfile)
4328                 if myuri[:9]=="mirror://":
4329                         eidx = myuri.find("/", 9)
4330                         if eidx != -1:
4331                                 mirrorname = myuri[9:eidx]
4332                                 path = myuri[eidx+1:]
4333
4334                                 # Try user-defined mirrors first
4335                                 if mirrorname in custommirrors:
4336                                         for cmirr in custommirrors[mirrorname]:
4337                                                 filedict[myfile].append(
4338                                                         cmirr.rstrip("/") + "/" + path)
4339
4340                                 # now try the official mirrors
4341                                 if mirrorname in thirdpartymirrors:
4342                                         shuffle(thirdpartymirrors[mirrorname])
4343
4344                                         uris = [locmirr.rstrip("/") + "/" + path \
4345                                                 for locmirr in thirdpartymirrors[mirrorname]]
4346                                         filedict[myfile].extend(uris)
4347                                         thirdpartymirror_uris.setdefault(myfile, []).extend(uris)
4348
4349                                 if not filedict[myfile]:
4350                                         writemsg(_("No known mirror by the name: %s\n") % (mirrorname))
4351                         else:
4352                                 writemsg(_("Invalid mirror definition in SRC_URI:\n"), noiselevel=-1)
4353                                 writemsg("  %s\n" % (myuri), noiselevel=-1)
4354                 else:
4355                         if restrict_fetch:
4356                                 # Only fetch from specific mirrors is allowed.
4357                                 continue
4358                         if "primaryuri" in restrict:
4359                                 # Use the source site first.
4360                                 if myfile in primaryuri_indexes:
4361                                         primaryuri_indexes[myfile] += 1
4362                                 else:
4363                                         primaryuri_indexes[myfile] = 0
4364                                 filedict[myfile].insert(primaryuri_indexes[myfile], myuri)
4365                         else:
4366                                 filedict[myfile].append(myuri)
4367                         primaryuris = primaryuri_dict.get(myfile)
4368                         if primaryuris is None:
4369                                 primaryuris = []
4370                                 primaryuri_dict[myfile] = primaryuris
4371                         primaryuris.append(myuri)
4372
4373         # Prefer thirdpartymirrors over normal mirrors in cases when
4374         # the file does not yet exist on the normal mirrors.
4375         for myfile, uris in thirdpartymirror_uris.items():
4376                 primaryuri_dict.setdefault(myfile, []).extend(uris)
4377
4378         can_fetch=True
4379
4380         if listonly:
4381                 can_fetch = False
4382
4383         if can_fetch and not fetch_to_ro:
4384                 global _userpriv_test_write_file_cache
4385                 dirmode  = 0o2070
4386                 filemode =   0o60
4387                 modemask =    0o2
4388                 dir_gid = portage_gid
4389                 if "FAKED_MODE" in mysettings:
4390                         # When inside fakeroot, directories with portage's gid appear
4391                         # to have root's gid. Therefore, use root's gid instead of
4392                         # portage's gid to avoid spurrious permissions adjustments
4393                         # when inside fakeroot.
4394                         dir_gid = 0
4395                 distdir_dirs = [""]
4396                 if "distlocks" in features:
4397                         distdir_dirs.append(".locks")
4398                 try:
4399                         
4400                         for x in distdir_dirs:
4401                                 mydir = os.path.join(mysettings["DISTDIR"], x)
4402                                 write_test_file = os.path.join(
4403                                         mydir, ".__portage_test_write__")
4404
4405                                 try:
4406                                         st = os.stat(mydir)
4407                                 except OSError:
4408                                         st = None
4409
4410                                 if st is not None and stat.S_ISDIR(st.st_mode):
4411                                         if not (userfetch or userpriv):
4412                                                 continue
4413                                         if _userpriv_test_write_file(mysettings, write_test_file):
4414                                                 continue
4415
4416                                 _userpriv_test_write_file_cache.pop(write_test_file, None)
4417                                 if portage.util.ensure_dirs(mydir, gid=dir_gid, mode=dirmode, mask=modemask):
4418                                         if st is None:
4419                                                 # The directory has just been created
4420                                                 # and therefore it must be empty.
4421                                                 continue
4422                                         writemsg(_("Adjusting permissions recursively: '%s'\n") % mydir,
4423                                                 noiselevel=-1)
4424                                         def onerror(e):
4425                                                 raise # bail out on the first error that occurs during recursion
4426                                         if not apply_recursive_permissions(mydir,
4427                                                 gid=dir_gid, dirmode=dirmode, dirmask=modemask,
4428                                                 filemode=filemode, filemask=modemask, onerror=onerror):
4429                                                 raise portage.exception.OperationNotPermitted(
4430                                                         _("Failed to apply recursive permissions for the portage group."))
4431                 except portage.exception.PortageException as e:
4432                         if not os.path.isdir(mysettings["DISTDIR"]):
4433                                 writemsg("!!! %s\n" % str(e), noiselevel=-1)
4434                                 writemsg(_("!!! Directory Not Found: DISTDIR='%s'\n") % mysettings["DISTDIR"], noiselevel=-1)
4435                                 writemsg(_("!!! Fetching will fail!\n"), noiselevel=-1)
4436
4437         if can_fetch and \
4438                 not fetch_to_ro and \
4439                 not os.access(mysettings["DISTDIR"], os.W_OK):
4440                 writemsg(_("!!! No write access to '%s'\n") % mysettings["DISTDIR"],
4441                         noiselevel=-1)
4442                 can_fetch = False
4443
4444         if can_fetch and use_locks and locks_in_subdir:
4445                         distlocks_subdir = os.path.join(mysettings["DISTDIR"], locks_in_subdir)
4446                         if not os.access(distlocks_subdir, os.W_OK):
4447                                 writemsg(_("!!! No write access to write to %s.  Aborting.\n") % distlocks_subdir,
4448                                         noiselevel=-1)
4449                                 return 0
4450                         del distlocks_subdir
4451
4452         distdir_writable = can_fetch and not fetch_to_ro
4453         failed_files = set()
4454         restrict_fetch_msg = False
4455
4456         for myfile in filedict:
4457                 """
4458                 fetched  status
4459                 0        nonexistent
4460                 1        partially downloaded
4461                 2        completely downloaded
4462                 """
4463                 fetched = 0
4464
4465                 orig_digests = mydigests.get(myfile, {})
4466                 size = orig_digests.get("size")
4467                 if size == 0:
4468                         # Zero-byte distfiles are always invalid, so discard their digests.
4469                         del mydigests[myfile]
4470                         orig_digests.clear()
4471                         size = None
4472                 pruned_digests = orig_digests
4473                 if parallel_fetchonly:
4474                         pruned_digests = {}
4475                         if size is not None:
4476                                 pruned_digests["size"] = size
4477
4478                 myfile_path = os.path.join(mysettings["DISTDIR"], myfile)
4479                 has_space = True
4480                 has_space_superuser = True
4481                 file_lock = None
4482                 if listonly:
4483                         writemsg_stdout("\n", noiselevel=-1)
4484                 else:
4485                         # check if there is enough space in DISTDIR to completely store myfile
4486                         # overestimate the filesize so we aren't bitten by FS overhead
4487                         if size is not None and hasattr(os, "statvfs"):
4488                                 vfs_stat = os.statvfs(mysettings["DISTDIR"])
4489                                 try:
4490                                         mysize = os.stat(myfile_path).st_size
4491                                 except OSError as e:
4492                                         if e.errno not in (errno.ENOENT, errno.ESTALE):
4493                                                 raise
4494                                         del e
4495                                         mysize = 0
4496                                 if (size - mysize + vfs_stat.f_bsize) >= \
4497                                         (vfs_stat.f_bsize * vfs_stat.f_bavail):
4498
4499                                         if (size - mysize + vfs_stat.f_bsize) >= \
4500                                                 (vfs_stat.f_bsize * vfs_stat.f_bfree):
4501                                                 has_space_superuser = False
4502
4503                                         if not has_space_superuser:
4504                                                 has_space = False
4505                                         elif secpass < 2:
4506                                                 has_space = False
4507                                         elif userfetch:
4508                                                 has_space = False
4509
4510                         if not has_space:
4511                                 writemsg(_("!!! Insufficient space to store %s in %s\n") % \
4512                                         (myfile, mysettings["DISTDIR"]), noiselevel=-1)
4513
4514                                 if has_space_superuser:
4515                                         writemsg(_("!!! Insufficient privileges to use "
4516                                                 "remaining space.\n"), noiselevel=-1)
4517                                         if userfetch:
4518                                                 writemsg(_("!!! You may set FEATURES=\"-userfetch\""
4519                                                         " in /etc/make.conf in order to fetch with\n"
4520                                                         "!!! superuser privileges.\n"), noiselevel=-1)
4521
4522                         if distdir_writable and use_locks:
4523
4524                                 if locks_in_subdir:
4525                                         lock_file = os.path.join(mysettings["DISTDIR"],
4526                                                 locks_in_subdir, myfile)
4527                                 else:
4528                                         lock_file = myfile_path
4529
4530                                 lock_kwargs = {}
4531                                 if fetchonly:
4532                                         lock_kwargs["flags"] = os.O_NONBLOCK
4533
4534                                 try:
4535                                         file_lock = portage.locks.lockfile(myfile_path,
4536                                                 wantnewlockfile=1, **lock_kwargs)
4537                                 except portage.exception.TryAgain:
4538                                         writemsg(_(">>> File '%s' is already locked by "
4539                                                 "another fetcher. Continuing...\n") % myfile,
4540                                                 noiselevel=-1)
4541                                         continue
4542                 try:
4543                         if not listonly:
4544
4545                                 eout = portage.output.EOutput()
4546                                 eout.quiet = mysettings.get("PORTAGE_QUIET") == "1"
4547                                 match, mystat = _check_distfile(
4548                                         myfile_path, pruned_digests, eout)
4549                                 if match:
4550                                         if distdir_writable:
4551                                                 try:
4552                                                         apply_secpass_permissions(myfile_path,
4553                                                                 gid=portage_gid, mode=0o664, mask=0o2,
4554                                                                 stat_cached=mystat)
4555                                                 except portage.exception.PortageException as e:
4556                                                         if not os.access(myfile_path, os.R_OK):
4557                                                                 writemsg(_("!!! Failed to adjust permissions:"
4558                                                                         " %s\n") % str(e), noiselevel=-1)
4559                                                         del e
4560                                         continue
4561
4562                                 if distdir_writable and mystat is None:
4563                                         # Remove broken symlinks if necessary.
4564                                         try:
4565                                                 os.unlink(myfile_path)
4566                                         except OSError:
4567                                                 pass
4568
4569                                 if mystat is not None:
4570                                         if stat.S_ISDIR(mystat.st_mode):
4571                                                 portage.util.writemsg_level(
4572                                                         _("!!! Unable to fetch file since "
4573                                                         "a directory is in the way: \n"
4574                                                         "!!!   %s\n") % myfile_path,
4575                                                         level=logging.ERROR, noiselevel=-1)
4576                                                 return 0
4577
4578                                         if mystat.st_size == 0:
4579                                                 if distdir_writable:
4580                                                         try:
4581                                                                 os.unlink(myfile_path)
4582                                                         except OSError:
4583                                                                 pass
4584                                         elif distdir_writable:
4585                                                 if mystat.st_size < fetch_resume_size and \
4586                                                         mystat.st_size < size:
4587                                                         # If the file already exists and the size does not
4588                                                         # match the existing digests, it may be that the
4589                                                         # user is attempting to update the digest. In this
4590                                                         # case, the digestgen() function will advise the
4591                                                         # user to use `ebuild --force foo.ebuild manifest`
4592                                                         # in order to force the old digests to be replaced.
4593                                                         # Since the user may want to keep this file, rename
4594                                                         # it instead of deleting it.
4595                                                         writemsg(_(">>> Renaming distfile with size "
4596                                                                 "%d (smaller than " "PORTAGE_FETCH_RESU"
4597                                                                 "ME_MIN_SIZE)\n") % mystat.st_size)
4598                                                         temp_filename = \
4599                                                                 _checksum_failure_temp_file(
4600                                                                 mysettings["DISTDIR"], myfile)
4601                                                         writemsg_stdout(_("Refetching... "
4602                                                                 "File renamed to '%s'\n\n") % \
4603                                                                 temp_filename, noiselevel=-1)
4604                                                 elif mystat.st_size >= size:
4605                                                         temp_filename = \
4606                                                                 _checksum_failure_temp_file(
4607                                                                 mysettings["DISTDIR"], myfile)
4608                                                         writemsg_stdout(_("Refetching... "
4609                                                                 "File renamed to '%s'\n\n") % \
4610                                                                 temp_filename, noiselevel=-1)
4611
4612                                 if distdir_writable and ro_distdirs:
4613                                         readonly_file = None
4614                                         for x in ro_distdirs:
4615                                                 filename = os.path.join(x, myfile)
4616                                                 match, mystat = _check_distfile(
4617                                                         filename, pruned_digests, eout)
4618                                                 if match:
4619                                                         readonly_file = filename
4620                                                         break
4621                                         if readonly_file is not None:
4622                                                 try:
4623                                                         os.unlink(myfile_path)
4624                                                 except OSError as e:
4625                                                         if e.errno not in (errno.ENOENT, errno.ESTALE):
4626                                                                 raise
4627                                                         del e
4628                                                 os.symlink(readonly_file, myfile_path)
4629                                                 continue
4630
4631                                 if fsmirrors and not os.path.exists(myfile_path) and has_space:
4632                                         for mydir in fsmirrors:
4633                                                 mirror_file = os.path.join(mydir, myfile)
4634                                                 try:
4635                                                         shutil.copyfile(mirror_file, myfile_path)
4636                                                         writemsg(_("Local mirror has file: %s\n") % myfile)
4637                                                         break
4638                                                 except (IOError, OSError) as e:
4639                                                         if e.errno not in (errno.ENOENT, errno.ESTALE):
4640                                                                 raise
4641                                                         del e
4642
4643                                 try:
4644                                         mystat = os.stat(myfile_path)
4645                                 except OSError as e:
4646                                         if e.errno not in (errno.ENOENT, errno.ESTALE):
4647                                                 raise
4648                                         del e
4649                                 else:
4650                                         try:
4651                                                 apply_secpass_permissions(
4652                                                         myfile_path, gid=portage_gid, mode=0o664, mask=0o2,
4653                                                         stat_cached=mystat)
4654                                         except portage.exception.PortageException as e:
4655                                                 if not os.access(myfile_path, os.R_OK):
4656                                                         writemsg(_("!!! Failed to adjust permissions:"
4657                                                                 " %s\n") % str(e), noiselevel=-1)
4658
4659                                         # If the file is empty then it's obviously invalid. Remove
4660                                         # the empty file and try to download if possible.
4661                                         if mystat.st_size == 0:
4662                                                 if distdir_writable:
4663                                                         try:
4664                                                                 os.unlink(myfile_path)
4665                                                         except EnvironmentError:
4666                                                                 pass
4667                                         elif myfile not in mydigests:
4668                                                 # We don't have a digest, but the file exists.  We must
4669                                                 # assume that it is fully downloaded.
4670                                                 continue
4671                                         else:
4672                                                 if mystat.st_size < mydigests[myfile]["size"] and \
4673                                                         not restrict_fetch:
4674                                                         fetched = 1 # Try to resume this download.
4675                                                 elif parallel_fetchonly and \
4676                                                         mystat.st_size == mydigests[myfile]["size"]:
4677                                                         eout = portage.output.EOutput()
4678                                                         eout.quiet = \
4679                                                                 mysettings.get("PORTAGE_QUIET") == "1"
4680                                                         eout.ebegin(
4681                                                                 "%s size ;-)" % (myfile, ))
4682                                                         eout.eend(0)
4683                                                         continue
4684                                                 else:
4685                                                         verified_ok, reason = portage.checksum.verify_all(
4686                                                                 myfile_path, mydigests[myfile])
4687                                                         if not verified_ok:
4688                                                                 writemsg(_("!!! Previously fetched"
4689                                                                         " file: '%s'\n") % myfile, noiselevel=-1)
4690                                                                 writemsg(_("!!! Reason: %s\n") % reason[0],
4691                                                                         noiselevel=-1)
4692                                                                 writemsg(_("!!! Got:      %s\n"
4693                                                                         "!!! Expected: %s\n") % \
4694                                                                         (reason[1], reason[2]), noiselevel=-1)
4695                                                                 if reason[0] == _("Insufficient data for checksum verification"):
4696                                                                         return 0
4697                                                                 if distdir_writable:
4698                                                                         temp_filename = \
4699                                                                                 _checksum_failure_temp_file(
4700                                                                                 mysettings["DISTDIR"], myfile)
4701                                                                         writemsg_stdout(_("Refetching... "
4702                                                                                 "File renamed to '%s'\n\n") % \
4703                                                                                 temp_filename, noiselevel=-1)
4704                                                         else:
4705                                                                 eout = portage.output.EOutput()
4706                                                                 eout.quiet = \
4707                                                                         mysettings.get("PORTAGE_QUIET", None) == "1"
4708                                                                 digests = mydigests.get(myfile)
4709                                                                 if digests:
4710                                                                         digests = list(digests)
4711                                                                         digests.sort()
4712                                                                         eout.ebegin(
4713                                                                                 "%s %s ;-)" % (myfile, " ".join(digests)))
4714                                                                         eout.eend(0)
4715                                                                 continue # fetch any remaining files
4716
4717                         # Create a reversed list since that is optimal for list.pop().
4718                         uri_list = filedict[myfile][:]
4719                         uri_list.reverse()
4720                         checksum_failure_count = 0
4721                         tried_locations = set()
4722                         while uri_list:
4723                                 loc = uri_list.pop()
4724                                 # Eliminate duplicates here in case we've switched to
4725                                 # "primaryuri" mode on the fly due to a checksum failure.
4726                                 if loc in tried_locations:
4727                                         continue
4728                                 tried_locations.add(loc)
4729                                 if listonly:
4730                                         writemsg_stdout(loc+" ", noiselevel=-1)
4731                                         continue
4732                                 # allow different fetchcommands per protocol
4733                                 protocol = loc[0:loc.find("://")]
4734
4735                                 missing_file_param = False
4736                                 fetchcommand_var = "FETCHCOMMAND_" + protocol.upper()
4737                                 fetchcommand = mysettings.get(fetchcommand_var)
4738                                 if fetchcommand is None:
4739                                         fetchcommand_var = "FETCHCOMMAND"
4740                                         fetchcommand = mysettings.get(fetchcommand_var)
4741                                         if fetchcommand is None:
4742                                                 portage.util.writemsg_level(
4743                                                         _("!!! %s is unset. It should "
4744                                                         "have been defined in\n!!! %s/make.globals.\n") \
4745                                                         % (fetchcommand_var,
4746                                                         portage.const.GLOBAL_CONFIG_PATH),
4747                                                         level=logging.ERROR, noiselevel=-1)
4748                                                 return 0
4749                                 if "${FILE}" not in fetchcommand:
4750                                         portage.util.writemsg_level(
4751                                                 _("!!! %s does not contain the required ${FILE}"
4752                                                 " parameter.\n") % fetchcommand_var,
4753                                                 level=logging.ERROR, noiselevel=-1)
4754                                         missing_file_param = True
4755
4756                                 resumecommand_var = "RESUMECOMMAND_" + protocol.upper()
4757                                 resumecommand = mysettings.get(resumecommand_var)
4758                                 if resumecommand is None:
4759                                         resumecommand_var = "RESUMECOMMAND"
4760                                         resumecommand = mysettings.get(resumecommand_var)
4761                                         if resumecommand is None:
4762                                                 portage.util.writemsg_level(
4763                                                         _("!!! %s is unset. It should "
4764                                                         "have been defined in\n!!! %s/make.globals.\n") \
4765                                                         % (resumecommand_var,
4766                                                         portage.const.GLOBAL_CONFIG_PATH),
4767                                                         level=logging.ERROR, noiselevel=-1)
4768                                                 return 0
4769                                 if "${FILE}" not in resumecommand:
4770                                         portage.util.writemsg_level(
4771                                                 _("!!! %s does not contain the required ${FILE}"
4772                                                 " parameter.\n") % resumecommand_var,
4773                                                 level=logging.ERROR, noiselevel=-1)
4774                                         missing_file_param = True
4775
4776                                 if missing_file_param:
4777                                         portage.util.writemsg_level(
4778                                                 _("!!! Refer to the make.conf(5) man page for "
4779                                                 "information about how to\n!!! correctly specify "
4780                                                 "FETCHCOMMAND and RESUMECOMMAND.\n"),
4781                                                 level=logging.ERROR, noiselevel=-1)
4782                                         if myfile != os.path.basename(loc):
4783                                                 return 0
4784
4785                                 if not can_fetch:
4786                                         if fetched != 2:
4787                                                 try:
4788                                                         mysize = os.stat(myfile_path).st_size
4789                                                 except OSError as e:
4790                                                         if e.errno not in (errno.ENOENT, errno.ESTALE):
4791                                                                 raise
4792                                                         del e
4793                                                         mysize = 0
4794
4795                                                 if mysize == 0:
4796                                                         writemsg(_("!!! File %s isn't fetched but unable to get it.\n") % myfile,
4797                                                                 noiselevel=-1)
4798                                                 elif size is None or size > mysize:
4799                                                         writemsg(_("!!! File %s isn't fully fetched, but unable to complete it\n") % myfile,
4800                                                                 noiselevel=-1)
4801                                                 else:
4802                                                         writemsg(_("!!! File %s is incorrect size, "
4803                                                                 "but unable to retry.\n") % myfile, noiselevel=-1)
4804                                                 return 0
4805                                         else:
4806                                                 continue
4807
4808                                 if fetched != 2 and has_space:
4809                                         #we either need to resume or start the download
4810                                         if fetched == 1:
4811                                                 try:
4812                                                         mystat = os.stat(myfile_path)
4813                                                 except OSError as e:
4814                                                         if e.errno not in (errno.ENOENT, errno.ESTALE):
4815                                                                 raise
4816                                                         del e
4817                                                         fetched = 0
4818                                                 else:
4819                                                         if mystat.st_size < fetch_resume_size:
4820                                                                 writemsg(_(">>> Deleting distfile with size "
4821                                                                         "%d (smaller than " "PORTAGE_FETCH_RESU"
4822                                                                         "ME_MIN_SIZE)\n") % mystat.st_size)
4823                                                                 try:
4824                                                                         os.unlink(myfile_path)
4825                                                                 except OSError as e:
4826                                                                         if e.errno not in \
4827                                                                                 (errno.ENOENT, errno.ESTALE):
4828                                                                                 raise
4829                                                                         del e
4830                                                                 fetched = 0
4831                                         if fetched == 1:
4832                                                 #resume mode:
4833                                                 writemsg(_(">>> Resuming download...\n"))
4834                                                 locfetch=resumecommand
4835                                                 command_var = resumecommand_var
4836                                         else:
4837                                                 #normal mode:
4838                                                 locfetch=fetchcommand
4839                                                 command_var = fetchcommand_var
4840                                         writemsg_stdout(_(">>> Downloading '%s'\n") % \
4841                                                 re.sub(r'//(.+):.+@(.+)/',r'//\1:*password*@\2/', loc))
4842                                         variables = {
4843                                                 "DISTDIR": mysettings["DISTDIR"],
4844                                                 "URI":     loc,
4845                                                 "FILE":    myfile
4846                                         }
4847
4848                                         myfetch = util.shlex_split(locfetch)
4849                                         myfetch = [varexpand(x, mydict=variables) for x in myfetch]
4850                                         myret = -1
4851                                         try:
4852
4853                                                 myret = _spawn_fetch(mysettings, myfetch)
4854
4855                                         finally:
4856                                                 try:
4857                                                         apply_secpass_permissions(myfile_path,
4858                                                                 gid=portage_gid, mode=0o664, mask=0o2)
4859                                                 except portage.exception.FileNotFound as e:
4860                                                         pass
4861                                                 except portage.exception.PortageException as e:
4862                                                         if not os.access(myfile_path, os.R_OK):
4863                                                                 writemsg(_("!!! Failed to adjust permissions:"
4864                                                                         " %s\n") % str(e), noiselevel=-1)
4865
4866                                         # If the file is empty then it's obviously invalid.  Don't
4867                                         # trust the return value from the fetcher.  Remove the
4868                                         # empty file and try to download again.
4869                                         try:
4870                                                 if os.stat(myfile_path).st_size == 0:
4871                                                         os.unlink(myfile_path)
4872                                                         fetched = 0
4873                                                         continue
4874                                         except EnvironmentError:
4875                                                 pass
4876
4877                                         if mydigests is not None and myfile in mydigests:
4878                                                 try:
4879                                                         mystat = os.stat(myfile_path)
4880                                                 except OSError as e:
4881                                                         if e.errno not in (errno.ENOENT, errno.ESTALE):
4882                                                                 raise
4883                                                         del e
4884                                                         fetched = 0
4885                                                 else:
4886
4887                                                         if stat.S_ISDIR(mystat.st_mode):
4888                                                                 # This can happen if FETCHCOMMAND erroneously
4889                                                                 # contains wget's -P option where it should
4890                                                                 # instead have -O.
4891                                                                 portage.util.writemsg_level(
4892                                                                         _("!!! The command specified in the "
4893                                                                         "%s variable appears to have\n!!! "
4894                                                                         "created a directory instead of a "
4895                                                                         "normal file.\n") % command_var,
4896                                                                         level=logging.ERROR, noiselevel=-1)
4897                                                                 portage.util.writemsg_level(
4898                                                                         _("!!! Refer to the make.conf(5) "
4899                                                                         "man page for information about how "
4900                                                                         "to\n!!! correctly specify "
4901                                                                         "FETCHCOMMAND and RESUMECOMMAND.\n"),
4902                                                                         level=logging.ERROR, noiselevel=-1)
4903                                                                 return 0
4904
4905                                                         # no exception?  file exists. let digestcheck() report
4906                                                         # an appropriately for size or checksum errors
4907
4908                                                         # If the fetcher reported success and the file is
4909                                                         # too small, it's probably because the digest is
4910                                                         # bad (upstream changed the distfile).  In this
4911                                                         # case we don't want to attempt to resume. Show a
4912                                                         # digest verification failure to that the user gets
4913                                                         # a clue about what just happened.
4914                                                         if myret != os.EX_OK and \
4915                                                                 mystat.st_size < mydigests[myfile]["size"]:
4916                                                                 # Fetch failed... Try the next one... Kill 404 files though.
4917                                                                 if (mystat[stat.ST_SIZE]<100000) and (len(myfile)>4) and not ((myfile[-5:]==".html") or (myfile[-4:]==".htm")):
4918                                                                         html404=re.compile("<title>.*(not found|404).*</title>",re.I|re.M)
4919                                                                         if html404.search(codecs.open(
4920                                                                                 _unicode_encode(myfile_path,
4921                                                                                 encoding=_encodings['fs'], errors='strict'),
4922                                                                                 mode='r', encoding=_encodings['content'], errors='replace'
4923                                                                                 ).read()):
4924                                                                                 try:
4925                                                                                         os.unlink(mysettings["DISTDIR"]+"/"+myfile)
4926                                                                                         writemsg(_(">>> Deleting invalid distfile. (Improper 404 redirect from server.)\n"))
4927                                                                                         fetched = 0
4928                                                                                         continue
4929                                                                                 except (IOError, OSError):
4930                                                                                         pass
4931                                                                 fetched = 1
4932                                                                 continue
4933                                                         if True:
4934                                                                 # File is the correct size--check the checksums for the fetched
4935                                                                 # file NOW, for those users who don't have a stable/continuous
4936                                                                 # net connection. This way we have a chance to try to download
4937                                                                 # from another mirror...
4938                                                                 verified_ok,reason = portage.checksum.verify_all(mysettings["DISTDIR"]+"/"+myfile, mydigests[myfile])
4939                                                                 if not verified_ok:
4940                                                                         print(reason)
4941                                                                         writemsg(_("!!! Fetched file: %s VERIFY FAILED!\n") % myfile,
4942                                                                                 noiselevel=-1)
4943                                                                         writemsg(_("!!! Reason: %s\n") % reason[0],
4944                                                                                 noiselevel=-1)
4945                                                                         writemsg(_("!!! Got:      %s\n!!! Expected: %s\n") % \
4946                                                                                 (reason[1], reason[2]), noiselevel=-1)
4947                                                                         if reason[0] == _("Insufficient data for checksum verification"):
4948                                                                                 return 0
4949                                                                         temp_filename = \
4950                                                                                 _checksum_failure_temp_file(
4951                                                                                 mysettings["DISTDIR"], myfile)
4952                                                                         writemsg_stdout(_("Refetching... "
4953                                                                                 "File renamed to '%s'\n\n") % \
4954                                                                                 temp_filename, noiselevel=-1)
4955                                                                         fetched=0
4956                                                                         checksum_failure_count += 1
4957                                                                         if checksum_failure_count == \
4958                                                                                 checksum_failure_primaryuri:
4959                                                                                 # Switch to "primaryuri" mode in order
4960                                                                                 # to increase the probablility of
4961                                                                                 # of success.
4962                                                                                 primaryuris = \
4963                                                                                         primaryuri_dict.get(myfile)
4964                                                                                 if primaryuris:
4965                                                                                         uri_list.extend(
4966                                                                                                 reversed(primaryuris))
4967                                                                         if checksum_failure_count >= \
4968                                                                                 checksum_failure_max_tries:
4969                                                                                 break
4970                                                                 else:
4971                                                                         eout = portage.output.EOutput()
4972                                                                         eout.quiet = mysettings.get("PORTAGE_QUIET", None) == "1"
4973                                                                         digests = mydigests.get(myfile)
4974                                                                         if digests:
4975                                                                                 eout.ebegin("%s %s ;-)" % \
4976                                                                                         (myfile, " ".join(sorted(digests))))
4977                                                                                 eout.eend(0)
4978                                                                         fetched=2
4979                                                                         break
4980                                         else:
4981                                                 if not myret:
4982                                                         fetched=2
4983                                                         break
4984                                                 elif mydigests!=None:
4985                                                         writemsg(_("No digest file available and download failed.\n\n"),
4986                                                                 noiselevel=-1)
4987                 finally:
4988                         if use_locks and file_lock:
4989                                 portage.locks.unlockfile(file_lock)
4990
4991                 if listonly:
4992                         writemsg_stdout("\n", noiselevel=-1)
4993                 if fetched != 2:
4994                         if restrict_fetch and not restrict_fetch_msg:
4995                                 restrict_fetch_msg = True
4996                                 msg = _("\n!!! %s/%s"
4997                                         " has fetch restriction turned on.\n"
4998                                         "!!! This probably means that this "
4999                                         "ebuild's files must be downloaded\n"
5000                                         "!!! manually.  See the comments in"
5001                                         " the ebuild for more information.\n\n") % \
5002                                         (mysettings["CATEGORY"], mysettings["PF"])
5003                                 portage.util.writemsg_level(msg,
5004                                         level=logging.ERROR, noiselevel=-1)
5005                                 have_builddir = "PORTAGE_BUILDDIR" in mysettings and \
5006                                         os.path.isdir(mysettings["PORTAGE_BUILDDIR"])
5007
5008                                 global_tmpdir = mysettings["PORTAGE_TMPDIR"]
5009                                 private_tmpdir = None
5010                                 if not parallel_fetchonly and not have_builddir:
5011                                         # When called by digestgen(), it's normal that
5012                                         # PORTAGE_BUILDDIR doesn't exist. It's helpful
5013                                         # to show the pkg_nofetch output though, so go
5014                                         # ahead and create a temporary PORTAGE_BUILDDIR.
5015                                         # Use a temporary config instance to avoid altering
5016                                         # the state of the one that's been passed in.
5017                                         mysettings = config(clone=mysettings)
5018                                         from tempfile import mkdtemp
5019                                         try:
5020                                                 private_tmpdir = mkdtemp("", "._portage_fetch_.",
5021                                                         global_tmpdir)
5022                                         except OSError as e:
5023                                                 if e.errno != portage.exception.PermissionDenied.errno:
5024                                                         raise
5025                                                 raise portage.exception.PermissionDenied(global_tmpdir)
5026                                         mysettings["PORTAGE_TMPDIR"] = private_tmpdir
5027                                         mysettings.backup_changes("PORTAGE_TMPDIR")
5028                                         debug = mysettings.get("PORTAGE_DEBUG") == "1"
5029                                         portage.doebuild_environment(mysettings["EBUILD"], "fetch",
5030                                                 mysettings["ROOT"], mysettings, debug, 1, None)
5031                                         prepare_build_dirs(mysettings["ROOT"], mysettings, 0)
5032                                         have_builddir = True
5033
5034                                 if not parallel_fetchonly and have_builddir:
5035                                         # To spawn pkg_nofetch requires PORTAGE_BUILDDIR for
5036                                         # ensuring sane $PWD (bug #239560) and storing elog
5037                                         # messages. Therefore, calling code needs to ensure that
5038                                         # PORTAGE_BUILDDIR is already clean and locked here.
5039
5040                                         # All the pkg_nofetch goes to stderr since it's considered
5041                                         # to be an error message.
5042                                         fd_pipes = {
5043                                                 0 : sys.stdin.fileno(),
5044                                                 1 : sys.stderr.fileno(),
5045                                                 2 : sys.stderr.fileno(),
5046                                         }
5047
5048                                         ebuild_phase = mysettings.get("EBUILD_PHASE")
5049                                         try:
5050                                                 mysettings["EBUILD_PHASE"] = "nofetch"
5051                                                 spawn(_shell_quote(EBUILD_SH_BINARY) + \
5052                                                         " nofetch", mysettings, fd_pipes=fd_pipes)
5053                                         finally:
5054                                                 if ebuild_phase is None:
5055                                                         mysettings.pop("EBUILD_PHASE", None)
5056                                                 else:
5057                                                         mysettings["EBUILD_PHASE"] = ebuild_phase
5058                                                 if private_tmpdir is not None:
5059                                                         shutil.rmtree(private_tmpdir)
5060
5061                         elif restrict_fetch:
5062                                 pass
5063                         elif listonly:
5064                                 pass
5065                         elif not filedict[myfile]:
5066                                 writemsg(_("Warning: No mirrors available for file"
5067                                         " '%s'\n") % (myfile), noiselevel=-1)
5068                         else:
5069                                 writemsg(_("!!! Couldn't download '%s'. Aborting.\n") % myfile,
5070                                         noiselevel=-1)
5071
5072                         if listonly:
5073                                 continue
5074                         elif fetchonly:
5075                                 failed_files.add(myfile)
5076                                 continue
5077                         return 0
5078         if failed_files:
5079                 return 0
5080         return 1
5081
5082 def digestgen(myarchives, mysettings, overwrite=1, manifestonly=0, myportdb=None):
5083         """
5084         Generates a digest file if missing.  Assumes all files are available.
5085         DEPRECATED: this now only is a compability wrapper for 
5086                     portage.manifest.Manifest()
5087         NOTE: manifestonly and overwrite are useless with manifest2 and
5088               are therefore ignored."""
5089         if myportdb is None:
5090                 writemsg("Warning: myportdb not specified to digestgen\n")
5091                 global portdb
5092                 myportdb = portdb
5093         global _doebuild_manifest_exempt_depend
5094         try:
5095                 _doebuild_manifest_exempt_depend += 1
5096                 distfiles_map = {}
5097                 fetchlist_dict = FetchlistDict(mysettings["O"], mysettings, myportdb)
5098                 for cpv in fetchlist_dict:
5099                         try:
5100                                 for myfile in fetchlist_dict[cpv]:
5101                                         distfiles_map.setdefault(myfile, []).append(cpv)
5102                         except portage.exception.InvalidDependString as e:
5103                                 writemsg("!!! %s\n" % str(e), noiselevel=-1)
5104                                 del e
5105                                 return 0
5106                 mytree = os.path.dirname(os.path.dirname(mysettings["O"]))
5107                 manifest1_compat = False
5108                 mf = Manifest(mysettings["O"], mysettings["DISTDIR"],
5109                         fetchlist_dict=fetchlist_dict, manifest1_compat=manifest1_compat)
5110                 # Don't require all hashes since that can trigger excessive
5111                 # fetches when sufficient digests already exist.  To ease transition
5112                 # while Manifest 1 is being removed, only require hashes that will
5113                 # exist before and after the transition.
5114                 required_hash_types = set()
5115                 required_hash_types.add("size")
5116                 required_hash_types.add(portage.const.MANIFEST2_REQUIRED_HASH)
5117                 dist_hashes = mf.fhashdict.get("DIST", {})
5118
5119                 # To avoid accidental regeneration of digests with the incorrect
5120                 # files (such as partially downloaded files), trigger the fetch
5121                 # code if the file exists and it's size doesn't match the current
5122                 # manifest entry. If there really is a legitimate reason for the
5123                 # digest to change, `ebuild --force digest` can be used to avoid
5124                 # triggering this code (or else the old digests can be manually
5125                 # removed from the Manifest).
5126                 missing_files = []
5127                 for myfile in distfiles_map:
5128                         myhashes = dist_hashes.get(myfile)
5129                         if not myhashes:
5130                                 try:
5131                                         st = os.stat(os.path.join(mysettings["DISTDIR"], myfile))
5132                                 except OSError:
5133                                         st = None
5134                                 if st is None or st.st_size == 0:
5135                                         missing_files.append(myfile)
5136                                 continue
5137                         size = myhashes.get("size")
5138
5139                         try:
5140                                 st = os.stat(os.path.join(mysettings["DISTDIR"], myfile))
5141                         except OSError as e:
5142                                 if e.errno != errno.ENOENT:
5143                                         raise
5144                                 del e
5145                                 if size == 0:
5146                                         missing_files.append(myfile)
5147                                         continue
5148                                 if required_hash_types.difference(myhashes):
5149                                         missing_files.append(myfile)
5150                                         continue
5151                         else:
5152                                 if st.st_size == 0 or size is not None and size != st.st_size:
5153                                         missing_files.append(myfile)
5154                                         continue
5155
5156                 if missing_files:
5157                                 mytree = os.path.realpath(os.path.dirname(
5158                                         os.path.dirname(mysettings["O"])))
5159                                 fetch_settings = config(clone=mysettings)
5160                                 debug = mysettings.get("PORTAGE_DEBUG") == "1"
5161                                 for myfile in missing_files:
5162                                         uris = set()
5163                                         for cpv in distfiles_map[myfile]:
5164                                                 myebuild = os.path.join(mysettings["O"],
5165                                                         catsplit(cpv)[1] + ".ebuild")
5166                                                 # for RESTRICT=fetch, mirror, etc...
5167                                                 doebuild_environment(myebuild, "fetch",
5168                                                         mysettings["ROOT"], fetch_settings,
5169                                                         debug, 1, myportdb)
5170                                                 uris.update(myportdb.getFetchMap(
5171                                                         cpv, mytree=mytree)[myfile])
5172
5173                                         fetch_settings["A"] = myfile # for use by pkg_nofetch()
5174
5175                                         try:
5176                                                 st = os.stat(os.path.join(
5177                                                         mysettings["DISTDIR"],myfile))
5178                                         except OSError:
5179                                                 st = None
5180
5181                                         if not fetch({myfile : uris}, fetch_settings):
5182                                                 writemsg(_("!!! Fetch failed for %s, can't update "
5183                                                         "Manifest\n") % myfile, noiselevel=-1)
5184                                                 if myfile in dist_hashes and \
5185                                                         st is not None and st.st_size > 0:
5186                                                         # stat result is obtained before calling fetch(),
5187                                                         # since fetch may rename the existing file if the
5188                                                         # digest does not match.
5189                                                         writemsg(_("!!! If you would like to "
5190                                                                 "forcefully replace the existing "
5191                                                                 "Manifest entry\n!!! for %s, use "
5192                                                                 "the following command:\n") % myfile + \
5193                                                                 "!!!    " + colorize("INFORM",
5194                                                                 "ebuild --force %s manifest" % \
5195                                                                 os.path.basename(myebuild)) + "\n",
5196                                                                 noiselevel=-1)
5197                                                 return 0
5198                 writemsg_stdout(_(">>> Creating Manifest for %s\n") % mysettings["O"])
5199                 try:
5200                         mf.create(requiredDistfiles=myarchives,
5201                                 assumeDistHashesSometimes=True,
5202                                 assumeDistHashesAlways=(
5203                                 "assume-digests" in mysettings.features))
5204                 except portage.exception.FileNotFound as e:
5205                         writemsg(_("!!! File %s doesn't exist, can't update "
5206                                 "Manifest\n") % e, noiselevel=-1)
5207                         return 0
5208                 except portage.exception.PortagePackageException as e:
5209                         writemsg(("!!! %s\n") % (e,), noiselevel=-1)
5210                         return 0
5211                 try:
5212                         mf.write(sign=False)
5213                 except portage.exception.PermissionDenied as e:
5214                         writemsg(_("!!! Permission Denied: %s\n") % (e,), noiselevel=-1)
5215                         return 0
5216                 if "assume-digests" not in mysettings.features:
5217                         distlist = list(mf.fhashdict.get("DIST", {}))
5218                         distlist.sort()
5219                         auto_assumed = []
5220                         for filename in distlist:
5221                                 if not os.path.exists(
5222                                         os.path.join(mysettings["DISTDIR"], filename)):
5223                                         auto_assumed.append(filename)
5224                         if auto_assumed:
5225                                 mytree = os.path.realpath(
5226                                         os.path.dirname(os.path.dirname(mysettings["O"])))
5227                                 cp = os.path.sep.join(mysettings["O"].split(os.path.sep)[-2:])
5228                                 pkgs = myportdb.cp_list(cp, mytree=mytree)
5229                                 pkgs.sort()
5230                                 writemsg_stdout("  digest.assumed" + portage.output.colorize("WARN",
5231                                         str(len(auto_assumed)).rjust(18)) + "\n")
5232                                 for pkg_key in pkgs:
5233                                         fetchlist = myportdb.getFetchMap(pkg_key, mytree=mytree)
5234                                         pv = pkg_key.split("/")[1]
5235                                         for filename in auto_assumed:
5236                                                 if filename in fetchlist:
5237                                                         writemsg_stdout(
5238                                                                 "   %s::%s\n" % (pv, filename))
5239                 return 1
5240         finally:
5241                 _doebuild_manifest_exempt_depend -= 1
5242
5243 def digestParseFile(myfilename, mysettings=None):
5244         """(filename) -- Parses a given file for entries matching:
5245         <checksumkey> <checksum_hex_string> <filename> <filesize>
5246         Ignores lines that don't start with a valid checksum identifier
5247         and returns a dict with the filenames as keys and {checksumkey:checksum}
5248         as the values.
5249         DEPRECATED: this function is now only a compability wrapper for
5250                     portage.manifest.Manifest()."""
5251
5252         mysplit = myfilename.split(os.sep)
5253         if mysplit[-2] == "files" and mysplit[-1].startswith("digest-"):
5254                 pkgdir = os.sep + os.sep.join(mysplit[:-2]).strip(os.sep)
5255         elif mysplit[-1] == "Manifest":
5256                 pkgdir = os.sep + os.sep.join(mysplit[:-1]).strip(os.sep)
5257
5258         if mysettings is None:
5259                 global settings
5260                 mysettings = config(clone=settings)
5261
5262         return Manifest(pkgdir, mysettings["DISTDIR"]).getDigests()
5263
5264 def digestcheck(myfiles, mysettings, strict=0, justmanifest=0):
5265         """Verifies checksums.  Assumes all files have been downloaded.
5266         DEPRECATED: this is now only a compability wrapper for 
5267                     portage.manifest.Manifest()."""
5268         if mysettings.get("EBUILD_SKIP_MANIFEST") == "1":
5269                 return 1
5270         pkgdir = mysettings["O"]
5271         manifest_path = os.path.join(pkgdir, "Manifest")
5272         if not os.path.exists(manifest_path):
5273                 writemsg(_("!!! Manifest file not found: '%s'\n") % manifest_path,
5274                         noiselevel=-1)
5275                 if strict:
5276                         return 0
5277                 else:
5278                         return 1
5279         mf = Manifest(pkgdir, mysettings["DISTDIR"])
5280         manifest_empty = True
5281         for d in mf.fhashdict.values():
5282                 if d:
5283                         manifest_empty = False
5284                         break
5285         if manifest_empty:
5286                 writemsg(_("!!! Manifest is empty: '%s'\n") % manifest_path,
5287                         noiselevel=-1)
5288                 if strict:
5289                         return 0
5290                 else:
5291                         return 1
5292         eout = portage.output.EOutput()
5293         eout.quiet = mysettings.get("PORTAGE_QUIET", None) == "1"
5294         try:
5295                 if strict and "PORTAGE_PARALLEL_FETCHONLY" not in mysettings:
5296                         eout.ebegin(_("checking ebuild checksums ;-)"))
5297                         mf.checkTypeHashes("EBUILD")
5298                         eout.eend(0)
5299                         eout.ebegin(_("checking auxfile checksums ;-)"))
5300                         mf.checkTypeHashes("AUX")
5301                         eout.eend(0)
5302                         eout.ebegin(_("checking miscfile checksums ;-)"))
5303                         mf.checkTypeHashes("MISC", ignoreMissingFiles=True)
5304                         eout.eend(0)
5305                 for f in myfiles:
5306                         eout.ebegin(_("checking %s ;-)") % f)
5307                         ftype = mf.findFile(f)
5308                         if ftype is None:
5309                                 raise KeyError(f)
5310                         mf.checkFileHashes(ftype, f)
5311                         eout.eend(0)
5312         except KeyError as e:
5313                 eout.eend(1)
5314                 writemsg(_("\n!!! Missing digest for %s\n") % str(e), noiselevel=-1)
5315                 return 0
5316         except portage.exception.FileNotFound as e:
5317                 eout.eend(1)
5318                 writemsg(_("\n!!! A file listed in the Manifest could not be found: %s\n") % str(e),
5319                         noiselevel=-1)
5320                 return 0
5321         except portage.exception.DigestException as e:
5322                 eout.eend(1)
5323                 writemsg(_("\n!!! Digest verification failed:\n"), noiselevel=-1)
5324                 writemsg("!!! %s\n" % e.value[0], noiselevel=-1)
5325                 writemsg(_("!!! Reason: %s\n") % e.value[1], noiselevel=-1)
5326                 writemsg(_("!!! Got: %s\n") % e.value[2], noiselevel=-1)
5327                 writemsg(_("!!! Expected: %s\n") % e.value[3], noiselevel=-1)
5328                 return 0
5329         # Make sure that all of the ebuilds are actually listed in the Manifest.
5330         glep55 = 'parse-eapi-glep-55' in mysettings.features
5331         for f in os.listdir(pkgdir):
5332                 pf = None
5333                 if glep55:
5334                         pf, eapi = _split_ebuild_name_glep55(f)
5335                 elif f[-7:] == '.ebuild':
5336                         pf = f[:-7]
5337                 if pf is not None and not mf.hasFile("EBUILD", f):
5338                         writemsg(_("!!! A file is not listed in the Manifest: '%s'\n") % \
5339                                 os.path.join(pkgdir, f), noiselevel=-1)
5340                         if strict:
5341                                 return 0
5342         """ epatch will just grab all the patches out of a directory, so we have to
5343         make sure there aren't any foreign files that it might grab."""
5344         filesdir = os.path.join(pkgdir, "files")
5345
5346         for parent, dirs, files in os.walk(filesdir):
5347                 try:
5348                         parent = _unicode_decode(parent,
5349                                 encoding=_encodings['fs'], errors='strict')
5350                 except UnicodeDecodeError:
5351                         parent = _unicode_decode(parent,
5352                                 encoding=_encodings['fs'], errors='replace')
5353                         writemsg(_("!!! Path contains invalid "
5354                                 "character(s) for encoding '%s': '%s'") \
5355                                 % (_encodings['fs'], parent), noiselevel=-1)
5356                         if strict:
5357                                 return 0
5358                         continue
5359                 for d in dirs:
5360                         if d.startswith(".") or d == "CVS":
5361                                 dirs.remove(d)
5362                 for f in files:
5363                         try:
5364                                 f = _unicode_decode(f,
5365                                         encoding=_encodings['fs'], errors='strict')
5366                         except UnicodeDecodeError:
5367                                 f = _unicode_decode(f,
5368                                         encoding=_encodings['fs'], errors='replace')
5369                                 if f.startswith("."):
5370                                         continue
5371                                 f = os.path.join(parent, f)[len(filesdir) + 1:]
5372                                 writemsg(_("!!! File name contains invalid "
5373                                         "character(s) for encoding '%s': '%s'") \
5374                                         % (_encodings['fs'], f), noiselevel=-1)
5375                                 if strict:
5376                                         return 0
5377                                 continue
5378                         if f.startswith("."):
5379                                 continue
5380                         f = os.path.join(parent, f)[len(filesdir) + 1:]
5381                         file_type = mf.findFile(f)
5382                         if file_type != "AUX" and not f.startswith("digest-"):
5383                                 writemsg(_("!!! A file is not listed in the Manifest: '%s'\n") % \
5384                                         os.path.join(filesdir, f), noiselevel=-1)
5385                                 if strict:
5386                                         return 0
5387         return 1
5388
5389 # parse actionmap to spawn ebuild with the appropriate args
5390 def spawnebuild(mydo, actionmap, mysettings, debug, alwaysdep=0,
5391         logfile=None, fd_pipes=None, returnpid=False):
5392         if not returnpid and \
5393                 (alwaysdep or "noauto" not in mysettings.features):
5394                 # process dependency first
5395                 if "dep" in actionmap[mydo]:
5396                         retval = spawnebuild(actionmap[mydo]["dep"], actionmap,
5397                                 mysettings, debug, alwaysdep=alwaysdep, logfile=logfile,
5398                                 fd_pipes=fd_pipes, returnpid=returnpid)
5399                         if retval:
5400                                 return retval
5401
5402         eapi = mysettings["EAPI"]
5403
5404         if mydo == "configure" and eapi in ("0", "1"):
5405                 return os.EX_OK
5406
5407         if mydo == "prepare" and eapi in ("0", "1"):
5408                 return os.EX_OK
5409
5410         if mydo == "pretend" and eapi in ("0", "1", "2"):
5411                 return os.EX_OK
5412
5413         kwargs = actionmap[mydo]["args"]
5414         mysettings["EBUILD_PHASE"] = mydo
5415         _doebuild_exit_status_unlink(
5416                 mysettings.get("EBUILD_EXIT_STATUS_FILE"))
5417
5418         try:
5419                 phase_retval = spawn(actionmap[mydo]["cmd"] % mydo,
5420                         mysettings, debug=debug, logfile=logfile,
5421                         fd_pipes=fd_pipes, returnpid=returnpid, **kwargs)
5422         finally:
5423                 mysettings["EBUILD_PHASE"] = ""
5424
5425         if returnpid:
5426                 return phase_retval
5427
5428         msg = _doebuild_exit_status_check(mydo, mysettings)
5429         if msg:
5430                 if phase_retval == os.EX_OK:
5431                         phase_retval = 1
5432                 from textwrap import wrap
5433                 from portage.elog.messages import eerror
5434                 for l in wrap(msg, 72):
5435                         eerror(l, phase=mydo, key=mysettings.mycpv)
5436
5437         _post_phase_userpriv_perms(mysettings)
5438         if mydo == "install":
5439                 _check_build_log(mysettings)
5440                 if phase_retval == os.EX_OK:
5441                         _post_src_install_chost_fix(mysettings)
5442                         phase_retval = _post_src_install_checks(mysettings)
5443
5444         if mydo == "test" and phase_retval != os.EX_OK and \
5445                 "test-fail-continue" in mysettings.features:
5446                 phase_retval = os.EX_OK
5447
5448         return phase_retval
5449
5450 _post_phase_cmds = {
5451
5452         "install" : [
5453                 "install_qa_check",
5454                 "install_symlink_html_docs"],
5455
5456         "preinst" : [
5457                 "preinst_bsdflags",
5458                 "preinst_sfperms",
5459                 "preinst_selinux_labels",
5460                 "preinst_suid_scan",
5461                 "preinst_mask"],
5462
5463         "postinst" : [
5464                 "postinst_bsdflags"]
5465 }
5466
5467 def _post_phase_userpriv_perms(mysettings):
5468         if "userpriv" in mysettings.features and secpass >= 2:
5469                 """ Privileged phases may have left files that need to be made
5470                 writable to a less privileged user."""
5471                 apply_recursive_permissions(mysettings["T"],
5472                         uid=portage_uid, gid=portage_gid, dirmode=0o70, dirmask=0,
5473                         filemode=0o60, filemask=0)
5474
5475 def _post_src_install_checks(mysettings):
5476         _post_src_install_uid_fix(mysettings)
5477         global _post_phase_cmds
5478         retval = _spawn_misc_sh(mysettings, _post_phase_cmds["install"])
5479         if retval != os.EX_OK:
5480                 writemsg(_("!!! install_qa_check failed; exiting.\n"),
5481                         noiselevel=-1)
5482         return retval
5483
5484 def _check_build_log(mysettings, out=None):
5485         """
5486         Search the content of $PORTAGE_LOG_FILE if it exists
5487         and generate the following QA Notices when appropriate:
5488
5489           * Automake "maintainer mode"
5490           * command not found
5491           * Unrecognized configure options
5492         """
5493         logfile = mysettings.get("PORTAGE_LOG_FILE")
5494         if logfile is None:
5495                 return
5496         try:
5497                 f = codecs.open(_unicode_encode(logfile,
5498                         encoding=_encodings['fs'], errors='strict'),
5499                         mode='r', encoding=_encodings['content'], errors='replace')
5500         except EnvironmentError:
5501                 return
5502
5503         am_maintainer_mode = []
5504         bash_command_not_found = []
5505         bash_command_not_found_re = re.compile(
5506                 r'(.*): line (\d*): (.*): command not found$')
5507         command_not_found_exclude_re = re.compile(r'/configure: line ')
5508         helper_missing_file = []
5509         helper_missing_file_re = re.compile(
5510                 r'^!!! (do|new).*: .* does not exist$')
5511
5512         configure_opts_warn = []
5513         configure_opts_warn_re = re.compile(
5514                 r'^configure: WARNING: [Uu]nrecognized options: ')
5515
5516         # Exclude output from dev-libs/yaz-3.0.47 which looks like this:
5517         #
5518         #Configuration:
5519         #  Automake:                   ${SHELL} /var/tmp/portage/dev-libs/yaz-3.0.47/work/yaz-3.0.47/config/missing --run automake-1.10
5520         am_maintainer_mode_re = re.compile(r'/missing --run ')
5521         am_maintainer_mode_exclude_re = \
5522                 re.compile(r'(/missing --run (autoheader|makeinfo)|^\s*Automake:\s)')
5523
5524         make_jobserver_re = \
5525                 re.compile(r'g?make\[\d+\]: warning: jobserver unavailable:')
5526         make_jobserver = []
5527
5528         try:
5529                 for line in f:
5530                         if am_maintainer_mode_re.search(line) is not None and \
5531                                 am_maintainer_mode_exclude_re.search(line) is None:
5532                                 am_maintainer_mode.append(line.rstrip("\n"))
5533
5534                         if bash_command_not_found_re.match(line) is not None and \
5535                                 command_not_found_exclude_re.search(line) is None:
5536                                 bash_command_not_found.append(line.rstrip("\n"))
5537
5538                         if helper_missing_file_re.match(line) is not None:
5539                                 helper_missing_file.append(line.rstrip("\n"))
5540
5541                         if configure_opts_warn_re.match(line) is not None:
5542                                 configure_opts_warn.append(line.rstrip("\n"))
5543
5544                         if make_jobserver_re.match(line) is not None:
5545                                 make_jobserver.append(line.rstrip("\n"))
5546
5547         finally:
5548                 f.close()
5549
5550         from portage.elog.messages import eqawarn
5551         def _eqawarn(lines):
5552                 for line in lines:
5553                         eqawarn(line, phase="install", key=mysettings.mycpv, out=out)
5554         from textwrap import wrap
5555         wrap_width = 70
5556
5557         if am_maintainer_mode:
5558                 msg = [_("QA Notice: Automake \"maintainer mode\" detected:")]
5559                 msg.append("")
5560                 msg.extend("\t" + line for line in am_maintainer_mode)
5561                 msg.append("")
5562                 msg.extend(wrap(_(
5563                         "If you patch Makefile.am, "
5564                         "configure.in,  or configure.ac then you "
5565                         "should use autotools.eclass and "
5566                         "eautomake or eautoreconf. Exceptions "
5567                         "are limited to system packages "
5568                         "for which it is impossible to run "
5569                         "autotools during stage building. "
5570                         "See http://www.gentoo.org/p"
5571                         "roj/en/qa/autofailure.xml for more information."),
5572                         wrap_width))
5573                 _eqawarn(msg)
5574
5575         if bash_command_not_found:
5576                 msg = [_("QA Notice: command not found:")]
5577                 msg.append("")
5578                 msg.extend("\t" + line for line in bash_command_not_found)
5579                 _eqawarn(msg)
5580
5581         if helper_missing_file:
5582                 msg = [_("QA Notice: file does not exist:")]
5583                 msg.append("")
5584                 msg.extend("\t" + line[4:] for line in helper_missing_file)
5585                 _eqawarn(msg)
5586
5587         if configure_opts_warn:
5588                 msg = [_("QA Notice: Unrecognized configure options:")]
5589                 msg.append("")
5590                 msg.extend("\t" + line for line in configure_opts_warn)
5591                 _eqawarn(msg)
5592
5593         if make_jobserver:
5594                 msg = [_("QA Notice: make jobserver unavailable:")]
5595                 msg.append("")
5596                 msg.extend("\t" + line for line in make_jobserver)
5597                 _eqawarn(msg)
5598
5599 def _post_src_install_chost_fix(settings):
5600         """
5601         It's possible that the ebuild has changed the
5602         CHOST variable, so revert it to the initial
5603         setting.
5604         """
5605         chost = settings.get('CHOST')
5606         if chost:
5607                 write_atomic(os.path.join(settings['PORTAGE_BUILDDIR'],
5608                         'build-info', 'CHOST'), chost + '\n')
5609
5610 def _post_src_install_uid_fix(mysettings, out=None):
5611         """
5612         Files in $D with user and group bits that match the "portage"
5613         user or group are automatically mapped to PORTAGE_INST_UID and
5614         PORTAGE_INST_GID if necessary. The chown system call may clear
5615         S_ISUID and S_ISGID bits, so those bits are restored if
5616         necessary.
5617         """
5618
5619         os = _os_merge
5620
5621         inst_uid = int(mysettings["PORTAGE_INST_UID"])
5622         inst_gid = int(mysettings["PORTAGE_INST_GID"])
5623
5624         if bsd_chflags:
5625                 # Temporarily remove all of the flags in order to avoid EPERM errors.
5626                 os.system("mtree -c -p %s -k flags > %s" % \
5627                         (_shell_quote(mysettings["D"]),
5628                         _shell_quote(os.path.join(mysettings["T"], "bsdflags.mtree"))))
5629                 os.system("chflags -R noschg,nouchg,nosappnd,nouappnd %s" % \
5630                         (_shell_quote(mysettings["D"]),))
5631                 os.system("chflags -R nosunlnk,nouunlnk %s 2>/dev/null" % \
5632                         (_shell_quote(mysettings["D"]),))
5633
5634         destdir = mysettings["D"]
5635         unicode_errors = []
5636
5637         while True:
5638
5639                 unicode_error = False
5640                 size = 0
5641                 counted_inodes = set()
5642
5643                 for parent, dirs, files in os.walk(destdir):
5644                         try:
5645                                 parent = _unicode_decode(parent,
5646                                         encoding=_encodings['merge'], errors='strict')
5647                         except UnicodeDecodeError:
5648                                 new_parent = _unicode_decode(parent,
5649                                         encoding=_encodings['merge'], errors='replace')
5650                                 new_parent = _unicode_encode(new_parent,
5651                                         encoding=_encodings['merge'], errors='backslashreplace')
5652                                 new_parent = _unicode_decode(new_parent,
5653                                         encoding=_encodings['merge'], errors='replace')
5654                                 os.rename(parent, new_parent)
5655                                 unicode_error = True
5656                                 unicode_errors.append(new_parent[len(destdir):])
5657                                 break
5658
5659                         for fname in chain(dirs, files):
5660                                 try:
5661                                         fname = _unicode_decode(fname,
5662                                                 encoding=_encodings['merge'], errors='strict')
5663                                 except UnicodeDecodeError:
5664                                         fpath = _os.path.join(
5665                                                 parent.encode(_encodings['merge']), fname)
5666                                         new_fname = _unicode_decode(fname,
5667                                                 encoding=_encodings['merge'], errors='replace')
5668                                         new_fname = _unicode_encode(new_fname,
5669                                                 encoding=_encodings['merge'], errors='backslashreplace')
5670                                         new_fname = _unicode_decode(new_fname,
5671                                                 encoding=_encodings['merge'], errors='replace')
5672                                         new_fpath = os.path.join(parent, new_fname)
5673                                         os.rename(fpath, new_fpath)
5674                                         unicode_error = True
5675                                         unicode_errors.append(new_fpath[len(destdir):])
5676                                         fname = new_fname
5677                                         fpath = new_fpath
5678                                 else:
5679                                         fpath = os.path.join(parent, fname)
5680
5681                                 mystat = os.lstat(fpath)
5682                                 if stat.S_ISREG(mystat.st_mode) and \
5683                                         mystat.st_ino not in counted_inodes:
5684                                         counted_inodes.add(mystat.st_ino)
5685                                         size += mystat.st_size
5686                                 if mystat.st_uid != portage_uid and \
5687                                         mystat.st_gid != portage_gid:
5688                                         continue
5689                                 myuid = -1
5690                                 mygid = -1
5691                                 if mystat.st_uid == portage_uid:
5692                                         myuid = inst_uid
5693                                 if mystat.st_gid == portage_gid:
5694                                         mygid = inst_gid
5695                                 apply_secpass_permissions(
5696                                         _unicode_encode(fpath, encoding=_encodings['merge']),
5697                                         uid=myuid, gid=mygid,
5698                                         mode=mystat.st_mode, stat_cached=mystat,
5699                                         follow_links=False)
5700
5701                         if unicode_error:
5702                                 break
5703
5704                 if not unicode_error:
5705                         break
5706
5707         if unicode_errors:
5708                 from portage.elog.messages import eerror
5709                 for l in _merge_unicode_error(unicode_errors):
5710                         eerror(l, phase='install', key=mysettings.mycpv, out=out)
5711
5712         open(_unicode_encode(os.path.join(mysettings['PORTAGE_BUILDDIR'],
5713                 'build-info', 'SIZE')), 'w').write(str(size) + '\n')
5714
5715         if bsd_chflags:
5716                 # Restore all of the flags saved above.
5717                 os.system("mtree -e -p %s -U -k flags < %s > /dev/null" % \
5718                         (_shell_quote(mysettings["D"]),
5719                         _shell_quote(os.path.join(mysettings["T"], "bsdflags.mtree"))))
5720
5721 def _merge_unicode_error(errors):
5722         from textwrap import wrap
5723         lines = []
5724
5725         msg = _("This package installs one or more file names containing "
5726                 "characters that do not match your current locale "
5727                 "settings. The current setting for filesystem encoding is '%s'.") \
5728                 % _encodings['merge']
5729         lines.extend(wrap(msg, 72))
5730
5731         lines.append("")
5732         errors.sort()
5733         lines.extend("\t" + x for x in errors)
5734         lines.append("")
5735
5736         if _encodings['merge'].lower().replace('_', '').replace('-', '') != 'utf8':
5737                 msg = _("For best results, UTF-8 encoding is recommended. See "
5738                         "the Gentoo Linux Localization Guide for instructions "
5739                         "about how to configure your locale for UTF-8 encoding:")
5740                 lines.extend(wrap(msg, 72))
5741                 lines.append("")
5742                 lines.append("\t" + \
5743                         "http://www.gentoo.org/doc/en/guide-localization.xml")
5744                 lines.append("")
5745
5746         return lines
5747
5748 def _post_pkg_preinst_cmd(mysettings):
5749         """
5750         Post phase logic and tasks that have been factored out of
5751         ebuild.sh. Call preinst_mask last so that INSTALL_MASK can
5752         can be used to wipe out any gmon.out files created during
5753         previous functions (in case any tools were built with -pg
5754         in CFLAGS).
5755         """
5756
5757         portage_bin_path = mysettings["PORTAGE_BIN_PATH"]
5758         misc_sh_binary = os.path.join(portage_bin_path,
5759                 os.path.basename(MISC_SH_BINARY))
5760
5761         mysettings["EBUILD_PHASE"] = ""
5762         global _post_phase_cmds
5763         myargs = [_shell_quote(misc_sh_binary)] + _post_phase_cmds["preinst"]
5764
5765         return myargs
5766
5767 def _post_pkg_postinst_cmd(mysettings):
5768         """
5769         Post phase logic and tasks that have been factored out of
5770         build.sh.
5771         """
5772
5773         portage_bin_path = mysettings["PORTAGE_BIN_PATH"]
5774         misc_sh_binary = os.path.join(portage_bin_path,
5775                 os.path.basename(MISC_SH_BINARY))
5776
5777         mysettings["EBUILD_PHASE"] = ""
5778         global _post_phase_cmds
5779         myargs = [_shell_quote(misc_sh_binary)] + _post_phase_cmds["postinst"]
5780
5781         return myargs
5782
5783 def _spawn_misc_sh(mysettings, commands, **kwargs):
5784         """
5785         @param mysettings: the ebuild config
5786         @type mysettings: config
5787         @param commands: a list of function names to call in misc-functions.sh
5788         @type commands: list
5789         @rtype: int
5790         @returns: the return value from the spawn() call
5791         """
5792
5793         # Note: PORTAGE_BIN_PATH may differ from the global
5794         # constant when portage is reinstalling itself.
5795         portage_bin_path = mysettings["PORTAGE_BIN_PATH"]
5796         misc_sh_binary = os.path.join(portage_bin_path,
5797                 os.path.basename(MISC_SH_BINARY))
5798         mycommand = " ".join([_shell_quote(misc_sh_binary)] + commands)
5799         _doebuild_exit_status_unlink(
5800                 mysettings.get("EBUILD_EXIT_STATUS_FILE"))
5801         debug = mysettings.get("PORTAGE_DEBUG") == "1"
5802         logfile = mysettings.get("PORTAGE_LOG_FILE")
5803         mydo = mysettings["EBUILD_PHASE"]
5804         try:
5805                 rval = spawn(mycommand, mysettings, debug=debug,
5806                         logfile=logfile, **kwargs)
5807         finally:
5808                 pass
5809
5810         msg = _doebuild_exit_status_check(mydo, mysettings)
5811         if msg:
5812                 if rval == os.EX_OK:
5813                         rval = 1
5814                 from textwrap import wrap
5815                 from portage.elog.messages import eerror
5816                 for l in wrap(msg, 72):
5817                         eerror(l, phase=mydo, key=mysettings.mycpv)
5818
5819         return rval
5820
5821 _testing_eapis = frozenset(["3_pre1"])
5822 _deprecated_eapis = frozenset(["2_pre3", "2_pre2", "2_pre1"])
5823
5824 def _eapi_is_deprecated(eapi):
5825         return eapi in _deprecated_eapis
5826
5827 def eapi_is_supported(eapi):
5828         eapi = str(eapi).strip()
5829
5830         if _eapi_is_deprecated(eapi):
5831                 return True
5832
5833         if eapi in _testing_eapis:
5834                 return True
5835
5836         try:
5837                 eapi = int(eapi)
5838         except ValueError:
5839                 eapi = -1
5840         if eapi < 0:
5841                 return False
5842         return eapi <= portage.const.EAPI
5843
5844 # Generally, it's best not to assume that cache entries for unsupported EAPIs
5845 # can be validated. However, the current package manager specification does not
5846 # guarantee that the EAPI can be parsed without sourcing the ebuild, so
5847 # it's too costly to discard existing cache entries for unsupported EAPIs.
5848 # Therefore, by default, assume that cache entries for unsupported EAPIs can be
5849 # validated. If FEATURES=parse-eapi-* is enabled, this assumption is discarded
5850 # since the EAPI can be determined without the incurring the cost of sourcing
5851 # the ebuild.
5852 _validate_cache_for_unsupported_eapis = True
5853
5854 _parse_eapi_ebuild_head_re = re.compile(r'^EAPI=[\'"]?([^\'"#]*)')
5855 _parse_eapi_ebuild_head_max_lines = 30
5856
5857 def _parse_eapi_ebuild_head(f):
5858         count = 0
5859         for line in f:
5860                 m = _parse_eapi_ebuild_head_re.match(line)
5861                 if m is not None:
5862                         return m.group(1).strip()
5863                 count += 1
5864                 if count >= _parse_eapi_ebuild_head_max_lines:
5865                         break
5866         return '0'
5867
5868 # True when FEATURES=parse-eapi-glep-55 is enabled.
5869 _glep_55_enabled = False
5870
5871 _split_ebuild_name_glep55_re = re.compile(r'^(.*)\.ebuild(-([^.]+))?$')
5872
5873 def _split_ebuild_name_glep55(name):
5874         """
5875         @returns: (pkg-ver-rev, eapi)
5876         """
5877         m = _split_ebuild_name_glep55_re.match(name)
5878         if m is None:
5879                 return (None, None)
5880         return (m.group(1), m.group(3))
5881
5882 def doebuild_environment(myebuild, mydo, myroot, mysettings, debug, use_cache, mydbapi):
5883
5884         ebuild_path = os.path.abspath(myebuild)
5885         pkg_dir     = os.path.dirname(ebuild_path)
5886
5887         if "CATEGORY" in mysettings.configdict["pkg"]:
5888                 cat = mysettings.configdict["pkg"]["CATEGORY"]
5889         else:
5890                 cat = os.path.basename(normalize_path(os.path.join(pkg_dir, "..")))
5891
5892         eapi = None
5893         if 'parse-eapi-glep-55' in mysettings.features:
5894                 mypv, eapi = portage._split_ebuild_name_glep55(
5895                         os.path.basename(myebuild))
5896         else:
5897                 mypv = os.path.basename(ebuild_path)[:-7]
5898
5899         mycpv = cat+"/"+mypv
5900         mysplit=pkgsplit(mypv,silent=0)
5901         if mysplit is None:
5902                 raise portage.exception.IncorrectParameter(
5903                         _("Invalid ebuild path: '%s'") % myebuild)
5904
5905         # Make a backup of PORTAGE_TMPDIR prior to calling config.reset()
5906         # so that the caller can override it.
5907         tmpdir = mysettings["PORTAGE_TMPDIR"]
5908
5909         if mydo == 'depend':
5910                 if mycpv != mysettings.mycpv:
5911                         # Don't pass in mydbapi here since the resulting aux_get
5912                         # call would lead to infinite 'depend' phase recursion.
5913                         mysettings.setcpv(mycpv)
5914         else:
5915                 # If IUSE isn't in configdict['pkg'], it means that setcpv()
5916                 # hasn't been called with the mydb argument, so we have to
5917                 # call it here (portage code always calls setcpv properly,
5918                 # but api consumers might not).
5919                 if mycpv != mysettings.mycpv or \
5920                         'IUSE' not in mysettings.configdict['pkg']:
5921                         # Reload env.d variables and reset any previous settings.
5922                         mysettings.reload()
5923                         mysettings.reset()
5924                         mysettings.setcpv(mycpv, mydb=mydbapi)
5925
5926         # config.reset() might have reverted a change made by the caller,
5927         # so restore it to it's original value.
5928         mysettings["PORTAGE_TMPDIR"] = tmpdir
5929
5930         mysettings.pop("EBUILD_PHASE", None) # remove from backupenv
5931         mysettings["EBUILD_PHASE"] = mydo
5932
5933         mysettings["PORTAGE_MASTER_PID"] = str(os.getpid())
5934
5935         # We are disabling user-specific bashrc files.
5936         mysettings["BASH_ENV"] = INVALID_ENV_FILE
5937
5938         if debug: # Otherwise it overrides emerge's settings.
5939                 # We have no other way to set debug... debug can't be passed in
5940                 # due to how it's coded... Don't overwrite this so we can use it.
5941                 mysettings["PORTAGE_DEBUG"] = "1"
5942
5943         mysettings["ROOT"]     = myroot
5944         mysettings["STARTDIR"] = getcwd()
5945         mysettings["EBUILD"]   = ebuild_path
5946         mysettings["O"]        = pkg_dir
5947         mysettings.configdict["pkg"]["CATEGORY"] = cat
5948         mysettings["FILESDIR"] = pkg_dir+"/files"
5949         mysettings["PF"]       = mypv
5950
5951         if hasattr(mydbapi, '_repo_info'):
5952                 mytree = os.path.dirname(os.path.dirname(pkg_dir))
5953                 repo_info = mydbapi._repo_info[mytree]
5954                 mysettings['PORTDIR'] = repo_info.portdir
5955                 mysettings['PORTDIR_OVERLAY'] = repo_info.portdir_overlay
5956
5957         mysettings["PORTDIR"] = os.path.realpath(mysettings["PORTDIR"])
5958         mysettings["DISTDIR"] = os.path.realpath(mysettings["DISTDIR"])
5959         mysettings["RPMDIR"]  = os.path.realpath(mysettings["RPMDIR"])
5960
5961         mysettings["ECLASSDIR"]   = mysettings["PORTDIR"]+"/eclass"
5962         mysettings["SANDBOX_LOG"] = mycpv.replace("/", "_-_")
5963
5964         mysettings["PROFILE_PATHS"] = "\n".join(mysettings.profiles)
5965         mysettings["P"]  = mysplit[0]+"-"+mysplit[1]
5966         mysettings["PN"] = mysplit[0]
5967         mysettings["PV"] = mysplit[1]
5968         mysettings["PR"] = mysplit[2]
5969
5970         if portage.util.noiselimit < 0:
5971                 mysettings["PORTAGE_QUIET"] = "1"
5972
5973         if mydo == 'depend' and \
5974                 'EAPI' not in mysettings.configdict['pkg']:
5975
5976                 if eapi is not None:
5977                         # From parse-eapi-glep-55 above.
5978                         pass
5979                 elif 'parse-eapi-ebuild-head' in mysettings.features:
5980                         eapi = _parse_eapi_ebuild_head(
5981                                 codecs.open(_unicode_encode(ebuild_path,
5982                                 encoding=_encodings['fs'], errors='strict'),
5983                                 mode='r', encoding=_encodings['content'], errors='replace'))
5984
5985                 if eapi is not None:
5986                         if not eapi_is_supported(eapi):
5987                                 raise portage.exception.UnsupportedAPIException(mycpv, eapi)
5988                         mysettings.configdict['pkg']['EAPI'] = eapi
5989
5990         if mydo != "depend":
5991                 # Metadata vars such as EAPI and RESTRICT are
5992                 # set by the above config.setcpv() call.
5993                 eapi = mysettings["EAPI"]
5994                 if not eapi_is_supported(eapi):
5995                         # can't do anything with this.
5996                         raise portage.exception.UnsupportedAPIException(mycpv, eapi)
5997
5998         if mysplit[2] == "r0":
5999                 mysettings["PVR"]=mysplit[1]
6000         else:
6001                 mysettings["PVR"]=mysplit[1]+"-"+mysplit[2]
6002
6003         if "PATH" in mysettings:
6004                 mysplit=mysettings["PATH"].split(":")
6005         else:
6006                 mysplit=[]
6007         # Note: PORTAGE_BIN_PATH may differ from the global constant
6008         # when portage is reinstalling itself.
6009         portage_bin_path = mysettings["PORTAGE_BIN_PATH"]
6010         if portage_bin_path not in mysplit:
6011                 mysettings["PATH"] = portage_bin_path + ":" + mysettings["PATH"]
6012
6013         # Sandbox needs cannonical paths.
6014         mysettings["PORTAGE_TMPDIR"] = os.path.realpath(
6015                 mysettings["PORTAGE_TMPDIR"])
6016         mysettings["BUILD_PREFIX"] = mysettings["PORTAGE_TMPDIR"]+"/portage"
6017         mysettings["PKG_TMPDIR"]   = mysettings["PORTAGE_TMPDIR"]+"/binpkgs"
6018         
6019         # Package {pre,post}inst and {pre,post}rm may overlap, so they must have separate
6020         # locations in order to prevent interference.
6021         if mydo in ("unmerge", "prerm", "postrm", "cleanrm"):
6022                 mysettings["PORTAGE_BUILDDIR"] = os.path.join(
6023                         mysettings["PKG_TMPDIR"],
6024                         mysettings["CATEGORY"], mysettings["PF"])
6025         else:
6026                 mysettings["PORTAGE_BUILDDIR"] = os.path.join(
6027                         mysettings["BUILD_PREFIX"],
6028                         mysettings["CATEGORY"], mysettings["PF"])
6029
6030         mysettings["HOME"] = os.path.join(mysettings["PORTAGE_BUILDDIR"], "homedir")
6031         mysettings["WORKDIR"] = os.path.join(mysettings["PORTAGE_BUILDDIR"], "work")
6032         mysettings["D"] = os.path.join(mysettings["PORTAGE_BUILDDIR"], "image") + os.sep
6033         mysettings["T"] = os.path.join(mysettings["PORTAGE_BUILDDIR"], "temp")
6034
6035         mysettings["PORTAGE_BASHRC"] = os.path.join(
6036                 mysettings["PORTAGE_CONFIGROOT"], EBUILD_SH_ENV_FILE)
6037         mysettings["EBUILD_EXIT_STATUS_FILE"] = os.path.join(
6038                 mysettings["PORTAGE_BUILDDIR"], ".exit_status")
6039
6040         #set up KV variable -- DEP SPEEDUP :: Don't waste time. Keep var persistent.
6041         if eapi not in ('0', '1', '2'):
6042                 # Discard KV for EAPIs that don't support it. Cache KV is restored
6043                 # from the backupenv whenever config.reset() is called.
6044                 mysettings.pop('KV', None)
6045         elif mydo != 'depend' and 'KV' not in mysettings and \
6046                 mydo in ('compile', 'config', 'configure', 'info',
6047                 'install', 'nofetch', 'postinst', 'postrm', 'preinst',
6048                 'prepare', 'prerm', 'setup', 'test', 'unpack'):
6049                 mykv,err1=ExtractKernelVersion(os.path.join(myroot, "usr/src/linux"))
6050                 if mykv:
6051                         # Regular source tree
6052                         mysettings["KV"]=mykv
6053                 else:
6054                         mysettings["KV"]=""
6055                 mysettings.backup_changes("KV")
6056
6057         # Allow color.map to control colors associated with einfo, ewarn, etc...
6058         mycolors = []
6059         for c in ("GOOD", "WARN", "BAD", "HILITE", "BRACKET"):
6060                 mycolors.append("%s=$'%s'" % \
6061                         (c, portage.output.style_to_ansi_code(c)))
6062         mysettings["PORTAGE_COLORMAP"] = "\n".join(mycolors)
6063
6064 def prepare_build_dirs(myroot, mysettings, cleanup):
6065
6066         clean_dirs = [mysettings["HOME"]]
6067
6068         # We enable cleanup when we want to make sure old cruft (such as the old
6069         # environment) doesn't interfere with the current phase.
6070         if cleanup:
6071                 clean_dirs.append(mysettings["T"])
6072
6073         for clean_dir in clean_dirs:
6074                 try:
6075                         shutil.rmtree(clean_dir)
6076                 except OSError as oe:
6077                         if errno.ENOENT == oe.errno:
6078                                 pass
6079                         elif errno.EPERM == oe.errno:
6080                                 writemsg("%s\n" % oe, noiselevel=-1)
6081                                 writemsg(_("Operation Not Permitted: rmtree('%s')\n") % \
6082                                         clean_dir, noiselevel=-1)
6083                                 return 1
6084                         else:
6085                                 raise
6086
6087         def makedirs(dir_path):
6088                 try:
6089                         os.makedirs(dir_path)
6090                 except OSError as oe:
6091                         if errno.EEXIST == oe.errno:
6092                                 pass
6093                         elif errno.EPERM == oe.errno:
6094                                 writemsg("%s\n" % oe, noiselevel=-1)
6095                                 writemsg(_("Operation Not Permitted: makedirs('%s')\n") % \
6096                                         dir_path, noiselevel=-1)
6097                                 return False
6098                         else:
6099                                 raise
6100                 return True
6101
6102         mysettings["PKG_LOGDIR"] = os.path.join(mysettings["T"], "logging")
6103
6104         mydirs = [os.path.dirname(mysettings["PORTAGE_BUILDDIR"])]
6105         mydirs.append(os.path.dirname(mydirs[-1]))
6106
6107         try:
6108                 for mydir in mydirs:
6109                         portage.util.ensure_dirs(mydir)
6110                         portage.util.apply_secpass_permissions(mydir,
6111                                 gid=portage_gid, uid=portage_uid, mode=0o70, mask=0)
6112                 for dir_key in ("PORTAGE_BUILDDIR", "HOME", "PKG_LOGDIR", "T"):
6113                         """These directories don't necessarily need to be group writable.
6114                         However, the setup phase is commonly run as a privileged user prior
6115                         to the other phases being run by an unprivileged user.  Currently,
6116                         we use the portage group to ensure that the unprivleged user still
6117                         has write access to these directories in any case."""
6118                         portage.util.ensure_dirs(mysettings[dir_key], mode=0o775)
6119                         portage.util.apply_secpass_permissions(mysettings[dir_key],
6120                                 uid=portage_uid, gid=portage_gid)
6121         except portage.exception.PermissionDenied as e:
6122                 writemsg(_("Permission Denied: %s\n") % str(e), noiselevel=-1)
6123                 return 1
6124         except portage.exception.OperationNotPermitted as e:
6125                 writemsg(_("Operation Not Permitted: %s\n") % str(e), noiselevel=-1)
6126                 return 1
6127         except portage.exception.FileNotFound as e:
6128                 writemsg(_("File Not Found: '%s'\n") % str(e), noiselevel=-1)
6129                 return 1
6130
6131         _prepare_workdir(mysettings)
6132         if mysettings.get('EBUILD_PHASE') != 'fetch':
6133                 # Avoid spurious permissions adjustments when fetching with
6134                 # a temporary PORTAGE_TMPDIR setting (for fetchonly).
6135                 _prepare_features_dirs(mysettings)
6136
6137 def _adjust_perms_msg(settings, msg):
6138
6139         def write(msg):
6140                 writemsg(msg, noiselevel=-1)
6141
6142         background = settings.get("PORTAGE_BACKGROUND") == "1"
6143         log_path = settings.get("PORTAGE_LOG_FILE")
6144         log_file = None
6145
6146         if background and log_path is not None:
6147                 try:
6148                         log_file = codecs.open(_unicode_encode(log_path,
6149                                 encoding=_encodings['fs'], errors='strict'),
6150                                 mode='a', encoding=_encodings['content'], errors='replace')
6151                 except IOError:
6152                         def write(msg):
6153                                 pass
6154                 else:
6155                         def write(msg):
6156                                 log_file.write(_unicode_decode(msg))
6157                                 log_file.flush()
6158
6159         try:
6160                 write(msg)
6161         finally:
6162                 if log_file is not None:
6163                         log_file.close()
6164
6165 def _prepare_features_dirs(mysettings):
6166
6167         features_dirs = {
6168                 "ccache":{
6169                         "path_dir": "/usr/lib/ccache/bin",
6170                         "basedir_var":"CCACHE_DIR",
6171                         "default_dir":os.path.join(mysettings["PORTAGE_TMPDIR"], "ccache"),
6172                         "always_recurse":False},
6173                 "distcc":{
6174                         "path_dir": "/usr/lib/distcc/bin",
6175                         "basedir_var":"DISTCC_DIR",
6176                         "default_dir":os.path.join(mysettings["BUILD_PREFIX"], ".distcc"),
6177                         "subdirs":("lock", "state"),
6178                         "always_recurse":True}
6179         }
6180         dirmode  = 0o2070
6181         filemode =   0o60
6182         modemask =    0o2
6183         restrict = mysettings.get("PORTAGE_RESTRICT","").split()
6184         from portage.data import secpass
6185         droppriv = secpass >= 2 and \
6186                 "userpriv" in mysettings.features and \
6187                 "userpriv" not in restrict
6188         for myfeature, kwargs in features_dirs.items():
6189                 if myfeature in mysettings.features:
6190                         failure = False
6191                         basedir = mysettings.get(kwargs["basedir_var"])
6192                         if basedir is None or not basedir.strip():
6193                                 basedir = kwargs["default_dir"]
6194                                 mysettings[kwargs["basedir_var"]] = basedir
6195                         try:
6196                                 path_dir = kwargs["path_dir"]
6197                                 if not os.path.isdir(path_dir):
6198                                         raise portage.exception.DirectoryNotFound(path_dir)
6199
6200                                 mydirs = [mysettings[kwargs["basedir_var"]]]
6201                                 if "subdirs" in kwargs:
6202                                         for subdir in kwargs["subdirs"]:
6203                                                 mydirs.append(os.path.join(basedir, subdir))
6204                                 for mydir in mydirs:
6205                                         modified = portage.util.ensure_dirs(mydir)
6206                                         # Generally, we only want to apply permissions for
6207                                         # initial creation.  Otherwise, we don't know exactly what
6208                                         # permissions the user wants, so should leave them as-is.
6209                                         droppriv_fix = False
6210                                         if droppriv:
6211                                                 st = os.stat(mydir)
6212                                                 if st.st_gid != portage_gid or \
6213                                                         not dirmode == (stat.S_IMODE(st.st_mode) & dirmode):
6214                                                         droppriv_fix = True
6215                                                 if not droppriv_fix:
6216                                                         # Check permissions of files in the directory.
6217                                                         for filename in os.listdir(mydir):
6218                                                                 try:
6219                                                                         subdir_st = os.lstat(
6220                                                                                 os.path.join(mydir, filename))
6221                                                                 except OSError:
6222                                                                         continue
6223                                                                 if subdir_st.st_gid != portage_gid or \
6224                                                                         ((stat.S_ISDIR(subdir_st.st_mode) and \
6225                                                                         not dirmode == (stat.S_IMODE(subdir_st.st_mode) & dirmode))):
6226                                                                         droppriv_fix = True
6227                                                                         break
6228
6229                                         if droppriv_fix:
6230                                                 _adjust_perms_msg(mysettings,
6231                                                         colorize("WARN", " * ") + \
6232                                                         _("Adjusting permissions "
6233                                                         "for FEATURES=userpriv: '%s'\n") % mydir)
6234                                         elif modified:
6235                                                 _adjust_perms_msg(mysettings,
6236                                                         colorize("WARN", " * ") + \
6237                                                         _("Adjusting permissions "
6238                                                         "for FEATURES=%s: '%s'\n") % (myfeature, mydir))
6239
6240                                         if modified or kwargs["always_recurse"] or droppriv_fix:
6241                                                 def onerror(e):
6242                                                         raise   # The feature is disabled if a single error
6243                                                                         # occurs during permissions adjustment.
6244                                                 if not apply_recursive_permissions(mydir,
6245                                                 gid=portage_gid, dirmode=dirmode, dirmask=modemask,
6246                                                 filemode=filemode, filemask=modemask, onerror=onerror):
6247                                                         raise portage.exception.OperationNotPermitted(
6248                                                                 _("Failed to apply recursive permissions for the portage group."))
6249
6250                         except portage.exception.DirectoryNotFound as e:
6251                                 failure = True
6252                                 writemsg(_("\n!!! Directory does not exist: '%s'\n") % \
6253                                         (e,), noiselevel=-1)
6254                                 writemsg(_("!!! Disabled FEATURES='%s'\n") % myfeature,
6255                                         noiselevel=-1)
6256
6257                         except portage.exception.PortageException as e:
6258                                 failure = True
6259                                 writemsg("\n!!! %s\n" % str(e), noiselevel=-1)
6260                                 writemsg(_("!!! Failed resetting perms on %s='%s'\n") % \
6261                                         (kwargs["basedir_var"], basedir), noiselevel=-1)
6262                                 writemsg(_("!!! Disabled FEATURES='%s'\n") % myfeature,
6263                                         noiselevel=-1)
6264
6265                         if failure:
6266                                 mysettings.features.remove(myfeature)
6267                                 mysettings['FEATURES'] = ' '.join(sorted(mysettings.features))
6268                                 time.sleep(5)
6269
6270 def _prepare_workdir(mysettings):
6271         workdir_mode = 0o700
6272         try:
6273                 mode = mysettings["PORTAGE_WORKDIR_MODE"]
6274                 if mode.isdigit():
6275                         parsed_mode = int(mode, 8)
6276                 elif mode == "":
6277                         raise KeyError()
6278                 else:
6279                         raise ValueError()
6280                 if parsed_mode & 0o7777 != parsed_mode:
6281                         raise ValueError("Invalid file mode: %s" % mode)
6282                 else:
6283                         workdir_mode = parsed_mode
6284         except KeyError as e:
6285                 writemsg(_("!!! PORTAGE_WORKDIR_MODE is unset, using %s.\n") % oct(workdir_mode))
6286         except ValueError as e:
6287                 if len(str(e)) > 0:
6288                         writemsg("%s\n" % e)
6289                 writemsg(_("!!! Unable to parse PORTAGE_WORKDIR_MODE='%s', using %s.\n") % \
6290                 (mysettings["PORTAGE_WORKDIR_MODE"], oct(workdir_mode)))
6291         mysettings["PORTAGE_WORKDIR_MODE"] = oct(workdir_mode).replace('o', '')
6292         try:
6293                 apply_secpass_permissions(mysettings["WORKDIR"],
6294                 uid=portage_uid, gid=portage_gid, mode=workdir_mode)
6295         except portage.exception.FileNotFound:
6296                 pass # ebuild.sh will create it
6297
6298         if mysettings.get("PORT_LOGDIR", "") == "":
6299                 while "PORT_LOGDIR" in mysettings:
6300                         del mysettings["PORT_LOGDIR"]
6301         if "PORT_LOGDIR" in mysettings:
6302                 try:
6303                         modified = portage.util.ensure_dirs(mysettings["PORT_LOGDIR"])
6304                         if modified:
6305                                 apply_secpass_permissions(mysettings["PORT_LOGDIR"],
6306                                         uid=portage_uid, gid=portage_gid, mode=0o2770)
6307                 except portage.exception.PortageException as e:
6308                         writemsg("!!! %s\n" % str(e), noiselevel=-1)
6309                         writemsg(_("!!! Permission issues with PORT_LOGDIR='%s'\n") % \
6310                                 mysettings["PORT_LOGDIR"], noiselevel=-1)
6311                         writemsg(_("!!! Disabling logging.\n"), noiselevel=-1)
6312                         while "PORT_LOGDIR" in mysettings:
6313                                 del mysettings["PORT_LOGDIR"]
6314         if "PORT_LOGDIR" in mysettings and \
6315                 os.access(mysettings["PORT_LOGDIR"], os.W_OK):
6316                 logid_path = os.path.join(mysettings["PORTAGE_BUILDDIR"], ".logid")
6317                 if not os.path.exists(logid_path):
6318                         open(_unicode_encode(logid_path), 'w')
6319                 logid_time = _unicode_decode(time.strftime("%Y%m%d-%H%M%S",
6320                         time.gmtime(os.stat(logid_path).st_mtime)),
6321                         encoding=_encodings['content'], errors='replace')
6322                 mysettings["PORTAGE_LOG_FILE"] = os.path.join(
6323                         mysettings["PORT_LOGDIR"], "%s:%s:%s.log" % \
6324                         (mysettings["CATEGORY"], mysettings["PF"], logid_time))
6325                 del logid_path, logid_time
6326         else:
6327                 # NOTE: When sesandbox is enabled, the local SELinux security policies
6328                 # may not allow output to be piped out of the sesandbox domain. The
6329                 # current policy will allow it to work when a pty is available, but
6330                 # not through a normal pipe. See bug #162404.
6331                 mysettings["PORTAGE_LOG_FILE"] = os.path.join(
6332                         mysettings["T"], "build.log")
6333
6334 def _doebuild_exit_status_check(mydo, settings):
6335         """
6336         Returns an error string if the shell appeared
6337         to exit unsuccessfully, None otherwise.
6338         """
6339         exit_status_file = settings.get("EBUILD_EXIT_STATUS_FILE")
6340         if not exit_status_file or \
6341                 os.path.exists(exit_status_file):
6342                 return None
6343         msg = _("The ebuild phase '%s' has exited "
6344         "unexpectedly. This type of behavior "
6345         "is known to be triggered "
6346         "by things such as failed variable "
6347         "assignments (bug #190128) or bad substitution "
6348         "errors (bug #200313). Normally, before exiting, bash should "
6349         "have displayed an error message above. If bash did not "
6350         "produce an error message above, it's possible "
6351         "that the ebuild has called `exit` when it "
6352         "should have called `die` instead. This behavior may also "
6353         "be triggered by a corrupt bash binary or a hardware "
6354         "problem such as memory or cpu malfunction. If the problem is not "
6355         "reproducible or it appears to occur randomly, then it is likely "
6356         "to be triggered by a hardware problem. "
6357         "If you suspect a hardware problem then you should "
6358         "try some basic hardware diagnostics such as memtest. "
6359         "Please do not report this as a bug unless it is consistently "
6360         "reproducible and you are sure that your bash binary and hardware "
6361         "are functioning properly.") % mydo
6362         return msg
6363
6364 def _doebuild_exit_status_check_and_log(settings, mydo, retval):
6365         msg = _doebuild_exit_status_check(mydo, settings)
6366         if msg:
6367                 if retval == os.EX_OK:
6368                         retval = 1
6369                 from textwrap import wrap
6370                 from portage.elog.messages import eerror
6371                 for l in wrap(msg, 72):
6372                         eerror(l, phase=mydo, key=settings.mycpv)
6373         return retval
6374
6375 def _doebuild_exit_status_unlink(exit_status_file):
6376         """
6377         Double check to make sure it really doesn't exist
6378         and raise an OSError if it still does (it shouldn't).
6379         OSError if necessary.
6380         """
6381         if not exit_status_file:
6382                 return
6383         try:
6384                 os.unlink(exit_status_file)
6385         except OSError:
6386                 pass
6387         if os.path.exists(exit_status_file):
6388                 os.unlink(exit_status_file)
6389
6390 _doebuild_manifest_exempt_depend = 0
6391 _doebuild_manifest_cache = None
6392 _doebuild_broken_ebuilds = set()
6393 _doebuild_broken_manifests = set()
6394
6395 def doebuild(myebuild, mydo, myroot, mysettings, debug=0, listonly=0,
6396         fetchonly=0, cleanup=0, dbkey=None, use_cache=1, fetchall=0, tree=None,
6397         mydbapi=None, vartree=None, prev_mtimes=None,
6398         fd_pipes=None, returnpid=False):
6399
6400         """
6401         Wrapper function that invokes specific ebuild phases through the spawning
6402         of ebuild.sh
6403         
6404         @param myebuild: name of the ebuild to invoke the phase on (CPV)
6405         @type myebuild: String
6406         @param mydo: Phase to run
6407         @type mydo: String
6408         @param myroot: $ROOT (usually '/', see man make.conf)
6409         @type myroot: String
6410         @param mysettings: Portage Configuration
6411         @type mysettings: instance of portage.config
6412         @param debug: Turns on various debug information (eg, debug for spawn)
6413         @type debug: Boolean
6414         @param listonly: Used to wrap fetch(); passed such that fetch only lists files required.
6415         @type listonly: Boolean
6416         @param fetchonly: Used to wrap fetch(); passed such that files are only fetched (no other actions)
6417         @type fetchonly: Boolean
6418         @param cleanup: Passed to prepare_build_dirs (TODO: what does it do?)
6419         @type cleanup: Boolean
6420         @param dbkey: A dict (usually keys and values from the depend phase, such as KEYWORDS, USE, etc..)
6421         @type dbkey: Dict or String
6422         @param use_cache: Enables the cache
6423         @type use_cache: Boolean
6424         @param fetchall: Used to wrap fetch(), fetches all URI's (even ones invalid due to USE conditionals)
6425         @type fetchall: Boolean
6426         @param tree: Which tree to use ('vartree','porttree','bintree', etc..), defaults to 'porttree'
6427         @type tree: String
6428         @param mydbapi: a dbapi instance to pass to various functions; this should be a portdbapi instance.
6429         @type mydbapi: portdbapi instance
6430         @param vartree: A instance of vartree; used for aux_get calls, defaults to db[myroot]['vartree']
6431         @type vartree: vartree instance
6432         @param prev_mtimes: A dict of { filename:mtime } keys used by merge() to do config_protection
6433         @type prev_mtimes: dictionary
6434         @param fd_pipes: A dict of mapping for pipes, { '0': stdin, '1': stdout }
6435                 for example.
6436         @type fd_pipes: Dictionary
6437         @param returnpid: Return a list of process IDs for a successful spawn, or
6438                 an integer value if spawn is unsuccessful. NOTE: This requires the
6439                 caller clean up all returned PIDs.
6440         @type returnpid: Boolean
6441         @rtype: Boolean
6442         @returns:
6443         1. 0 for success
6444         2. 1 for error
6445         
6446         Most errors have an accompanying error message.
6447         
6448         listonly and fetchonly are only really necessary for operations involving 'fetch'
6449         prev_mtimes are only necessary for merge operations.
6450         Other variables may not be strictly required, many have defaults that are set inside of doebuild.
6451         
6452         """
6453         
6454         if not tree:
6455                 writemsg("Warning: tree not specified to doebuild\n")
6456                 tree = "porttree"
6457         global db
6458         
6459         # chunked out deps for each phase, so that ebuild binary can use it 
6460         # to collapse targets down.
6461         actionmap_deps={
6462         "setup":  [],
6463         "unpack": ["setup"],
6464         "prepare": ["unpack"],
6465         "configure": ["prepare"],
6466         "compile":["configure"],
6467         "test":   ["compile"],
6468         "install":["test"],
6469         "rpm":    ["install"],
6470         "package":["install"],
6471         }
6472         
6473         if mydbapi is None:
6474                 mydbapi = db[myroot][tree].dbapi
6475
6476         if vartree is None and mydo in ("merge", "qmerge", "unmerge"):
6477                 vartree = db[myroot]["vartree"]
6478
6479         features = mysettings.features
6480         noauto = "noauto" in features
6481         from portage.data import secpass
6482
6483         clean_phases = ("clean", "cleanrm")
6484         validcommands = ["help","clean","prerm","postrm","cleanrm","preinst","postinst",
6485                         "config", "info", "setup", "depend", "pretend",
6486                         "fetch", "fetchall", "digest",
6487                         "unpack", "prepare", "configure", "compile", "test",
6488                         "install", "rpm", "qmerge", "merge",
6489                         "package","unmerge", "manifest"]
6490
6491         if mydo not in validcommands:
6492                 validcommands.sort()
6493                 writemsg("!!! doebuild: '%s' is not one of the following valid commands:" % mydo,
6494                         noiselevel=-1)
6495                 for vcount in range(len(validcommands)):
6496                         if vcount%6 == 0:
6497                                 writemsg("\n!!! ", noiselevel=-1)
6498                         writemsg(validcommands[vcount].ljust(11), noiselevel=-1)
6499                 writemsg("\n", noiselevel=-1)
6500                 return 1
6501
6502         if mydo == "fetchall":
6503                 fetchall = 1
6504                 mydo = "fetch"
6505
6506         parallel_fetchonly = mydo in ("fetch", "fetchall") and \
6507                 "PORTAGE_PARALLEL_FETCHONLY" in mysettings
6508
6509         if mydo not in clean_phases and not os.path.exists(myebuild):
6510                 writemsg("!!! doebuild: %s not found for %s\n" % (myebuild, mydo),
6511                         noiselevel=-1)
6512                 return 1
6513
6514         global _doebuild_manifest_exempt_depend
6515
6516         if "strict" in features and \
6517                 "digest" not in features and \
6518                 tree == "porttree" and \
6519                 mydo not in ("digest", "manifest", "help") and \
6520                 not _doebuild_manifest_exempt_depend:
6521                 # Always verify the ebuild checksums before executing it.
6522                 global _doebuild_manifest_cache, _doebuild_broken_ebuilds, \
6523                         _doebuild_broken_ebuilds
6524
6525                 if myebuild in _doebuild_broken_ebuilds:
6526                         return 1
6527
6528                 pkgdir = os.path.dirname(myebuild)
6529                 manifest_path = os.path.join(pkgdir, "Manifest")
6530
6531                 # Avoid checking the same Manifest several times in a row during a
6532                 # regen with an empty cache.
6533                 if _doebuild_manifest_cache is None or \
6534                         _doebuild_manifest_cache.getFullname() != manifest_path:
6535                         _doebuild_manifest_cache = None
6536                         if not os.path.exists(manifest_path):
6537                                 out = portage.output.EOutput()
6538                                 out.eerror(_("Manifest not found for '%s'") % (myebuild,))
6539                                 _doebuild_broken_ebuilds.add(myebuild)
6540                                 return 1
6541                         mf = Manifest(pkgdir, mysettings["DISTDIR"])
6542
6543                 else:
6544                         mf = _doebuild_manifest_cache
6545
6546                 try:
6547                         mf.checkFileHashes("EBUILD", os.path.basename(myebuild))
6548                 except KeyError:
6549                         out = portage.output.EOutput()
6550                         out.eerror(_("Missing digest for '%s'") % (myebuild,))
6551                         _doebuild_broken_ebuilds.add(myebuild)
6552                         return 1
6553                 except portage.exception.FileNotFound:
6554                         out = portage.output.EOutput()
6555                         out.eerror(_("A file listed in the Manifest "
6556                                 "could not be found: '%s'") % (myebuild,))
6557                         _doebuild_broken_ebuilds.add(myebuild)
6558                         return 1
6559                 except portage.exception.DigestException as e:
6560                         out = portage.output.EOutput()
6561                         out.eerror(_("Digest verification failed:"))
6562                         out.eerror("%s" % e.value[0])
6563                         out.eerror(_("Reason: %s") % e.value[1])
6564                         out.eerror(_("Got: %s") % e.value[2])
6565                         out.eerror(_("Expected: %s") % e.value[3])
6566                         _doebuild_broken_ebuilds.add(myebuild)
6567                         return 1
6568
6569                 if mf.getFullname() in _doebuild_broken_manifests:
6570                         return 1
6571
6572                 if mf is not _doebuild_manifest_cache:
6573
6574                         # Make sure that all of the ebuilds are
6575                         # actually listed in the Manifest.
6576                         glep55 = 'parse-eapi-glep-55' in mysettings.features
6577                         for f in os.listdir(pkgdir):
6578                                 pf = None
6579                                 if glep55:
6580                                         pf, eapi = _split_ebuild_name_glep55(f)
6581                                 elif f[-7:] == '.ebuild':
6582                                         pf = f[:-7]
6583                                 if pf is not None and not mf.hasFile("EBUILD", f):
6584                                         f = os.path.join(pkgdir, f)
6585                                         if f not in _doebuild_broken_ebuilds:
6586                                                 out = portage.output.EOutput()
6587                                                 out.eerror(_("A file is not listed in the "
6588                                                         "Manifest: '%s'") % (f,))
6589                                         _doebuild_broken_manifests.add(manifest_path)
6590                                         return 1
6591
6592                         # Only cache it if the above stray files test succeeds.
6593                         _doebuild_manifest_cache = mf
6594
6595         def exit_status_check(retval):
6596                 msg = _doebuild_exit_status_check(mydo, mysettings)
6597                 if msg:
6598                         if retval == os.EX_OK:
6599                                 retval = 1
6600                         from textwrap import wrap
6601                         from portage.elog.messages import eerror
6602                         for l in wrap(msg, 72):
6603                                 eerror(l, phase=mydo, key=mysettings.mycpv)
6604                 return retval
6605
6606         # Note: PORTAGE_BIN_PATH may differ from the global
6607         # constant when portage is reinstalling itself.
6608         portage_bin_path = mysettings["PORTAGE_BIN_PATH"]
6609         ebuild_sh_binary = os.path.join(portage_bin_path,
6610                 os.path.basename(EBUILD_SH_BINARY))
6611         misc_sh_binary = os.path.join(portage_bin_path,
6612                 os.path.basename(MISC_SH_BINARY))
6613
6614         logfile=None
6615         builddir_lock = None
6616         tmpdir = None
6617         tmpdir_orig = None
6618
6619         try:
6620                 if mydo in ("digest", "manifest", "help"):
6621                         # Temporarily exempt the depend phase from manifest checks, in case
6622                         # aux_get calls trigger cache generation.
6623                         _doebuild_manifest_exempt_depend += 1
6624
6625                 # If we don't need much space and we don't need a constant location,
6626                 # we can temporarily override PORTAGE_TMPDIR with a random temp dir
6627                 # so that there's no need for locking and it can be used even if the
6628                 # user isn't in the portage group.
6629                 if mydo in ("info",):
6630                         from tempfile import mkdtemp
6631                         tmpdir = mkdtemp()
6632                         tmpdir_orig = mysettings["PORTAGE_TMPDIR"]
6633                         mysettings["PORTAGE_TMPDIR"] = tmpdir
6634
6635                 doebuild_environment(myebuild, mydo, myroot, mysettings, debug,
6636                         use_cache, mydbapi)
6637
6638                 if mydo in clean_phases:
6639                         retval = spawn(_shell_quote(ebuild_sh_binary) + " clean",
6640                                 mysettings, debug=debug, fd_pipes=fd_pipes, free=1,
6641                                 logfile=None, returnpid=returnpid)
6642                         return retval
6643
6644                 restrict = set(mysettings.get('PORTAGE_RESTRICT', '').split())
6645                 # get possible slot information from the deps file
6646                 if mydo == "depend":
6647                         writemsg("!!! DEBUG: dbkey: %s\n" % str(dbkey), 2)
6648                         droppriv = "userpriv" in mysettings.features
6649                         if returnpid:
6650                                 mypids = spawn(_shell_quote(ebuild_sh_binary) + " depend",
6651                                         mysettings, fd_pipes=fd_pipes, returnpid=True,
6652                                         droppriv=droppriv)
6653                                 return mypids
6654                         elif isinstance(dbkey, dict):
6655                                 mysettings["dbkey"] = ""
6656                                 pr, pw = os.pipe()
6657                                 fd_pipes = {
6658                                         0:sys.stdin.fileno(),
6659                                         1:sys.stdout.fileno(),
6660                                         2:sys.stderr.fileno(),
6661                                         9:pw}
6662                                 mypids = spawn(_shell_quote(ebuild_sh_binary) + " depend",
6663                                         mysettings,
6664                                         fd_pipes=fd_pipes, returnpid=True, droppriv=droppriv)
6665                                 os.close(pw) # belongs exclusively to the child process now
6666                                 f = os.fdopen(pr, 'rb')
6667                                 for k, v in zip(auxdbkeys,
6668                                         (_unicode_decode(line).rstrip('\n') for line in f)):
6669                                         dbkey[k] = v
6670                                 f.close()
6671                                 retval = os.waitpid(mypids[0], 0)[1]
6672                                 portage.process.spawned_pids.remove(mypids[0])
6673                                 # If it got a signal, return the signal that was sent, but
6674                                 # shift in order to distinguish it from a return value. (just
6675                                 # like portage.process.spawn() would do).
6676                                 if retval & 0xff:
6677                                         retval = (retval & 0xff) << 8
6678                                 else:
6679                                         # Otherwise, return its exit code.
6680                                         retval = retval >> 8
6681                                 if retval == os.EX_OK and len(dbkey) != len(auxdbkeys):
6682                                         # Don't trust bash's returncode if the
6683                                         # number of lines is incorrect.
6684                                         retval = 1
6685                                 return retval
6686                         elif dbkey:
6687                                 mysettings["dbkey"] = dbkey
6688                         else:
6689                                 mysettings["dbkey"] = \
6690                                         os.path.join(mysettings.depcachedir, "aux_db_key_temp")
6691
6692                         return spawn(_shell_quote(ebuild_sh_binary) + " depend",
6693                                 mysettings,
6694                                 droppriv=droppriv)
6695
6696                 # Validate dependency metadata here to ensure that ebuilds with invalid
6697                 # data are never installed via the ebuild command. Don't bother when
6698                 # returnpid == True since there's no need to do this every time emerge
6699                 # executes a phase.
6700                 if not returnpid:
6701                         rval = _validate_deps(mysettings, myroot, mydo, mydbapi)
6702                         if rval != os.EX_OK:
6703                                 return rval
6704
6705                 if "PORTAGE_TMPDIR" not in mysettings or \
6706                         not os.path.isdir(mysettings["PORTAGE_TMPDIR"]):
6707                         writemsg(_("The directory specified in your "
6708                                 "PORTAGE_TMPDIR variable, '%s',\n"
6709                                 "does not exist.  Please create this directory or "
6710                                 "correct your PORTAGE_TMPDIR setting.\n") % mysettings.get("PORTAGE_TMPDIR", ""), noiselevel=-1)
6711                         return 1
6712                 
6713                 # as some people use a separate PORTAGE_TMPDIR mount
6714                 # we prefer that as the checks below would otherwise be pointless
6715                 # for those people.
6716                 if os.path.exists(os.path.join(mysettings["PORTAGE_TMPDIR"], "portage")):
6717                         checkdir = os.path.join(mysettings["PORTAGE_TMPDIR"], "portage")
6718                 else:
6719                         checkdir = mysettings["PORTAGE_TMPDIR"]
6720
6721                 if not os.access(checkdir, os.W_OK):
6722                         writemsg(_("%s is not writable.\n"
6723                                 "Likely cause is that you've mounted it as readonly.\n") % checkdir,
6724                                 noiselevel=-1)
6725                         return 1
6726                 else:
6727                         from tempfile import NamedTemporaryFile
6728                         fd = NamedTemporaryFile(prefix="exectest-", dir=checkdir)
6729                         os.chmod(fd.name, 0o755)
6730                         if not os.access(fd.name, os.X_OK):
6731                                 writemsg(_("Can not execute files in %s\n"
6732                                         "Likely cause is that you've mounted it with one of the\n"
6733                                         "following mount options: 'noexec', 'user', 'users'\n\n"
6734                                         "Please make sure that portage can execute files in this directory.\n") % checkdir,
6735                                         noiselevel=-1)
6736                                 fd.close()
6737                                 return 1
6738                         fd.close()
6739                 del checkdir
6740
6741                 if mydo == "unmerge":
6742                         return unmerge(mysettings["CATEGORY"],
6743                                 mysettings["PF"], myroot, mysettings, vartree=vartree)
6744
6745                 # Build directory creation isn't required for any of these.
6746                 # In the fetch phase, the directory is needed only for RESTRICT=fetch
6747                 # in order to satisfy the sane $PWD requirement (from bug #239560)
6748                 # when pkg_nofetch is spawned.
6749                 have_build_dirs = False
6750                 if not parallel_fetchonly and \
6751                         mydo not in ('digest', 'help', 'manifest') and \
6752                         not (mydo == 'fetch' and 'fetch' not in restrict):
6753                         mystatus = prepare_build_dirs(myroot, mysettings, cleanup)
6754                         if mystatus:
6755                                 return mystatus
6756                         have_build_dirs = True
6757
6758                         # emerge handles logging externally
6759                         if not returnpid:
6760                                 # PORTAGE_LOG_FILE is set by the
6761                                 # above prepare_build_dirs() call.
6762                                 logfile = mysettings.get("PORTAGE_LOG_FILE")
6763
6764                 if have_build_dirs:
6765                         env_file = os.path.join(mysettings["T"], "environment")
6766                         env_stat = None
6767                         saved_env = None
6768                         try:
6769                                 env_stat = os.stat(env_file)
6770                         except OSError as e:
6771                                 if e.errno != errno.ENOENT:
6772                                         raise
6773                                 del e
6774                         if not env_stat:
6775                                 saved_env = os.path.join(
6776                                         os.path.dirname(myebuild), "environment.bz2")
6777                                 if not os.path.isfile(saved_env):
6778                                         saved_env = None
6779                         if saved_env:
6780                                 retval = os.system(
6781                                         "bzip2 -dc %s > %s" % \
6782                                         (_shell_quote(saved_env),
6783                                         _shell_quote(env_file)))
6784                                 try:
6785                                         env_stat = os.stat(env_file)
6786                                 except OSError as e:
6787                                         if e.errno != errno.ENOENT:
6788                                                 raise
6789                                         del e
6790                                 if os.WIFEXITED(retval) and \
6791                                         os.WEXITSTATUS(retval) == os.EX_OK and \
6792                                         env_stat and env_stat.st_size > 0:
6793                                         # This is a signal to ebuild.sh, so that it knows to filter
6794                                         # out things like SANDBOX_{DENY,PREDICT,READ,WRITE} that
6795                                         # would be preserved between normal phases.
6796                                         open(_unicode_encode(env_file + '.raw'), 'w')
6797                                 else:
6798                                         writemsg(_("!!! Error extracting saved "
6799                                                 "environment: '%s'\n") % \
6800                                                 saved_env, noiselevel=-1)
6801                                         try:
6802                                                 os.unlink(env_file)
6803                                         except OSError as e:
6804                                                 if e.errno != errno.ENOENT:
6805                                                         raise
6806                                                 del e
6807                                         env_stat = None
6808                         if env_stat:
6809                                 pass
6810                         else:
6811                                 for var in ("ARCH", ):
6812                                         value = mysettings.get(var)
6813                                         if value and value.strip():
6814                                                 continue
6815                                         msg = _("%(var)s is not set... "
6816                                                 "Are you missing the '%(configroot)setc/make.profile' symlink? "
6817                                                 "Is the symlink correct? "
6818                                                 "Is your portage tree complete?") % \
6819                                                 {"var": var, "configroot": mysettings["PORTAGE_CONFIGROOT"]}
6820                                         from portage.elog.messages import eerror
6821                                         from textwrap import wrap
6822                                         for line in wrap(msg, 70):
6823                                                 eerror(line, phase="setup", key=mysettings.mycpv)
6824                                         from portage.elog import elog_process
6825                                         elog_process(mysettings.mycpv, mysettings)
6826                                         return 1
6827                         del env_file, env_stat, saved_env
6828                         _doebuild_exit_status_unlink(
6829                                 mysettings.get("EBUILD_EXIT_STATUS_FILE"))
6830                 else:
6831                         mysettings.pop("EBUILD_EXIT_STATUS_FILE", None)
6832
6833                 # if any of these are being called, handle them -- running them out of
6834                 # the sandbox -- and stop now.
6835                 if mydo == "help":
6836                         return spawn(_shell_quote(ebuild_sh_binary) + " " + mydo,
6837                                 mysettings, debug=debug, free=1, logfile=logfile)
6838                 elif mydo == "setup":
6839                         retval = spawn(
6840                                 _shell_quote(ebuild_sh_binary) + " " + mydo, mysettings,
6841                                 debug=debug, free=1, logfile=logfile, fd_pipes=fd_pipes,
6842                                 returnpid=returnpid)
6843                         if returnpid:
6844                                 return retval
6845                         retval = exit_status_check(retval)
6846                         if secpass >= 2:
6847                                 """ Privileged phases may have left files that need to be made
6848                                 writable to a less privileged user."""
6849                                 apply_recursive_permissions(mysettings["T"],
6850                                         uid=portage_uid, gid=portage_gid, dirmode=0o70, dirmask=0,
6851                                         filemode=0o60, filemask=0)
6852                         return retval
6853                 elif mydo == "preinst":
6854                         phase_retval = spawn(
6855                                 _shell_quote(ebuild_sh_binary) + " " + mydo,
6856                                 mysettings, debug=debug, free=1, logfile=logfile,
6857                                 fd_pipes=fd_pipes, returnpid=returnpid)
6858
6859                         if returnpid:
6860                                 return phase_retval
6861
6862                         phase_retval = exit_status_check(phase_retval)
6863                         if phase_retval == os.EX_OK:
6864                                 _doebuild_exit_status_unlink(
6865                                         mysettings.get("EBUILD_EXIT_STATUS_FILE"))
6866                                 mysettings.pop("EBUILD_PHASE", None)
6867                                 phase_retval = spawn(
6868                                         " ".join(_post_pkg_preinst_cmd(mysettings)),
6869                                         mysettings, debug=debug, free=1, logfile=logfile)
6870                                 phase_retval = exit_status_check(phase_retval)
6871                                 if phase_retval != os.EX_OK:
6872                                         writemsg(_("!!! post preinst failed; exiting.\n"),
6873                                                 noiselevel=-1)
6874                         return phase_retval
6875                 elif mydo == "postinst":
6876                         phase_retval = spawn(
6877                                 _shell_quote(ebuild_sh_binary) + " " + mydo,
6878                                 mysettings, debug=debug, free=1, logfile=logfile,
6879                                 fd_pipes=fd_pipes, returnpid=returnpid)
6880
6881                         if returnpid:
6882                                 return phase_retval
6883
6884                         phase_retval = exit_status_check(phase_retval)
6885                         if phase_retval == os.EX_OK:
6886                                 _doebuild_exit_status_unlink(
6887                                         mysettings.get("EBUILD_EXIT_STATUS_FILE"))
6888                                 mysettings.pop("EBUILD_PHASE", None)
6889                                 phase_retval = spawn(" ".join(_post_pkg_postinst_cmd(mysettings)),
6890                                         mysettings, debug=debug, free=1, logfile=logfile)
6891                                 phase_retval = exit_status_check(phase_retval)
6892                                 if phase_retval != os.EX_OK:
6893                                         writemsg(_("!!! post postinst failed; exiting.\n"),
6894                                                 noiselevel=-1)
6895                         return phase_retval
6896                 elif mydo in ("prerm", "postrm", "config", "info"):
6897                         retval =  spawn(
6898                                 _shell_quote(ebuild_sh_binary) + " " + mydo,
6899                                 mysettings, debug=debug, free=1, logfile=logfile,
6900                                 fd_pipes=fd_pipes, returnpid=returnpid)
6901
6902                         if returnpid:
6903                                 return retval
6904
6905                         retval = exit_status_check(retval)
6906                         return retval
6907
6908                 mycpv = "/".join((mysettings["CATEGORY"], mysettings["PF"]))
6909
6910                 emerge_skip_distfiles = returnpid
6911                 emerge_skip_digest = returnpid
6912                 # Only try and fetch the files if we are going to need them ...
6913                 # otherwise, if user has FEATURES=noauto and they run `ebuild clean
6914                 # unpack compile install`, we will try and fetch 4 times :/
6915                 need_distfiles = not emerge_skip_distfiles and \
6916                         (mydo in ("fetch", "unpack") or \
6917                         mydo not in ("digest", "manifest") and "noauto" not in features)
6918                 alist = mysettings.configdict["pkg"].get("A")
6919                 aalist = mysettings.configdict["pkg"].get("AA")
6920                 if need_distfiles or alist is None or aalist is None:
6921                         # Make sure we get the correct tree in case there are overlays.
6922                         mytree = os.path.realpath(
6923                                 os.path.dirname(os.path.dirname(mysettings["O"])))
6924                         useflags = mysettings["PORTAGE_USE"].split()
6925                         try:
6926                                 alist = mydbapi.getFetchMap(mycpv, useflags=useflags,
6927                                         mytree=mytree)
6928                                 aalist = mydbapi.getFetchMap(mycpv, mytree=mytree)
6929                         except portage.exception.InvalidDependString as e:
6930                                 writemsg("!!! %s\n" % str(e), noiselevel=-1)
6931                                 writemsg(_("!!! Invalid SRC_URI for '%s'.\n") % mycpv,
6932                                         noiselevel=-1)
6933                                 del e
6934                                 return 1
6935                         mysettings.configdict["pkg"]["A"] = " ".join(alist)
6936                         mysettings.configdict["pkg"]["AA"] = " ".join(aalist)
6937                 else:
6938                         alist = set(alist.split())
6939                         aalist = set(aalist.split())
6940                 if ("mirror" in features) or fetchall:
6941                         fetchme = aalist
6942                         checkme = aalist
6943                 else:
6944                         fetchme = alist
6945                         checkme = alist
6946
6947                 if mydo == "fetch":
6948                         # Files are already checked inside fetch(),
6949                         # so do not check them again.
6950                         checkme = []
6951
6952                 if not emerge_skip_distfiles and \
6953                         need_distfiles and not fetch(
6954                         fetchme, mysettings, listonly=listonly, fetchonly=fetchonly):
6955                         return 1
6956
6957                 if mydo == "fetch" and listonly:
6958                         return 0
6959
6960                 try:
6961                         if mydo == "manifest":
6962                                 return not digestgen(aalist, mysettings, overwrite=1,
6963                                         manifestonly=1, myportdb=mydbapi)
6964                         elif mydo == "digest":
6965                                 return not digestgen(aalist, mysettings, overwrite=1,
6966                                         myportdb=mydbapi)
6967                         elif mydo != 'fetch' and not emerge_skip_digest and \
6968                                 "digest" in mysettings.features:
6969                                 # Don't do this when called by emerge or when called just
6970                                 # for fetch (especially parallel-fetch) since it's not needed
6971                                 # and it can interfere with parallel tasks.
6972                                 digestgen(aalist, mysettings, overwrite=0, myportdb=mydbapi)
6973                 except portage.exception.PermissionDenied as e:
6974                         writemsg(_("!!! Permission Denied: %s\n") % (e,), noiselevel=-1)
6975                         if mydo in ("digest", "manifest"):
6976                                 return 1
6977
6978                 # See above comment about fetching only when needed
6979                 if not emerge_skip_distfiles and \
6980                         not digestcheck(checkme, mysettings, "strict" in features):
6981                         return 1
6982
6983                 if mydo == "fetch":
6984                         return 0
6985
6986                 # remove PORTAGE_ACTUAL_DISTDIR once cvs/svn is supported via SRC_URI
6987                 if (mydo != "setup" and "noauto" not in features) or mydo == "unpack":
6988                         orig_distdir = mysettings["DISTDIR"]
6989                         mysettings["PORTAGE_ACTUAL_DISTDIR"] = orig_distdir
6990                         edpath = mysettings["DISTDIR"] = \
6991                                 os.path.join(mysettings["PORTAGE_BUILDDIR"], "distdir")
6992                         portage.util.ensure_dirs(edpath, gid=portage_gid, mode=0o755)
6993
6994                         # Remove any unexpected files or directories.
6995                         for x in os.listdir(edpath):
6996                                 symlink_path = os.path.join(edpath, x)
6997                                 st = os.lstat(symlink_path)
6998                                 if x in alist and stat.S_ISLNK(st.st_mode):
6999                                         continue
7000                                 if stat.S_ISDIR(st.st_mode):
7001                                         shutil.rmtree(symlink_path)
7002                                 else:
7003                                         os.unlink(symlink_path)
7004
7005                         # Check for existing symlinks and recreate if necessary.
7006                         for x in alist:
7007                                 symlink_path = os.path.join(edpath, x)
7008                                 target = os.path.join(orig_distdir, x)
7009                                 try:
7010                                         link_target = os.readlink(symlink_path)
7011                                 except OSError:
7012                                         os.symlink(target, symlink_path)
7013                                 else:
7014                                         if link_target != target:
7015                                                 os.unlink(symlink_path)
7016                                                 os.symlink(target, symlink_path)
7017
7018                 #initial dep checks complete; time to process main commands
7019
7020                 restrict = mysettings["PORTAGE_RESTRICT"].split()
7021                 nosandbox = (("userpriv" in features) and \
7022                         ("usersandbox" not in features) and \
7023                         "userpriv" not in restrict and \
7024                         "nouserpriv" not in restrict)
7025                 if nosandbox and ("userpriv" not in features or \
7026                         "userpriv" in restrict or \
7027                         "nouserpriv" in restrict):
7028                         nosandbox = ("sandbox" not in features and \
7029                                 "usersandbox" not in features)
7030
7031                 sesandbox = mysettings.selinux_enabled() and \
7032                         "sesandbox" in mysettings.features
7033
7034                 droppriv = "userpriv" in mysettings.features and \
7035                         "userpriv" not in restrict and \
7036                         secpass >= 2
7037
7038                 fakeroot = "fakeroot" in mysettings.features
7039
7040                 ebuild_sh = _shell_quote(ebuild_sh_binary) + " %s"
7041                 misc_sh = _shell_quote(misc_sh_binary) + " dyn_%s"
7042
7043                 # args are for the to spawn function
7044                 actionmap = {
7045 "pretend":  {"cmd":ebuild_sh, "args":{"droppriv":0,        "free":1,         "sesandbox":0,         "fakeroot":0}},
7046 "setup":    {"cmd":ebuild_sh, "args":{"droppriv":0,        "free":1,         "sesandbox":0,         "fakeroot":0}},
7047 "unpack":   {"cmd":ebuild_sh, "args":{"droppriv":droppriv, "free":0,         "sesandbox":sesandbox, "fakeroot":0}},
7048 "prepare":  {"cmd":ebuild_sh, "args":{"droppriv":droppriv, "free":0,         "sesandbox":sesandbox, "fakeroot":0}},
7049 "configure":{"cmd":ebuild_sh, "args":{"droppriv":droppriv, "free":nosandbox, "sesandbox":sesandbox, "fakeroot":0}},
7050 "compile":  {"cmd":ebuild_sh, "args":{"droppriv":droppriv, "free":nosandbox, "sesandbox":sesandbox, "fakeroot":0}},
7051 "test":     {"cmd":ebuild_sh, "args":{"droppriv":droppriv, "free":nosandbox, "sesandbox":sesandbox, "fakeroot":0}},
7052 "install":  {"cmd":ebuild_sh, "args":{"droppriv":0,        "free":0,         "sesandbox":sesandbox, "fakeroot":fakeroot}},
7053 "rpm":      {"cmd":misc_sh,   "args":{"droppriv":0,        "free":0,         "sesandbox":0,         "fakeroot":fakeroot}},
7054 "package":  {"cmd":misc_sh,   "args":{"droppriv":0,        "free":0,         "sesandbox":0,         "fakeroot":fakeroot}},
7055                 }
7056
7057                 # merge the deps in so we have again a 'full' actionmap
7058                 # be glad when this can die.
7059                 for x in actionmap:
7060                         if len(actionmap_deps.get(x, [])):
7061                                 actionmap[x]["dep"] = ' '.join(actionmap_deps[x])
7062
7063                 if mydo in actionmap:
7064                         if mydo == "package":
7065                                 # Make sure the package directory exists before executing
7066                                 # this phase. This can raise PermissionDenied if
7067                                 # the current user doesn't have write access to $PKGDIR.
7068                                 parent_dir = os.path.join(mysettings["PKGDIR"],
7069                                         mysettings["CATEGORY"])
7070                                 portage.util.ensure_dirs(parent_dir)
7071                                 if not os.access(parent_dir, os.W_OK):
7072                                         raise portage.exception.PermissionDenied(
7073                                                 "access('%s', os.W_OK)" % parent_dir)
7074                         retval = spawnebuild(mydo,
7075                                 actionmap, mysettings, debug, logfile=logfile,
7076                                 fd_pipes=fd_pipes, returnpid=returnpid)
7077                 elif mydo=="qmerge":
7078                         # check to ensure install was run.  this *only* pops up when users
7079                         # forget it and are using ebuild
7080                         if not os.path.exists(
7081                                 os.path.join(mysettings["PORTAGE_BUILDDIR"], ".installed")):
7082                                 writemsg(_("!!! mydo=qmerge, but the install phase has not been run\n"),
7083                                         noiselevel=-1)
7084                                 return 1
7085                         # qmerge is a special phase that implies noclean.
7086                         if "noclean" not in mysettings.features:
7087                                 mysettings.features.add("noclean")
7088                         #qmerge is specifically not supposed to do a runtime dep check
7089                         retval = merge(
7090                                 mysettings["CATEGORY"], mysettings["PF"], mysettings["D"],
7091                                 os.path.join(mysettings["PORTAGE_BUILDDIR"], "build-info"),
7092                                 myroot, mysettings, myebuild=mysettings["EBUILD"], mytree=tree,
7093                                 mydbapi=mydbapi, vartree=vartree, prev_mtimes=prev_mtimes)
7094                 elif mydo=="merge":
7095                         retval = spawnebuild("install", actionmap, mysettings, debug,
7096                                 alwaysdep=1, logfile=logfile, fd_pipes=fd_pipes,
7097                                 returnpid=returnpid)
7098                         retval = exit_status_check(retval)
7099                         if retval != os.EX_OK:
7100                                 # The merge phase handles this already.  Callers don't know how
7101                                 # far this function got, so we have to call elog_process() here
7102                                 # so that it's only called once.
7103                                 from portage.elog import elog_process
7104                                 elog_process(mysettings.mycpv, mysettings)
7105                         if retval == os.EX_OK:
7106                                 retval = merge(mysettings["CATEGORY"], mysettings["PF"],
7107                                         mysettings["D"], os.path.join(mysettings["PORTAGE_BUILDDIR"],
7108                                         "build-info"), myroot, mysettings,
7109                                         myebuild=mysettings["EBUILD"], mytree=tree, mydbapi=mydbapi,
7110                                         vartree=vartree, prev_mtimes=prev_mtimes)
7111                 else:
7112                         print(_("!!! Unknown mydo: %s") % mydo)
7113                         return 1
7114
7115                 return retval
7116
7117         finally:
7118
7119                 if tmpdir:
7120                         mysettings["PORTAGE_TMPDIR"] = tmpdir_orig
7121                         shutil.rmtree(tmpdir)
7122                 if builddir_lock:
7123                         portage.locks.unlockdir(builddir_lock)
7124
7125                 # Make sure that DISTDIR is restored to it's normal value before we return!
7126                 if "PORTAGE_ACTUAL_DISTDIR" in mysettings:
7127                         mysettings["DISTDIR"] = mysettings["PORTAGE_ACTUAL_DISTDIR"]
7128                         del mysettings["PORTAGE_ACTUAL_DISTDIR"]
7129
7130                 if logfile:
7131                         try:
7132                                 if os.stat(logfile).st_size == 0:
7133                                         os.unlink(logfile)
7134                         except OSError:
7135                                 pass
7136
7137                 if mydo in ("digest", "manifest", "help"):
7138                         # If necessary, depend phase has been triggered by aux_get calls
7139                         # and the exemption is no longer needed.
7140                         _doebuild_manifest_exempt_depend -= 1
7141
7142 def _validate_deps(mysettings, myroot, mydo, mydbapi):
7143
7144         invalid_dep_exempt_phases = \
7145                 set(["clean", "cleanrm", "help", "prerm", "postrm"])
7146         dep_keys = ["DEPEND", "RDEPEND", "PDEPEND"]
7147         misc_keys = ["LICENSE", "PROPERTIES", "PROVIDE", "RESTRICT", "SRC_URI"]
7148         other_keys = ["SLOT"]
7149         all_keys = dep_keys + misc_keys + other_keys
7150         metadata = dict(zip(all_keys,
7151                 mydbapi.aux_get(mysettings.mycpv, all_keys)))
7152
7153         class FakeTree(object):
7154                 def __init__(self, mydb):
7155                         self.dbapi = mydb
7156         dep_check_trees = {myroot:{}}
7157         dep_check_trees[myroot]["porttree"] = \
7158                 FakeTree(fakedbapi(settings=mysettings))
7159
7160         msgs = []
7161         for dep_type in dep_keys:
7162                 mycheck = dep_check(metadata[dep_type], None, mysettings,
7163                         myuse="all", myroot=myroot, trees=dep_check_trees)
7164                 if not mycheck[0]:
7165                         msgs.append("  %s: %s\n    %s\n" % (
7166                                 dep_type, metadata[dep_type], mycheck[1]))
7167
7168         for k in misc_keys:
7169                 try:
7170                         portage.dep.use_reduce(
7171                                 portage.dep.paren_reduce(metadata[k]), matchall=True)
7172                 except portage.exception.InvalidDependString as e:
7173                         msgs.append("  %s: %s\n    %s\n" % (
7174                                 k, metadata[k], str(e)))
7175
7176         if not metadata["SLOT"]:
7177                 msgs.append(_("  SLOT is undefined\n"))
7178
7179         if msgs:
7180                 portage.util.writemsg_level(_("Error(s) in metadata for '%s':\n") % \
7181                         (mysettings.mycpv,), level=logging.ERROR, noiselevel=-1)
7182                 for x in msgs:
7183                         portage.util.writemsg_level(x,
7184                                 level=logging.ERROR, noiselevel=-1)
7185                 if mydo not in invalid_dep_exempt_phases:
7186                         return 1
7187
7188         return os.EX_OK
7189
7190 expandcache={}
7191
7192 def _movefile(src, dest, **kwargs):
7193         """Calls movefile and raises a PortageException if an error occurs."""
7194         if movefile(src, dest, **kwargs) is None:
7195                 raise portage.exception.PortageException(
7196                         "mv '%s' '%s'" % (src, dest))
7197
7198 def movefile(src, dest, newmtime=None, sstat=None, mysettings=None,
7199                 hardlink_candidates=None, encoding=_encodings['fs']):
7200         """moves a file from src to dest, preserving all permissions and attributes; mtime will
7201         be preserved even when moving across filesystems.  Returns true on success and false on
7202         failure.  Move is atomic."""
7203         #print "movefile("+str(src)+","+str(dest)+","+str(newmtime)+","+str(sstat)+")"
7204
7205         if mysettings is None:
7206                 global settings
7207                 mysettings = settings
7208
7209         selinux_enabled = mysettings.selinux_enabled()
7210         if selinux_enabled:
7211                 selinux = _unicode_module_wrapper(_selinux, encoding=encoding)
7212
7213         lchown = _unicode_func_wrapper(data.lchown, encoding=encoding)
7214         os = _unicode_module_wrapper(_os,
7215                 encoding=encoding, overrides=_os_overrides)
7216         shutil = _unicode_module_wrapper(_shutil, encoding=encoding)
7217
7218         try:
7219                 if not sstat:
7220                         sstat=os.lstat(src)
7221
7222         except SystemExit as e:
7223                 raise
7224         except Exception as e:
7225                 print(_("!!! Stating source file failed... movefile()"))
7226                 print("!!!",e)
7227                 return None
7228
7229         destexists=1
7230         try:
7231                 dstat=os.lstat(dest)
7232         except (OSError, IOError):
7233                 dstat=os.lstat(os.path.dirname(dest))
7234                 destexists=0
7235
7236         if bsd_chflags:
7237                 if destexists and dstat.st_flags != 0:
7238                         bsd_chflags.lchflags(dest, 0)
7239                 # Use normal stat/chflags for the parent since we want to
7240                 # follow any symlinks to the real parent directory.
7241                 pflags = os.stat(os.path.dirname(dest)).st_flags
7242                 if pflags != 0:
7243                         bsd_chflags.chflags(os.path.dirname(dest), 0)
7244
7245         if destexists:
7246                 if stat.S_ISLNK(dstat[stat.ST_MODE]):
7247                         try:
7248                                 os.unlink(dest)
7249                                 destexists=0
7250                         except SystemExit as e:
7251                                 raise
7252                         except Exception as e:
7253                                 pass
7254
7255         if stat.S_ISLNK(sstat[stat.ST_MODE]):
7256                 try:
7257                         target=os.readlink(src)
7258                         if mysettings and mysettings["D"]:
7259                                 if target.find(mysettings["D"])==0:
7260                                         target=target[len(mysettings["D"]):]
7261                         if destexists and not stat.S_ISDIR(dstat[stat.ST_MODE]):
7262                                 os.unlink(dest)
7263                         if selinux_enabled:
7264                                 selinux.symlink(target, dest, src)
7265                         else:
7266                                 os.symlink(target,dest)
7267                         lchown(dest,sstat[stat.ST_UID],sstat[stat.ST_GID])
7268                         # utime() only works on the target of a symlink, so it's not
7269                         # possible to perserve mtime on symlinks.
7270                         return os.lstat(dest)[stat.ST_MTIME]
7271                 except SystemExit as e:
7272                         raise
7273                 except Exception as e:
7274                         print(_("!!! failed to properly create symlink:"))
7275                         print("!!!",dest,"->",target)
7276                         print("!!!",e)
7277                         return None
7278
7279         hardlinked = False
7280         # Since identical files might be merged to multiple filesystems,
7281         # so os.link() calls might fail for some paths, so try them all.
7282         # For atomic replacement, first create the link as a temp file
7283         # and them use os.rename() to replace the destination.
7284         if hardlink_candidates:
7285                 head, tail = os.path.split(dest)
7286                 hardlink_tmp = os.path.join(head, ".%s._portage_merge_.%s" % \
7287                         (tail, os.getpid()))
7288                 try:
7289                         os.unlink(hardlink_tmp)
7290                 except OSError as e:
7291                         if e.errno != errno.ENOENT:
7292                                 writemsg(_("!!! Failed to remove hardlink temp file: %s\n") % \
7293                                         (hardlink_tmp,), noiselevel=-1)
7294                                 writemsg("!!! %s\n" % (e,), noiselevel=-1)
7295                                 return None
7296                         del e
7297                 for hardlink_src in hardlink_candidates:
7298                         try:
7299                                 os.link(hardlink_src, hardlink_tmp)
7300                         except OSError:
7301                                 continue
7302                         else:
7303                                 try:
7304                                         os.rename(hardlink_tmp, dest)
7305                                 except OSError as e:
7306                                         writemsg(_("!!! Failed to rename %s to %s\n") % \
7307                                                 (hardlink_tmp, dest), noiselevel=-1)
7308                                         writemsg("!!! %s\n" % (e,), noiselevel=-1)
7309                                         return None
7310                                 hardlinked = True
7311                                 break
7312
7313         renamefailed=1
7314         if hardlinked:
7315                 renamefailed = False
7316         if not hardlinked and (selinux_enabled or sstat.st_dev == dstat.st_dev):
7317                 try:
7318                         if selinux_enabled:
7319                                 ret = selinux.rename(src, dest)
7320                         else:
7321                                 ret=os.rename(src,dest)
7322                         renamefailed=0
7323                 except SystemExit as e:
7324                         raise
7325                 except Exception as e:
7326                         if e[0]!=errno.EXDEV:
7327                                 # Some random error.
7328                                 print(_("!!! Failed to move %(src)s to %(dest)s") % {"src": src, "dest": dest})
7329                                 print("!!!",e)
7330                                 return None
7331                         # Invalid cross-device-link 'bind' mounted or actually Cross-Device
7332         if renamefailed:
7333                 didcopy=0
7334                 if stat.S_ISREG(sstat[stat.ST_MODE]):
7335                         try: # For safety copy then move it over.
7336                                 if selinux_enabled:
7337                                         selinux.copyfile(src, dest + "#new")
7338                                         selinux.rename(dest + "#new", dest)
7339                                 else:
7340                                         shutil.copyfile(src,dest+"#new")
7341                                         os.rename(dest+"#new",dest)
7342                                 didcopy=1
7343                         except SystemExit as e:
7344                                 raise
7345                         except Exception as e:
7346                                 print(_('!!! copy %(src)s -> %(dest)s failed.') % {"src": src, "dest": dest})
7347                                 print("!!!",e)
7348                                 return None
7349                 else:
7350                         #we don't yet handle special, so we need to fall back to /bin/mv
7351                         a = process.spawn([MOVE_BINARY, '-f', src, dest], env=os.environ)
7352                         if a != os.EX_OK:
7353                                 writemsg(_("!!! Failed to move special file:\n"), noiselevel=-1)
7354                                 writemsg(_("!!! '%(src)s' to '%(dest)s'\n") % \
7355                                         {"src": _unicode_decode(src, encoding=encoding),
7356                                         "dest": _unicode_decode(dest, encoding=encoding)}, noiselevel=-1)
7357                                 writemsg("!!! %s\n" % a, noiselevel=-1)
7358                                 return None # failure
7359                 try:
7360                         if didcopy:
7361                                 if stat.S_ISLNK(sstat[stat.ST_MODE]):
7362                                         lchown(dest,sstat[stat.ST_UID],sstat[stat.ST_GID])
7363                                 else:
7364                                         os.chown(dest,sstat[stat.ST_UID],sstat[stat.ST_GID])
7365                                 os.chmod(dest, stat.S_IMODE(sstat[stat.ST_MODE])) # Sticky is reset on chown
7366                                 os.unlink(src)
7367                 except SystemExit as e:
7368                         raise
7369                 except Exception as e:
7370                         print(_("!!! Failed to chown/chmod/unlink in movefile()"))
7371                         print("!!!",dest)
7372                         print("!!!",e)
7373                         return None
7374
7375         try:
7376                 if hardlinked:
7377                         newmtime = long(os.stat(dest).st_mtime)
7378                 else:
7379                         if newmtime is not None:
7380                                 os.utime(dest, (newmtime, newmtime))
7381                         else:
7382                                 os.utime(dest, (sstat.st_atime, sstat.st_mtime))
7383                                 newmtime = long(sstat.st_mtime)
7384         except OSError:
7385                 # The utime can fail here with EPERM even though the move succeeded.
7386                 # Instead of failing, use stat to return the mtime if possible.
7387                 try:
7388                         newmtime = long(os.stat(dest).st_mtime)
7389                 except OSError as e:
7390                         writemsg(_("!!! Failed to stat in movefile()\n"), noiselevel=-1)
7391                         writemsg("!!! %s\n" % dest, noiselevel=-1)
7392                         writemsg("!!! %s\n" % str(e), noiselevel=-1)
7393                         return None
7394
7395         if bsd_chflags:
7396                 # Restore the flags we saved before moving
7397                 if pflags:
7398                         bsd_chflags.chflags(os.path.dirname(dest), pflags)
7399
7400         return newmtime
7401
7402 def merge(mycat, mypkg, pkgloc, infloc, myroot, mysettings, myebuild=None,
7403         mytree=None, mydbapi=None, vartree=None, prev_mtimes=None, blockers=None,
7404         scheduler=None):
7405         if not os.access(myroot, os.W_OK):
7406                 writemsg(_("Permission denied: access('%s', W_OK)\n") % myroot,
7407                         noiselevel=-1)
7408                 return errno.EACCES
7409         mylink = dblink(mycat, mypkg, myroot, mysettings, treetype=mytree,
7410                 vartree=vartree, blockers=blockers, scheduler=scheduler)
7411         return mylink.merge(pkgloc, infloc, myroot, myebuild,
7412                 mydbapi=mydbapi, prev_mtimes=prev_mtimes)
7413
7414 def unmerge(cat, pkg, myroot, mysettings, mytrimworld=1, vartree=None,
7415         ldpath_mtimes=None, scheduler=None):
7416         mylink = dblink(cat, pkg, myroot, mysettings, treetype="vartree",
7417                 vartree=vartree, scheduler=scheduler)
7418         vartree = mylink.vartree
7419         try:
7420                 mylink.lockdb()
7421                 if mylink.exists():
7422                         vartree.dbapi.plib_registry.load()
7423                         vartree.dbapi.plib_registry.pruneNonExisting()
7424                         retval = mylink.unmerge(trimworld=mytrimworld, cleanup=1,
7425                                 ldpath_mtimes=ldpath_mtimes)
7426                         if retval == os.EX_OK:
7427                                 mylink.delete()
7428                         return retval
7429                 return os.EX_OK
7430         finally:
7431                 vartree.dbapi.linkmap._clear_cache()
7432                 mylink.unlockdb()
7433
7434 def dep_virtual(mysplit, mysettings):
7435         "Does virtual dependency conversion"
7436         newsplit=[]
7437         myvirtuals = mysettings.getvirtuals()
7438         for x in mysplit:
7439                 if isinstance(x, list):
7440                         newsplit.append(dep_virtual(x, mysettings))
7441                 else:
7442                         mykey=dep_getkey(x)
7443                         mychoices = myvirtuals.get(mykey, None)
7444                         if mychoices:
7445                                 if len(mychoices) == 1:
7446                                         a = x.replace(mykey, dep_getkey(mychoices[0]), 1)
7447                                 else:
7448                                         if x[0]=="!":
7449                                                 # blocker needs "and" not "or(||)".
7450                                                 a=[]
7451                                         else:
7452                                                 a=['||']
7453                                         for y in mychoices:
7454                                                 a.append(x.replace(mykey, dep_getkey(y), 1))
7455                                 newsplit.append(a)
7456                         else:
7457                                 newsplit.append(x)
7458         return newsplit
7459
7460 def _expand_new_virtuals(mysplit, edebug, mydbapi, mysettings, myroot="/",
7461         trees=None, use_mask=None, use_force=None, **kwargs):
7462         """Recursively expand new-style virtuals so as to collapse one or more
7463         levels of indirection.  In dep_zapdeps, new-style virtuals will be assigned
7464         zero cost regardless of whether or not they are currently installed. Virtual
7465         blockers are supported but only when the virtual expands to a single
7466         atom because it wouldn't necessarily make sense to block all the components
7467         of a compound virtual.  When more than one new-style virtual is matched,
7468         the matches are sorted from highest to lowest versions and the atom is
7469         expanded to || ( highest match ... lowest match )."""
7470         newsplit = []
7471         mytrees = trees[myroot]
7472         portdb = mytrees["porttree"].dbapi
7473         atom_graph = mytrees.get("atom_graph")
7474         parent = mytrees.get("parent")
7475         virt_parent = mytrees.get("virt_parent")
7476         graph_parent = None
7477         eapi = None
7478         if parent is not None:
7479                 if virt_parent is not None:
7480                         graph_parent = virt_parent
7481                         eapi = virt_parent[0].metadata['EAPI']
7482                 else:
7483                         graph_parent = parent
7484                         eapi = parent.metadata["EAPI"]
7485         repoman = not mysettings.local_config
7486         if kwargs["use_binaries"]:
7487                 portdb = trees[myroot]["bintree"].dbapi
7488         myvirtuals = mysettings.getvirtuals()
7489         pprovideddict = mysettings.pprovideddict
7490         myuse = kwargs["myuse"]
7491         for x in mysplit:
7492                 if x == "||":
7493                         newsplit.append(x)
7494                         continue
7495                 elif isinstance(x, list):
7496                         newsplit.append(_expand_new_virtuals(x, edebug, mydbapi,
7497                                 mysettings, myroot=myroot, trees=trees, use_mask=use_mask,
7498                                 use_force=use_force, **kwargs))
7499                         continue
7500
7501                 if not isinstance(x, portage.dep.Atom):
7502                         try:
7503                                 x = portage.dep.Atom(x)
7504                         except portage.exception.InvalidAtom:
7505                                 if portage.dep._dep_check_strict:
7506                                         raise portage.exception.ParseError(
7507                                                 _("invalid atom: '%s'") % x)
7508                                 else:
7509                                         # Only real Atom instances are allowed past this point.
7510                                         continue
7511                         else:
7512                                 if x.blocker and x.blocker.overlap.forbid and \
7513                                         eapi in ("0", "1") and portage.dep._dep_check_strict:
7514                                         raise portage.exception.ParseError(
7515                                                 _("invalid atom: '%s'") % (x,))
7516                                 if x.use and eapi in ("0", "1") and \
7517                                         portage.dep._dep_check_strict:
7518                                         raise portage.exception.ParseError(
7519                                                 _("invalid atom: '%s'") % (x,))
7520
7521                 if repoman and x.use and x.use.conditional:
7522                         evaluated_atom = portage.dep.remove_slot(x)
7523                         if x.slot:
7524                                 evaluated_atom += ":%s" % x.slot
7525                         evaluated_atom += str(x.use._eval_qa_conditionals(
7526                                 use_mask, use_force))
7527                         x = portage.dep.Atom(evaluated_atom)
7528
7529                 if not repoman and \
7530                         myuse is not None and isinstance(x, portage.dep.Atom) and x.use:
7531                         if x.use.conditional:
7532                                 evaluated_atom = portage.dep.remove_slot(x)
7533                                 if x.slot:
7534                                         evaluated_atom += ":%s" % x.slot
7535                                 evaluated_atom += str(x.use.evaluate_conditionals(myuse))
7536                                 x = portage.dep.Atom(evaluated_atom)
7537
7538                 mykey = x.cp
7539                 if not mykey.startswith("virtual/"):
7540                         newsplit.append(x)
7541                         if atom_graph is not None:
7542                                 atom_graph.add(x, graph_parent)
7543                         continue
7544                 mychoices = myvirtuals.get(mykey, [])
7545                 if x.blocker:
7546                         # Virtual blockers are no longer expanded here since
7547                         # the un-expanded virtual atom is more useful for
7548                         # maintaining a cache of blocker atoms.
7549                         newsplit.append(x)
7550                         if atom_graph is not None:
7551                                 atom_graph.add(x, graph_parent)
7552                         continue
7553
7554                 if repoman or not hasattr(portdb, 'match_pkgs'):
7555                         if portdb.cp_list(x.cp):
7556                                 newsplit.append(x)
7557                         else:
7558                                 # TODO: Add PROVIDE check for repoman.
7559                                 a = []
7560                                 for y in mychoices:
7561                                         a.append(dep.Atom(x.replace(x.cp, y.cp, 1)))
7562                                 if not a:
7563                                         newsplit.append(x)
7564                                 elif len(a) == 1:
7565                                         newsplit.append(a[0])
7566                                 else:
7567                                         newsplit.append(['||'] + a)
7568                         continue
7569
7570                 pkgs = []
7571                 # Ignore USE deps here, since otherwise we might not
7572                 # get any matches. Choices with correct USE settings
7573                 # will be preferred in dep_zapdeps().
7574                 matches = portdb.match_pkgs(x.without_use)
7575                 # Use descending order to prefer higher versions.
7576                 matches.reverse()
7577                 for pkg in matches:
7578                         # only use new-style matches
7579                         if pkg.cp.startswith("virtual/"):
7580                                 pkgs.append(pkg)
7581                 if not (pkgs or mychoices):
7582                         # This one couldn't be expanded as a new-style virtual.  Old-style
7583                         # virtuals have already been expanded by dep_virtual, so this one
7584                         # is unavailable and dep_zapdeps will identify it as such.  The
7585                         # atom is not eliminated here since it may still represent a
7586                         # dependency that needs to be satisfied.
7587                         newsplit.append(x)
7588                         if atom_graph is not None:
7589                                 atom_graph.add(x, graph_parent)
7590                         continue
7591
7592                 a = []
7593                 for pkg in pkgs:
7594                         virt_atom = '=' + pkg.cpv
7595                         if x.use:
7596                                 virt_atom += str(x.use)
7597                         virt_atom = dep.Atom(virt_atom)
7598                         # According to GLEP 37, RDEPEND is the only dependency
7599                         # type that is valid for new-style virtuals. Repoman
7600                         # should enforce this.
7601                         depstring = pkg.metadata['RDEPEND']
7602                         pkg_kwargs = kwargs.copy()
7603                         pkg_kwargs["myuse"] = pkg.use.enabled
7604                         if edebug:
7605                                 util.writemsg_level(_("Virtual Parent:      %s\n") \
7606                                         % (pkg,), noiselevel=-1, level=logging.DEBUG)
7607                                 util.writemsg_level(_("Virtual Depstring:   %s\n") \
7608                                         % (depstring,), noiselevel=-1, level=logging.DEBUG)
7609
7610                         # Set EAPI used for validation in dep_check() recursion.
7611                         mytrees["virt_parent"] = (pkg, virt_atom)
7612
7613                         try:
7614                                 mycheck = dep_check(depstring, mydbapi, mysettings,
7615                                         myroot=myroot, trees=trees, **pkg_kwargs)
7616                         finally:
7617                                 # Restore previous EAPI after recursion.
7618                                 if virt_parent is not None:
7619                                         mytrees["virt_parent"] = virt_parent
7620                                 else:
7621                                         del mytrees["virt_parent"]
7622
7623                         if not mycheck[0]:
7624                                 raise portage.exception.ParseError(
7625                                         "%s: %s '%s'" % (y[0], mycheck[1], depstring))
7626
7627                         # pull in the new-style virtual
7628                         mycheck[1].append(virt_atom)
7629                         a.append(mycheck[1])
7630                         if atom_graph is not None:
7631                                 atom_graph.add(virt_atom, graph_parent)
7632                 # Plain old-style virtuals.  New-style virtuals are preferred.
7633                 if not pkgs:
7634                                 for y in mychoices:
7635                                         new_atom = dep.Atom(x.replace(x.cp, y.cp, 1))
7636                                         matches = portdb.match(new_atom)
7637                                         # portdb is an instance of depgraph._dep_check_composite_db, so
7638                                         # USE conditionals are already evaluated.
7639                                         if matches and mykey in \
7640                                                 portdb.aux_get(matches[-1], ['PROVIDE'])[0].split():
7641                                                 a.append(new_atom)
7642                                                 if atom_graph is not None:
7643                                                         atom_graph.add(new_atom, graph_parent)
7644
7645                 if not a and mychoices:
7646                         # Check for a virtual package.provided match.
7647                         for y in mychoices:
7648                                 new_atom = dep.Atom(x.replace(x.cp, y.cp, 1))
7649                                 if match_from_list(new_atom,
7650                                         pprovideddict.get(new_atom.cp, [])):
7651                                         a.append(new_atom)
7652                                         if atom_graph is not None:
7653                                                 atom_graph.add(new_atom, graph_parent)
7654
7655                 if not a:
7656                         newsplit.append(x)
7657                         if atom_graph is not None:
7658                                 atom_graph.add(x, graph_parent)
7659                 elif len(a) == 1:
7660                         newsplit.append(a[0])
7661                 else:
7662                         newsplit.append(['||'] + a)
7663
7664         return newsplit
7665
7666 def dep_eval(deplist):
7667         if not deplist:
7668                 return 1
7669         if deplist[0]=="||":
7670                 #or list; we just need one "1"
7671                 for x in deplist[1:]:
7672                         if isinstance(x, list):
7673                                 if dep_eval(x)==1:
7674                                         return 1
7675                         elif x==1:
7676                                         return 1
7677                 #XXX: unless there's no available atoms in the list
7678                 #in which case we need to assume that everything is
7679                 #okay as some ebuilds are relying on an old bug.
7680                 if len(deplist) == 1:
7681                         return 1
7682                 return 0
7683         else:
7684                 for x in deplist:
7685                         if isinstance(x, list):
7686                                 if dep_eval(x)==0:
7687                                         return 0
7688                         elif x==0 or x==2:
7689                                 return 0
7690                 return 1
7691
7692 def dep_zapdeps(unreduced, reduced, myroot, use_binaries=0, trees=None):
7693         """Takes an unreduced and reduced deplist and removes satisfied dependencies.
7694         Returned deplist contains steps that must be taken to satisfy dependencies."""
7695         if trees is None:
7696                 global db
7697                 trees = db
7698         writemsg("ZapDeps -- %s\n" % (use_binaries), 2)
7699         if not reduced or unreduced == ["||"] or dep_eval(reduced):
7700                 return []
7701
7702         if unreduced[0] != "||":
7703                 unresolved = []
7704                 for x, satisfied in zip(unreduced, reduced):
7705                         if isinstance(x, list):
7706                                 unresolved += dep_zapdeps(x, satisfied, myroot,
7707                                         use_binaries=use_binaries, trees=trees)
7708                         elif not satisfied:
7709                                 unresolved.append(x)
7710                 return unresolved
7711
7712         # We're at a ( || atom ... ) type level and need to make a choice
7713         deps = unreduced[1:]
7714         satisfieds = reduced[1:]
7715
7716         # Our preference order is for an the first item that:
7717         # a) contains all unmasked packages with the same key as installed packages
7718         # b) contains all unmasked packages
7719         # c) contains masked installed packages
7720         # d) is the first item
7721
7722         preferred_installed = []
7723         preferred_in_graph = []
7724         preferred_any_slot = []
7725         preferred_non_installed = []
7726         unsat_use_in_graph = []
7727         unsat_use_installed = []
7728         unsat_use_non_installed = []
7729         other = []
7730
7731         # Alias the trees we'll be checking availability against
7732         parent   = trees[myroot].get("parent")
7733         priority = trees[myroot].get("priority")
7734         graph_db = trees[myroot].get("graph_db")
7735         vardb = None
7736         if "vartree" in trees[myroot]:
7737                 vardb = trees[myroot]["vartree"].dbapi
7738         if use_binaries:
7739                 mydbapi = trees[myroot]["bintree"].dbapi
7740         else:
7741                 mydbapi = trees[myroot]["porttree"].dbapi
7742
7743         # Sort the deps into installed, not installed but already 
7744         # in the graph and other, not installed and not in the graph
7745         # and other, with values of [[required_atom], availablility]
7746         for x, satisfied in zip(deps, satisfieds):
7747                 if isinstance(x, list):
7748                         atoms = dep_zapdeps(x, satisfied, myroot,
7749                                 use_binaries=use_binaries, trees=trees)
7750                 else:
7751                         atoms = [x]
7752                 if not vardb:
7753                         # called by repoman
7754                         other.append((atoms, None, False))
7755                         continue
7756
7757                 all_available = True
7758                 all_use_satisfied = True
7759                 versions = {}
7760                 for atom in atoms:
7761                         if atom.blocker:
7762                                 continue
7763                         # Ignore USE dependencies here since we don't want USE
7764                         # settings to adversely affect || preference evaluation.
7765                         avail_pkg = mydbapi.match(atom.without_use)
7766                         if avail_pkg:
7767                                 avail_pkg = avail_pkg[-1] # highest (ascending order)
7768                                 avail_slot = dep.Atom("%s:%s" % (atom.cp,
7769                                         mydbapi.aux_get(avail_pkg, ["SLOT"])[0]))
7770                         if not avail_pkg:
7771                                 all_available = False
7772                                 all_use_satisfied = False
7773                                 break
7774
7775                         if atom.use:
7776                                 avail_pkg_use = mydbapi.match(atom)
7777                                 if not avail_pkg_use:
7778                                         all_use_satisfied = False
7779                                 else:
7780                                         # highest (ascending order)
7781                                         avail_pkg_use = avail_pkg_use[-1]
7782                                         if avail_pkg_use != avail_pkg:
7783                                                 avail_pkg = avail_pkg_use
7784                                                 avail_slot = dep.Atom("%s:%s" % (atom.cp,
7785                                                         mydbapi.aux_get(avail_pkg, ["SLOT"])[0]))
7786
7787                         versions[avail_slot] = avail_pkg
7788
7789                 this_choice = (atoms, versions, all_available)
7790                 if all_available:
7791                         # The "all installed" criterion is not version or slot specific.
7792                         # If any version of a package is already in the graph then we
7793                         # assume that it is preferred over other possible packages choices.
7794                         all_installed = True
7795                         for atom in set(dep.Atom(atom.cp) for atom in atoms \
7796                                 if not atom.blocker):
7797                                 # New-style virtuals have zero cost to install.
7798                                 if not vardb.match(atom) and not atom.startswith("virtual/"):
7799                                         all_installed = False
7800                                         break
7801                         all_installed_slots = False
7802                         if all_installed:
7803                                 all_installed_slots = True
7804                                 for slot_atom in versions:
7805                                         # New-style virtuals have zero cost to install.
7806                                         if not vardb.match(slot_atom) and \
7807                                                 not slot_atom.startswith("virtual/"):
7808                                                 all_installed_slots = False
7809                                                 break
7810                         if graph_db is None:
7811                                 if all_use_satisfied:
7812                                         if all_installed:
7813                                                 if all_installed_slots:
7814                                                         preferred_installed.append(this_choice)
7815                                                 else:
7816                                                         preferred_any_slot.append(this_choice)
7817                                         else:
7818                                                 preferred_non_installed.append(this_choice)
7819                                 else:
7820                                         if all_installed_slots:
7821                                                 unsat_use_installed.append(this_choice)
7822                                         else:
7823                                                 unsat_use_non_installed.append(this_choice)
7824                         else:
7825                                 all_in_graph = True
7826                                 for slot_atom in versions:
7827                                         # New-style virtuals have zero cost to install.
7828                                         if not graph_db.match(slot_atom) and \
7829                                                 not slot_atom.startswith("virtual/"):
7830                                                 all_in_graph = False
7831                                                 break
7832                                 circular_atom = None
7833                                 if all_in_graph:
7834                                         if parent is None or priority is None:
7835                                                 pass
7836                                         elif priority.buildtime:
7837                                                 # Check if the atom would result in a direct circular
7838                                                 # dependency and try to avoid that if it seems likely
7839                                                 # to be unresolvable. This is only relevant for
7840                                                 # buildtime deps that aren't already satisfied by an
7841                                                 # installed package.
7842                                                 cpv_slot_list = [parent]
7843                                                 for atom in atoms:
7844                                                         if atom.blocker:
7845                                                                 continue
7846                                                         if vardb.match(atom):
7847                                                                 # If the atom is satisfied by an installed
7848                                                                 # version then it's not a circular dep.
7849                                                                 continue
7850                                                         if atom.cp != parent.cp:
7851                                                                 continue
7852                                                         if match_from_list(atom, cpv_slot_list):
7853                                                                 circular_atom = atom
7854                                                                 break
7855                                 if circular_atom is not None:
7856                                         other.append(this_choice)
7857                                 else:
7858                                         if all_use_satisfied:
7859                                                 if all_in_graph:
7860                                                         preferred_in_graph.append(this_choice)
7861                                                 elif all_installed:
7862                                                         if all_installed_slots:
7863                                                                 preferred_installed.append(this_choice)
7864                                                         else:
7865                                                                 preferred_any_slot.append(this_choice)
7866                                                 else:
7867                                                         preferred_non_installed.append(this_choice)
7868                                         else:
7869                                                 if all_in_graph:
7870                                                         unsat_use_in_graph.append(this_choice)
7871                                                 elif all_installed_slots:
7872                                                         unsat_use_installed.append(this_choice)
7873                                                 else:
7874                                                         unsat_use_non_installed.append(this_choice)
7875                 else:
7876                         other.append(this_choice)
7877
7878         # unsat_use_* must come after preferred_non_installed
7879         # for correct ordering in cases like || ( foo[a] foo[b] ).
7880         preferred = preferred_in_graph + preferred_installed + \
7881                 preferred_any_slot + preferred_non_installed + \
7882                 unsat_use_in_graph + unsat_use_installed + unsat_use_non_installed + \
7883                 other
7884
7885         for allow_masked in (False, True):
7886                 for atoms, versions, all_available in preferred:
7887                         if all_available or allow_masked:
7888                                 return atoms
7889
7890         assert(False) # This point should not be reachable
7891
7892 def dep_expand(mydep, mydb=None, use_cache=1, settings=None):
7893         '''
7894         @rtype: Atom
7895         '''
7896         if not len(mydep):
7897                 return mydep
7898         if mydep[0]=="*":
7899                 mydep=mydep[1:]
7900         orig_dep = mydep
7901         if isinstance(orig_dep, dep.Atom):
7902                 mydep = orig_dep.cp
7903         else:
7904                 mydep = orig_dep
7905                 has_cat = '/' in orig_dep
7906                 if not has_cat:
7907                         alphanum = re.search(r'\w', orig_dep)
7908                         if alphanum:
7909                                 mydep = orig_dep[:alphanum.start()] + "null/" + \
7910                                         orig_dep[alphanum.start():]
7911                 try:
7912                         mydep = dep.Atom(mydep)
7913                 except exception.InvalidAtom:
7914                         # Missing '=' prefix is allowed for backward compatibility.
7915                         if not dep.isvalidatom("=" + mydep):
7916                                 raise
7917                         mydep = dep.Atom('=' + mydep)
7918                         orig_dep = '=' + orig_dep
7919                 if not has_cat:
7920                         null_cat, pn = catsplit(mydep.cp)
7921                         mydep = pn
7922                 else:
7923                         mydep = mydep.cp
7924         expanded = cpv_expand(mydep, mydb=mydb,
7925                 use_cache=use_cache, settings=settings)
7926         return portage.dep.Atom(orig_dep.replace(mydep, expanded, 1))
7927
7928 def dep_check(depstring, mydbapi, mysettings, use="yes", mode=None, myuse=None,
7929         use_cache=1, use_binaries=0, myroot="/", trees=None):
7930         """Takes a depend string and parses the condition."""
7931         edebug = mysettings.get("PORTAGE_DEBUG", None) == "1"
7932         #check_config_instance(mysettings)
7933         if trees is None:
7934                 trees = globals()["db"]
7935         if use=="yes":
7936                 if myuse is None:
7937                         #default behavior
7938                         myusesplit = mysettings["PORTAGE_USE"].split()
7939                 else:
7940                         myusesplit = myuse
7941                         # We've been given useflags to use.
7942                         #print "USE FLAGS PASSED IN."
7943                         #print myuse
7944                         #if "bindist" in myusesplit:
7945                         #       print "BINDIST is set!"
7946                         #else:
7947                         #       print "BINDIST NOT set."
7948         else:
7949                 #we are being run by autouse(), don't consult USE vars yet.
7950                 # WE ALSO CANNOT USE SETTINGS
7951                 myusesplit=[]
7952
7953         #convert parenthesis to sublists
7954         try:
7955                 mysplit = portage.dep.paren_reduce(depstring)
7956         except portage.exception.InvalidDependString as e:
7957                 return [0, str(e)]
7958
7959         mymasks = set()
7960         useforce = set()
7961         useforce.add(mysettings["ARCH"])
7962         if use == "all":
7963                 # This masking/forcing is only for repoman.  In other cases, relevant
7964                 # masking/forcing should have already been applied via
7965                 # config.regenerate().  Also, binary or installed packages may have
7966                 # been built with flags that are now masked, and it would be
7967                 # inconsistent to mask them now.  Additionally, myuse may consist of
7968                 # flags from a parent package that is being merged to a $ROOT that is
7969                 # different from the one that mysettings represents.
7970                 mymasks.update(mysettings.usemask)
7971                 mymasks.update(mysettings.archlist())
7972                 mymasks.discard(mysettings["ARCH"])
7973                 useforce.update(mysettings.useforce)
7974                 useforce.difference_update(mymasks)
7975         try:
7976                 mysplit = portage.dep.use_reduce(mysplit, uselist=myusesplit,
7977                         masklist=mymasks, matchall=(use=="all"), excludeall=useforce)
7978         except portage.exception.InvalidDependString as e:
7979                 return [0, str(e)]
7980
7981         # Do the || conversions
7982         mysplit=portage.dep.dep_opconvert(mysplit)
7983
7984         if mysplit == []:
7985                 #dependencies were reduced to nothing
7986                 return [1,[]]
7987
7988         # Recursively expand new-style virtuals so as to
7989         # collapse one or more levels of indirection.
7990         try:
7991                 mysplit = _expand_new_virtuals(mysplit, edebug, mydbapi, mysettings,
7992                         use=use, mode=mode, myuse=myuse,
7993                         use_force=useforce, use_mask=mymasks, use_cache=use_cache,
7994                         use_binaries=use_binaries, myroot=myroot, trees=trees)
7995         except portage.exception.ParseError as e:
7996                 return [0, str(e)]
7997
7998         mysplit2=mysplit[:]
7999         mysplit2=dep_wordreduce(mysplit2,mysettings,mydbapi,mode,use_cache=use_cache)
8000         if mysplit2 is None:
8001                 return [0, _("Invalid token")]
8002
8003         writemsg("\n\n\n", 1)
8004         writemsg("mysplit:  %s\n" % (mysplit), 1)
8005         writemsg("mysplit2: %s\n" % (mysplit2), 1)
8006
8007         try:
8008                 selected_atoms = dep_zapdeps(mysplit, mysplit2, myroot,
8009                         use_binaries=use_binaries, trees=trees)
8010         except portage.exception.InvalidAtom as e:
8011                 if portage.dep._dep_check_strict:
8012                         raise # This shouldn't happen.
8013                 # dbapi.match() failed due to an invalid atom in
8014                 # the dependencies of an installed package.
8015                 return [0, _("Invalid atom: '%s'") % (e,)]
8016
8017         return [1, selected_atoms]
8018
8019 def dep_wordreduce(mydeplist,mysettings,mydbapi,mode,use_cache=1):
8020         "Reduces the deplist to ones and zeros"
8021         deplist=mydeplist[:]
8022         for mypos, token in enumerate(deplist):
8023                 if isinstance(deplist[mypos], list):
8024                         #recurse
8025                         deplist[mypos]=dep_wordreduce(deplist[mypos],mysettings,mydbapi,mode,use_cache=use_cache)
8026                 elif deplist[mypos]=="||":
8027                         pass
8028                 elif token[:1] == "!":
8029                         deplist[mypos] = False
8030                 else:
8031                         mykey = deplist[mypos].cp
8032                         if mysettings and mykey in mysettings.pprovideddict and \
8033                                 match_from_list(deplist[mypos], mysettings.pprovideddict[mykey]):
8034                                 deplist[mypos]=True
8035                         elif mydbapi is None:
8036                                 # Assume nothing is satisfied.  This forces dep_zapdeps to
8037                                 # return all of deps the deps that have been selected
8038                                 # (excluding those satisfied by package.provided).
8039                                 deplist[mypos] = False
8040                         else:
8041                                 if mode:
8042                                         x = mydbapi.xmatch(mode, deplist[mypos])
8043                                         if mode.startswith("minimum-"):
8044                                                 mydep = []
8045                                                 if x:
8046                                                         mydep.append(x)
8047                                         else:
8048                                                 mydep = x
8049                                 else:
8050                                         mydep=mydbapi.match(deplist[mypos],use_cache=use_cache)
8051                                 if mydep!=None:
8052                                         tmp=(len(mydep)>=1)
8053                                         if deplist[mypos][0]=="!":
8054                                                 tmp=False
8055                                         deplist[mypos]=tmp
8056                                 else:
8057                                         #encountered invalid string
8058                                         return None
8059         return deplist
8060
8061 _cpv_key_re = re.compile('^' + dep._cpv + '$', re.VERBOSE)
8062 def cpv_getkey(mycpv):
8063         """Calls pkgsplit on a cpv and returns only the cp."""
8064         m = _cpv_key_re.match(mycpv)
8065         if m is not None:
8066                 return m.group(2)
8067         myslash = mycpv.split("/", 1)
8068         mysplit=pkgsplit(myslash[-1])
8069         if mysplit is None:
8070                 return None
8071         mylen=len(myslash)
8072         if mylen==2:
8073                 return myslash[0]+"/"+mysplit[0]
8074         else:
8075                 return mysplit[0]
8076
8077 getCPFromCPV = cpv_getkey
8078
8079 def key_expand(mykey, mydb=None, use_cache=1, settings=None):
8080         """This is deprecated because it just returns the first match instead of
8081         raising AmbiguousPackageName like cpv_expand does."""
8082         warnings.warn("portage.key_expand() is deprecated", DeprecationWarning)
8083         mysplit=mykey.split("/")
8084         if settings is None:
8085                 settings = globals()["settings"]
8086         virts = settings.getvirtuals("/")
8087         virts_p = settings.get_virts_p("/")
8088         if len(mysplit)==1:
8089                 if hasattr(mydb, "cp_list"):
8090                         for x in mydb.categories:
8091                                 if mydb.cp_list(x+"/"+mykey,use_cache=use_cache):
8092                                         return dep.Atom(x + "/" + mykey)
8093                         if mykey in virts_p:
8094                                 return(virts_p[mykey][0])
8095                 return dep.Atom("null/" + mykey)
8096         elif mydb:
8097                 if hasattr(mydb, "cp_list"):
8098                         if not mydb.cp_list(mykey, use_cache=use_cache) and \
8099                                 virts and mykey in virts:
8100                                 return virts[mykey][0]
8101                 if not isinstance(mykey, dep.Atom):
8102                         mykey = dep.Atom(mykey)
8103                 return mykey
8104
8105 def cpv_expand(mycpv, mydb=None, use_cache=1, settings=None):
8106         """Given a string (packagename or virtual) expand it into a valid
8107         cat/package string. Virtuals use the mydb to determine which provided
8108         virtual is a valid choice and defaults to the first element when there
8109         are no installed/available candidates."""
8110         myslash=mycpv.split("/")
8111         mysplit=pkgsplit(myslash[-1])
8112         if settings is None:
8113                 settings = globals()["settings"]
8114         virts = settings.getvirtuals("/")
8115         virts_p = settings.get_virts_p("/")
8116         if len(myslash)>2:
8117                 # this is illegal case.
8118                 mysplit=[]
8119                 mykey=mycpv
8120         elif len(myslash)==2:
8121                 if mysplit:
8122                         mykey=myslash[0]+"/"+mysplit[0]
8123                 else:
8124                         mykey=mycpv
8125                 if mydb and virts and mykey in virts:
8126                         writemsg("mydb.__class__: %s\n" % (mydb.__class__), 1)
8127                         if hasattr(mydb, "cp_list"):
8128                                 if not mydb.cp_list(mykey, use_cache=use_cache):
8129                                         writemsg("virts[%s]: %s\n" % (str(mykey),virts[mykey]), 1)
8130                                         mykey_orig = mykey[:]
8131                                         for vkey in virts[mykey]:
8132                                                 # The virtuals file can contain a versioned atom, so
8133                                                 # it may be necessary to remove the operator and
8134                                                 # version from the atom before it is passed into
8135                                                 # dbapi.cp_list().
8136                                                 if mydb.cp_list(dep_getkey(vkey), use_cache=use_cache):
8137                                                         mykey = str(vkey)
8138                                                         writemsg(_("virts chosen: %s\n") % (mykey), 1)
8139                                                         break
8140                                         if mykey == mykey_orig:
8141                                                 mykey = str(virts[mykey][0])
8142                                                 writemsg(_("virts defaulted: %s\n") % (mykey), 1)
8143                         #we only perform virtual expansion if we are passed a dbapi
8144         else:
8145                 #specific cpv, no category, ie. "foo-1.0"
8146                 if mysplit:
8147                         myp=mysplit[0]
8148                 else:
8149                         # "foo" ?
8150                         myp=mycpv
8151                 mykey=None
8152                 matches=[]
8153                 if mydb and hasattr(mydb, "categories"):
8154                         for x in mydb.categories:
8155                                 if mydb.cp_list(x+"/"+myp,use_cache=use_cache):
8156                                         matches.append(x+"/"+myp)
8157                 if len(matches) > 1:
8158                         virtual_name_collision = False
8159                         if len(matches) == 2:
8160                                 for x in matches:
8161                                         if not x.startswith("virtual/"):
8162                                                 # Assume that the non-virtual is desired.  This helps
8163                                                 # avoid the ValueError for invalid deps that come from
8164                                                 # installed packages (during reverse blocker detection,
8165                                                 # for example).
8166                                                 mykey = x
8167                                         else:
8168                                                 virtual_name_collision = True
8169                         if not virtual_name_collision:
8170                                 # AmbiguousPackageName inherits from ValueError,
8171                                 # for backward compatibility with calling code
8172                                 # that already handles ValueError.
8173                                 raise portage.exception.AmbiguousPackageName(matches)
8174                 elif matches:
8175                         mykey=matches[0]
8176
8177                 if not mykey and not isinstance(mydb, list):
8178                         if myp in virts_p:
8179                                 mykey=virts_p[myp][0]
8180                         #again, we only perform virtual expansion if we have a dbapi (not a list)
8181                 if not mykey:
8182                         mykey="null/"+myp
8183         if mysplit:
8184                 if mysplit[2]=="r0":
8185                         return mykey+"-"+mysplit[1]
8186                 else:
8187                         return mykey+"-"+mysplit[1]+"-"+mysplit[2]
8188         else:
8189                 return mykey
8190
8191 def getmaskingreason(mycpv, metadata=None, settings=None, portdb=None, return_location=False):
8192         from portage.util import grablines
8193         if settings is None:
8194                 settings = globals()["settings"]
8195         if portdb is None:
8196                 portdb = globals()["portdb"]
8197         mysplit = catpkgsplit(mycpv)
8198         if not mysplit:
8199                 raise ValueError(_("invalid CPV: %s") % mycpv)
8200         if metadata is None:
8201                 db_keys = list(portdb._aux_cache_keys)
8202                 try:
8203                         metadata = dict(zip(db_keys, portdb.aux_get(mycpv, db_keys)))
8204                 except KeyError:
8205                         if not portdb.cpv_exists(mycpv):
8206                                 raise
8207         if metadata is None:
8208                 # Can't access SLOT due to corruption.
8209                 cpv_slot_list = [mycpv]
8210         else:
8211                 cpv_slot_list = ["%s:%s" % (mycpv, metadata["SLOT"])]
8212         mycp=mysplit[0]+"/"+mysplit[1]
8213
8214         # XXX- This is a temporary duplicate of code from the config constructor.
8215         locations = [os.path.join(settings["PORTDIR"], "profiles")]
8216         locations.extend(settings.profiles)
8217         for ov in settings["PORTDIR_OVERLAY"].split():
8218                 profdir = os.path.join(normalize_path(ov), "profiles")
8219                 if os.path.isdir(profdir):
8220                         locations.append(profdir)
8221         locations.append(os.path.join(settings["PORTAGE_CONFIGROOT"],
8222                 USER_CONFIG_PATH))
8223         locations.reverse()
8224         pmasklists = [(x, grablines(os.path.join(x, "package.mask"), recursive=1)) for x in locations]
8225
8226         if mycp in settings.pmaskdict:
8227                 for x in settings.pmaskdict[mycp]:
8228                         if match_from_list(x, cpv_slot_list):
8229                                 comment = ""
8230                                 l = "\n"
8231                                 comment_valid = -1
8232                                 for pmask in pmasklists:
8233                                         pmask_filename = os.path.join(pmask[0], "package.mask")
8234                                         for i in range(len(pmask[1])):
8235                                                 l = pmask[1][i].strip()
8236                                                 if l == "":
8237                                                         comment = ""
8238                                                         comment_valid = -1
8239                                                 elif l[0] == "#":
8240                                                         comment += (l+"\n")
8241                                                         comment_valid = i + 1
8242                                                 elif l == x:
8243                                                         if comment_valid != i:
8244                                                                 comment = ""
8245                                                         if return_location:
8246                                                                 return (comment, pmask_filename)
8247                                                         else:
8248                                                                 return comment
8249                                                 elif comment_valid != -1:
8250                                                         # Apparently this comment applies to muliple masks, so
8251                                                         # it remains valid until a blank line is encountered.
8252                                                         comment_valid += 1
8253         if return_location:
8254                 return (None, None)
8255         else:
8256                 return None
8257
8258 def getmaskingstatus(mycpv, settings=None, portdb=None):
8259         if settings is None:
8260                 settings = config(clone=globals()["settings"])
8261         if portdb is None:
8262                 portdb = globals()["portdb"]
8263
8264         metadata = None
8265         installed = False
8266         if not isinstance(mycpv, basestring):
8267                 # emerge passed in a Package instance
8268                 pkg = mycpv
8269                 mycpv = pkg.cpv
8270                 metadata = pkg.metadata
8271                 installed = pkg.installed
8272
8273         mysplit = catpkgsplit(mycpv)
8274         if not mysplit:
8275                 raise ValueError(_("invalid CPV: %s") % mycpv)
8276         if metadata is None:
8277                 db_keys = list(portdb._aux_cache_keys)
8278                 try:
8279                         metadata = dict(zip(db_keys, portdb.aux_get(mycpv, db_keys)))
8280                 except KeyError:
8281                         if not portdb.cpv_exists(mycpv):
8282                                 raise
8283                         return ["corruption"]
8284                 if "?" in metadata["LICENSE"]:
8285                         settings.setcpv(mycpv, mydb=metadata)
8286                         metadata["USE"] = settings["PORTAGE_USE"]
8287                 else:
8288                         metadata["USE"] = ""
8289         mycp=mysplit[0]+"/"+mysplit[1]
8290
8291         rValue = []
8292
8293         # profile checking
8294         if settings._getProfileMaskAtom(mycpv, metadata):
8295                 rValue.append("profile")
8296
8297         # package.mask checking
8298         if settings._getMaskAtom(mycpv, metadata):
8299                 rValue.append("package.mask")
8300
8301         # keywords checking
8302         eapi = metadata["EAPI"]
8303         mygroups = settings._getKeywords(mycpv, metadata)
8304         licenses = metadata["LICENSE"]
8305         properties = metadata["PROPERTIES"]
8306         slot = metadata["SLOT"]
8307         if eapi.startswith("-"):
8308                 eapi = eapi[1:]
8309         if not eapi_is_supported(eapi):
8310                 return ["EAPI %s" % eapi]
8311         elif _eapi_is_deprecated(eapi) and not installed:
8312                 return ["EAPI %s" % eapi]
8313         egroups = settings.configdict["backupenv"].get(
8314                 "ACCEPT_KEYWORDS", "").split()
8315         pgroups = settings["ACCEPT_KEYWORDS"].split()
8316         myarch = settings["ARCH"]
8317         if pgroups and myarch not in pgroups:
8318                 """For operating systems other than Linux, ARCH is not necessarily a
8319                 valid keyword."""
8320                 myarch = pgroups[0].lstrip("~")
8321
8322         cp = dep_getkey(mycpv)
8323         pkgdict = settings.pkeywordsdict.get(cp)
8324         matches = False
8325         if pkgdict:
8326                 cpv_slot_list = ["%s:%s" % (mycpv, metadata["SLOT"])]
8327                 for atom, pkgkeywords in pkgdict.items():
8328                         if match_from_list(atom, cpv_slot_list):
8329                                 matches = True
8330                                 pgroups.extend(pkgkeywords)
8331         if matches or egroups:
8332                 pgroups.extend(egroups)
8333                 inc_pgroups = set()
8334                 for x in pgroups:
8335                         if x.startswith("-"):
8336                                 if x == "-*":
8337                                         inc_pgroups.clear()
8338                                 else:
8339                                         inc_pgroups.discard(x[1:])
8340                         else:
8341                                 inc_pgroups.add(x)
8342                 pgroups = inc_pgroups
8343                 del inc_pgroups
8344
8345         kmask = "missing"
8346
8347         if '**' in pgroups:
8348                 kmask = None
8349         else:
8350                 for keyword in pgroups:
8351                         if keyword in mygroups:
8352                                 kmask = None
8353                                 break
8354
8355         if kmask:
8356                 fallback = None
8357                 for gp in mygroups:
8358                         if gp=="*":
8359                                 kmask=None
8360                                 break
8361                         elif gp=="-"+myarch and myarch in pgroups:
8362                                 kmask="-"+myarch
8363                                 break
8364                         elif gp=="~"+myarch and myarch in pgroups:
8365                                 kmask="~"+myarch
8366                                 break
8367
8368         try:
8369                 missing_licenses = settings._getMissingLicenses(mycpv, metadata)
8370                 if missing_licenses:
8371                         allowed_tokens = set(["||", "(", ")"])
8372                         allowed_tokens.update(missing_licenses)
8373                         license_split = licenses.split()
8374                         license_split = [x for x in license_split \
8375                                 if x in allowed_tokens]
8376                         msg = license_split[:]
8377                         msg.append("license(s)")
8378                         rValue.append(" ".join(msg))
8379         except portage.exception.InvalidDependString as e:
8380                 rValue.append("LICENSE: "+str(e))
8381
8382         try:
8383                 missing_properties = settings._getMissingProperties(mycpv, metadata)
8384                 if missing_properties:
8385                         allowed_tokens = set(["||", "(", ")"])
8386                         allowed_tokens.update(missing_properties)
8387                         properties_split = properties.split()
8388                         properties_split = [x for x in properties_split \
8389                                         if x in allowed_tokens]
8390                         msg = properties_split[:]
8391                         msg.append("properties")
8392                         rValue.append(" ".join(msg))
8393         except portage.exception.InvalidDependString as e:
8394                 rValue.append("PROPERTIES: "+str(e))
8395
8396         # Only show KEYWORDS masks for installed packages
8397         # if they're not masked for any other reason.
8398         if kmask and (not installed or not rValue):
8399                 rValue.append(kmask+" keyword")
8400
8401         return rValue
8402
8403 auxdbkeys=[
8404   'DEPEND',    'RDEPEND',   'SLOT',      'SRC_URI',
8405         'RESTRICT',  'HOMEPAGE',  'LICENSE',   'DESCRIPTION',
8406         'KEYWORDS',  'INHERITED', 'IUSE',      'CDEPEND',
8407         'PDEPEND',   'PROVIDE', 'EAPI',
8408         'PROPERTIES', 'DEFINED_PHASES', 'UNUSED_05', 'UNUSED_04',
8409         'UNUSED_03', 'UNUSED_02', 'UNUSED_01',
8410         ]
8411 auxdbkeylen=len(auxdbkeys)
8412
8413 from portage.dbapi import dbapi
8414 from portage.dbapi.virtual import fakedbapi
8415 from portage.dbapi.bintree import bindbapi, binarytree
8416 from portage.dbapi.vartree import vardbapi, vartree, dblink
8417 from portage.dbapi.porttree import close_portdbapi_caches, portdbapi, portagetree
8418
8419 class FetchlistDict(portage.cache.mappings.Mapping):
8420         """This provide a mapping interface to retrieve fetch lists.  It's used
8421         to allow portage.manifest.Manifest to access fetch lists via a standard
8422         mapping interface rather than use the dbapi directly."""
8423         def __init__(self, pkgdir, settings, mydbapi):
8424                 """pkgdir is a directory containing ebuilds and settings is passed into
8425                 portdbapi.getfetchlist for __getitem__ calls."""
8426                 self.pkgdir = pkgdir
8427                 self.cp = os.sep.join(pkgdir.split(os.sep)[-2:])
8428                 self.settings = settings
8429                 self.mytree = os.path.realpath(os.path.dirname(os.path.dirname(pkgdir)))
8430                 self.portdb = mydbapi
8431         def __getitem__(self, pkg_key):
8432                 """Returns the complete fetch list for a given package."""
8433                 return list(self.portdb.getFetchMap(pkg_key, mytree=self.mytree))
8434         def __contains__(self, cpv):
8435                 return cpv in self.__iter__()
8436         def has_key(self, pkg_key):
8437                 """Returns true if the given package exists within pkgdir."""
8438                 return pkg_key in self
8439
8440         def __iter__(self):
8441                 return iter(self.portdb.cp_list(self.cp, mytree=self.mytree))
8442
8443         def keys(self):
8444                 """Returns keys for all packages within pkgdir"""
8445                 return self.portdb.cp_list(self.cp, mytree=self.mytree)
8446
8447         if sys.hexversion >= 0x3000000:
8448                 keys = __iter__
8449
8450 def pkgmerge(mytbz2, myroot, mysettings, mydbapi=None,
8451         vartree=None, prev_mtimes=None, blockers=None):
8452         """will merge a .tbz2 file, returning a list of runtime dependencies
8453                 that must be satisfied, or None if there was a merge error.     This
8454                 code assumes the package exists."""
8455         global db
8456         if mydbapi is None:
8457                 mydbapi = db[myroot]["bintree"].dbapi
8458         if vartree is None:
8459                 vartree = db[myroot]["vartree"]
8460         if mytbz2[-5:]!=".tbz2":
8461                 print(_("!!! Not a .tbz2 file"))
8462                 return 1
8463
8464         tbz2_lock = None
8465         mycat = None
8466         mypkg = None
8467         did_merge_phase = False
8468         success = False
8469         try:
8470                 """ Don't lock the tbz2 file because the filesytem could be readonly or
8471                 shared by a cluster."""
8472                 #tbz2_lock = portage.locks.lockfile(mytbz2, wantnewlockfile=1)
8473
8474                 mypkg = os.path.basename(mytbz2)[:-5]
8475                 xptbz2 = portage.xpak.tbz2(mytbz2)
8476                 mycat = xptbz2.getfile("CATEGORY")
8477                 if not mycat:
8478                         writemsg(_("!!! CATEGORY info missing from info chunk, aborting...\n"),
8479                                 noiselevel=-1)
8480                         return 1
8481                 mycat = mycat.strip()
8482
8483                 # These are the same directories that would be used at build time.
8484                 builddir = os.path.join(
8485                         mysettings["PORTAGE_TMPDIR"], "portage", mycat, mypkg)
8486                 catdir = os.path.dirname(builddir)
8487                 pkgloc = os.path.join(builddir, "image")
8488                 infloc = os.path.join(builddir, "build-info")
8489                 myebuild = os.path.join(
8490                         infloc, os.path.basename(mytbz2)[:-4] + "ebuild")
8491                 portage.util.ensure_dirs(os.path.dirname(catdir),
8492                         uid=portage_uid, gid=portage_gid, mode=0o70, mask=0)
8493                 catdir_lock = portage.locks.lockdir(catdir)
8494                 portage.util.ensure_dirs(catdir,
8495                         uid=portage_uid, gid=portage_gid, mode=0o70, mask=0)
8496                 try:
8497                         shutil.rmtree(builddir)
8498                 except (IOError, OSError) as e:
8499                         if e.errno != errno.ENOENT:
8500                                 raise
8501                         del e
8502                 for mydir in (builddir, pkgloc, infloc):
8503                         portage.util.ensure_dirs(mydir, uid=portage_uid,
8504                                 gid=portage_gid, mode=0o755)
8505                 writemsg_stdout(_(">>> Extracting info\n"))
8506                 xptbz2.unpackinfo(infloc)
8507                 mysettings.setcpv(mycat + "/" + mypkg, mydb=mydbapi)
8508                 # Store the md5sum in the vdb.
8509                 fp = open(_unicode_encode(os.path.join(infloc, 'BINPKGMD5')), 'w')
8510                 fp.write(str(portage.checksum.perform_md5(mytbz2))+"\n")
8511                 fp.close()
8512
8513                 # This gives bashrc users an opportunity to do various things
8514                 # such as remove binary packages after they're installed.
8515                 mysettings["PORTAGE_BINPKG_FILE"] = mytbz2
8516                 mysettings.backup_changes("PORTAGE_BINPKG_FILE")
8517                 debug = mysettings.get("PORTAGE_DEBUG", "") == "1"
8518
8519                 # Eventually we'd like to pass in the saved ebuild env here.
8520                 retval = doebuild(myebuild, "setup", myroot, mysettings, debug=debug,
8521                         tree="bintree", mydbapi=mydbapi, vartree=vartree)
8522                 if retval != os.EX_OK:
8523                         writemsg(_("!!! Setup failed: %s\n") % retval, noiselevel=-1)
8524                         return retval
8525
8526                 writemsg_stdout(_(">>> Extracting %s\n") % mypkg)
8527                 retval = portage.process.spawn_bash(
8528                         "bzip2 -dqc -- '%s' | tar -xp -C '%s' -f -" % (mytbz2, pkgloc),
8529                         env=mysettings.environ())
8530                 if retval != os.EX_OK:
8531                         writemsg(_("!!! Error Extracting '%s'\n") % mytbz2, noiselevel=-1)
8532                         return retval
8533                 #portage.locks.unlockfile(tbz2_lock)
8534                 #tbz2_lock = None
8535
8536                 mylink = dblink(mycat, mypkg, myroot, mysettings, vartree=vartree,
8537                         treetype="bintree", blockers=blockers)
8538                 retval = mylink.merge(pkgloc, infloc, myroot, myebuild, cleanup=0,
8539                         mydbapi=mydbapi, prev_mtimes=prev_mtimes)
8540                 did_merge_phase = True
8541                 success = retval == os.EX_OK
8542                 return retval
8543         finally:
8544                 mysettings.pop("PORTAGE_BINPKG_FILE", None)
8545                 if tbz2_lock:
8546                         portage.locks.unlockfile(tbz2_lock)
8547                 if True:
8548                         if not did_merge_phase:
8549                                 # The merge phase handles this already.  Callers don't know how
8550                                 # far this function got, so we have to call elog_process() here
8551                                 # so that it's only called once.
8552                                 from portage.elog import elog_process
8553                                 elog_process(mycat + "/" + mypkg, mysettings)
8554                         try:
8555                                 if success:
8556                                         shutil.rmtree(builddir)
8557                         except (IOError, OSError) as e:
8558                                 if e.errno != errno.ENOENT:
8559                                         raise
8560                                 del e
8561
8562 def deprecated_profile_check(settings=None):
8563         config_root = "/"
8564         if settings is not None:
8565                 config_root = settings["PORTAGE_CONFIGROOT"]
8566         deprecated_profile_file = os.path.join(config_root,
8567                 DEPRECATED_PROFILE_FILE)
8568         if not os.access(deprecated_profile_file, os.R_OK):
8569                 return False
8570         dcontent = codecs.open(_unicode_encode(deprecated_profile_file,
8571                 encoding=_encodings['fs'], errors='strict'), 
8572                 mode='r', encoding=_encodings['content'], errors='replace').readlines()
8573         writemsg(colorize("BAD", _("\n!!! Your current profile is "
8574                 "deprecated and not supported anymore.")) + "\n", noiselevel=-1)
8575         if not dcontent:
8576                 writemsg(colorize("BAD", _("!!! Please refer to the "
8577                         "Gentoo Upgrading Guide.")) + "\n", noiselevel=-1)
8578                 return True
8579         newprofile = dcontent[0]
8580         writemsg(colorize("BAD", _("!!! Please upgrade to the "
8581                 "following profile if possible:")) + "\n", noiselevel=-1)
8582         writemsg(8*" " + colorize("GOOD", newprofile) + "\n", noiselevel=-1)
8583         if len(dcontent) > 1:
8584                 writemsg(_("To upgrade do the following steps:\n"), noiselevel=-1)
8585                 for myline in dcontent[1:]:
8586                         writemsg(myline, noiselevel=-1)
8587                 writemsg("\n\n", noiselevel=-1)
8588         return True
8589
8590 # gets virtual package settings
8591 def getvirtuals(myroot):
8592         global settings
8593         writemsg("--- DEPRECATED call to getvirtual\n")
8594         return settings.getvirtuals(myroot)
8595
8596 def commit_mtimedb(mydict=None, filename=None):
8597         if mydict is None:
8598                 global mtimedb
8599                 if "mtimedb" not in globals() or mtimedb is None:
8600                         return
8601                 mtimedb.commit()
8602                 return
8603         if filename is None:
8604                 global mtimedbfile
8605                 filename = mtimedbfile
8606         mydict["version"] = VERSION
8607         d = {} # for full backward compat, pickle it as a plain dict object.
8608         d.update(mydict)
8609         try:
8610                 f = atomic_ofstream(filename, mode='wb')
8611                 pickle.dump(d, f, protocol=2)
8612                 f.close()
8613                 portage.util.apply_secpass_permissions(filename,
8614                         uid=uid, gid=portage_gid, mode=0o644)
8615         except (IOError, OSError) as e:
8616                 pass
8617
8618 def portageexit():
8619         global uid,portage_gid,portdb,db
8620         if secpass and os.environ.get("SANDBOX_ON") != "1":
8621                 close_portdbapi_caches()
8622                 commit_mtimedb()
8623
8624 atexit_register(portageexit)
8625
8626 def _global_updates(trees, prev_mtimes):
8627         """
8628         Perform new global updates if they exist in $PORTDIR/profiles/updates/.
8629
8630         @param trees: A dictionary containing portage trees.
8631         @type trees: dict
8632         @param prev_mtimes: A dictionary containing mtimes of files located in
8633                 $PORTDIR/profiles/updates/.
8634         @type prev_mtimes: dict
8635         @rtype: None or List
8636         @return: None if no were no updates, otherwise a list of update commands
8637                 that have been performed.
8638         """
8639         # only do this if we're root and not running repoman/ebuild digest
8640         global secpass
8641         if secpass < 2 or "SANDBOX_ACTIVE" in os.environ:
8642                 return
8643         root = "/"
8644         mysettings = trees["/"]["vartree"].settings
8645         updpath = os.path.join(mysettings["PORTDIR"], "profiles", "updates")
8646
8647         try:
8648                 if mysettings["PORTAGE_CALLER"] == "fixpackages":
8649                         update_data = grab_updates(updpath)
8650                 else:
8651                         update_data = grab_updates(updpath, prev_mtimes)
8652         except portage.exception.DirectoryNotFound:
8653                 writemsg(_("--- 'profiles/updates' is empty or "
8654                         "not available. Empty portage tree?\n"), noiselevel=1)
8655                 return
8656         myupd = None
8657         if len(update_data) > 0:
8658                 do_upgrade_packagesmessage = 0
8659                 myupd = []
8660                 timestamps = {}
8661                 for mykey, mystat, mycontent in update_data:
8662                         writemsg_stdout("\n\n")
8663                         writemsg_stdout(colorize("GOOD",
8664                                 _("Performing Global Updates: "))+bold(mykey)+"\n")
8665                         writemsg_stdout(_("(Could take a couple of minutes if you have a lot of binary packages.)\n"))
8666                         writemsg_stdout(_("  %s='update pass'  %s='binary update'  "
8667                                 "%s='/var/db update'  %s='/var/db move'\n"
8668                                 "  %s='/var/db SLOT move'  %s='binary move'  "
8669                                 "%s='binary SLOT move'\n  %s='update /etc/portage/package.*'\n") % \
8670                                 (bold("."), bold("*"), bold("#"), bold("@"), bold("s"), bold("%"), bold("S"), bold("p")))
8671                         valid_updates, errors = parse_updates(mycontent)
8672                         myupd.extend(valid_updates)
8673                         writemsg_stdout(len(valid_updates) * "." + "\n")
8674                         if len(errors) == 0:
8675                                 # Update our internal mtime since we
8676                                 # processed all of our directives.
8677                                 timestamps[mykey] = long(mystat.st_mtime)
8678                         else:
8679                                 for msg in errors:
8680                                         writemsg("%s\n" % msg, noiselevel=-1)
8681
8682                 world_file = os.path.join(root, WORLD_FILE)
8683                 world_list = grabfile(world_file)
8684                 world_modified = False
8685                 for update_cmd in myupd:
8686                         for pos, atom in enumerate(world_list):
8687                                 new_atom = update_dbentry(update_cmd, atom)
8688                                 if atom != new_atom:
8689                                         world_list[pos] = new_atom
8690                                         world_modified = True
8691                 if world_modified:
8692                         world_list.sort()
8693                         write_atomic(world_file,
8694                                 "".join("%s\n" % (x,) for x in world_list))
8695
8696                 update_config_files("/",
8697                         mysettings.get("CONFIG_PROTECT","").split(),
8698                         mysettings.get("CONFIG_PROTECT_MASK","").split(),
8699                         myupd)
8700
8701                 trees["/"]["bintree"] = binarytree("/", mysettings["PKGDIR"],
8702                         settings=mysettings)
8703                 vardb = trees["/"]["vartree"].dbapi
8704                 bindb = trees["/"]["bintree"].dbapi
8705                 if not os.access(bindb.bintree.pkgdir, os.W_OK):
8706                         bindb = None
8707                 for update_cmd in myupd:
8708                         if update_cmd[0] == "move":
8709                                 moves = vardb.move_ent(update_cmd)
8710                                 if moves:
8711                                         writemsg_stdout(moves * "@")
8712                                 if bindb:
8713                                         moves = bindb.move_ent(update_cmd)
8714                                         if moves:
8715                                                 writemsg_stdout(moves * "%")
8716                         elif update_cmd[0] == "slotmove":
8717                                 moves = vardb.move_slot_ent(update_cmd)
8718                                 if moves:
8719                                         writemsg_stdout(moves * "s")
8720                                 if bindb:
8721                                         moves = bindb.move_slot_ent(update_cmd)
8722                                         if moves:
8723                                                 writemsg_stdout(moves * "S")
8724
8725                 # The above global updates proceed quickly, so they
8726                 # are considered a single mtimedb transaction.
8727                 if len(timestamps) > 0:
8728                         # We do not update the mtime in the mtimedb
8729                         # until after _all_ of the above updates have
8730                         # been processed because the mtimedb will
8731                         # automatically commit when killed by ctrl C.
8732                         for mykey, mtime in timestamps.items():
8733                                 prev_mtimes[mykey] = mtime
8734
8735                 # We gotta do the brute force updates for these now.
8736                 if mysettings["PORTAGE_CALLER"] == "fixpackages" or \
8737                 "fixpackages" in mysettings.features:
8738                         def onUpdate(maxval, curval):
8739                                 if curval > 0:
8740                                         writemsg_stdout("#")
8741                         vardb.update_ents(myupd, onUpdate=onUpdate)
8742                         if bindb:
8743                                 def onUpdate(maxval, curval):
8744                                         if curval > 0:
8745                                                 writemsg_stdout("*")
8746                                 bindb.update_ents(myupd, onUpdate=onUpdate)
8747                 else:
8748                         do_upgrade_packagesmessage = 1
8749
8750                 # Update progress above is indicated by characters written to stdout so
8751                 # we print a couple new lines here to separate the progress output from
8752                 # what follows.
8753                 print()
8754                 print()
8755
8756                 if do_upgrade_packagesmessage and bindb and \
8757                         bindb.cpv_all():
8758                         writemsg_stdout(_(" ** Skipping packages. Run 'fixpackages' or set it in FEATURES to fix the tbz2's in the packages directory.\n"))
8759                         writemsg_stdout(bold(_("Note: This can take a very long time.")))
8760                         writemsg_stdout("\n")
8761         if myupd:
8762                 return myupd
8763
8764 #continue setting up other trees
8765
8766 class MtimeDB(dict):
8767         def __init__(self, filename):
8768                 dict.__init__(self)
8769                 self.filename = filename
8770                 self._load(filename)
8771
8772         def _load(self, filename):
8773                 try:
8774                         f = open(_unicode_encode(filename), 'rb')
8775                         mypickle = pickle.Unpickler(f)
8776                         try:
8777                                 mypickle.find_global = None
8778                         except AttributeError:
8779                                 # TODO: If py3k, override Unpickler.find_class().
8780                                 pass
8781                         d = mypickle.load()
8782                         f.close()
8783                         del f
8784                 except (IOError, OSError, EOFError, ValueError, pickle.UnpicklingError) as e:
8785                         if isinstance(e, pickle.UnpicklingError):
8786                                 writemsg(_("!!! Error loading '%s': %s\n") % \
8787                                         (filename, str(e)), noiselevel=-1)
8788                         del e
8789                         d = {}
8790
8791                 if "old" in d:
8792                         d["updates"] = d["old"]
8793                         del d["old"]
8794                 if "cur" in d:
8795                         del d["cur"]
8796
8797                 d.setdefault("starttime", 0)
8798                 d.setdefault("version", "")
8799                 for k in ("info", "ldpath", "updates"):
8800                         d.setdefault(k, {})
8801
8802                 mtimedbkeys = set(("info", "ldpath", "resume", "resume_backup",
8803                         "starttime", "updates", "version"))
8804
8805                 for k in list(d):
8806                         if k not in mtimedbkeys:
8807                                 writemsg(_("Deleting invalid mtimedb key: %s\n") % str(k))
8808                                 del d[k]
8809                 self.update(d)
8810                 self._clean_data = copy.deepcopy(d)
8811
8812         def commit(self):
8813                 if not self.filename:
8814                         return
8815                 d = {}
8816                 d.update(self)
8817                 # Only commit if the internal state has changed.
8818                 if d != self._clean_data:
8819                         commit_mtimedb(mydict=d, filename=self.filename)
8820                         self._clean_data = copy.deepcopy(d)
8821
8822 def create_trees(config_root=None, target_root=None, trees=None):
8823         if trees is None:
8824                 trees = {}
8825         else:
8826                 # clean up any existing portdbapi instances
8827                 for myroot in trees:
8828                         portdb = trees[myroot]["porttree"].dbapi
8829                         portdb.close_caches()
8830                         portdbapi.portdbapi_instances.remove(portdb)
8831                         del trees[myroot]["porttree"], myroot, portdb
8832
8833         settings = config(config_root=config_root, target_root=target_root,
8834                 config_incrementals=portage.const.INCREMENTALS)
8835         settings.lock()
8836
8837         myroots = [(settings["ROOT"], settings)]
8838         if settings["ROOT"] != "/":
8839
8840                 # When ROOT != "/" we only want overrides from the calling
8841                 # environment to apply to the config that's associated
8842                 # with ROOT != "/", so pass an empty dict for the env parameter.
8843                 settings = config(config_root=None, target_root="/", env={})
8844                 settings.lock()
8845                 myroots.append((settings["ROOT"], settings))
8846
8847         for myroot, mysettings in myroots:
8848                 trees[myroot] = portage.util.LazyItemsDict(trees.get(myroot, {}))
8849                 trees[myroot].addLazySingleton("virtuals", mysettings.getvirtuals, myroot)
8850                 trees[myroot].addLazySingleton(
8851                         "vartree", vartree, myroot, categories=mysettings.categories,
8852                                 settings=mysettings)
8853                 trees[myroot].addLazySingleton("porttree",
8854                         portagetree, myroot, settings=mysettings)
8855                 trees[myroot].addLazySingleton("bintree",
8856                         binarytree, myroot, mysettings["PKGDIR"], settings=mysettings)
8857         return trees
8858
8859 class _LegacyGlobalProxy(proxy.objectproxy.ObjectProxy):
8860         """
8861         Instances of these serve as proxies to global variables
8862         that are initialized on demand.
8863         """
8864
8865         __slots__ = ('_name',)
8866
8867         def __init__(self, name):
8868                 proxy.objectproxy.ObjectProxy.__init__(self)
8869                 object.__setattr__(self, '_name', name)
8870
8871         def _get_target(self):
8872                 init_legacy_globals()
8873                 name = object.__getattribute__(self, '_name')
8874                 return globals()[name]
8875
8876 class _PortdbProxy(proxy.objectproxy.ObjectProxy):
8877         """
8878         The portdb is initialized separately from the rest
8879         of the variables, since sometimes the other variables
8880         are needed while the portdb is not.
8881         """
8882
8883         __slots__ = ()
8884
8885         def _get_target(self):
8886                 init_legacy_globals()
8887                 global db, portdb, root, _portdb_initialized
8888                 if not _portdb_initialized:
8889                         portdb = db[root]["porttree"].dbapi
8890                         _portdb_initialized = True
8891                 return portdb
8892
8893 class _MtimedbProxy(proxy.objectproxy.ObjectProxy):
8894         """
8895         The mtimedb is independent from the portdb and other globals.
8896         """
8897
8898         __slots__ = ('_name',)
8899
8900         def __init__(self, name):
8901                 proxy.objectproxy.ObjectProxy.__init__(self)
8902                 object.__setattr__(self, '_name', name)
8903
8904         def _get_target(self):
8905                 global mtimedb, mtimedbfile, _mtimedb_initialized
8906                 if not _mtimedb_initialized:
8907                         mtimedbfile = os.path.join(os.path.sep,
8908                                 CACHE_PATH, "mtimedb")
8909                         mtimedb = MtimeDB(mtimedbfile)
8910                         _mtimedb_initialized = True
8911                 name = object.__getattribute__(self, '_name')
8912                 return globals()[name]
8913
8914 _legacy_global_var_names = ("archlist", "db", "features",
8915         "groups", "mtimedb", "mtimedbfile", "pkglines",
8916         "portdb", "profiledir", "root", "selinux_enabled",
8917         "settings", "thirdpartymirrors", "usedefaults")
8918
8919 def _disable_legacy_globals():
8920         """
8921         This deletes the ObjectProxy instances that are used
8922         for lazy initialization of legacy global variables.
8923         The purpose of deleting them is to prevent new code
8924         from referencing these deprecated variables.
8925         """
8926         global _legacy_global_var_names
8927         for k in _legacy_global_var_names:
8928                 globals().pop(k, None)
8929
8930 # Initialization of legacy globals.  No functions/classes below this point
8931 # please!  When the above functions and classes become independent of the
8932 # below global variables, it will be possible to make the below code
8933 # conditional on a backward compatibility flag (backward compatibility could
8934 # be disabled via an environment variable, for example).  This will enable new
8935 # code that is aware of this flag to import portage without the unnecessary
8936 # overhead (and other issues!) of initializing the legacy globals.
8937
8938 def init_legacy_globals():
8939         global _globals_initialized
8940         if _globals_initialized:
8941                 return
8942         _globals_initialized = True
8943
8944         global db, settings, root, portdb, selinux_enabled, mtimedbfile, mtimedb, \
8945         archlist, features, groups, pkglines, thirdpartymirrors, usedefaults, \
8946         profiledir, flushmtimedb
8947
8948         # Portage needs to ensure a sane umask for the files it creates.
8949         os.umask(0o22)
8950
8951         kwargs = {}
8952         for k, envvar in (("config_root", "PORTAGE_CONFIGROOT"), ("target_root", "ROOT")):
8953                 kwargs[k] = os.environ.get(envvar, "/")
8954
8955         global _initializing_globals
8956         _initializing_globals = True
8957         db = create_trees(**kwargs)
8958         del _initializing_globals
8959
8960         settings = db["/"]["vartree"].settings
8961
8962         for myroot in db:
8963                 if myroot != "/":
8964                         settings = db[myroot]["vartree"].settings
8965                         break
8966
8967         root = settings["ROOT"]
8968         output._init(config_root=settings['PORTAGE_CONFIGROOT'])
8969
8970         # ========================================================================
8971         # COMPATIBILITY
8972         # These attributes should not be used
8973         # within Portage under any circumstances.
8974         # ========================================================================
8975         archlist    = settings.archlist()
8976         features    = settings.features
8977         groups      = settings["ACCEPT_KEYWORDS"].split()
8978         pkglines    = settings.packages
8979         selinux_enabled   = settings.selinux_enabled()
8980         thirdpartymirrors = settings.thirdpartymirrors()
8981         usedefaults       = settings.use_defs
8982         profiledir  = os.path.join(settings["PORTAGE_CONFIGROOT"], PROFILE_PATH)
8983         if not os.path.isdir(profiledir):
8984                 profiledir = None
8985         def flushmtimedb(record):
8986                 writemsg("portage.flushmtimedb() is DEPRECATED\n")
8987         # ========================================================================
8988         # COMPATIBILITY
8989         # These attributes should not be used
8990         # within Portage under any circumstances.
8991         # ========================================================================
8992
8993 if True:
8994
8995         _mtimedb_initialized = False
8996         mtimedb     = _MtimedbProxy("mtimedb")
8997         mtimedbfile = _MtimedbProxy("mtimedbfile")
8998
8999         _portdb_initialized  = False
9000         portdb = _PortdbProxy()
9001
9002         _globals_initialized = False
9003
9004         for k in ("db", "settings", "root", "selinux_enabled",
9005                 "archlist", "features", "groups",
9006                 "pkglines", "thirdpartymirrors", "usedefaults", "profiledir",
9007                 "flushmtimedb"):
9008                 globals()[k] = _LegacyGlobalProxy(k)
9009
9010 # Clear the cache
9011 dircache={}
9012
9013 # ============================================================================
9014 # ============================================================================
9015