1 # portage.py -- core Portage functionality
2 # Copyright 1998-2009 Gentoo Foundation
3 # Distributed under the terms of the GNU General Public License v2
4 # $Id$
5
6 from __future__ import print_function
7
8 VERSION="$Rev$"[6:-2] + "-svn"
9
10 # ===========================================================================
11 # START OF IMPORTS -- START OF IMPORTS -- START OF IMPORTS -- START OF IMPORT
12 # ===========================================================================
13
14 try:
15         import sys
16         import codecs
17         import copy
18         import errno
19         if not hasattr(errno, 'ESTALE'):
20                 # ESTALE may not be defined on some systems, such as Interix.
21                 errno.ESTALE = -1
22         import logging
23         import re
24         import time
25         import types
26         try:
27                 import cPickle as pickle
28         except ImportError:
29                 import pickle
30
31         import stat
32         try:
33                 from subprocess import getstatusoutput as subprocess_getstatusoutput
34         except ImportError:
35                 from commands import getstatusoutput as subprocess_getstatusoutput
36         from time import sleep
37         from random import shuffle
38         from itertools import chain
39         import platform
40         import warnings
41
42         # Temporarily delete these imports, to ensure that only the
43         # wrapped versions are imported by portage internals.
44         import os
45         del os
46         import shutil
47         del shutil
48
49 except ImportError as e:
50         sys.stderr.write("\n\n")
51         sys.stderr.write("!!! Failed to complete python imports. These are internal modules for\n")
52         sys.stderr.write("!!! python and failure here indicates that you have a problem with python\n")
53         sys.stderr.write("!!! itself and thus portage is not able to continue processing.\n\n")
54
55         sys.stderr.write("!!! You might consider starting python with verbose flags to see what has\n")
56         sys.stderr.write("!!! gone wrong. Here is the information we got for this exception:\n")
57         sys.stderr.write("    "+str(e)+"\n\n")
58         raise
59
60 try:
61         from portage.cache.cache_errors import CacheError
62         import portage.proxy.lazyimport
63         import portage.proxy as proxy
64         proxy.lazyimport.lazyimport(globals(),
65                 'portage.checksum',
66                 'portage.checksum:perform_checksum,perform_md5,prelink_capable',
67                 'portage.cvstree',
68                 'portage.data',
69                 'portage.data:lchown,ostype,portage_gid,portage_uid,secpass,' + \
70                         'uid,userland,userpriv_groups,wheelgid',
71                 'portage.dep',
72                 'portage.dep:best_match_to_list,dep_getcpv,dep_getkey,' + \
73                         'get_operator,isjustname,isspecific,isvalidatom,' + \
74                         'match_from_list,match_to_list',
75                 'portage.eclass_cache',
76                 'portage.env.loaders',
77                 'portage.exception',
78                 'portage.getbinpkg',
79                 'portage.locks',
80                 'portage.locks:lockdir,lockfile,unlockdir,unlockfile',
81                 'portage.mail',
82                 'portage.output',
83                 'portage.output:bold,colorize',
84                 'portage.process',
85                 'portage.process:atexit_register,run_exitfuncs',
86                 'portage.update:dep_transform,fixdbentries,grab_updates,' + \
87                         'parse_updates,update_config_files,update_dbentries,' + \
88                         'update_dbentry',
89                 'portage.util',
90                 'portage.util:atomic_ofstream,apply_secpass_permissions,' + \
91                         'apply_recursive_permissions,dump_traceback,getconfig,' + \
92                         'grabdict,grabdict_package,grabfile,grabfile_package,' + \
93                         'map_dictlist_vals,new_protect_filename,normalize_path,' + \
94                         'pickle_read,pickle_write,stack_dictlist,stack_dicts,' + \
95                         'stack_lists,unique_array,varexpand,writedict,writemsg,' + \
96                         'writemsg_stdout,write_atomic',
97                 'portage.versions',
98                 'portage.versions:best,catpkgsplit,catsplit,endversion_keys,' + \
99                         'suffix_value@endversion,pkgcmp,pkgsplit,vercmp,ververify',
100                 'portage.xpak',
101         )
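        # Note on the lazyimport entries above (descriptive comment): a bare
        # 'portage.foo' registers the module itself for lazy import, a
        # 'portage.foo:bar,baz' entry registers individual attributes, and an
        # 'attr@alias' pair (e.g. 'suffix_value@endversion') appears to bind
        # the attribute under the alias in this namespace.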
102
103         import portage.const
104         from portage.const import VDB_PATH, PRIVATE_PATH, CACHE_PATH, DEPCACHE_PATH, \
105                 USER_CONFIG_PATH, MODULES_FILE_PATH, CUSTOM_PROFILE_PATH, PORTAGE_BASE_PATH, \
106                 PORTAGE_BIN_PATH, PORTAGE_PYM_PATH, PROFILE_PATH, LOCALE_DATA_PATH, \
107                 EBUILD_SH_BINARY, SANDBOX_BINARY, BASH_BINARY, \
108                 MOVE_BINARY, PRELINK_BINARY, WORLD_FILE, MAKE_CONF_FILE, MAKE_DEFAULTS_FILE, \
109                 DEPRECATED_PROFILE_FILE, USER_VIRTUALS_FILE, EBUILD_SH_ENV_FILE, \
110                 INVALID_ENV_FILE, CUSTOM_MIRRORS_FILE, CONFIG_MEMORY_FILE,\
111                 INCREMENTALS, EAPI, MISC_SH_BINARY, REPO_NAME_LOC, REPO_NAME_FILE
112
113         from portage.localization import _
114
115 except ImportError as e:
116         sys.stderr.write("\n\n")
117         sys.stderr.write("!!! Failed to complete portage imports. There are internal modules for\n")
118         sys.stderr.write("!!! portage and failure here indicates that you have a problem with your\n")
119         sys.stderr.write("!!! installation of portage. Please try a rescue portage located in the\n")
120         sys.stderr.write("!!! portage tree under '/usr/portage/sys-apps/portage/files/' (default).\n")
121         sys.stderr.write("!!! There is a README.RESCUE file that details the steps required to perform\n")
122         sys.stderr.write("!!! a recovery of portage.\n")
123         sys.stderr.write("    "+str(e)+"\n\n")
124         raise
125
126 if sys.hexversion >= 0x3000000:
127         basestring = str
128         long = int
129
130 # Assume utf_8 fs encoding everywhere except in merge code, where the
131 # user's locale is respected.
132 _encodings = {
133         'content'                : 'utf_8',
134         'fs'                     : 'utf_8',
135         'merge'                  : sys.getfilesystemencoding(),
136         'repo.content'           : 'utf_8',
137         'stdio'                  : 'utf_8',
138 }
139
140 # This can happen if python is built with USE=build (stage 1).
141 if _encodings['merge'] is None:
142         _encodings['merge'] = 'ascii'
143
144 if sys.hexversion >= 0x3000000:
145         def _unicode_encode(s, encoding=_encodings['content'], errors='backslashreplace'):
146                 if isinstance(s, str):
147                         s = s.encode(encoding, errors)
148                 return s
149
150         def _unicode_decode(s, encoding=_encodings['content'], errors='replace'):
151                 if isinstance(s, bytes):
152                         s = str(s, encoding=encoding, errors=errors)
153                 return s
154 else:
155         def _unicode_encode(s, encoding=_encodings['content'], errors='backslashreplace'):
156                 if isinstance(s, unicode):
157                         s = s.encode(encoding, errors)
158                 return s
159
160         def _unicode_decode(s, encoding=_encodings['content'], errors='replace'):
161                 if isinstance(s, bytes):
162                         s = unicode(s, encoding=encoding, errors=errors)
163                 return s
164
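# Illustrative behavior of _unicode_encode/_unicode_decode above (comment
# only, not original code), assuming the default 'content' encoding:
#     _unicode_encode(u'fo\u00f6')        # -> b'fo\xc3\xb6'
#     _unicode_decode(b'fo\xc3\xb6')      # -> u'fo\u00f6'
# Values already of the target type are returned unchanged.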
165 class _unicode_func_wrapper(object):
166         """
167         Wraps a function, converts arguments from unicode to bytes,
168         and converts return values from bytes to unicode. Function calls
169         will raise UnicodeEncodeError if an argument fails to be
170         encoded with the required encoding. Return values that
171         are single strings are decoded with errors='replace'. Return 
172         values that are lists of strings are decoded with errors='strict'
173         and elements that fail to be decoded are omitted from the returned
174         list.
175         """
176         __slots__ = ('_func', '_encoding')
177
178         def __init__(self, func, encoding=_encodings['fs']):
179                 self._func = func
180                 self._encoding = encoding
181
182         def __call__(self, *args, **kwargs):
183
184                 encoding = self._encoding
185                 wrapped_args = [_unicode_encode(x, encoding=encoding, errors='strict')
186                         for x in args]
187                 if kwargs:
188                         wrapped_kwargs = dict(
189                                 (k, _unicode_encode(v, encoding=encoding, errors='strict'))
190                                 for k, v in kwargs.items())
191                 else:
192                         wrapped_kwargs = {}
193
194                 rval = self._func(*wrapped_args, **wrapped_kwargs)
195
196                 if isinstance(rval, (list, tuple)):
197                         decoded_rval = []
198                         for x in rval:
199                                 try:
200                                         x = _unicode_decode(x, encoding=encoding, errors='strict')
201                                 except UnicodeDecodeError:
202                                         pass
203                                 else:
204                                         decoded_rval.append(x)
205
206                         if isinstance(rval, tuple):
207                                 rval = tuple(decoded_rval)
208                         else:
209                                 rval = decoded_rval
210                 else:
211                         rval = _unicode_decode(rval, encoding=encoding, errors='replace')
212
213                 return rval
214
215 class _unicode_module_wrapper(object):
216         """
217         Wraps a module and wraps all functions with _unicode_func_wrapper.
218         """
219         __slots__ = ('_mod', '_encoding', '_overrides', '_cache')
220
221         def __init__(self, mod, encoding=_encodings['fs'], overrides=None, cache=True):
222                 object.__setattr__(self, '_mod', mod)
223                 object.__setattr__(self, '_encoding', encoding)
224                 object.__setattr__(self, '_overrides', overrides)
225                 if cache:
226                         cache = {}
227                 else:
228                         cache = None
229                 object.__setattr__(self, '_cache', cache)
230
231         def __getattribute__(self, attr):
232                 cache = object.__getattribute__(self, '_cache')
233                 if cache is not None:
234                         result = cache.get(attr)
235                         if result is not None:
236                                 return result
237                 result = getattr(object.__getattribute__(self, '_mod'), attr)
238                 encoding = object.__getattribute__(self, '_encoding')
239                 overrides = object.__getattribute__(self, '_overrides')
240                 override = None
241                 if overrides is not None:
242                         override = overrides.get(id(result))
243                 if override is not None:
244                         result = override
245                 elif isinstance(result, type):
246                         pass
247                 elif type(result) is types.ModuleType:
248                         result = _unicode_module_wrapper(result,
249                                 encoding=encoding, overrides=overrides)
250                 elif hasattr(result, '__call__'):
251                         result = _unicode_func_wrapper(result, encoding=encoding)
252                 if cache is not None:
253                         cache[attr] = result
254                 return result
255
256 import os as _os
257 _os_overrides = {
258         id(_os.fdopen)        : _os.fdopen,
259         id(_os.popen)         : _os.popen,
260         id(_os.read)          : _os.read,
261         id(_os.system)        : _os.system,
262 }
263
264 if hasattr(_os, 'statvfs'):
265         _os_overrides[id(_os.statvfs)] = _os.statvfs
266
267 os = _unicode_module_wrapper(_os, overrides=_os_overrides,
268         encoding=_encodings['fs'])
269 _os_merge = _unicode_module_wrapper(_os,
270         encoding=_encodings['merge'], overrides=_os_overrides)
271
272 import shutil as _shutil
273 shutil = _unicode_module_wrapper(_shutil, encoding=_encodings['fs'])
274
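# Illustrative effect of the wrappers above (descriptive comment): a call
# such as os.listdir(u'/var/db/pkg') has its unicode argument encoded with
# _encodings['fs'] before reaching the real os module, and the returned
# names are decoded back to unicode; entries that cannot be decoded are
# omitted from list results, as documented in _unicode_func_wrapper.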
275 # Imports below this point rely on the above unicode wrapper definitions.
276 _selinux = None
277 selinux = None
278 _selinux_merge = None
279 try:
280         import portage._selinux
281         selinux = _unicode_module_wrapper(_selinux,
282                 encoding=_encodings['fs'])
283         _selinux_merge = _unicode_module_wrapper(_selinux,
284                 encoding=_encodings['merge'])
285 except OSError as e:
286         sys.stderr.write("!!! SELinux not loaded: %s\n" % str(e))
287         del e
288 except ImportError:
289         pass
290
291 from portage.manifest import Manifest
292
293 # ===========================================================================
294 # END OF IMPORTS -- END OF IMPORTS -- END OF IMPORTS -- END OF IMPORTS -- END
295 # ===========================================================================
296
297 def _gen_missing_encodings(missing_encodings):
298
299         encodings = {}
300
301         if 'ascii' in missing_encodings:
302
303                 class AsciiIncrementalEncoder(codecs.IncrementalEncoder):
304                         def encode(self, input, final=False):
305                                 return codecs.ascii_encode(input, self.errors)[0]
306
307                 class AsciiIncrementalDecoder(codecs.IncrementalDecoder):
308                         def decode(self, input, final=False):
309                                 return codecs.ascii_decode(input, self.errors)[0]
310
311                 class AsciiStreamWriter(codecs.StreamWriter):
312                         encode = codecs.ascii_encode
313
314                 class AsciiStreamReader(codecs.StreamReader):
315                         decode = codecs.ascii_decode
316
317                 codec_info =  codecs.CodecInfo(
318                         name='ascii',
319                         encode=codecs.ascii_encode,
320                         decode=codecs.ascii_decode,
321                         incrementalencoder=AsciiIncrementalEncoder,
322                         incrementaldecoder=AsciiIncrementalDecoder,
323                         streamwriter=AsciiStreamWriter,
324                         streamreader=AsciiStreamReader,
325                 )
326
327                 for alias in ('ascii', '646', 'ansi_x3.4_1968', 'ansi_x3_4_1968',
328                         'ansi_x3.4_1986', 'cp367', 'csascii', 'ibm367', 'iso646_us',
329                         'iso_646.irv_1991', 'iso_ir_6', 'us', 'us_ascii'):
330                         encodings[alias] = codec_info
331
332         if 'utf_8' in missing_encodings:
333
334                 def utf8decode(input, errors='strict'):
335                         return codecs.utf_8_decode(input, errors, True)
336
337                 class Utf8IncrementalEncoder(codecs.IncrementalEncoder):
338                         def encode(self, input, final=False):
339                                 return codecs.utf_8_encode(input, self.errors)[0]
340
341                 class Utf8IncrementalDecoder(codecs.BufferedIncrementalDecoder):
342                         _buffer_decode = codecs.utf_8_decode
343
344                 class Utf8StreamWriter(codecs.StreamWriter):
345                         encode = codecs.utf_8_encode
346
347                 class Utf8StreamReader(codecs.StreamReader):
348                         decode = codecs.utf_8_decode
349
350                 codec_info = codecs.CodecInfo(
351                         name='utf-8',
352                         encode=codecs.utf_8_encode,
353                         decode=utf8decode,
354                         incrementalencoder=Utf8IncrementalEncoder,
355                         incrementaldecoder=Utf8IncrementalDecoder,
356                         streamreader=Utf8StreamReader,
357                         streamwriter=Utf8StreamWriter,
358                 )
359
360                 for alias in ('utf_8', 'u8', 'utf', 'utf8', 'utf8_ucs2', 'utf8_ucs4'):
361                         encodings[alias] = codec_info
362
363         return encodings
364
365 def _ensure_default_encoding():
366         """
367         The python that's inside stage 1 or 2 is built with a minimal
368         configuration which does not include the /usr/lib/pythonX.Y/encodings
369         directory. This results in an error like the following:
370
371           LookupError: no codec search functions registered: can't find encoding
372
373         In order to solve this problem, detect it early and manually register
374         a search function for the ascii and utf_8 codecs. Starting with python-3.0
375         this problem is more noticeable because of stricter handling of encoding
376         and decoding between strings of characters and bytes.
377         """
378
379         default_fallback = 'utf_8'
380         default_encoding = sys.getdefaultencoding().lower().replace('-', '_')
381         filesystem_encoding = _encodings['merge'].lower().replace('-', '_')
382         required_encodings = set(['ascii', 'utf_8'])
383         required_encodings.add(default_encoding)
384         required_encodings.add(filesystem_encoding)
385         missing_encodings = set()
386         for codec_name in required_encodings:
387                 try:
388                         codecs.lookup(codec_name)
389                 except LookupError:
390                         missing_encodings.add(codec_name)
391
392         if not missing_encodings:
393                 return
394
395         encodings = _gen_missing_encodings(missing_encodings)
396
397         if default_encoding in missing_encodings and \
398                 default_encoding not in encodings:
399                 # Make the fallback codec correspond to whatever name happens
400                 # to be returned by sys.getdefaultencoding().
401
402                 try:
403                         encodings[default_encoding] = codecs.lookup(default_fallback)
404                 except LookupError:
405                         encodings[default_encoding] = encodings[default_fallback]
406
407         if filesystem_encoding in missing_encodings and \
408                 filesystem_encoding not in encodings:
409                 # Make the fallback codec correspond to whatever name happens
410                 # to be returned by sys.getfilesystemencoding().
411
412                 try:
413                         encodings[filesystem_encoding] = codecs.lookup(default_fallback)
414                 except LookupError:
415                         encodings[filesystem_encoding] = encodings[default_fallback]
416
417         def search_function(name):
418                 name = name.lower()
419                 name = name.replace('-', '_')
420                 codec_info = encodings.get(name)
421                 if codec_info is not None:
422                         return codecs.CodecInfo(
423                                 name=codec_info.name,
424                                 encode=codec_info.encode,
425                                 decode=codec_info.decode,
426                                 incrementalencoder=codec_info.incrementalencoder,
427                                 incrementaldecoder=codec_info.incrementaldecoder,
428                                 streamreader=codec_info.streamreader,
429                                 streamwriter=codec_info.streamwriter,
430                         )
431                 return None
432
433         codecs.register(search_function)
434
435         del codec_name, default_encoding, default_fallback, \
436                 filesystem_encoding, missing_encodings, \
437                 required_encodings, search_function
438
439 # Do this ASAP since writemsg() might not work without it.
440 _ensure_default_encoding()
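# After the call above, codecs.lookup() is expected to succeed for 'ascii',
# 'utf_8', the default encoding and the filesystem encoding, even in a
# minimal stage 1/2 python that lacks the encodings package.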
441
442 def _shell_quote(s):
443         """
444         Quote a string in double-quotes and use backslashes to
445         escape any backslashes, double-quotes, dollar signs, or
446         backquotes in the string.
447         """
448         for letter in "\\\"$`":
449                 if letter in s:
450                         s = s.replace(letter, "\\" + letter)
451         return "\"%s\"" % s
452
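# Example of _shell_quote (illustrative):
#     _shell_quote('dir with "quotes" and $vars')
#     # -> "dir with \"quotes\" and \$vars"   (including the double quotes)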
453 bsd_chflags = None
454
455 if platform.system() in ('FreeBSD',):
456
457         class bsd_chflags(object):
458
459                 @classmethod
460                 def chflags(cls, path, flags, opts=""):
461                         cmd = 'chflags %s %o %s' % (opts, flags, _shell_quote(path))
462                         status, output = subprocess_getstatusoutput(cmd)
463                         if os.WIFEXITED(status) and os.WEXITSTATUS(status) == os.EX_OK:
464                                 return
465                         # Try to generate an ENOENT error if appropriate.
466                         if 'h' in opts:
467                                 _os_merge.lstat(path)
468                         else:
469                                 _os_merge.stat(path)
470                         # Make sure the binary exists.
471                         if not portage.process.find_binary('chflags'):
472                                 raise portage.exception.CommandNotFound('chflags')
473                         # Now we're not sure exactly why it failed or what
474                         # the real errno was, so just report EPERM.
475                         e = OSError(errno.EPERM, output)
476                         e.errno = errno.EPERM
477                         e.filename = path
478                         e.message = output
479                         raise e
480
481                 @classmethod
482                 def lchflags(cls, path, flags):
483                         return cls.chflags(path, flags, opts='-h')
484
485 def load_mod(name):
486         modname = ".".join(name.split(".")[:-1])
487         mod = __import__(modname)
488         components = name.split('.')
489         for comp in components[1:]:
490                 mod = getattr(mod, comp)
491         return mod
492
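# Example of load_mod (illustrative; any importable dotted name works):
#     load_mod("portage.util.writemsg") imports portage.util and returns
#     its writemsg attribute.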
493 def best_from_dict(key, top_dict, key_order, EmptyOnError=1, FullCopy=1, AllowEmpty=1):
494         for x in key_order:
495                 if x in top_dict and key in top_dict[x]:
496                         if FullCopy:
497                                 return copy.deepcopy(top_dict[x][key])
498                         else:
499                                 return top_dict[x][key]
500         if EmptyOnError:
501                 return ""
502         else:
503                 raise KeyError("Key not found in list; '%s'" % key)
504
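# Example of best_from_dict (illustrative values): the first dict in
# key_order that contains the key wins.
#     top_dict = {"env": {"CFLAGS": "-O2"}, "defaults": {"CFLAGS": "-O1"}}
#     best_from_dict("CFLAGS", top_dict, ["env", "defaults"])  # -> "-O2"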
505 def getcwd():
506         "this fixes situations where the current directory doesn't exist"
507         try:
508                 return os.getcwd()
509         except OSError: #dir doesn't exist
510                 os.chdir("/")
511                 return "/"
512 getcwd()
513
514 def abssymlink(symlink):
515         "This reads a symlink, resolves it relative to its own directory, and returns the absolute path."
516         mylink=os.readlink(symlink)
517         if mylink[0] != '/':
518                 mydir=os.path.dirname(symlink)
519                 mylink=mydir+"/"+mylink
520         return os.path.normpath(mylink)
521
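# Example of abssymlink (illustrative): if /usr/lib/foo.so is a symlink
# pointing to "../lib64/foo.so", abssymlink("/usr/lib/foo.so") returns
# "/usr/lib64/foo.so".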
522 dircache = {}
523 cacheHit=0
524 cacheMiss=0
525 cacheStale=0
526 def cacheddir(my_original_path, ignorecvs, ignorelist, EmptyOnError, followSymlinks=True):
527         global cacheHit,cacheMiss,cacheStale
528         mypath = normalize_path(my_original_path)
529         if mypath in dircache:
530                 cacheHit += 1
531                 cached_mtime, list, ftype = dircache[mypath]
532         else:
533                 cacheMiss += 1
534                 cached_mtime, list, ftype = -1, [], []
535         try:
536                 pathstat = os.stat(mypath)
537                 if stat.S_ISDIR(pathstat[stat.ST_MODE]):
538                         mtime = pathstat.st_mtime
539                 else:
540                         raise portage.exception.DirectoryNotFound(mypath)
541         except EnvironmentError as e:
542                 if e.errno == portage.exception.PermissionDenied.errno:
543                         raise portage.exception.PermissionDenied(mypath)
544                 del e
545                 if EmptyOnError:
546                         return [], []
547                 return None, None
548         except portage.exception.PortageException:
549                 if EmptyOnError:
550                         return [], []
551                 return None, None
552         # Python returns mtime in seconds, so if it was changed in the last few seconds, it could be invalid
553         if mtime != cached_mtime or time.time() - mtime < 4:
554                 if mypath in dircache:
555                         cacheStale += 1
556                 try:
557                         list = os.listdir(mypath)
558                 except EnvironmentError as e:
559                         if e.errno != errno.EACCES:
560                                 raise
561                         del e
562                         raise portage.exception.PermissionDenied(mypath)
563                 ftype = []
564                 for x in list:
565                         try:
566                                 if followSymlinks:
567                                         pathstat = os.stat(mypath+"/"+x)
568                                 else:
569                                         pathstat = os.lstat(mypath+"/"+x)
570
571                                 if stat.S_ISREG(pathstat[stat.ST_MODE]):
572                                         ftype.append(0)
573                                 elif stat.S_ISDIR(pathstat[stat.ST_MODE]):
574                                         ftype.append(1)
575                                 elif stat.S_ISLNK(pathstat[stat.ST_MODE]):
576                                         ftype.append(2)
577                                 else:
578                                         ftype.append(3)
579                         except (IOError, OSError):
580                                 ftype.append(3)
581                 dircache[mypath] = mtime, list, ftype
582
583         ret_list = []
584         ret_ftype = []
585         for x in range(0, len(list)):
586                 if list[x] in ignorelist:
587                         pass
588                 elif ignorecvs:
589                         if list[x][:2] != ".#":
590                                 ret_list.append(list[x])
591                                 ret_ftype.append(ftype[x])
592                 else:
593                         ret_list.append(list[x])
594                         ret_ftype.append(ftype[x])
595
596         writemsg("cacheddirStats: H:%d/M:%d/S:%d\n" % (cacheHit, cacheMiss, cacheStale),10)
597         return ret_list, ret_ftype
598
599 _ignorecvs_dirs = ('CVS', 'SCCS', '.svn', '.git')
600
601 def listdir(mypath, recursive=False, filesonly=False, ignorecvs=False, ignorelist=[], followSymlinks=True,
602         EmptyOnError=False, dirsonly=False):
603         """
604         Portage-specific implementation of os.listdir
605
606         @param mypath: Path whose contents you wish to list
607         @type mypath: String
608         @param recursive: Recursively scan directories contained within mypath
609         @type recursive: Boolean
610         @param filesonly: Only return files, not directories
611         @type filesonly: Boolean
612         @param ignorecvs: Ignore CVS directories ('CVS','SCCS','.svn','.git')
613         @type ignorecvs: Boolean
614         @param ignorelist: List of filenames/directories to exclude
615         @type ignorelist: List
616         @param followSymlinks: Follow Symlink'd files and directories
617         @type followSymlinks: Boolean
618         @param EmptyOnError: Return [] if an error occurs.
619         @type EmptyOnError: Boolean
620         @param dirsonly: Only return directories.
621         @type dirsonly: Boolean
622         @rtype: List
623         @returns: A list of files and directories (or just files or just directories) or an empty list.
624         """
625
626         list, ftype = cacheddir(mypath, ignorecvs, ignorelist, EmptyOnError, followSymlinks)
627
628         if list is None:
629                 list=[]
630         if ftype is None:
631                 ftype=[]
632
633         if not (filesonly or dirsonly or recursive):
634                 return list
635
636         if recursive:
637                 x=0
638                 while x<len(ftype):
639                         if ftype[x] == 1 and not \
640                                 (ignorecvs and os.path.basename(list[x]) in _ignorecvs_dirs):
641                                 l,f = cacheddir(mypath+"/"+list[x], ignorecvs, ignorelist, EmptyOnError,
642                                         followSymlinks)
643
644                                 l=l[:]
645                                 for y in range(0,len(l)):
646                                         l[y]=list[x]+"/"+l[y]
647                                 list=list+l
648                                 ftype=ftype+f
649                         x+=1
650         if filesonly:
651                 rlist=[]
652                 for x in range(0,len(ftype)):
653                         if ftype[x]==0:
654                                 rlist=rlist+[list[x]]
655         elif dirsonly:
656                 rlist = []
657                 for x in range(0, len(ftype)):
658                         if ftype[x] == 1:
659                                 rlist = rlist + [list[x]]       
660         else:
661                 rlist=list
662
663         return rlist
664
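# Illustrative listdir calls (hypothetical paths):
#     listdir("/etc/env.d")                              # files and dirs
#     listdir("/etc/env.d", filesonly=True)              # regular files only
#     listdir("/usr/portage", recursive=True, dirsonly=True, ignorecvs=True)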
665 def flatten(mytokens):
666         """Recursively flattens a nested list such as [1,[2,3]]
667         into a flat [1,2,3] list and returns it."""
668         newlist=[]
669         for x in mytokens:
670                 if isinstance(x, list):
671                         newlist.extend(flatten(x))
672                 else:
673                         newlist.append(x)
674         return newlist
675
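# Example of flatten (illustrative):
#     flatten([1, [2, [3, 4]], 5])  # -> [1, 2, 3, 4, 5]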
676 #beautiful directed graph object
677
678 class digraph(object):
679         def __init__(self):
680                 """Create an empty digraph"""
681                 
682                 # { node : ( { child : priority } , { parent : priority } ) }
683                 self.nodes = {}
684                 self.order = []
685
686         def add(self, node, parent, priority=0):
687                 """Adds the specified node with the specified parent.
688                 
689                 If the dep is a soft-dep and the node already has a hard
690                 relationship to the parent, the relationship is left as hard."""
691                 
692                 if node not in self.nodes:
693                         self.nodes[node] = ({}, {}, node)
694                         self.order.append(node)
695                 
696                 if not parent:
697                         return
698                 
699                 if parent not in self.nodes:
700                         self.nodes[parent] = ({}, {}, parent)
701                         self.order.append(parent)
702
703                 priorities = self.nodes[node][1].get(parent)
704                 if priorities is None:
705                         priorities = []
706                         self.nodes[node][1][parent] = priorities
707                         self.nodes[parent][0][node] = priorities
708                 priorities.append(priority)
709                 priorities.sort()
710
711         def remove(self, node):
712                 """Removes the specified node from the digraph, also removing
713                 any ties to other nodes in the digraph. Raises KeyError if the
714                 node doesn't exist."""
715                 
716                 if node not in self.nodes:
717                         raise KeyError(node)
718                 
719                 for parent in self.nodes[node][1]:
720                         del self.nodes[parent][0][node]
721                 for child in self.nodes[node][0]:
722                         del self.nodes[child][1][node]
723                 
724                 del self.nodes[node]
725                 self.order.remove(node)
726
727         def difference_update(self, t):
728                 """
729                 Remove all of the given nodes from the graph. This is more efficient
730                 than multiple calls to the remove() method.
731                 """
732                 if isinstance(t, (list, tuple)) or \
733                         not hasattr(t, "__contains__"):
734                         t = frozenset(t)
735                 order = []
736                 for node in self.order:
737                         if node not in t:
738                                 order.append(node)
739                                 continue
740                         for parent in self.nodes[node][1]:
741                                 del self.nodes[parent][0][node]
742                         for child in self.nodes[node][0]:
743                                 del self.nodes[child][1][node]
744                         del self.nodes[node]
745                 self.order = order
746
747         def remove_edge(self, child, parent):
748                 """
749                 Remove edge in the direction from child to parent. Note that it is
750                 possible for a remaining edge to exist in the opposite direction.
751                 Any endpoint vertices that become isolated will remain in the graph.
752                 """
753
754                 # Nothing should be modified when a KeyError is raised.
755                 for k in parent, child:
756                         if k not in self.nodes:
757                                 raise KeyError(k)
758
759                 # Make sure the edge exists.
760                 if child not in self.nodes[parent][0]:
761                         raise KeyError(child)
762                 if parent not in self.nodes[child][1]:
763                         raise KeyError(parent)
764
765                 # Remove the edge.
766                 del self.nodes[child][1][parent]
767                 del self.nodes[parent][0][child]
768
769         def __iter__(self):
770                 return iter(self.order)
771
772         def contains(self, node):
773                 """Checks if the digraph contains the given node"""
774                 return node in self.nodes
775
776         def get(self, key, default=None):
777                 node_data = self.nodes.get(key, self)
778                 if node_data is self:
779                         return default
780                 return node_data[2]
781
782         def all_nodes(self):
783                 """Return a list of all nodes in the graph"""
784                 return self.order[:]
785
786         def child_nodes(self, node, ignore_priority=None):
787                 """Return all children of the specified node"""
788                 if ignore_priority is None:
789                         return list(self.nodes[node][0])
790                 children = []
791                 if hasattr(ignore_priority, '__call__'):
792                         for child, priorities in self.nodes[node][0].items():
793                                 for priority in priorities:
794                                         if not ignore_priority(priority):
795                                                 children.append(child)
796                                                 break
797                 else:
798                         for child, priorities in self.nodes[node][0].items():
799                                 if ignore_priority < priorities[-1]:
800                                         children.append(child)
801                 return children
802
803         def parent_nodes(self, node, ignore_priority=None):
804                 """Return all parents of the specified node"""
805                 if ignore_priority is None:
806                         return list(self.nodes[node][1])
807                 parents = []
808                 if hasattr(ignore_priority, '__call__'):
809                         for parent, priorities in self.nodes[node][1].items():
810                                 for priority in priorities:
811                                         if not ignore_priority(priority):
812                                                 parents.append(parent)
813                                                 break
814                 else:
815                         for parent, priorities in self.nodes[node][1].items():
816                                 if ignore_priority < priorities[-1]:
817                                         parents.append(parent)
818                 return parents
819
820         def leaf_nodes(self, ignore_priority=None):
821                 """Return all nodes that have no children
822                 
823                 If ignore_priority is given, children linked only by ignored
824                 priorities are not counted."""
825                 
826                 leaf_nodes = []
827                 if ignore_priority is None:
828                         for node in self.order:
829                                 if not self.nodes[node][0]:
830                                         leaf_nodes.append(node)
831                 elif hasattr(ignore_priority, '__call__'):
832                         for node in self.order:
833                                 is_leaf_node = True
834                                 for child, priorities in self.nodes[node][0].items():
835                                         for priority in priorities:
836                                                 if not ignore_priority(priority):
837                                                         is_leaf_node = False
838                                                         break
839                                         if not is_leaf_node:
840                                                 break
841                                 if is_leaf_node:
842                                         leaf_nodes.append(node)
843                 else:
844                         for node in self.order:
845                                 is_leaf_node = True
846                                 for child, priorities in self.nodes[node][0].items():
847                                         if ignore_priority < priorities[-1]:
848                                                 is_leaf_node = False
849                                                 break
850                                 if is_leaf_node:
851                                         leaf_nodes.append(node)
852                 return leaf_nodes
853
854         def root_nodes(self, ignore_priority=None):
855                 """Return all nodes that have no parents.
856                 
857                 If ignore_priority is given, parents linked only by ignored
858                 priorities are not counted."""
859                 
860                 root_nodes = []
861                 if ignore_priority is None:
862                         for node in self.order:
863                                 if not self.nodes[node][1]:
864                                         root_nodes.append(node)
865                 elif hasattr(ignore_priority, '__call__'):
866                         for node in self.order:
867                                 is_root_node = True
868                                 for parent, priorities in self.nodes[node][1].items():
869                                         for priority in priorities:
870                                                 if not ignore_priority(priority):
871                                                         is_root_node = False
872                                                         break
873                                         if not is_root_node:
874                                                 break
875                                 if is_root_node:
876                                         root_nodes.append(node)
877                 else:
878                         for node in self.order:
879                                 is_root_node = True
880                                 for parent, priorities in self.nodes[node][1].items():
881                                         if ignore_priority < priorities[-1]:
882                                                 is_root_node = False
883                                                 break
884                                 if is_root_node:
885                                         root_nodes.append(node)
886                 return root_nodes
887
888         def is_empty(self):
889                 """Checks if the digraph is empty"""
890                 return len(self.nodes) == 0
891
892         def clone(self):
893                 clone = digraph()
894                 clone.nodes = {}
895                 memo = {}
896                 for children, parents, node in self.nodes.values():
897                         children_clone = {}
898                         for child, priorities in children.items():
899                                 priorities_clone = memo.get(id(priorities))
900                                 if priorities_clone is None:
901                                         priorities_clone = priorities[:]
902                                         memo[id(priorities)] = priorities_clone
903                                 children_clone[child] = priorities_clone
904                         parents_clone = {}
905                         for parent, priorities in parents.items():
906                                 priorities_clone = memo.get(id(priorities))
907                                 if priorities_clone is None:
908                                         priorities_clone = priorities[:]
909                                         memo[id(priorities)] = priorities_clone
910                                 parents_clone[parent] = priorities_clone
911                         clone.nodes[node] = (children_clone, parents_clone, node)
912                 clone.order = self.order[:]
913                 return clone
914
915         # Backward compatibility
916         addnode = add
917         allnodes = all_nodes
918         allzeros = leaf_nodes
919         hasnode = contains
920         __contains__ = contains
921         empty = is_empty
922         copy = clone
923
924         def delnode(self, node):
925                 try:
926                         self.remove(node)
927                 except KeyError:
928                         pass
929
930         def firstzero(self):
931                 leaf_nodes = self.leaf_nodes()
932                 if leaf_nodes:
933                         return leaf_nodes[0]
934                 return None
935
936         def hasallzeros(self, ignore_priority=None):
937                 return len(self.leaf_nodes(ignore_priority=ignore_priority)) == \
938                         len(self.order)
939
940         def debug_print(self):
941                 def output(s):
942                         writemsg(s, noiselevel=-1)
943                 for node in self.nodes:
944                         output("%s " % (node,))
945                         if self.nodes[node][0]:
946                                 output("depends on\n")
947                         else:
948                                 output("(no children)\n")
949                         for child, priorities in self.nodes[node][0].items():
950                                 output("  %s (%s)\n" % (child, priorities[-1],))
951
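# Minimal digraph usage sketch (illustrative; package names are made up):
#     graph = digraph()
#     graph.add("dev-libs/b", "app-misc/a")  # app-misc/a depends on dev-libs/b
#     graph.leaf_nodes()                     # -> ["dev-libs/b"]
#     graph.root_nodes()                     # -> ["app-misc/a"]
#     graph.remove("dev-libs/b")             # app-misc/a becomes a leaf too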
952 #parse /etc/env.d and generate /etc/profile.env
953
954 def env_update(makelinks=1, target_root=None, prev_mtimes=None, contents=None,
955         env=None, writemsg_level=None):
956         if writemsg_level is None:
957                 writemsg_level = portage.util.writemsg_level
958         if target_root is None:
959                 global settings
960                 target_root = settings["ROOT"]
961         if prev_mtimes is None:
962                 global mtimedb
963                 prev_mtimes = mtimedb["ldpath"]
964         if env is None:
965                 env = os.environ
966         envd_dir = os.path.join(target_root, "etc", "env.d")
967         portage.util.ensure_dirs(envd_dir, mode=0o755)
968         fns = listdir(envd_dir, EmptyOnError=1)
969         fns.sort()
970         templist = []
971         for x in fns:
972                 if len(x) < 3:
973                         continue
974                 if not x[0].isdigit() or not x[1].isdigit():
975                         continue
976                 if x.startswith(".") or x.endswith("~") or x.endswith(".bak"):
977                         continue
978                 templist.append(x)
979         fns = templist
980         del templist
981
982         space_separated = set(["CONFIG_PROTECT", "CONFIG_PROTECT_MASK"])
983         colon_separated = set(["ADA_INCLUDE_PATH", "ADA_OBJECTS_PATH",
984                 "CLASSPATH", "INFODIR", "INFOPATH", "KDEDIRS", "LDPATH", "MANPATH",
985                   "PATH", "PKG_CONFIG_PATH", "PRELINK_PATH", "PRELINK_PATH_MASK",
986                   "PYTHONPATH", "ROOTPATH"])
987
988         config_list = []
989
990         for x in fns:
991                 file_path = os.path.join(envd_dir, x)
992                 try:
993                         myconfig = getconfig(file_path, expand=False)
994                 except portage.exception.ParseError as e:
995                         writemsg("!!! '%s'\n" % str(e), noiselevel=-1)
996                         del e
997                         continue
998                 if myconfig is None:
999                         # broken symlink or file removed by a concurrent process
1000                         writemsg("!!! File Not Found: '%s'\n" % file_path, noiselevel=-1)
1001                         continue
1002
1003                 config_list.append(myconfig)
1004                 if "SPACE_SEPARATED" in myconfig:
1005                         space_separated.update(myconfig["SPACE_SEPARATED"].split())
1006                         del myconfig["SPACE_SEPARATED"]
1007                 if "COLON_SEPARATED" in myconfig:
1008                         colon_separated.update(myconfig["COLON_SEPARATED"].split())
1009                         del myconfig["COLON_SEPARATED"]
1010
1011         env = {}
1012         specials = {}
1013         for var in space_separated:
1014                 mylist = []
1015                 for myconfig in config_list:
1016                         if var in myconfig:
1017                                 for item in myconfig[var].split():
1018                                         if item and not item in mylist:
1019                                                 mylist.append(item)
1020                                 del myconfig[var] # prepare for env.update(myconfig)
1021                 if mylist:
1022                         env[var] = " ".join(mylist)
1023                 specials[var] = mylist
1024
1025         for var in colon_separated:
1026                 mylist = []
1027                 for myconfig in config_list:
1028                         if var in myconfig:
1029                                 for item in myconfig[var].split(":"):
1030                                         if item and not item in mylist:
1031                                                 mylist.append(item)
1032                                 del myconfig[var] # prepare for env.update(myconfig)
1033                 if mylist:
1034                         env[var] = ":".join(mylist)
1035                 specials[var] = mylist
1036
1037         for myconfig in config_list:
1038                 # Cumulative variables have already been deleted from myconfig so
1039                 # that they won't be overwritten by this dict.update call.
1040                 env.update(myconfig)
1041
1042         ldsoconf_path = os.path.join(target_root, "etc", "ld.so.conf")
1043         try:
1044                 myld = codecs.open(_unicode_encode(ldsoconf_path,
1045                         encoding=_encodings['fs'], errors='strict'),
1046                         mode='r', encoding=_encodings['content'], errors='replace')
1047                 myldlines=myld.readlines()
1048                 myld.close()
1049                 oldld=[]
1050                 for x in myldlines:
1051                         #each line has at least one char (a newline)
1052                         if x[0]=="#":
1053                                 continue
1054                         oldld.append(x[:-1])
1055         except (IOError, OSError) as e:
1056                 if e.errno != errno.ENOENT:
1057                         raise
1058                 oldld = None
1059
1060         ld_cache_update=False
1061
1062         newld = specials["LDPATH"]
1063         if (oldld!=newld):
1064                 #ld.so.conf needs updating and ldconfig needs to be run
1065                 myfd = atomic_ofstream(ldsoconf_path)
1066                 myfd.write("# ld.so.conf autogenerated by env-update; make all changes to\n")
1067                 myfd.write("# contents of /etc/env.d directory\n")
1068                 for x in specials["LDPATH"]:
1069                         myfd.write(x+"\n")
1070                 myfd.close()
1071                 ld_cache_update=True
1072
1073         # Update prelink.conf if we are prelink-enabled
1074         if prelink_capable:
1075                 newprelink = atomic_ofstream(
1076                         os.path.join(target_root, "etc", "prelink.conf"))
1077                 newprelink.write("# prelink.conf autogenerated by env-update; make all changes to\n")
1078                 newprelink.write("# contents of /etc/env.d directory\n")
1079
1080                 for x in ["/bin","/sbin","/usr/bin","/usr/sbin","/lib","/usr/lib"]:
1081                         newprelink.write("-l "+x+"\n")
1082                 for x in specials["LDPATH"]+specials["PATH"]+specials["PRELINK_PATH"]:
1083                         if not x:
1084                                 continue
1085                         if x[-1]!='/':
1086                                 x=x+"/"
1087                         plmasked=0
1088                         for y in specials["PRELINK_PATH_MASK"]:
1089                                 if not y:
1090                                         continue
1091                                 if y[-1]!='/':
1092                                         y=y+"/"
1093                                 if y==x[0:len(y)]:
1094                                         plmasked=1
1095                                         break
1096                         if not plmasked:
1097                                 newprelink.write("-h "+x+"\n")
1098                 for x in specials["PRELINK_PATH_MASK"]:
1099                         newprelink.write("-b "+x+"\n")
1100                 newprelink.close()
1101
1102         # Portage stores mtimes with 1 second granularity but in >=python-2.5 finer
1103         # granularity is possible.  In order to avoid the potential ambiguity of
1104         # mtimes that differ by less than 1 second, sleep here if any of the
1105         # directories have been modified during the current second.
1106         sleep_for_mtime_granularity = False
1107         current_time = long(time.time())
1108         mtime_changed = False
1109         lib_dirs = set()
1110         for lib_dir in portage.util.unique_array(specials["LDPATH"]+['usr/lib','usr/lib64','usr/lib32','lib','lib64','lib32']):
1111                 x = os.path.join(target_root, lib_dir.lstrip(os.sep))
1112                 try:
1113                         newldpathtime = long(os.stat(x).st_mtime)
1114                         lib_dirs.add(normalize_path(x))
1115                 except OSError as oe:
1116                         if oe.errno == errno.ENOENT:
1117                                 try:
1118                                         del prev_mtimes[x]
1119                                 except KeyError:
1120                                         pass
1121                                 # ignore this path because it doesn't exist
1122                                 continue
1123                         raise
1124                 if newldpathtime == current_time:
1125                         sleep_for_mtime_granularity = True
1126                 if x in prev_mtimes:
1127                         if prev_mtimes[x] == newldpathtime:
1128                                 pass
1129                         else:
1130                                 prev_mtimes[x] = newldpathtime
1131                                 mtime_changed = True
1132                 else:
1133                         prev_mtimes[x] = newldpathtime
1134                         mtime_changed = True
1135
1136         if mtime_changed:
1137                 ld_cache_update = True
1138
1139         if makelinks and \
1140                 not ld_cache_update and \
1141                 contents is not None:
1142                 libdir_contents_changed = False
1143                 for mypath, mydata in contents.items():
1144                         if mydata[0] not in ("obj","sym"):
1145                                 continue
1146                         head, tail = os.path.split(mypath)
1147                         if head in lib_dirs:
1148                                 libdir_contents_changed = True
1149                                 break
1150                 if not libdir_contents_changed:
1151                         makelinks = False
1152
1153         ldconfig = "/sbin/ldconfig"
1154         if "CHOST" in env and "CBUILD" in env and \
1155                 env["CHOST"] != env["CBUILD"]:
1156                 from portage.process import find_binary
1157                 ldconfig = find_binary("%s-ldconfig" % env["CHOST"])
1158
1159         # Only run ldconfig as needed
1160         if (ld_cache_update or makelinks) and ldconfig:
1161                 # ldconfig has very different behaviour between FreeBSD and Linux
1162                 if ostype=="Linux" or ostype.lower().endswith("gnu"):
1163                         # We can't update links if we haven't cleaned other versions first, as
1164                         # an older package installed ON TOP of a newer version will cause ldconfig
1165                         # to overwrite the symlinks we just made. -X means no links. After 'clean'
1166                         # we can safely create links.
1167                         writemsg_level(_(">>> Regenerating %setc/ld.so.cache...\n") % \
1168                                 (target_root,))
1169                         if makelinks:
1170                                 os.system("cd / ; %s -r '%s'" % (ldconfig, target_root))
1171                         else:
1172                                 os.system("cd / ; %s -X -r '%s'" % (ldconfig, target_root))
1173                 elif ostype in ("FreeBSD","DragonFly"):
1174                         writemsg_level(_(">>> Regenerating %svar/run/ld-elf.so.hints...\n") % \
1175                                 target_root)
1176                         os.system(("cd / ; %s -elf -i " + \
1177                                 "-f '%svar/run/ld-elf.so.hints' '%setc/ld.so.conf'") % \
1178                                 (ldconfig, target_root, target_root))
1179
1180         del specials["LDPATH"]
1181
1182         penvnotice  = "# THIS FILE IS AUTOMATICALLY GENERATED BY env-update.\n"
1183         penvnotice += "# DO NOT EDIT THIS FILE. CHANGES TO STARTUP PROFILES\n"
1184         cenvnotice  = penvnotice[:]
1185         penvnotice += "# GO INTO /etc/profile NOT /etc/profile.env\n\n"
1186         cenvnotice += "# GO INTO /etc/csh.cshrc NOT /etc/csh.env\n\n"
1187
1188         #create /etc/profile.env for bash support
1189         outfile = atomic_ofstream(os.path.join(target_root, "etc", "profile.env"))
1190         outfile.write(penvnotice)
1191
1192         env_keys = [ x for x in env if x != "LDPATH" ]
1193         env_keys.sort()
1194         for k in env_keys:
1195                 v = env[k]
1196                 if v.startswith('$') and not v.startswith('${'):
1197                         outfile.write("export %s=$'%s'\n" % (k, v[1:]))
1198                 else:
1199                         outfile.write("export %s='%s'\n" % (k, v))
1200         outfile.close()
1201
1202         #create /etc/csh.env for (t)csh support
1203         outfile = atomic_ofstream(os.path.join(target_root, "etc", "csh.env"))
1204         outfile.write(cenvnotice)
1205         for x in env_keys:
1206                 outfile.write("setenv %s '%s'\n" % (x, env[x]))
1207         outfile.close()
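        # Illustrative example (hypothetical values): an env.d variable such as
        # PATH='/usr/local/bin:/usr/bin:/bin' ends up in profile.env as
        #   export PATH='/usr/local/bin:/usr/bin:/bin'
        # and in csh.env as
        #   setenv PATH '/usr/local/bin:/usr/bin:/bin'
        # while LDPATH is deliberately excluded from both files.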
1208
1209         if sleep_for_mtime_granularity:
1210                 while current_time == long(time.time()):
1211                         sleep(1)
1212
1213 def ExtractKernelVersion(base_dir):
1214         """
1215         Try to figure out the version of the kernel sources in base_dir
1216         @param base_dir: Path to sources (usually /usr/src/linux)
1217         @type base_dir: string
1218         @rtype: tuple( version[string], error[string])
1219         @returns:
1220         tuple( version[string], error[string])
1221         Either version or error is populated (but never both)
1222
1223         """
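        # Usage sketch (hypothetical path and result):
        #   version, err = ExtractKernelVersion('/usr/src/linux')
        # On success err is None and version is assembled from VERSION,
        # PATCHLEVEL, SUBLEVEL, EXTRAVERSION plus any localversion* and
        # CONFIG_LOCALVERSION content, e.g. something like '2.6.30-gentoo'.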
1224         lines = []
1225         pathname = os.path.join(base_dir, 'Makefile')
1226         try:
1227                 f = codecs.open(_unicode_encode(pathname,
1228                         encoding=_encodings['fs'], errors='strict'), mode='r',
1229                         encoding=_encodings['content'], errors='replace')
1230         except (IOError, OSError) as details:
1231                 return (None, str(details))
1234
1235         try:
1236                 for i in range(4):
1237                         lines.append(f.readline())
1238         except (IOError, OSError) as details:
1239                 return (None, str(details))
1240         finally:
1241                 # Close the file handle in all cases to avoid leaking it.
1242                 f.close()
1242
1243         lines = [l.strip() for l in lines]
1244
1245         version = ''
1246
1247         #XXX: The following code relies on the ordering of vars within the Makefile
1248         for line in lines:
1249                 # split on the '=' then remove annoying whitespace
1250                 items = line.split("=")
1251                 items = [i.strip() for i in items]
1252                 if items[0] == 'VERSION' or \
1253                         items[0] == 'PATCHLEVEL':
1254                         version += items[1]
1255                         version += "."
1256                 elif items[0] == 'SUBLEVEL':
1257                         version += items[1]
1258                 elif items[0] == 'EXTRAVERSION' and \
1259                         items[-1] != items[0]:
1260                         version += items[1]
1261
1262         # Grab a list of files named localversion* and sort them
1263         localversions = os.listdir(base_dir)
1264         for x in range(len(localversions)-1,-1,-1):
1265                 if localversions[x][:12] != "localversion":
1266                         del localversions[x]
1267         localversions.sort()
1268
1269         # Append the contents of each to the version string, stripping ALL whitespace
1270         for lv in localversions:
1271                 version += "".join( " ".join( grabfile( base_dir+ "/" + lv ) ).split() )
1272
1273         # Check the .config for a CONFIG_LOCALVERSION and append that too, also stripping whitespace
1274         kernelconfig = getconfig(base_dir+"/.config")
1275         if kernelconfig and "CONFIG_LOCALVERSION" in kernelconfig:
1276                 version += "".join(kernelconfig["CONFIG_LOCALVERSION"].split())
1277
1278         return (version,None)
1279
1280 def autouse(myvartree, use_cache=1, mysettings=None):
1281         """
1282         autouse returns a space-separated string of USE variables auto-enabled for packages being installed
1283
1284         @param myvartree: Instance of the vartree class (from /var/db/pkg...)
1285         @type myvartree: vartree
1286         @param use_cache: read values from cache
1287         @type use_cache: Boolean
1288         @param mysettings: Instance of config
1289         @type mysettings: config
1290         @rtype: string
1291         @returns: A string containing a list of USE variables that are enabled via use.defaults
1292         """
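        # Illustrative example (hypothetical use.defaults content): if the
        # profile maps "mysql" to dev-db/mysql and that package is installed
        # in myvartree, the returned string gains " mysql".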
1293         if mysettings is None:
1294                 global settings
1295                 mysettings = settings
1296         if mysettings.profile_path is None:
1297                 return ""
1298         myusevars=""
1299         usedefaults = mysettings.use_defs
1300         for myuse in usedefaults:
1301                 dep_met = True
1302                 for mydep in usedefaults[myuse]:
1303                         if not myvartree.dep_match(mydep, use_cache=use_cache):
1304                                 dep_met = False
1305                                 break
1306                 if dep_met:
1307                         myusevars += " "+myuse
1308         return myusevars
1309
1310 def check_config_instance(test):
1311         if not isinstance(test, config):
1312                 raise TypeError("Invalid type for config object: %s (should be %s)" % (test.__class__, config))
1313
1314 def _lazy_iuse_regex(iuse_implicit):
1315         """
1316         The PORTAGE_IUSE value is lazily evaluated since re.escape() is slow
1317         and the value is only used when an ebuild phase needs to be executed
1318         (it's used only to generate QA notices).
1319         """
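        # Rough illustration: for iuse_implicit = ['alpha', 'x86'] the result
        # is '^(alpha|x86)$'; any '.*' suffixes in the input pass through
        # unescaped so they keep their wildcard meaning.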
1320         # Escape anything except ".*" which is supposed to pass through from
1321         # _get_implicit_iuse().
1322         regex = sorted(re.escape(x) for x in iuse_implicit)
1323         regex = "^(%s)$" % "|".join(regex)
1324         regex = regex.replace("\\.\\*", ".*")
1325         return regex
1326
1327 class _local_repo_config(object):
1328         __slots__ = ('aliases', 'eclass_overrides', 'masters', 'name',)
1329         def __init__(self, name, repo_opts):
1330                 self.name = name
1331
1332                 aliases = repo_opts.get('aliases')
1333                 if aliases is not None:
1334                         aliases = tuple(aliases.split())
1335                 self.aliases = aliases
1336
1337                 eclass_overrides = repo_opts.get('eclass-overrides')
1338                 if eclass_overrides is not None:
1339                         eclass_overrides = tuple(eclass_overrides.split())
1340                 self.eclass_overrides = eclass_overrides
1341
1342                 masters = repo_opts.get('masters')
1343                 if masters is not None:
1344                         masters = tuple(masters.split())
1345                 self.masters = masters
1346
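# Illustrative repos.conf section (hypothetical values) that this class can
# represent:
#
#   [myoverlay]
#   masters = gentoo
#   eclass-overrides = myoverlay
#
# which would yield name='myoverlay', masters=('gentoo',),
# eclass_overrides=('myoverlay',) and aliases=None.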
1347 class config(object):
1348         """
1349         This class encompasses the main portage configuration.  Data is pulled
1350         from ROOT/PORTDIR/profiles/ and, incrementally through all parent
1351         profiles, from ROOT/etc/make.profile, as well as from
1352         ROOT/PORTAGE_CONFIGROOT/* for user-specified overrides.
1353
1354         Generally, if you need data such as USE flags, FEATURES, environment
1355         variables or virtuals, this is the place to look.
1356         """
1357
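        # Usage sketch (illustrative, not exhaustive): the global
        # portage.settings object is an instance of this class, so callers
        # typically clone it rather than mutate the shared instance, e.g.
        #   settings = portage.config(clone=portage.settings)
        #   use_flags = settings.get("USE", "").split()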
1358         # Don't include anything that could be extremely long here (like SRC_URI)
1359         # since that could cause execve() calls to fail with E2BIG errors. For
1360         # example, see bug #262647.
1361         _setcpv_aux_keys = ('SLOT', 'RESTRICT', 'LICENSE',
1362                 'KEYWORDS',  'INHERITED', 'IUSE', 'PROVIDE', 'EAPI',
1363                 'PROPERTIES', 'DEFINED_PHASES', 'repository')
1364
1365         _env_blacklist = [
1366                 "A", "AA", "CATEGORY", "DEPEND", "DESCRIPTION", "EAPI",
1367                 "EBUILD_PHASE", "EMERGE_FROM", "HOMEPAGE", "INHERITED", "IUSE",
1368                 "KEYWORDS", "LICENSE", "PDEPEND", "PF", "PKGUSE",
1369                 "PORTAGE_CONFIGROOT", "PORTAGE_IUSE",
1370                 "PORTAGE_NONFATAL", "PORTAGE_REPO_NAME",
1371                 "PORTAGE_USE", "PROPERTIES", "PROVIDE", "RDEPEND", "RESTRICT",
1372                 "ROOT", "SLOT", "SRC_URI"
1373         ]
1374
1375         _environ_whitelist = []
1376
1377         # Whitelisted variables are always allowed to enter the ebuild
1378         # environment. Generally, this only includes special portage
1379         # variables. Ebuilds can unset variables that are not whitelisted
1380         # and rely on them remaining unset for future phases, without them
1381         # leaking back in from various locations (bug #189417). It's very
1382         # important to set our special BASH_ENV variable in the ebuild
1383         # environment in order to prevent sandbox from sourcing /etc/profile
1384         # in its bashrc (causing major leakage).
1385         _environ_whitelist += [
1386                 "ACCEPT_LICENSE", "BASH_ENV", "BUILD_PREFIX", "D",
1387                 "DISTDIR", "DOC_SYMLINKS_DIR", "EBUILD",
1388                 "EBUILD_EXIT_STATUS_FILE", "EBUILD_FORCE_TEST",
1389                 "EBUILD_PHASE", "ECLASSDIR", "ECLASS_DEPTH", "EMERGE_FROM",
1390                 "FEATURES", "FILESDIR", "HOME", "NOCOLOR", "PATH",
1391                 "PKGDIR",
1392                 "PKGUSE", "PKG_LOGDIR", "PKG_TMPDIR",
1393                 "PORTAGE_ACTUAL_DISTDIR", "PORTAGE_ARCHLIST",
1394                 "PORTAGE_BASHRC",
1395                 "PORTAGE_BINPKG_FILE", "PORTAGE_BINPKG_TAR_OPTS",
1396                 "PORTAGE_BINPKG_TMPFILE",
1397                 "PORTAGE_BIN_PATH",
1398                 "PORTAGE_BUILDDIR", "PORTAGE_COLORMAP",
1399                 "PORTAGE_CONFIGROOT", "PORTAGE_DEBUG", "PORTAGE_DEPCACHEDIR",
1400                 "PORTAGE_GID", "PORTAGE_INST_GID", "PORTAGE_INST_UID",
1401                 "PORTAGE_IUSE",
1402                 "PORTAGE_LOG_FILE", "PORTAGE_MASTER_PID",
1403                 "PORTAGE_PYM_PATH", "PORTAGE_QUIET",
1404                 "PORTAGE_REPO_NAME", "PORTAGE_RESTRICT",
1405                 "PORTAGE_TMPDIR", "PORTAGE_UPDATE_ENV",
1406                 "PORTAGE_VERBOSE", "PORTAGE_WORKDIR_MODE",
1407                 "PORTDIR", "PORTDIR_OVERLAY", "PREROOTPATH", "PROFILE_PATHS",
1408                 "ROOT", "ROOTPATH", "STARTDIR", "T", "TMP", "TMPDIR",
1409                 "USE_EXPAND", "USE_ORDER", "WORKDIR",
1410                 "XARGS",
1411         ]
1412
1413         # user config variables
1414         _environ_whitelist += [
1415                 "DOC_SYMLINKS_DIR", "INSTALL_MASK", "PKG_INSTALL_MASK"
1416         ]
1417
1418         _environ_whitelist += [
1419                 "A", "AA", "CATEGORY", "P", "PF", "PN", "PR", "PV", "PVR"
1420         ]
1421
1422         # misc variables inherited from the calling environment
1423         _environ_whitelist += [
1424                 "COLORTERM", "DISPLAY", "EDITOR", "LESS",
1425                 "LESSOPEN", "LOGNAME", "LS_COLORS", "PAGER",
1426                 "TERM", "TERMCAP", "USER",
1427         ]
1428
1429         # other variables inherited from the calling environment
1430         _environ_whitelist += [
1431                 "CVS_RSH", "ECHANGELOG_USER",
1432                 "GPG_AGENT_INFO",
1433                 "SSH_AGENT_PID", "SSH_AUTH_SOCK",
1434                 "STY", "WINDOW", "XAUTHORITY",
1435         ]
1436
1437         _environ_whitelist = frozenset(_environ_whitelist)
1438
1439         _environ_whitelist_re = re.compile(r'^(CCACHE_|DISTCC_).*')
1440
1441         # Filter selected variables in the config.environ() method so that
1442         # they don't needlessly propagate down into the ebuild environment.
1443         _environ_filter = []
1444
1445         # misc variables inherited from the calling environment
1446         _environ_filter += [
1447                 "INFOPATH", "MANPATH",
1448         ]
1449
1450         # variables that break bash
1451         _environ_filter += [
1452                 "HISTFILE", "POSIXLY_CORRECT",
1453         ]
1454
1455         # portage config variables and variables set directly by portage
1456         _environ_filter += [
1457                 "ACCEPT_KEYWORDS", "ACCEPT_PROPERTIES", "AUTOCLEAN",
1458                 "CLEAN_DELAY", "COLLISION_IGNORE", "CONFIG_PROTECT",
1459                 "CONFIG_PROTECT_MASK", "EGENCACHE_DEFAULT_OPTS", "EMERGE_DEFAULT_OPTS",
1460                 "EMERGE_LOG_DIR",
1461                 "EMERGE_WARNING_DELAY", "FETCHCOMMAND", "FETCHCOMMAND_FTP",
1462                 "FETCHCOMMAND_HTTP", "FETCHCOMMAND_SFTP",
1463                 "GENTOO_MIRRORS", "NOCONFMEM", "O",
1464                 "PORTAGE_BACKGROUND",
1465                 "PORTAGE_BINHOST_CHUNKSIZE", "PORTAGE_CALLER",
1466                 "PORTAGE_ELOG_CLASSES",
1467                 "PORTAGE_ELOG_MAILFROM", "PORTAGE_ELOG_MAILSUBJECT",
1468                 "PORTAGE_ELOG_MAILURI", "PORTAGE_ELOG_SYSTEM",
1469                 "PORTAGE_FETCH_CHECKSUM_TRY_MIRRORS", "PORTAGE_FETCH_RESUME_MIN_SIZE",
1470                 "PORTAGE_GPG_DIR",
1471                 "PORTAGE_GPG_KEY", "PORTAGE_IONICE_COMMAND",
1472                 "PORTAGE_PACKAGE_EMPTY_ABORT",
1473                 "PORTAGE_REPO_DUPLICATE_WARN",
1474                 "PORTAGE_RO_DISTDIRS",
1475                 "PORTAGE_RSYNC_EXTRA_OPTS", "PORTAGE_RSYNC_OPTS",
1476                 "PORTAGE_RSYNC_RETRIES", "PORTAGE_USE", "PORT_LOGDIR",
1477                 "QUICKPKG_DEFAULT_OPTS",
1478                 "RESUMECOMMAND", "RESUMECOMMAND_FTP", "RESUMECOMMAND_HTTP",
1479                 "RESUMECOMMAND_SFTP", "SYNC", "USE_EXPAND_HIDDEN", "USE_ORDER",
1480         ]
1481
1482         _environ_filter = frozenset(_environ_filter)
1483
1484         _undef_lic_groups = set()
1485         _default_globals = (
1486                 ('ACCEPT_LICENSE',           '* -@EULA'),
1487                 ('ACCEPT_PROPERTIES',        '*'),
1488         )
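        # Interpretation note: the ACCEPT_LICENSE default above means "accept
        # every license except those in the @EULA license group", and the
        # ACCEPT_PROPERTIES default accepts all properties.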
1489
1490         # To enhance usability, make some vars case insensitive
1491         # by forcing them to lower case.
1492         _case_insensitive_vars = ('AUTOCLEAN', 'NOCOLOR',)
1493
1494         def __init__(self, clone=None, mycpv=None, config_profile_path=None,
1495                 config_incrementals=None, config_root=None, target_root=None,
1496                 local_config=True, env=None):
1497                 """
1498                 @param clone: If provided, __init__ will deep-copy the given instance by value.
1499                 @type clone: Instance of config class.
1500                 @param mycpv: CPV to load up (see setcpv); this is equivalent to calling
1501                 __init__ with mycpv=None and then calling instance.setcpv(mycpv).
1502                 @type mycpv: String
1503                 @param config_profile_path: Configurable path to the profile (usually PROFILE_PATH from portage.const)
1504                 @type config_profile_path: String
1505                 @param config_incrementals: List of incremental variables
1506                         (defaults to portage.const.INCREMENTALS)
1507                 @type config_incrementals: List
1508                 @param config_root: path to read local config from (defaults to "/", see PORTAGE_CONFIGROOT)
1509                 @type config_root: String
1510                 @param target_root: __init__ override of $ROOT env variable.
1511                 @type target_root: String
1512                 @param local_config: Enables loading of local config (/etc/portage); used mostly by
1513                 repoman to ignore local config (keywording and unmasking)
1514                 @type local_config: Boolean
1515                 @param env: The calling environment which is used to override settings.
1516                         Defaults to os.environ if unspecified.
1517                 @type env: dict
1518                 """
1519
1520                 # When initializing the global portage.settings instance, avoid
1521                 # raising exceptions whenever possible since exceptions thrown
1522                 # from 'import portage' or 'import portage.exceptions' statements
1523                 # can practically render the api unusable for api consumers.
1524                 tolerant = "_initializing_globals" in globals()
1525
1526                 self.already_in_regenerate = 0
1527
1528                 self.locked   = 0
1529                 self.mycpv    = None
1530                 self._setcpv_args_hash = None
1531                 self.puse     = []
1532                 self.modifiedkeys = []
1533                 self.uvlist = []
1534                 self._accept_chost_re = None
1535                 self._accept_license = None
1536                 self._accept_license_str = None
1537                 self._license_groups = {}
1538                 self._accept_properties = None
1539
1540                 self.virtuals = {}
1541                 self.virts_p = {}
1542                 self.dirVirtuals = None
1543                 self.v_count  = 0
1544
1545                 # Virtuals obtained from the vartree
1546                 self.treeVirtuals = {}
1547                 # Virtuals by user specification. Includes negatives.
1548                 self.userVirtuals = {}
1549                 # Virtual negatives from user specifications.
1550                 self.negVirtuals  = {}
1551                 # Virtuals added by the depgraph via self.setinst().
1552                 self._depgraphVirtuals = {}
1553
1554                 self.user_profile_dir = None
1555                 self.local_config = local_config
1556                 self._local_repo_configs = None
1557                 self._local_repo_conf_path = None
1558
1559                 if clone:
1560                         self.incrementals = copy.deepcopy(clone.incrementals)
1561                         self.profile_path = copy.deepcopy(clone.profile_path)
1562                         self.user_profile_dir = copy.deepcopy(clone.user_profile_dir)
1563                         self.local_config = copy.deepcopy(clone.local_config)
1564                         self._local_repo_configs = \
1565                                 copy.deepcopy(clone._local_repo_configs)
1566                         self._local_repo_conf_path = \
1567                                 copy.deepcopy(clone._local_repo_conf_path)
1568
1569                         self.module_priority = copy.deepcopy(clone.module_priority)
1570                         self.modules         = copy.deepcopy(clone.modules)
1571
1572                         self.depcachedir = copy.deepcopy(clone.depcachedir)
1573
1574                         self.packages = copy.deepcopy(clone.packages)
1575                         self.virtuals = copy.deepcopy(clone.virtuals)
1576
1577                         self.dirVirtuals = copy.deepcopy(clone.dirVirtuals)
1578                         self.treeVirtuals = copy.deepcopy(clone.treeVirtuals)
1579                         self.userVirtuals = copy.deepcopy(clone.userVirtuals)
1580                         self.negVirtuals  = copy.deepcopy(clone.negVirtuals)
1581                         self._depgraphVirtuals = copy.deepcopy(clone._depgraphVirtuals)
1582
1583                         self.use_defs = copy.deepcopy(clone.use_defs)
1584                         self.usemask  = copy.deepcopy(clone.usemask)
1585                         self.usemask_list = copy.deepcopy(clone.usemask_list)
1586                         self.pusemask_list = copy.deepcopy(clone.pusemask_list)
1587                         self.useforce      = copy.deepcopy(clone.useforce)
1588                         self.useforce_list = copy.deepcopy(clone.useforce_list)
1589                         self.puseforce_list = copy.deepcopy(clone.puseforce_list)
1590                         self.puse     = copy.deepcopy(clone.puse)
1591                         self.make_defaults_use = copy.deepcopy(clone.make_defaults_use)
1592                         self.pkgprofileuse = copy.deepcopy(clone.pkgprofileuse)
1593                         self.mycpv    = copy.deepcopy(clone.mycpv)
1594                         self._setcpv_args_hash = copy.deepcopy(clone._setcpv_args_hash)
1595
1596                         self.configdict = copy.deepcopy(clone.configdict)
1597                         self.configlist = [
1598                                 self.configdict['env.d'],
1599                                 self.configdict['pkginternal'],
1600                                 self.configdict['globals'],
1601                                 self.configdict['defaults'],
1602                                 self.configdict['conf'],
1603                                 self.configdict['pkg'],
1604                                 self.configdict['auto'],
1605                                 self.configdict['env'],
1606                         ]
1607                         self.lookuplist = self.configlist[:]
1608                         self.lookuplist.reverse()
1609                         self._use_expand_dict = copy.deepcopy(clone._use_expand_dict)
1610                         self.profiles = copy.deepcopy(clone.profiles)
1611                         self.backupenv  = self.configdict["backupenv"]
1612                         self.pusedict   = copy.deepcopy(clone.pusedict)
1613                         self.categories = copy.deepcopy(clone.categories)
1614                         self.pkeywordsdict = copy.deepcopy(clone.pkeywordsdict)
1615                         self._pkeywords_list = copy.deepcopy(clone._pkeywords_list)
1616                         self.pmaskdict = copy.deepcopy(clone.pmaskdict)
1617                         self.punmaskdict = copy.deepcopy(clone.punmaskdict)
1618                         self.prevmaskdict = copy.deepcopy(clone.prevmaskdict)
1619                         self.pprovideddict = copy.deepcopy(clone.pprovideddict)
1620                         self.features = copy.deepcopy(clone.features)
1621
1622                         self._accept_license = copy.deepcopy(clone._accept_license)
1623                         self._plicensedict = copy.deepcopy(clone._plicensedict)
1624                         self._license_groups = copy.deepcopy(clone._license_groups)
1625                         self._accept_properties = copy.deepcopy(clone._accept_properties)
1626                         self._ppropertiesdict = copy.deepcopy(clone._ppropertiesdict)
1627                 else:
1628
1629                         def check_var_directory(varname, var):
1630                                 if not os.path.isdir(var):
1631                                         writemsg(_("!!! Error: %s='%s' is not a directory. "
1632                                                 "Please correct this.\n") % (varname, var),
1633                                                 noiselevel=-1)
1634                                         raise portage.exception.DirectoryNotFound(var)
1635
1636                         if config_root is None:
1637                                 config_root = "/"
1638
1639                         config_root = normalize_path(os.path.abspath(
1640                                 config_root)).rstrip(os.path.sep) + os.path.sep
1641
1642                         check_var_directory("PORTAGE_CONFIGROOT", config_root)
1643
1644                         self.depcachedir = DEPCACHE_PATH
1645
1646                         if not config_profile_path:
1647                                 config_profile_path = \
1648                                         os.path.join(config_root, PROFILE_PATH)
1649                                 if os.path.isdir(config_profile_path):
1650                                         self.profile_path = config_profile_path
1651                                 else:
1652                                         self.profile_path = None
1653                         else:
1654                                 self.profile_path = config_profile_path[:]
1655
1656                         if config_incrementals is None:
1657                                 self.incrementals = copy.deepcopy(portage.const.INCREMENTALS)
1658                         else:
1659                                 self.incrementals = copy.deepcopy(config_incrementals)
1660
1661                         self.module_priority    = ["user","default"]
1662                         self.modules            = {}
1663                         modules_loader = portage.env.loaders.KeyValuePairFileLoader(
1664                                 os.path.join(config_root, MODULES_FILE_PATH), None, None)
1665                         modules_dict, modules_errors = modules_loader.load()
1666                         self.modules["user"] = modules_dict
1667                         if self.modules["user"] is None:
1668                                 self.modules["user"] = {}
1669                         self.modules["default"] = {
1670                                 "portdbapi.metadbmodule": "portage.cache.metadata.database",
1671                                 "portdbapi.auxdbmodule":  "portage.cache.flat_hash.database",
1672                         }
1673
1674                         self.usemask=[]
1675                         self.configlist=[]
1676
1677                         # back up our incremental variables:
1678                         self.configdict={}
1679                         self._use_expand_dict = {}
1680                         # configlist will contain: [ env.d, globals, defaults, conf, pkg, auto, backupenv, env ]
1681                         self.configlist.append({})
1682                         self.configdict["env.d"] = self.configlist[-1]
1683
1684                         self.configlist.append({})
1685                         self.configdict["pkginternal"] = self.configlist[-1]
1686
1687                         # The symlink might not exist or might not be a symlink.
1688                         if self.profile_path is None:
1689                                 self.profiles = []
1690                         else:
1691                                 self.profiles = []
1692                                 def addProfile(currentPath):
1693                                         parentsFile = os.path.join(currentPath, "parent")
1694                                         eapi_file = os.path.join(currentPath, "eapi")
1695                                         try:
1696                                                 eapi = codecs.open(_unicode_encode(eapi_file,
1697                                                         encoding=_encodings['fs'], errors='strict'),
1698                                                         mode='r', encoding=_encodings['content'], errors='replace'
1699                                                         ).readline().strip()
1700                                         except IOError:
1701                                                 pass
1702                                         else:
1703                                                 if not eapi_is_supported(eapi):
1704                                                         raise portage.exception.ParseError(_(
1705                                                                 "Profile contains unsupported "
1706                                                                 "EAPI '%s': '%s'") % \
1707                                                                 (eapi, os.path.realpath(eapi_file),))
1708                                         if os.path.exists(parentsFile):
1709                                                 parents = grabfile(parentsFile)
1710                                                 if not parents:
1711                                                         raise portage.exception.ParseError(
1712                                                                 _("Empty parent file: '%s'") % parentsFile)
1713                                                 for parentPath in parents:
1714                                                         parentPath = normalize_path(os.path.join(
1715                                                                 currentPath, parentPath))
1716                                                         if os.path.exists(parentPath):
1717                                                                 addProfile(parentPath)
1718                                                         else:
1719                                                                 raise portage.exception.ParseError(
1720                                                                         _("Parent '%s' not found: '%s'") %  \
1721                                                                         (parentPath, parentsFile))
1722                                         self.profiles.append(currentPath)
1723                                 try:
1724                                         addProfile(os.path.realpath(self.profile_path))
1725                                 except portage.exception.ParseError as e:
1726                                         writemsg(_("!!! Unable to parse profile: '%s'\n") % \
1727                                                 self.profile_path, noiselevel=-1)
1728                                         writemsg("!!! ParseError: %s\n" % str(e), noiselevel=-1)
1729                                         del e
1730                                         self.profiles = []
1731                         if local_config and self.profiles:
1732                                 custom_prof = os.path.join(
1733                                         config_root, CUSTOM_PROFILE_PATH)
1734                                 if os.path.exists(custom_prof):
1735                                         self.user_profile_dir = custom_prof
1736                                         self.profiles.append(custom_prof)
1737                                 del custom_prof
1738
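                        # At this point self.profiles is ordered from the most
                        # distant parent profile down to the profile that
                        # make.profile points at, optionally followed by the
                        # user's custom profile directory (if present).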
1739                         self.packages_list = [grabfile_package(os.path.join(x, "packages")) for x in self.profiles]
1740                         self.packages      = stack_lists(self.packages_list, incremental=1)
1741                         del self.packages_list
1742                         #self.packages = grab_stacked("packages", self.profiles, grabfile, incremental_lines=1)
1743
1744                         # prevmaskdict
1745                         self.prevmaskdict={}
1746                         for x in self.packages:
1747                                 # Negative atoms are filtered by the above stack_lists() call.
1748                                 if not isinstance(x, dep.Atom):
1749                                         x = dep.Atom(x.lstrip('*'))
1750                                 self.prevmaskdict.setdefault(x.cp, []).append(x)
1751
1752                         self._pkeywords_list = []
1753                         rawpkeywords = [grabdict_package(
1754                                 os.path.join(x, "package.keywords"), recursive=1) \
1755                                 for x in self.profiles]
1756                         for pkeyworddict in rawpkeywords:
1757                                 cpdict = {}
1758                                 for k, v in pkeyworddict.items():
1759                                         cpdict.setdefault(k.cp, {})[k] = v
1760                                 self._pkeywords_list.append(cpdict)
1761
1762                         # get profile-masked use flags -- INCREMENTAL Child over parent
1763                         self.usemask_list = [grabfile(os.path.join(x, "use.mask"),
1764                                 recursive=1) for x in self.profiles]
1765                         self.usemask  = set(stack_lists(
1766                                 self.usemask_list, incremental=True))
1767                         use_defs_lists = [grabdict(os.path.join(x, "use.defaults")) for x in self.profiles]
1768                         self.use_defs  = stack_dictlist(use_defs_lists, incremental=True)
1769                         del use_defs_lists
1770
1771                         self.pusemask_list = []
1772                         rawpusemask = [grabdict_package(os.path.join(x, "package.use.mask"),
1773                                 recursive=1) for x in self.profiles]
1774                         for pusemaskdict in rawpusemask:
1775                                 cpdict = {}
1776                                 for k, v in pusemaskdict.items():
1777                                         cpdict.setdefault(k.cp, {})[k] = v
1778                                 self.pusemask_list.append(cpdict)
1779                         del rawpusemask
1780
1781                         self.pkgprofileuse = []
1782                         rawprofileuse = [grabdict_package(os.path.join(x, "package.use"),
1783                                 juststrings=True, recursive=1) for x in self.profiles]
1784                         for rawpusedict in rawprofileuse:
1785                                 cpdict = {}
1786                                 for k, v in rawpusedict.items():
1787                                         cpdict.setdefault(k.cp, {})[k] = v
1788                                 self.pkgprofileuse.append(cpdict)
1789                         del rawprofileuse
1790
1791                         self.useforce_list = [grabfile(os.path.join(x, "use.force"),
1792                                 recursive=1) for x in self.profiles]
1793                         self.useforce  = set(stack_lists(
1794                                 self.useforce_list, incremental=True))
1795
1796                         self.puseforce_list = []
1797                         rawpuseforce = [grabdict_package(
1798                                 os.path.join(x, "package.use.force"), recursive=1) \
1799                                 for x in self.profiles]
1800                         for rawpusefdict in rawpuseforce:
1801                                 cpdict = {}
1802                                 for k, v in rawpusefdict.items():
1803                                         cpdict.setdefault(k.cp, {})[k] = v
1804                                 self.puseforce_list.append(cpdict)
1805                         del rawpuseforce
1806
1807                         make_conf = getconfig(
1808                                 os.path.join(config_root, MAKE_CONF_FILE),
1809                                 tolerant=tolerant, allow_sourcing=True)
1810                         if make_conf is None:
1811                                 make_conf = {}
1812
1813                         # Allow ROOT setting to come from make.conf if it's not overridden
1814                         # by the constructor argument (from the calling environment).
1815                         if target_root is None and "ROOT" in make_conf:
1816                                 target_root = make_conf["ROOT"]
1817                                 if not target_root.strip():
1818                                         target_root = None
1819                         if target_root is None:
1820                                 target_root = "/"
1821
1822                         target_root = normalize_path(os.path.abspath(
1823                                 target_root)).rstrip(os.path.sep) + os.path.sep
1824
1825                         portage.util.ensure_dirs(target_root)
1826                         check_var_directory("ROOT", target_root)
1827
1828                         # The expand_map is used for variable substitution
1829                         # in getconfig() calls, and the getconfig() calls
1830                         # update expand_map with the value of each variable
1831                         # assignment that occurs. Variable substitution occurs
1832                         # in the following order, which corresponds to the
1833                         # order of appearance in self.lookuplist:
1834                         #
1835                         #   * env.d
1836                         #   * make.globals
1837                         #   * make.defaults
1838                         #   * make.conf
1839                         #
1840                         # Notably absent is "env", since we want to avoid any
1841                         # interaction with the calling environment that might
1842                         # lead to unexpected results.
1843                         expand_map = {}
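                        # Illustrative expansion (hypothetical values): if
                        # make.globals defines PORTDIR="/usr/portage" and
                        # make.conf contains DISTDIR="${PORTDIR}/distfiles",
                        # the accumulated expand_map lets getconfig() resolve
                        # DISTDIR to "/usr/portage/distfiles".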
1844
1845                         env_d = getconfig(os.path.join(target_root, "etc", "profile.env"),
1846                                 expand=expand_map)
1847                         # env_d will be None if profile.env doesn't exist.
1848                         if env_d:
1849                                 self.configdict["env.d"].update(env_d)
1850                                 expand_map.update(env_d)
1851
1852                         # backupenv is used for calculating incremental variables.
1853                         if env is None:
1854                                 env = os.environ
1855
1856                         # Avoid potential UnicodeDecodeError exceptions later.
1857                         env_unicode = dict((_unicode_decode(k), _unicode_decode(v))
1858                                 for k, v in env.items())
1859
1860                         self.backupenv = env_unicode
1861
1862                         if env_d:
1863                                 # Remove duplicate values so they don't override updated
1864                                 # profile.env values later (profile.env is reloaded in each
1865                                 # call to self.regenerate).
1866                                 for k, v in env_d.items():
1867                                         try:
1868                                                 if self.backupenv[k] == v:
1869                                                         del self.backupenv[k]
1870                                         except KeyError:
1871                                                 pass
1872                                 del k, v
1873
1874                         self.configdict["env"] = util.LazyItemsDict(self.backupenv)
1875
1876                         # make.globals should not be relative to config_root
1877                         # because it only contains constants.
1878                         for x in (portage.const.GLOBAL_CONFIG_PATH, "/etc"):
1879                                 self.mygcfg = getconfig(os.path.join(x, "make.globals"),
1880                                         expand=expand_map)
1881                                 if self.mygcfg:
1882                                         break
1883
1884                         if self.mygcfg is None:
1885                                 self.mygcfg = {}
1886
1887                         for k, v in self._default_globals:
1888                                 self.mygcfg.setdefault(k, v)
1889
1890                         self.configlist.append(self.mygcfg)
1891                         self.configdict["globals"]=self.configlist[-1]
1892
1893                         self.make_defaults_use = []
1894                         self.mygcfg = {}
1895                         if self.profiles:
1896                                 mygcfg_dlists = [getconfig(os.path.join(x, "make.defaults"),
1897                                         expand=expand_map) for x in self.profiles]
1898
1899                                 for cfg in mygcfg_dlists:
1900                                         if cfg:
1901                                                 self.make_defaults_use.append(cfg.get("USE", ""))
1902                                         else:
1903                                                 self.make_defaults_use.append("")
1904                                 self.mygcfg = stack_dicts(mygcfg_dlists,
1905                                         incrementals=portage.const.INCREMENTALS)
1906                                 if self.mygcfg is None:
1907                                         self.mygcfg = {}
1908                         self.configlist.append(self.mygcfg)
1909                         self.configdict["defaults"]=self.configlist[-1]
1910
1911                         self.mygcfg = getconfig(
1912                                 os.path.join(config_root, MAKE_CONF_FILE),
1913                                 tolerant=tolerant, allow_sourcing=True, expand=expand_map)
1914                         if self.mygcfg is None:
1915                                 self.mygcfg = {}
1916
1917                         # Don't allow the user to override certain variables in make.conf
1918                         profile_only_variables = self.configdict["defaults"].get(
1919                                 "PROFILE_ONLY_VARIABLES", "").split()
1920                         for k in profile_only_variables:
1921                                 self.mygcfg.pop(k, None)
1922
1923                         self.configlist.append(self.mygcfg)
1924                         self.configdict["conf"]=self.configlist[-1]
1925
1926                         self.configlist.append(util.LazyItemsDict())
1927                         self.configdict["pkg"]=self.configlist[-1]
1928
1929                         #auto-use:
1930                         self.configlist.append({})
1931                         self.configdict["auto"]=self.configlist[-1]
1932
1933                         self.configdict["backupenv"] = self.backupenv
1934
1935                         # Don't allow the user to override certain variables in the env
1936                         for k in profile_only_variables:
1937                                 self.backupenv.pop(k, None)
1938
1939                         self.configlist.append(self.configdict["env"])
1940
1941                         # make lookuplist for loading package.*
1942                         self.lookuplist=self.configlist[:]
1943                         self.lookuplist.reverse()
1944
1945                         # Blacklist vars that could interfere with portage internals.
1946                         for blacklisted in self._env_blacklist:
1947                                 for cfg in self.lookuplist:
1948                                         cfg.pop(blacklisted, None)
1949                                 self.backupenv.pop(blacklisted, None)
1950                         del blacklisted, cfg
1951
1952                         self["PORTAGE_CONFIGROOT"] = config_root
1953                         self.backup_changes("PORTAGE_CONFIGROOT")
1954                         self["ROOT"] = target_root
1955                         self.backup_changes("ROOT")
1956
1957                         self.pusedict = {}
1958                         self.pkeywordsdict = {}
1959                         self._plicensedict = {}
1960                         self._ppropertiesdict = {}
1961                         self.punmaskdict = {}
1962                         abs_user_config = os.path.join(config_root, USER_CONFIG_PATH)
1963
1964                         # locations for "categories" and "arch.list" files
1965                         locations = [os.path.join(self["PORTDIR"], "profiles")]
1966                         pmask_locations = [os.path.join(self["PORTDIR"], "profiles")]
1967                         pmask_locations.extend(self.profiles)
1968
1969                         """ repoman controls PORTDIR_OVERLAY via the environment, so no
1970                         special cases are needed here."""
1971                         overlay_profiles = []
1972                         for ov in self["PORTDIR_OVERLAY"].split():
1973                                 ov = normalize_path(ov)
1974                                 profiles_dir = os.path.join(ov, "profiles")
1975                                 if os.path.isdir(profiles_dir):
1976                                         overlay_profiles.append(profiles_dir)
1977                         locations += overlay_profiles
1978                         
1979                         pmask_locations.extend(overlay_profiles)
1980
1981                         if local_config:
1982                                 locations.append(abs_user_config)
1983                                 pmask_locations.append(abs_user_config)
1984                                 pusedict = grabdict_package(
1985                                         os.path.join(abs_user_config, "package.use"), recursive=1)
1986                                 for k, v in pusedict.items():
1987                                         self.pusedict.setdefault(k.cp, {})[k] = v
1988
1989                                 #package.keywords
1990                                 pkgdict = grabdict_package(
1991                                         os.path.join(abs_user_config, "package.keywords"),
1992                                         recursive=1)
1993                                 for k, v in pkgdict.items():
1994                                         # default to ~arch if no specific keyword is given
1995                                         if not v:
1996                                                 mykeywordlist = []
1997                                                 if self.configdict["defaults"] and \
1998                                                         "ACCEPT_KEYWORDS" in self.configdict["defaults"]:
1999                                                         groups = self.configdict["defaults"]["ACCEPT_KEYWORDS"].split()
2000                                                 else:
2001                                                         groups = []
2002                                                 for keyword in groups:
2003                                                         if not keyword[0] in "~-":
2004                                                                 mykeywordlist.append("~"+keyword)
2005                                                 v = mykeywordlist
2006                                         self.pkeywordsdict.setdefault(k.cp, {})[k] = v
2007
2008                                 #package.license
2009                                 licdict = grabdict_package(os.path.join(
2010                                         abs_user_config, "package.license"), recursive=1)
2011                                 for k, v in licdict.items():
2012                                         cp = k.cp
2013                                         cp_dict = self._plicensedict.get(cp)
2014                                         if not cp_dict:
2015                                                 cp_dict = {}
2016                                                 self._plicensedict[cp] = cp_dict
2017                                         cp_dict[k] = self.expandLicenseTokens(v)
2018
2019                                 #package.properties
2020                                 propdict = grabdict_package(os.path.join(
2021                                         abs_user_config, "package.properties"), recursive=1)
2022                                 for k, v in propdict.items():
2023                                         cp = k.cp
2024                                         cp_dict = self._ppropertiesdict.get(cp)
2025                                         if not cp_dict:
2026                                                 cp_dict = {}
2027                                                 self._ppropertiesdict[cp] = cp_dict
2028                                         cp_dict[k] = v
2029
2030                                 self._local_repo_configs = {}
2031                                 self._local_repo_conf_path = \
2032                                         os.path.join(abs_user_config, 'repos.conf')
2033                                 try:
2034                                         from configparser import SafeConfigParser, ParsingError
2035                                 except ImportError:
2036                                         from ConfigParser import SafeConfigParser, ParsingError
2037                                 repo_conf_parser = SafeConfigParser()
2038                                 try:
2039                                         repo_conf_parser.readfp(
2040                                                 codecs.open(
2041                                                 _unicode_encode(self._local_repo_conf_path,
2042                                                 encoding=_encodings['fs'], errors='strict'),
2043                                                 mode='r', encoding=_encodings['content'], errors='replace')
2044                                         )
2045                                 except EnvironmentError as e:
2046                                         if e.errno != errno.ENOENT:
2047                                                 raise
2048                                         del e
2049                                 except ParsingError as e:
2050                                         portage.util.writemsg_level(
2051                                                 _("!!! Error parsing '%s': %s\n")  % \
2052                                                 (self._local_repo_conf_path, e),
2053                                                 level=logging.ERROR, noiselevel=-1)
2054                                         del e
2055                                 else:
2056                                         repo_defaults = repo_conf_parser.defaults()
2057                                         if repo_defaults:
2058                                                 self._local_repo_configs['DEFAULT'] = \
2059                                                         _local_repo_config('DEFAULT', repo_defaults)
2060                                         for repo_name in repo_conf_parser.sections():
2061                                                 repo_opts = repo_defaults.copy()
2062                                                 for opt_name in repo_conf_parser.options(repo_name):
2063                                                         repo_opts[opt_name] = \
2064                                                                 repo_conf_parser.get(repo_name, opt_name)
2065                                                 self._local_repo_configs[repo_name] = \
2066                                                         _local_repo_config(repo_name, repo_opts)
2067
2068                         #getting categories from an external file now
2069                         categories = [grabfile(os.path.join(x, "categories")) for x in locations]
2070                         self.categories = tuple(sorted(
2071                                 stack_lists(categories, incremental=1)))
2072                         del categories
2073
2074                         archlist = [grabfile(os.path.join(x, "arch.list")) for x in locations]
2075                         archlist = stack_lists(archlist, incremental=1)
2076                         self.configdict["conf"]["PORTAGE_ARCHLIST"] = " ".join(archlist)
2077
2078                         # package.mask and package.unmask
2079                         pkgmasklines = []
2080                         pkgunmasklines = []
2081                         for x in pmask_locations:
2082                                 pkgmasklines.append(grabfile_package(
2083                                         os.path.join(x, "package.mask"), recursive=1))
2084                                 pkgunmasklines.append(grabfile_package(
2085                                         os.path.join(x, "package.unmask"), recursive=1))
2086                         pkgmasklines = stack_lists(pkgmasklines, incremental=1)
2087                         pkgunmasklines = stack_lists(pkgunmasklines, incremental=1)
2088
2089                         self.pmaskdict = {}
2090                         for x in pkgmasklines:
2091                                 self.pmaskdict.setdefault(x.cp, []).append(x)
2092
2093                         for x in pkgunmasklines:
2094                                 self.punmaskdict.setdefault(x.cp, []).append(x)
2095
2096                         pkgprovidedlines = [grabfile(os.path.join(x, "package.provided"), recursive=1) for x in self.profiles]
2097                         pkgprovidedlines = stack_lists(pkgprovidedlines, incremental=1)
2098                         has_invalid_data = False
2099                         for x in range(len(pkgprovidedlines)-1, -1, -1):
2100                                 myline = pkgprovidedlines[x]
2101                                 if not isvalidatom("=" + myline):
2102                                         writemsg(_("Invalid package name in package.provided: %s\n") % \
2103                                                 myline, noiselevel=-1)
2104                                         has_invalid_data = True
2105                                         del pkgprovidedlines[x]
2106                                         continue
2107                                 cpvr = catpkgsplit(pkgprovidedlines[x])
2108                                 if not cpvr or cpvr[0] == "null":
2109                                         writemsg(_("Invalid package name in package.provided: ")+pkgprovidedlines[x]+"\n",
2110                                                 noiselevel=-1)
2111                                         has_invalid_data = True
2112                                         del pkgprovidedlines[x]
2113                                         continue
2114                                 if cpvr[0] == "virtual":
2115                                         writemsg(_("Virtual package in package.provided: %s\n") % \
2116                                                 myline, noiselevel=-1)
2117                                         has_invalid_data = True
2118                                         del pkgprovidedlines[x]
2119                                         continue
2120                         if has_invalid_data:
2121                                 writemsg(_("See portage(5) for correct package.provided usage.\n"),
2122                                         noiselevel=-1)
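                        # Surviving entries are plain category/package-version
                        # strings (illustrative example:
                        # "sys-kernel/gentoo-sources-2.6.30"); they are keyed
                        # by category/package name below.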
2123                         self.pprovideddict = {}
2124                         for x in pkgprovidedlines:
2125                                 cpv = catpkgsplit(x)
2126                                 if not cpv:
2127                                         continue
2128                                 mycatpkg = cpv_getkey(x)
2129                                 if mycatpkg in self.pprovideddict:
2130                                         self.pprovideddict[mycatpkg].append(x)
2131                                 else:
2132                                         self.pprovideddict[mycatpkg]=[x]
2133
2134                         # parse licensegroups
2135                         for x in locations:
2136                                 self._license_groups.update(
2137                                         grabdict(os.path.join(x, "license_groups")))
2138
2139                         # reasonable defaults; this is important as without USE_ORDER,
2140                         # USE will always be "" (nothing set)!
2141                         if "USE_ORDER" not in self:
2142                                 self.backupenv["USE_ORDER"] = "env:pkg:conf:defaults:pkginternal:env.d"
2143
2144                         self["PORTAGE_GID"] = str(portage_gid)
2145                         self.backup_changes("PORTAGE_GID")
2146
2147                         if self.get("PORTAGE_DEPCACHEDIR", None):
2148                                 self.depcachedir = self["PORTAGE_DEPCACHEDIR"]
2149                         self["PORTAGE_DEPCACHEDIR"] = self.depcachedir
2150                         self.backup_changes("PORTAGE_DEPCACHEDIR")
2151
2152                         overlays = self.get("PORTDIR_OVERLAY","").split()
2153                         if overlays:
2154                                 new_ov = []
2155                                 for ov in overlays:
2156                                         ov = normalize_path(ov)
2157                                         if os.path.isdir(ov):
2158                                                 new_ov.append(ov)
2159                                         else:
2160                                                 writemsg(_("!!! Invalid PORTDIR_OVERLAY"
2161                                                         " (not a dir): '%s'\n") % ov, noiselevel=-1)
2162                                 self["PORTDIR_OVERLAY"] = " ".join(new_ov)
2163                                 self.backup_changes("PORTDIR_OVERLAY")
2164
2165                         if "CBUILD" not in self and "CHOST" in self:
2166                                 self["CBUILD"] = self["CHOST"]
2167                                 self.backup_changes("CBUILD")
2168
2169                         self["PORTAGE_BIN_PATH"] = PORTAGE_BIN_PATH
2170                         self.backup_changes("PORTAGE_BIN_PATH")
2171                         self["PORTAGE_PYM_PATH"] = PORTAGE_PYM_PATH
2172                         self.backup_changes("PORTAGE_PYM_PATH")
2173
2174                         for var in ("PORTAGE_INST_UID", "PORTAGE_INST_GID"):
2175                                 try:
2176                                         self[var] = str(int(self.get(var, "0")))
2177                                 except ValueError:
2178                                         writemsg(_("!!! %s='%s' is not a valid integer.  "
2179                                                 "Falling back to '0'.\n") % (var, self[var]),
2180                                                 noiselevel=-1)
2181                                         self[var] = "0"
2182                                 self.backup_changes(var)
2183
2184                         # initialize self.features
2185                         self.regenerate()
2186
2187                         if not portage.process.sandbox_capable and \
2188                                 ("sandbox" in self.features or "usersandbox" in self.features):
2189                                 if self.profile_path is not None and \
2190                                         os.path.realpath(self.profile_path) == \
2191                                         os.path.realpath(os.path.join(config_root, PROFILE_PATH)):
2192                                         # Don't show this warning when running repoman and the
2193                                         # sandbox feature came from a profile that doesn't
2194                                         # belong to the user.
2195                                         writemsg(colorize("BAD", _("!!! Problem with sandbox"
2196                                                 " binary. Disabling...\n\n")), noiselevel=-1)
2197                                 if "sandbox" in self.features:
2198                                         self.features.remove("sandbox")
2199                                 if "usersandbox" in self.features:
2200                                         self.features.remove("usersandbox")
2201
2202                         if bsd_chflags:
2203                                 self.features.add('chflags')
2204
2205                         self["FEATURES"] = " ".join(sorted(self.features))
2206                         self.backup_changes("FEATURES")
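                             # Experimental EAPI handling: parse-eapi-ebuild-head reads the EAPI
                             # from the ebuild text itself, while parse-eapi-glep-55 enables the
                             # GLEP 55 file-name-suffix scheme; both relax metadata cache
                             # validation for otherwise unsupported EAPIs.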
2207                         global _glep_55_enabled, _validate_cache_for_unsupported_eapis
2208                         if 'parse-eapi-ebuild-head' in self.features:
2209                                 _validate_cache_for_unsupported_eapis = False
2210                         if 'parse-eapi-glep-55' in self.features:
2211                                 _validate_cache_for_unsupported_eapis = False
2212                                 _glep_55_enabled = True
2213
2214                         self._init_dirs()
2215
2216                 for k in self._case_insensitive_vars:
2217                         if k in self:
2218                                 self[k] = self[k].lower()
2219                                 self.backup_changes(k)
2220
2221                 if mycpv:
2222                         self.setcpv(mycpv)
2223
2224         def _init_dirs(self):
2225                 """
2226                 Create a few directories that are critical to portage operation
2227                 """
2228                 if not os.access(self["ROOT"], os.W_OK):
2229                         return
2230
2231                 #                                gid, mode, mask, preserve_perms
2232                 dir_mode_map = {
2233                         "tmp"             : (         -1, 0o1777,  0,  True),
2234                         "var/tmp"         : (         -1, 0o1777,  0,  True),
2235                         PRIVATE_PATH      : (portage_gid, 0o2750, 0o2, False),
2236                         CACHE_PATH        : (portage_gid,  0o755, 0o2, False)
2237                 }
2238
2239                 for mypath, (gid, mode, modemask, preserve_perms) \
2240                         in dir_mode_map.items():
2241                         mydir = os.path.join(self["ROOT"], mypath)
2242                         if preserve_perms and os.path.isdir(mydir):
2243                                 # Only adjust permissions on some directories if
2244                                 # they don't exist yet. This gives freedom to the
2245                                 # user to adjust permissions to suit their taste.
2246                                 continue
2247                         try:
2248                                 portage.util.ensure_dirs(mydir, gid=gid, mode=mode, mask=modemask)
2249                         except portage.exception.PortageException as e:
2250                                 writemsg(_("!!! Directory initialization failed: '%s'\n") % mydir,
2251                                         noiselevel=-1)
2252                                 writemsg("!!! %s\n" % str(e),
2253                                         noiselevel=-1)
2254
2255         def expandLicenseTokens(self, tokens):
2256                 """ Take each token from ACCEPT_LICENSE or package.license and expand
2257                 it if it's a group token (indicated by @), or return it unchanged if
2258                 it's not a group.  If a group is negated then negate all group elements."""
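                     # For example (illustrative group contents): "@FSF-APPROVED" expands to
                     # every member of the FSF-APPROVED group, and "-@FSF-APPROVED" expands
                     # to the same members, each prefixed with "-".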
2259                 expanded_tokens = []
2260                 for x in tokens:
2261                         expanded_tokens.extend(self._expandLicenseToken(x, None))
2262                 return expanded_tokens
2263
2264         def _expandLicenseToken(self, token, traversed_groups):
2265                 negate = False
2266                 rValue = []
2267                 if token.startswith("-"):
2268                         negate = True
2269                         license_name = token[1:]
2270                 else:
2271                         license_name = token
2272                 if not license_name.startswith("@"):
2273                         rValue.append(token)
2274                         return rValue
2275                 group_name = license_name[1:]
2276                 if not traversed_groups:
2277                         traversed_groups = set()
2278                 license_group = self._license_groups.get(group_name)
2279                 if group_name in traversed_groups:
2280                         writemsg(_("Circular license group reference"
2281                                 " detected in '%s'\n") % group_name, noiselevel=-1)
2282                         rValue.append("@"+group_name)
2283                 elif license_group:
2284                         traversed_groups.add(group_name)
2285                         for l in license_group:
2286                                 if l.startswith("-"):
2287                                         writemsg(_("Skipping invalid element %s"
2288                                                 " in license group '%s'\n") % (l, group_name),
2289                                                 noiselevel=-1)
2290                                 else:
2291                                         rValue.extend(self._expandLicenseToken(l, traversed_groups))
2292                 else:
2293                         if self._license_groups and \
2294                                 group_name not in self._undef_lic_groups:
2295                                 self._undef_lic_groups.add(group_name)
2296                                 writemsg(_("Undefined license group '%s'\n") % group_name,
2297                                         noiselevel=-1)
2298                         rValue.append("@"+group_name)
2299                 if negate:
2300                         rValue = ["-" + token for token in rValue]
2301                 return rValue
2302
2303         def validate(self):
2304                 """Validate miscellaneous settings and display warnings if necessary.
2305                 (This code was previously in the global scope of portage.py)"""
2306
2307                 groups = self["ACCEPT_KEYWORDS"].split()
2308                 archlist = self.archlist()
2309                 if not archlist:
2310                         writemsg(_("--- 'profiles/arch.list' is empty or "
2311                                 "not available. Empty portage tree?\n"), noiselevel=1)
2312                 else:
2313                         for group in groups:
2314                                 if group not in archlist and \
2315                                         not (group.startswith("-") and group[1:] in archlist) and \
2316                                         group not in ("*", "~*", "**"):
2317                                         writemsg(_("!!! INVALID ACCEPT_KEYWORDS: %s\n") % str(group),
2318                                                 noiselevel=-1)
2319
2320                 abs_profile_path = os.path.join(self["PORTAGE_CONFIGROOT"],
2321                         PROFILE_PATH)
2322                 if not self.profile_path or (not os.path.islink(abs_profile_path) and \
2323                         not os.path.exists(os.path.join(abs_profile_path, "parent")) and \
2324                         os.path.exists(os.path.join(self["PORTDIR"], "profiles"))):
2325                         writemsg(_("\a\n\n!!! %s is not a symlink and will probably prevent most merges.\n") % abs_profile_path,
2326                                 noiselevel=-1)
2327                         writemsg(_("!!! It should point into a profile within %s/profiles/\n") % self["PORTDIR"])
2328                         writemsg(_("!!! (You can safely ignore this message when syncing. It's harmless.)\n\n\n"))
2329
2330                 abs_user_virtuals = os.path.join(self["PORTAGE_CONFIGROOT"],
2331                         USER_VIRTUALS_FILE)
2332                 if os.path.exists(abs_user_virtuals):
2333                         writemsg("\n!!! /etc/portage/virtuals is deprecated in favor of\n")
2334                         writemsg("!!! /etc/portage/profile/virtuals. Please move it to\n")
2335                         writemsg("!!! this new location.\n\n")
2336
2337                 if "fakeroot" in self.features and \
2338                         not portage.process.fakeroot_capable:
2339                         writemsg(_("!!! FEATURES=fakeroot is enabled, but the "
2340                                 "fakeroot binary is not installed.\n"), noiselevel=-1)
2341
2342         def loadVirtuals(self,root):
2343                 """Not currently used by portage."""
2344                 writemsg("DEPRECATED: portage.config.loadVirtuals\n")
2345                 self.getvirtuals(root)
2346
2347         def load_best_module(self,property_string):
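                     # Picks the highest-priority module configured for property_string (per
                     # self.modules and self.module_priority), falling back to the portage.*
                     # namespace for legacy "cache.*" module names.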
2348                 best_mod = best_from_dict(property_string,self.modules,self.module_priority)
2349                 mod = None
2350                 try:
2351                         mod = load_mod(best_mod)
2352                 except ImportError:
2353                         if best_mod.startswith("cache."):
2354                                 best_mod = "portage." + best_mod
2355                                 try:
2356                                         mod = load_mod(best_mod)
2357                                 except ImportError:
2358                                         pass
2359                 if mod is None:
2360                         raise
2361                 return mod
2362
2363         def lock(self):
2364                 self.locked = 1
2365
2366         def unlock(self):
2367                 self.locked = 0
2368
2369         def modifying(self):
2370                 if self.locked:
2371                         raise Exception(_("Configuration is locked."))
2372
2373         def backup_changes(self,key=None):
2374                 self.modifying()
2375                 if key and key in self.configdict["env"]:
2376                         self.backupenv[key] = copy.deepcopy(self.configdict["env"][key])
2377                 else:
2378                         raise KeyError(_("No such key defined in environment: %s") % key)
2379
2380         def reset(self,keeping_pkg=0,use_cache=1):
2381                 """
2382                 Restore environment from self.backupenv, call self.regenerate()
2383                 @param keeping_pkg: Should we keep the set_cpv() data or delete it.
2384                 @type keeping_pkg: Boolean
2385                 @param use_cache: Should self.regenerate use the cache or not
2386                 @type use_cache: Boolean
2387                 @rtype: None
2388                 """
2389                 self.modifying()
2390                 self.configdict["env"].clear()
2391                 self.configdict["env"].update(self.backupenv)
2392
2393                 self.modifiedkeys = []
2394                 if not keeping_pkg:
2395                         self.mycpv = None
2396                         self.puse = ""
2397                         self.configdict["pkg"].clear()
2398                         self.configdict["pkginternal"].clear()
2399                         self.configdict["defaults"]["USE"] = \
2400                                 " ".join(self.make_defaults_use)
2401                         self.usemask  = set(stack_lists(
2402                                 self.usemask_list, incremental=True))
2403                         self.useforce  = set(stack_lists(
2404                                 self.useforce_list, incremental=True))
2405                 self.regenerate(use_cache=use_cache)
2406
2407         def load_infodir(self,infodir):
2408                 warnings.warn("portage.config.load_infodir() is deprecated",
2409                         DeprecationWarning)
2410                 return 1
2411
2412         class _lazy_vars(object):
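                     # Helper that computes ACCEPT_LICENSE and PORTAGE_RESTRICT on first
                     # access, so the (potentially expensive) dependency-string reduction
                     # only happens when one of those values is actually requested.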
2413
2414                 __slots__ = ('built_use', 'settings', 'values')
2415
2416                 def __init__(self, built_use, settings):
2417                         self.built_use = built_use
2418                         self.settings = settings
2419                         self.values = None
2420
2421                 def __getitem__(self, k):
2422                         if self.values is None:
2423                                 self.values = self._init_values()
2424                         return self.values[k]
2425
2426                 def _init_values(self):
2427                         values = {}
2428                         settings = self.settings
2429                         use = self.built_use
2430                         if use is None:
2431                                 use = frozenset(settings['PORTAGE_USE'].split())
2432                         values['ACCEPT_LICENSE'] = self._accept_license(use, settings)
2433                         values['PORTAGE_RESTRICT'] = self._restrict(use, settings)
2434                         return values
2435
2436                 def _accept_license(self, use, settings):
2437                         """
2438                         Generate a pruned version of ACCEPT_LICENSE, by intersection with
2439                         LICENSE. This is required since otherwise ACCEPT_LICENSE might be
2440                         too big (bigger than ARG_MAX), causing execve() calls to fail with
2441                         E2BIG errors as in bug #262647.
2442                         """
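                             # Example (illustrative): with LICENSE="|| ( GPL-2 BSD )" and
                             # ACCEPT_LICENSE="* -BSD", the pruned result is "GPL-2".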
2443                         try:
2444                                 licenses = set(flatten(
2445                                         dep.use_reduce(dep.paren_reduce(
2446                                         settings['LICENSE']),
2447                                         uselist=use)))
2448                         except exception.InvalidDependString:
2449                                 licenses = set()
2450                         licenses.discard('||')
2451                         if settings._accept_license:
2452                                 acceptable_licenses = set()
2453                                 for x in settings._accept_license:
2454                                         if x == '*':
2455                                                 acceptable_licenses.update(licenses)
2456                                         elif x == '-*':
2457                                                 acceptable_licenses.clear()
2458                                         elif x[:1] == '-':
2459                                                 acceptable_licenses.discard(x[1:])
2460                                         elif x in licenses:
2461                                                 acceptable_licenses.add(x)
2462
2463                                 licenses = acceptable_licenses
2464                         return ' '.join(sorted(licenses))
2465
2466                 def _restrict(self, use, settings):
2467                         try:
2468                                 restrict = set(flatten(
2469                                         dep.use_reduce(dep.paren_reduce(
2470                                         settings['RESTRICT']),
2471                                         uselist=use)))
2472                         except exception.InvalidDependString:
2473                                 restrict = set()
2474                         return ' '.join(sorted(restrict))
2475
2476         class _lazy_use_expand(object):
2477                 """
2478                 Lazily evaluate USE_EXPAND variables since they are only needed when
2479                 an ebuild shell is spawned. Variable values are made consistent with
2480                 the previously calculated USE settings.
2481                 """
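                     # Illustrative example: if USE contains "linguas_de linguas_fr",
                     # USE_EXPAND lists LINGUAS, and both flags appear in (implicit) IUSE,
                     # then __getitem__('LINGUAS') yields the flags with the "linguas_"
                     # prefix stripped (e.g. "de fr").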
2482
2483                 def __init__(self, use, usemask, iuse_implicit,
2484                         use_expand_split, use_expand_dict):
2485                         self._use = use
2486                         self._usemask = usemask
2487                         self._iuse_implicit = iuse_implicit
2488                         self._use_expand_split = use_expand_split
2489                         self._use_expand_dict = use_expand_dict
2490
2491                 def __getitem__(self, key):
2492                         prefix = key.lower() + '_'
2493                         prefix_len = len(prefix)
2494                         expand_flags = set( x[prefix_len:] for x in self._use \
2495                                 if x[:prefix_len] == prefix )
2496                         var_split = self._use_expand_dict.get(key, '').split()
2497                         # Preserve the order of var_split because it can matter for things
2498                         # like LINGUAS.
2499                         var_split = [ x for x in var_split if x in expand_flags ]
2500                         var_split.extend(expand_flags.difference(var_split))
2501                         has_wildcard = '*' in expand_flags
2502                         if has_wildcard:
2503                                 var_split = [ x for x in var_split if x != "*" ]
2504                         has_iuse = set()
2505                         for x in self._iuse_implicit:
2506                                 if x[:prefix_len] == prefix:
2507                                         has_iuse.add(x[prefix_len:])
2508                         if has_wildcard:
2509                                 # * means to enable everything in IUSE that's not masked
2510                                 if has_iuse:
2511                                         usemask = self._usemask
2512                                         for suffix in has_iuse:
2513                                                 x = prefix + suffix
2514                                                 if x not in usemask:
2515                                                         if suffix not in expand_flags:
2516                                                                 var_split.append(suffix)
2517                                 else:
2518                                         # If there is a wildcard and no matching flags in IUSE then
2519                                         # LINGUAS should be unset so that all .mo files are
2520                                         # installed.
2521                                         var_split = []
2522                         # Make the flags unique and filter them according to IUSE.
2523                         # Also, continue to preserve order for things like LINGUAS
2524                         # and filter any duplicates that variable may contain.
2525                         filtered_var_split = []
2526                         remaining = has_iuse.intersection(var_split)
2527                         for x in var_split:
2528                                 if x in remaining:
2529                                         remaining.remove(x)
2530                                         filtered_var_split.append(x)
2531                         var_split = filtered_var_split
2532
2533                         if var_split:
2534                                 value = ' '.join(var_split)
2535                         else:
2536                                 # Don't export empty USE_EXPAND vars unless the user config
2537                                 # exports them as empty.  This is required for vars such as
2538                                 # LINGUAS, where unset and empty have different meanings.
2539                                 if has_wildcard:
2540                                         # ebuild.sh will see this and unset the variable so
2541                                         # that things like LINGUAS work properly
2542                                         value = '*'
2543                                 else:
2544                                         if has_iuse:
2545                                                 value = ''
2546                                         else:
2547                                                 # It's not in IUSE, so just allow the variable content
2548                                                 # to pass through if it is defined somewhere.  This
2549                                                 # allows packages that support LINGUAS but don't
2550                                                 # declare it in IUSE to use the variable outside of the
2551                                                 # USE_EXPAND context.
2552                                                 value = None
2553
2554                         return value
2555
2556         def setcpv(self, mycpv, use_cache=1, mydb=None):
2557                 """
2558                 Load a particular CPV into the config; this lets us see the
2559                 default USE flags for a particular ebuild as well as the USE
2560                 flags from package.use.
2561
2562                 @param mycpv: A cpv to load
2563                 @type mycpv: string
2564                 @param use_cache: Enables caching
2565                 @type use_cache: Boolean
2566                 @param mydb: a dbapi instance that supports aux_get with the IUSE key.
2567                 @type mydb: dbapi or derivative.
2568                 @rtype: None
2569                 """
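                     # Hypothetical usage sketch: settings.setcpv("sys-apps/foo-1.0",
                     # mydb=portdb) pulls that ebuild's IUSE, SLOT, etc. into
                     # self.configdict["pkg"] and recomputes USE accordingly.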
2570
2571                 self.modifying()
2572
2573                 pkg = None
2574                 built_use = None
2575                 if not isinstance(mycpv, basestring):
2576                         pkg = mycpv
2577                         mycpv = pkg.cpv
2578                         mydb = pkg.metadata
2579                         args_hash = (mycpv, id(pkg))
2580                         if pkg.built:
2581                                 built_use = pkg.use.enabled
2582                 else:
2583                         args_hash = (mycpv, id(mydb))
2584
2585                 if args_hash == self._setcpv_args_hash:
2586                         return
2587                 self._setcpv_args_hash = args_hash
2588
2589                 has_changed = False
2590                 self.mycpv = mycpv
2591                 cat, pf = catsplit(mycpv)
2592                 cp = dep_getkey(mycpv)
2593                 cpv_slot = self.mycpv
2594                 pkginternaluse = ""
2595                 iuse = ""
2596                 pkg_configdict = self.configdict["pkg"]
2597                 previous_iuse = pkg_configdict.get("IUSE")
2598
2599                 aux_keys = self._setcpv_aux_keys
2600
2601                 # Discard any existing metadata from the previous package, but
2602                 # preserve things like USE_EXPAND values and PORTAGE_USE which
2603                 # might be reused.
2604                 for k in aux_keys:
2605                         pkg_configdict.pop(k, None)
2606
2607                 pkg_configdict["CATEGORY"] = cat
2608                 pkg_configdict["PF"] = pf
2609                 if mydb:
2610                         if not hasattr(mydb, "aux_get"):
2611                                 for k in aux_keys:
2612                                         if k in mydb:
2613                                                 # Make these lazy, since __getitem__ triggers
2614                                                 # evaluation of USE conditionals which can't
2615                                                 # occur until PORTAGE_USE is calculated below.
2616                                                 pkg_configdict.addLazySingleton(k,
2617                                                         mydb.__getitem__, k)
2618                         else:
2619                                 for k, v in zip(aux_keys, mydb.aux_get(self.mycpv, aux_keys)):
2620                                         pkg_configdict[k] = v
2621                         repository = pkg_configdict.pop("repository", None)
2622                         if repository is not None:
2623                                 pkg_configdict["PORTAGE_REPO_NAME"] = repository
2624                         slot = pkg_configdict["SLOT"]
2625                         iuse = pkg_configdict["IUSE"]
2626                         if pkg is None:
2627                                 cpv_slot = "%s:%s" % (self.mycpv, slot)
2628                         else:
2629                                 cpv_slot = pkg
2630                         pkginternaluse = []
2631                         for x in iuse.split():
2632                                 if x.startswith("+"):
2633                                         pkginternaluse.append(x[1:])
2634                                 elif x.startswith("-"):
2635                                         pkginternaluse.append(x)
2636                         pkginternaluse = " ".join(pkginternaluse)
2637                 if pkginternaluse != self.configdict["pkginternal"].get("USE", ""):
2638                         self.configdict["pkginternal"]["USE"] = pkginternaluse
2639                         has_changed = True
2640
2641                 defaults = []
2642                 pos = 0
2643                 for i, pkgprofileuse_dict in enumerate(self.pkgprofileuse):
2644                         cpdict = pkgprofileuse_dict.get(cp)
2645                         if cpdict:
2646                                 keys = list(cpdict)
2647                                 while keys:
2648                                         bestmatch = best_match_to_list(cpv_slot, keys)
2649                                         if bestmatch:
2650                                                 keys.remove(bestmatch)
2651                                                 defaults.insert(pos, cpdict[bestmatch])
2652                                         else:
2653                                                 break
2654                                 del keys
2655                         if self.make_defaults_use[i]:
2656                                 defaults.insert(pos, self.make_defaults_use[i])
2657                         pos = len(defaults)
2658                 defaults = " ".join(defaults)
2659                 if defaults != self.configdict["defaults"].get("USE",""):
2660                         self.configdict["defaults"]["USE"] = defaults
2661                         has_changed = True
2662
2663                 useforce = self._getUseForce(cpv_slot)
2664                 if useforce != self.useforce:
2665                         self.useforce = useforce
2666                         has_changed = True
2667
2668                 usemask = self._getUseMask(cpv_slot)
2669                 if usemask != self.usemask:
2670                         self.usemask = usemask
2671                         has_changed = True
2672                 oldpuse = self.puse
2673                 self.puse = ""
2674                 cpdict = self.pusedict.get(cp)
2675                 if cpdict:
2676                         keys = list(cpdict)
2677                         while keys:
2678                                 self.pusekey = best_match_to_list(cpv_slot, keys)
2679                                 if self.pusekey:
2680                                         keys.remove(self.pusekey)
2681                                         self.puse = (" ".join(cpdict[self.pusekey])) + " " + self.puse
2682                                 else:
2683                                         break
2684                         del keys
2685                 if oldpuse != self.puse:
2686                         has_changed = True
2687                 self.configdict["pkg"]["PKGUSE"] = self.puse[:] # For saving to PUSE file
2688                 self.configdict["pkg"]["USE"]    = self.puse[:] # this gets appended to USE
2689
2690                 if has_changed:
2691                         self.reset(keeping_pkg=1,use_cache=use_cache)
2692
2693                 # Ensure that "pkg" values are always preferred over "env" values.
2694                 # This must occur _after_ the above reset() call, since reset()
2695                 # copies values from self.backupenv.
2696                 env_configdict = self.configdict['env']
2697                 for k in pkg_configdict:
2698                         if k != 'USE':
2699                                 env_configdict.pop(k, None)
2700
2701                 lazy_vars = self._lazy_vars(built_use, self)
2702                 env_configdict.addLazySingleton('ACCEPT_LICENSE',
2703                         lazy_vars.__getitem__, 'ACCEPT_LICENSE')
2704                 env_configdict.addLazySingleton('PORTAGE_RESTRICT',
2705                         lazy_vars.__getitem__, 'PORTAGE_RESTRICT')
2706
2707                 # If reset() has not been called, it's safe to return
2708                 # early if IUSE has not changed.
2709                 if not has_changed and previous_iuse == iuse:
2710                         return
2711
2712                 # Filter out USE flags that aren't part of IUSE. This has to
2713                 # be done for every setcpv() call since practically every
2714                 # package has different IUSE.
2715                 use = set(self["USE"].split())
2716                 iuse_implicit = self._get_implicit_iuse()
2717                 iuse_implicit.update(x.lstrip("+-") for x in iuse.split())
2718
2719                 # PORTAGE_IUSE is not always needed so it's lazily evaluated.
2720                 self.configdict["pkg"].addLazySingleton(
2721                         "PORTAGE_IUSE", _lazy_iuse_regex, iuse_implicit)
2722
2723                 ebuild_force_test = self.get("EBUILD_FORCE_TEST") == "1"
2724                 if ebuild_force_test and \
2725                         not hasattr(self, "_ebuild_force_test_msg_shown"):
2726                                 self._ebuild_force_test_msg_shown = True
2727                                 writemsg(_("Forcing test.\n"), noiselevel=-1)
2728                 if "test" in self.features:
2729                         if "test" in self.usemask and not ebuild_force_test:
2730                                 # "test" is in IUSE and USE=test is masked, so execution
2731                                 # of src_test() probably is not reliable. Therefore,
2732                                 # temporarily disable FEATURES=test just for this package.
2733                                 self["FEATURES"] = " ".join(x for x in self.features \
2734                                         if x != "test")
2735                                 use.discard("test")
2736                         else:
2737                                 use.add("test")
2738                                 if ebuild_force_test:
2739                                         self.usemask.discard("test")
2740
2741                 # Allow _* flags from USE_EXPAND wildcards to pass through here.
2742                 use.difference_update([x for x in use \
2743                         if x not in iuse_implicit and x[-2:] != '_*'])
2744
2745                 # Use the calculated USE flags to regenerate the USE_EXPAND flags so
2746                 # that they are consistent. For optimal performance, use slice
2747                 # comparison instead of startswith().
2748                 use_expand_split = set(x.lower() for \
2749                         x in self.get('USE_EXPAND', '').split())
2750                 lazy_use_expand = self._lazy_use_expand(use, self.usemask,
2751                         iuse_implicit, use_expand_split, self._use_expand_dict)
2752
2753                 use_expand_iuses = {}
2754                 for x in iuse_implicit:
2755                         x_split = x.split('_')
2756                         if len(x_split) == 1:
2757                                 continue
2758                         for i in range(len(x_split) - 1):
2759                                 k = '_'.join(x_split[:i+1])
2760                                 if k in use_expand_split:
2761                                         v = use_expand_iuses.get(k)
2762                                         if v is None:
2763                                                 v = set()
2764                                                 use_expand_iuses[k] = v
2765                                         v.add(x)
2766                                         break
2767
2768                 # If it's not in IUSE, variable content is allowed
2769                 # to pass through if it is defined somewhere.  This
2770                 # allows packages that support LINGUAS but don't
2771                 # declare it in IUSE to use the variable outside of the
2772                 # USE_EXPAND context.
2773                 for k, use_expand_iuse in use_expand_iuses.items():
2774                         if k + '_*' in use:
2775                                 use.update( x for x in use_expand_iuse if x not in usemask )
2776                         k = k.upper()
2777                         self.configdict['env'].addLazySingleton(k,
2778                                 lazy_use_expand.__getitem__, k)
2779
2780                 # Filtered for the ebuild environment. Store this in a separate
2781                 # attribute since we still want to be able to see global USE
2782                 # settings for things like emerge --info.
2783
2784                 self.configdict["pkg"]["PORTAGE_USE"] = \
2785                         " ".join(sorted(x for x in use if x[-2:] != '_*'))
2786
2787         def _get_implicit_iuse(self):
2788                 """
2789                 Some flags are considered to
2790                 be implicit members of IUSE:
2791                   * Flags derived from ARCH
2792                   * Flags derived from USE_EXPAND_HIDDEN variables
2793                   * Masked flags, such as those from {,package}use.mask
2794                   * Forced flags, such as those from {,package}use.force
2795                   * build and bootstrap flags used by bootstrap.sh
2796                 """
2797                 iuse_implicit = set()
2798                 # Flags derived from ARCH.
2799                 arch = self.configdict["defaults"].get("ARCH")
2800                 if arch:
2801                         iuse_implicit.add(arch)
2802                 iuse_implicit.update(self.get("PORTAGE_ARCHLIST", "").split())
2803
2804                 # Flags derived from USE_EXPAND_HIDDEN variables
2805                 # such as ELIBC, KERNEL, and USERLAND.
2806                 use_expand_hidden = self.get("USE_EXPAND_HIDDEN", "").split()
2807                 for x in use_expand_hidden:
2808                         iuse_implicit.add(x.lower() + "_.*")
2809
2810                 # Flags that have been masked or forced.
2811                 iuse_implicit.update(self.usemask)
2812                 iuse_implicit.update(self.useforce)
2813
2814                 # build and bootstrap flags used by bootstrap.sh
2815                 iuse_implicit.add("build")
2816                 iuse_implicit.add("bootstrap")
2817
2818                 # Controlled by FEATURES=test. Make this implicit, so handling
2819                 # of FEATURES=test is consistent regardless of explicit IUSE.
2820                 # Users may use use.mask/package.use.mask to control
2821                 # FEATURES=test for all ebuilds, regardless of explicit IUSE.
2822                 iuse_implicit.add("test")
2823
2824                 return iuse_implicit
2825
2826         def _getUseMask(self, pkg):
2827                 cp = getattr(pkg, "cp", None)
2828                 if cp is None:
2829                         cp = dep_getkey(pkg)
2830                 usemask = []
2831                 pos = 0
2832                 for i, pusemask_dict in enumerate(self.pusemask_list):
2833                         cpdict = pusemask_dict.get(cp)
2834                         if cpdict:
2835                                 keys = list(cpdict)
2836                                 while keys:
2837                                         best_match = best_match_to_list(pkg, keys)
2838                                         if best_match:
2839                                                 keys.remove(best_match)
2840                                                 usemask.insert(pos, cpdict[best_match])
2841                                         else:
2842                                                 break
2843                                 del keys
2844                         if self.usemask_list[i]:
2845                                 usemask.insert(pos, self.usemask_list[i])
2846                         pos = len(usemask)
2847                 return set(stack_lists(usemask, incremental=True))
2848
2849         def _getUseForce(self, pkg):
2850                 cp = getattr(pkg, "cp", None)
2851                 if cp is None:
2852                         cp = dep_getkey(pkg)
2853                 useforce = []
2854                 pos = 0
2855                 for i, puseforce_dict in enumerate(self.puseforce_list):
2856                         cpdict = puseforce_dict.get(cp)
2857                         if cpdict:
2858                                 keys = list(cpdict)
2859                                 while keys:
2860                                         best_match = best_match_to_list(pkg, keys)
2861                                         if best_match:
2862                                                 keys.remove(best_match)
2863                                                 useforce.insert(pos, cpdict[best_match])
2864                                         else:
2865                                                 break
2866                                 del keys
2867                         if self.useforce_list[i]:
2868                                 useforce.insert(pos, self.useforce_list[i])
2869                         pos = len(useforce)
2870                 return set(stack_lists(useforce, incremental=True))
2871
2872         def _getMaskAtom(self, cpv, metadata):
2873                 """
2874                 Take a package and return a matching package.mask atom, or None if no
2875                 such atom exists or it has been cancelled by package.unmask. PROVIDE
2876                 is not checked, so atoms will not be found for old-style virtuals.
2877
2878                 @param cpv: The package name
2879                 @type cpv: String
2880                 @param metadata: A dictionary of raw package metadata
2881                 @type metadata: dict
2882                 @rtype: String
2883                 @return: A matching atom string or None if one is not found.
2884                 """
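                     # Illustrative example: with ">=sys-apps/foo-2" in package.mask and no
                     # matching package.unmask entry, _getMaskAtom("sys-apps/foo-2.1", ...)
                     # returns ">=sys-apps/foo-2".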
2885
2886                 cp = cpv_getkey(cpv)
2887                 mask_atoms = self.pmaskdict.get(cp)
2888                 if mask_atoms:
2889                         pkg_list = ["%s:%s" % (cpv, metadata["SLOT"])]
2890                         unmask_atoms = self.punmaskdict.get(cp)
2891                         for x in mask_atoms:
2892                                 if not match_from_list(x, pkg_list):
2893                                         continue
2894                                 if unmask_atoms:
2895                                         for y in unmask_atoms:
2896                                                 if match_from_list(y, pkg_list):
2897                                                         return None
2898                                 return x
2899                 return None
2900
2901         def _getProfileMaskAtom(self, cpv, metadata):
2902                 """
2903                 Take a package and return a matching profile atom, or None if no
2904                 such atom exists. Note that a profile atom may or may not have a "*"
2905                 prefix. PROVIDE is not checked, so atoms will not be found for
2906                 old-style virtuals.
2907
2908                 @param cpv: The package name
2909                 @type cpv: String
2910                 @param metadata: A dictionary of raw package metadata
2911                 @type metadata: dict
2912                 @rtype: String
2913                 @return: A matching profile atom string or None if one is not found.
2914                 """
2915
2916                 cp = cpv_getkey(cpv)
2917                 profile_atoms = self.prevmaskdict.get(cp)
2918                 if profile_atoms:
2919                         pkg_list = ["%s:%s" % (cpv, metadata["SLOT"])]
2920                         for x in profile_atoms:
2921                                 if match_from_list(x, pkg_list):
2922                                         continue
2923                                 return x
2924                 return None
2925
2926         def _getKeywords(self, cpv, metadata):
2927                 cp = cpv_getkey(cpv)
2928                 pkg = "%s:%s" % (cpv, metadata["SLOT"])
2929                 keywords = [[x for x in metadata["KEYWORDS"].split() if x != "-*"]]
2930                 pos = len(keywords)
2931                 for pkeywords_dict in self._pkeywords_list:
2932                         cpdict = pkeywords_dict.get(cp)
2933                         if cpdict:
2934                                 keys = list(cpdict)
2935                                 while keys:
2936                                         best_match = best_match_to_list(pkg, keys)
2937                                         if best_match:
2938                                                 keys.remove(best_match)
2939                                                 keywords.insert(pos, cpdict[best_match])
2940                                         else:
2941                                                 break
2942                         pos = len(keywords)
2943                 return stack_lists(keywords, incremental=True)
2944
2945         def _getMissingKeywords(self, cpv, metadata):
2946                 """
2947                 Take a package and return a list of any KEYWORDS that the user may
2948                 need to accept for the given package. If the KEYWORDS are empty
2949                 and the ** keyword has not been accepted, the returned list will
2950                 contain ** alone (in order to distinguish from the case of "none
2951                 missing").
2952
2953                 @param cpv: The package name (for package.keywords support)
2954                 @type cpv: String
2955                 @param metadata: A dictionary of raw package metadata
2956                 @type metadata: dict
2957                 @rtype: List
2958                 @return: A list of KEYWORDS that have not been accepted.
2959                 """
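                     # Illustrative example: for a package with KEYWORDS="~amd64" and
                     # ACCEPT_KEYWORDS="amd64" this returns ["~amd64"], while with
                     # ACCEPT_KEYWORDS="~amd64" it returns [].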
2960
2961                 # Hack: Need to check the env directly here, since otherwise stacking
2962                 # doesn't work properly because negative values are lost in the config
2963                 # object (bug #139600)
2964                 egroups = self.configdict["backupenv"].get(
2965                         "ACCEPT_KEYWORDS", "").split()
2966                 mygroups = self._getKeywords(cpv, metadata)
2967                 # Repoman may modify this attribute as necessary.
2968                 pgroups = self["ACCEPT_KEYWORDS"].split()
2969                 match=0
2970                 cp = cpv_getkey(cpv)
2971                 pkgdict = self.pkeywordsdict.get(cp)
2972                 matches = False
2973                 if pkgdict:
2974                         cpv_slot_list = ["%s:%s" % (cpv, metadata["SLOT"])]
2975                         for atom, pkgkeywords in pkgdict.items():
2976                                 if match_from_list(atom, cpv_slot_list):
2977                                         matches = True
2978                                         pgroups.extend(pkgkeywords)
2979                 if matches or egroups:
2980                         pgroups.extend(egroups)
2981                         inc_pgroups = set()
2982                         for x in pgroups:
2983                                 if x.startswith("-"):
2984                                         if x == "-*":
2985                                                 inc_pgroups.clear()
2986                                         else:
2987                                                 inc_pgroups.discard(x[1:])
2988                                 else:
2989                                         inc_pgroups.add(x)
2990                         pgroups = inc_pgroups
2991                         del inc_pgroups
2992                 hasstable = False
2993                 hastesting = False
2994                 for gp in mygroups:
2995                         if gp == "*" or (gp == "-*" and len(mygroups) == 1):
2996                                 writemsg(_("--- WARNING: Package '%(cpv)s' uses"
2997                                         " '%(keyword)s' keyword.\n") % {"cpv": cpv, "keyword": gp}, noiselevel=-1)
2998                                 if gp == "*":
2999                                         match = 1
3000                                         break
3001                         elif gp in pgroups:
3002                                 match=1
3003                                 break
3004                         elif gp.startswith("~"):
3005                                 hastesting = True
3006                         elif not gp.startswith("-"):
3007                                 hasstable = True
3008                 if not match and \
3009                         ((hastesting and "~*" in pgroups) or \
3010                         (hasstable and "*" in pgroups) or "**" in pgroups):
3011                         match=1
3012                 if match:
3013                         missing = []
3014                 else:
3015                         if not mygroups:
3016                                 # If KEYWORDS is empty then we still have to return something
3017                                 # in order to distinguish from the case of "none missing".
3018                                 mygroups.append("**")
3019                         missing = mygroups
3020                 return missing
3021
3022         def _getMissingLicenses(self, cpv, metadata):
3023                 """
3024                 Take a LICENSE string and return a list of any licenses that the user
3025                 may need to accept for the given package.  The returned list will not
3026                 contain any licenses that have already been accepted.  This method
3027                 can throw an InvalidDependString exception.
3028
3029                 @param cpv: The package name (for package.license support)
3030                 @type cpv: String
3031                 @param metadata: A dictionary of raw package metadata
3032                 @type metadata: dict
3033                 @rtype: List
3034                 @return: A list of licenses that have not been accepted.
3035                 """
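                     # Illustrative example: with LICENSE="foo-EULA" (hypothetical) and an
                     # ACCEPT_LICENSE that does not cover it, and no package.license entry
                     # for this cpv, the returned list is ["foo-EULA"].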
3036                 accept_license = self._accept_license
3037                 cpdict = self._plicensedict.get(dep_getkey(cpv), None)
3038                 if cpdict:
3039                         accept_license = list(self._accept_license)
3040                         cpv_slot = "%s:%s" % (cpv, metadata["SLOT"])
3041                         for atom in match_to_list(cpv_slot, list(cpdict)):
3042                                 accept_license.extend(cpdict[atom])
3043
3044                 licenses = set(flatten(dep.use_reduce(dep.paren_reduce(
3045                         metadata["LICENSE"]), matchall=1)))
3046                 licenses.discard('||')
3047
3048                 acceptable_licenses = set()
3049                 for x in accept_license:
3050                         if x == '*':
3051                                 acceptable_licenses.update(licenses)
3052                         elif x == '-*':
3053                                 acceptable_licenses.clear()
3054                         elif x[:1] == '-':
3055                                 acceptable_licenses.discard(x[1:])
3056                         else:
3057                                 acceptable_licenses.add(x)
3058
3059                 license_str = metadata["LICENSE"]
3060                 if "?" in license_str:
3061                         use = metadata["USE"].split()
3062                 else:
3063                         use = []
3064
3065                 license_struct = portage.dep.use_reduce(
3066                         portage.dep.paren_reduce(license_str), uselist=use)
3067                 license_struct = portage.dep.dep_opconvert(license_struct)
3068                 return self._getMaskedLicenses(license_struct, acceptable_licenses)
3069
3070         def _getMaskedLicenses(self, license_struct, acceptable_licenses):
3071                 if not license_struct:
3072                         return []
3073                 if license_struct[0] == "||":
3074                         ret = []
3075                         for element in license_struct[1:]:
3076                                 if isinstance(element, list):
3077                                         if element:
3078                                                 ret.append(self._getMaskedLicenses(
3079                                                         element, acceptable_licenses))
3080                                                 if not ret[-1]:
3081                                                         return []
3082                                 else:
3083                                         if element in acceptable_licenses:
3084                                                 return []
3085                                         ret.append(element)
3086                         # Return all masked licenses, since we don't know which combination
3087                         # (if any) the user will decide to unmask.
3088                         return flatten(ret)
3089
3090                 ret = []
3091                 for element in license_struct:
3092                         if isinstance(element, list):
3093                                 if element:
3094                                         ret.extend(self._getMaskedLicenses(element,
3095                                                 acceptable_licenses))
3096                         else:
3097                                 if element not in acceptable_licenses:
3098                                         ret.append(element)
3099                 return ret
3100
3101         def _getMissingProperties(self, cpv, metadata):
3102                 """
3103                 Take a PROPERTIES string and return a list of any properties the user
3104                 may need to accept for the given package.  The returned list will not
3105                 contain any properties that have already been accepted.  This method
3106                 can throw an InvalidDependString exception.
3107
3108                 @param cpv: The package name (for package.properties support)
3109                 @type cpv: String
3110                 @param metadata: A dictionary of raw package metadata
3111                 @type metadata: dict
3112                 @rtype: List
3113                 @return: A list of properties that have not been accepted.
3114                 """
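                     # This mirrors _getMissingLicenses(), but is driven by PROPERTIES and
                     # ACCEPT_PROPERTIES instead (e.g. the "interactive" property).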
3115                 accept_properties = self._accept_properties
3116                 cpdict = self._ppropertiesdict.get(dep_getkey(cpv), None)
3117                 if cpdict:
3118                         accept_properties = list(self._accept_properties)
3119                         cpv_slot = "%s:%s" % (cpv, metadata["SLOT"])
3120                         for atom in match_to_list(cpv_slot, list(cpdict)):
3121                                 accept_properties.extend(cpdict[atom])
3122
3123                 properties = set(flatten(dep.use_reduce(dep.paren_reduce(
3124                         metadata["PROPERTIES"]), matchall=1)))
3125                 properties.discard('||')
3126
3127                 acceptable_properties = set()
3128                 for x in accept_properties:
3129                         if x == '*':
3130                                 acceptable_properties.update(properties)
3131                         elif x == '-*':
3132                                 acceptable_properties.clear()
3133                         elif x[:1] == '-':
3134                                 acceptable_properties.discard(x[1:])
3135                         else:
3136                                 acceptable_properties.add(x)
3137
3138                 properties_str = metadata["PROPERTIES"]
3139                 if "?" in properties_str:
3140                         use = metadata["USE"].split()
3141                 else:
3142                         use = []
3143
3144                 properties_struct = portage.dep.use_reduce(
3145                         portage.dep.paren_reduce(properties_str), uselist=use)
3146                 properties_struct = portage.dep.dep_opconvert(properties_struct)
3147                 return self._getMaskedProperties(properties_struct, acceptable_properties)
3148
3149         def _getMaskedProperties(self, properties_struct, acceptable_properties):
3150                 if not properties_struct:
3151                         return []
3152                 if properties_struct[0] == "||":
3153                         ret = []
3154                         for element in properties_struct[1:]:
3155                                 if isinstance(element, list):
3156                                         if element:
3157                                                 ret.append(self._getMaskedProperties(
3158                                                         element, acceptable_properties))
3159                                                 if not ret[-1]:
3160                                                         return []
3161                                 else:
3162                                         if element in acceptable_properties:
3163                                                 return []
3164                                         ret.append(element)
3165                         # Return all masked properties, since we don't know which combination
3166                         # (if any) the user will decide to unmask
3167                         return flatten(ret)
3168
3169                 ret = []
3170                 for element in properties_struct:
3171                         if isinstance(element, list):
3172                                 if element:
3173                                         ret.extend(self._getMaskedProperties(element,
3174                                                 acceptable_properties))
3175                         else:
3176                                 if element not in acceptable_properties:
3177                                         ret.append(element)
3178                 return ret
3179
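        # Illustrative sketch (hypothetical values, not part of the original
        # source) of _getMaskedProperties() on dep_opconvert()-style input:
        #
        #   _getMaskedProperties(['interactive', 'live'], set(['live']))
        #       -> ['interactive']         # flat list: every element must be accepted
        #   _getMaskedProperties(['||', 'interactive', 'live'], set(['live']))
        #       -> []                      # any-of list: one acceptable element suffices
        #   _getMaskedProperties(['||', 'interactive', 'live'], set())
        #       -> ['interactive', 'live'] # nothing acceptable, so report all of them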
3180         def _accept_chost(self, cpv, metadata):
3181                 """
3182                 @return True if pkg CHOST is accepted, False otherwise.
3183                 """
3184                 if self._accept_chost_re is None:
3185                         accept_chost = self.get("ACCEPT_CHOSTS", "").split()
3186                         if not accept_chost:
3187                                 chost = self.get("CHOST")
3188                                 if chost:
3189                                         accept_chost.append(chost)
3190                         if not accept_chost:
3191                                 self._accept_chost_re = re.compile(".*")
3192                         elif len(accept_chost) == 1:
3193                                 try:
3194                                         self._accept_chost_re = re.compile(r'^%s$' % accept_chost[0])
3195                                 except re.error as e:
3196                                         writemsg(_("!!! Invalid ACCEPT_CHOSTS value: '%s': %s\n") % \
3197                                                 (accept_chost[0], e), noiselevel=-1)
3198                                         self._accept_chost_re = re.compile("^$")
3199                         else:
3200                                 try:
3201                                         self._accept_chost_re = re.compile(
3202                                                 r'^(%s)$' % "|".join(accept_chost))
3203                                 except re.error as e:
3204                                         writemsg(_("!!! Invalid ACCEPT_CHOSTS value: '%s': %s\n") % \
3205                                                 (" ".join(accept_chost), e), noiselevel=-1)
3206                                         self._accept_chost_re = re.compile("^$")
3207
3208                 return self._accept_chost_re.match(
3209                         metadata.get('CHOST', '')) is not None
3210
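        # Illustrative sketch (hypothetical values): the regular expression that
        # _accept_chost() builds from ACCEPT_CHOSTS.  Each entry is itself
        # treated as a regular expression, so patterns are allowed.
        #
        #   ACCEPT_CHOSTS unset, CHOST unset      ->  re.compile(".*")
        #   ACCEPT_CHOSTS="x86_64-pc-linux-gnu"   ->  re.compile(r'^x86_64-pc-linux-gnu$')
        #   ACCEPT_CHOSTS="x86_64-.* i686-.*"     ->  re.compile(r'^(x86_64-.*|i686-.*)$')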
3211         def setinst(self,mycpv,mydbapi):
3212                 """This updates the preferences for old-style virtuals,
3213                 affecting the behavior of dep_expand() and dep_check()
3214                 calls. It can change dbapi.match() behavior since that
3215                 calls dep_expand(). However, dbapi instances have
3216                 internal match caches that are not invalidated when
3217                 preferences are updated here. This can potentially
3218                 lead to some inconsistency (relevant to bug #1343)."""
3219                 self.modifying()
3220                 if len(self.virtuals) == 0:
3221                         self.getvirtuals()
3222                 # Grab the virtuals this package provides and add them into the tree virtuals.
3223                 if not hasattr(mydbapi, "aux_get"):
3224                         provides = mydbapi["PROVIDE"]
3225                 else:
3226                         provides = mydbapi.aux_get(mycpv, ["PROVIDE"])[0]
3227                 if not provides:
3228                         return
3229                 if isinstance(mydbapi, portdbapi):
3230                         self.setcpv(mycpv, mydb=mydbapi)
3231                         myuse = self["PORTAGE_USE"]
3232                 elif not hasattr(mydbapi, "aux_get"):
3233                         myuse = mydbapi["USE"]
3234                 else:
3235                         myuse = mydbapi.aux_get(mycpv, ["USE"])[0]
3236                 virts = flatten(portage.dep.use_reduce(portage.dep.paren_reduce(provides), uselist=myuse.split()))
3237
3238                 modified = False
3239                 cp = dep.Atom(cpv_getkey(mycpv))
3240                 for virt in virts:
3241                         virt = dep_getkey(virt)
3242                         providers = self.virtuals.get(virt)
3243                         if providers and cp in providers:
3244                                 continue
3245                         providers = self._depgraphVirtuals.get(virt)
3246                         if providers is None:
3247                                 providers = []
3248                                 self._depgraphVirtuals[virt] = providers
3249                         if cp not in providers:
3250                                 providers.append(cp)
3251                                 modified = True
3252
3253                 if modified:
3254                         self.virtuals = self.__getvirtuals_compile()
3255
3256         def reload(self):
3257                 """Reload things like /etc/profile.env that can change during runtime."""
3258                 env_d_filename = os.path.join(self["ROOT"], "etc", "profile.env")
3259                 self.configdict["env.d"].clear()
3260                 env_d = getconfig(env_d_filename, expand=False)
3261                 if env_d:
3262                         # env_d will be None if profile.env doesn't exist.
3263                         self.configdict["env.d"].update(env_d)
3264
3265         def _prune_incremental(self, split):
3266                 """
3267                 Prune off any parts of an incremental variable that are
3268                 made irrelevant by the latest occurring * or -*. This
3269                 could be more aggressive but that might be confusing
3270                 and the point is just to reduce noise a bit.
3271                 """
3272                 for i, x in enumerate(reversed(split)):
3273                         if x == '*':
3274                                 split = split[-i-1:]
3275                                 break
3276                         elif x == '-*':
3277                                 if i == 0:
3278                                         split = []
3279                                 else:
3280                                         split = split[-i:]
3281                                 break
3282                 return split
3283
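        # Illustrative sketch (hypothetical values) of _prune_incremental():
        #
        #   self._prune_incremental('x -* a b'.split())  ->  ['a', 'b']
        #   self._prune_incremental('a b -*'.split())    ->  []
        #   self._prune_incremental('a b *'.split())     ->  ['*']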
3284         def regenerate(self,useonly=0,use_cache=1):
3285                 """
3286                 Regenerate settings
3287                 This involves regenerating valid USE flags, re-expanding USE_EXPAND flags
3288                 re-stacking USE flags (-flag and -*), as well as any other INCREMENTAL
3289                 variables.  This also updates the env.d configdict; useful in case an ebuild
3290                 changes the environment.
3291
3292                 If FEATURES has already stacked, it is not stacked twice.
3293
3294                 @param useonly: Only regenerate USE flags (not any other incrementals)
3295                 @type useonly: Boolean
3296                 @param use_cache: Enable Caching (only for autouse)
3297                 @type use_cache: Boolean
3298                 @rtype: None
3299                 """
3300
3301                 self.modifying()
3302                 if self.already_in_regenerate:
3303                         # XXX: THIS REALLY NEEDS TO GET FIXED. autouse() loops.
3304                         writemsg("!!! Looping in regenerate.\n",1)
3305                         return
3306                 else:
3307                         self.already_in_regenerate = 1
3308
3309                 if useonly:
3310                         myincrementals=["USE"]
3311                 else:
3312                         myincrementals = self.incrementals
3313                 myincrementals = set(myincrementals)
3314                 # If self.features exists, it has already been stacked and may have
3315                 # been mutated, so don't stack it again or else any mutations will be
3316                 # reverted.
3317                 if "FEATURES" in myincrementals and hasattr(self, "features"):
3318                         myincrementals.remove("FEATURES")
3319
3320                 if "USE" in myincrementals:
3321                         # Process USE last because it depends on USE_EXPAND which is also
3322                         # an incremental!
3323                         myincrementals.remove("USE")
3324
3325                 mydbs = self.configlist[:-1]
3326                 mydbs.append(self.backupenv)
3327
3328                 # ACCEPT_LICENSE is a lazily evaluated incremental, so that * can be
3329                 # used to match all licenses without ever having to explicitly expand
3330                 # it to all licenses.
3331                 if self.local_config:
3332                         mysplit = []
3333                         for curdb in mydbs:
3334                                 mysplit.extend(curdb.get('ACCEPT_LICENSE', '').split())
3335                         mysplit = self._prune_incremental(mysplit)
3336                         accept_license_str = ' '.join(mysplit)
3337                         self.configlist[-1]['ACCEPT_LICENSE'] = accept_license_str
3338                         if accept_license_str != self._accept_license_str:
3339                                 self._accept_license_str = accept_license_str
3340                                 self._accept_license = tuple(self.expandLicenseTokens(mysplit))
3341                 else:
3342                         # repoman will accept any license
3343                         self._accept_license = ('*',)
3344
3345                 # ACCEPT_PROPERTIES works like ACCEPT_LICENSE, without groups
3346                 if self.local_config:
3347                         mysplit = []
3348                         for curdb in mydbs:
3349                                 mysplit.extend(curdb.get('ACCEPT_PROPERTIES', '').split())
3350                         mysplit = self._prune_incremental(mysplit)
3351                         self.configlist[-1]['ACCEPT_PROPERTIES'] = ' '.join(mysplit)
3352                         if tuple(mysplit) != self._accept_properties:
3353                                 self._accept_properties = tuple(mysplit)
3354                 else:
3355                         # repoman will accept any property
3356                         self._accept_properties = ('*',)
3357
3358                 for mykey in myincrementals:
3359
3360                         myflags=[]
3361                         for curdb in mydbs:
3362                                 if mykey not in curdb:
3363                                         continue
3364                                 #variables are already expanded
3365                                 mysplit = curdb[mykey].split()
3366
3367                                 for x in mysplit:
3368                                         if x=="-*":
3369                                                 # "-*" is a special "minus" var that means "unset all settings".
3370                                                 # so USE="-* gnome" will have *just* gnome enabled.
3371                                                 myflags = []
3372                                                 continue
3373
3374                                         if x[0]=="+":
3375                                                 # Not legal. People assume too much. Complain.
3376                                                 writemsg(colorize("BAD",
3377                                                         _("USE flags should not start with a '+': %s") % x) \
3378                                                         + "\n", noiselevel=-1)
3379                                                 x=x[1:]
3380                                                 if not x:
3381                                                         continue
3382
3383                                         if (x[0]=="-"):
3384                                                 if (x[1:] in myflags):
3385                                                         # Unset/Remove it.
3386                                                         del myflags[myflags.index(x[1:])]
3387                                                 continue
3388
3389                                         # We got here, so add it now.
3390                                         if x not in myflags:
3391                                                 myflags.append(x)
3392
3393                         myflags.sort()
3394                         #store setting in last element of configlist, the original environment:
3395                         if myflags or mykey in self:
3396                                 self.configlist[-1][mykey] = " ".join(myflags)
3397                         del myflags
3398
3399                 # Do the USE calculation last because it depends on USE_EXPAND.
3400                 if "auto" in self["USE_ORDER"].split(":"):
3401                         self.configdict["auto"]["USE"] = autouse(
3402                                 vartree(root=self["ROOT"], categories=self.categories,
3403                                         settings=self),
3404                                 use_cache=use_cache, mysettings=self)
3405                 else:
3406                         self.configdict["auto"]["USE"] = ""
3407
3408                 use_expand = self.get("USE_EXPAND", "").split()
3409                 use_expand_dict = self._use_expand_dict
3410                 use_expand_dict.clear()
3411                 for k in use_expand:
3412                         v = self.get(k)
3413                         if v is not None:
3414                                 use_expand_dict[k] = v
3415
3416                 if not self.uvlist:
3417                         for x in self["USE_ORDER"].split(":"):
3418                                 if x in self.configdict:
3419                                         self.uvlist.append(self.configdict[x])
3420                         self.uvlist.reverse()
3421
3422                 # For optimal performance, use slice
3423                 # comparison instead of startswith().
3424                 myflags = set()
3425                 for curdb in self.uvlist:
3426                         cur_use_expand = [x for x in use_expand if x in curdb]
3427                         mysplit = curdb.get("USE", "").split()
3428                         if not mysplit and not cur_use_expand:
3429                                 continue
3430                         for x in mysplit:
3431                                 if x == "-*":
3432                                         myflags.clear()
3433                                         continue
3434
3435                                 if x[0] == "+":
3436                                         writemsg(colorize("BAD", _("USE flags should not start "
3437                                                 "with a '+': %s\n") % x), noiselevel=-1)
3438                                         x = x[1:]
3439                                         if not x:
3440                                                 continue
3441
3442                                 if x[0] == "-":
3443                                         myflags.discard(x[1:])
3444                                         continue
3445
3446                                 myflags.add(x)
3447
3448                         for var in cur_use_expand:
3449                                 var_lower = var.lower()
3450                                 is_not_incremental = var not in myincrementals
3451                                 if is_not_incremental:
3452                                         prefix = var_lower + "_"
3453                                         prefix_len = len(prefix)
3454                                         for x in list(myflags):
3455                                                 if x[:prefix_len] == prefix:
3456                                                         myflags.remove(x)
3457                                 for x in curdb[var].split():
3458                                         if x[0] == "+":
3459                                                 if is_not_incremental:
3460                                                         writemsg(colorize("BAD", _("Invalid '+' "
3461                                                                 "operator in non-incremental variable "
3462                                                                  "'%s': '%s'\n") % (var, x)), noiselevel=-1)
3463                                                         continue
3464                                                 else:
3465                                                         writemsg(colorize("BAD", _("Invalid '+' "
3466                                                                 "operator in incremental variable "
3467                                                                  "'%s': '%s'\n") % (var, x)), noiselevel=-1)
3468                                                 x = x[1:]
3469                                         if x[0] == "-":
3470                                                 if is_not_incremental:
3471                                                         writemsg(colorize("BAD", _("Invalid '-' "
3472                                                                 "operator in non-incremental variable "
3473                                                                  "'%s': '%s'\n") % (var, x)), noiselevel=-1)
3474                                                         continue
3475                                                 myflags.discard(var_lower + "_" + x[1:])
3476                                                 continue
3477                                         myflags.add(var_lower + "_" + x)
3478
3479                 if hasattr(self, "features"):
3480                         self.features.clear()
3481                 else:
3482                         self.features = set()
3483                 self.features.update(self.configlist[-1].get('FEATURES', '').split())
3484                 self['FEATURES'] = ' '.join(sorted(self.features))
3485
3486                 myflags.update(self.useforce)
3487                 arch = self.configdict["defaults"].get("ARCH")
3488                 if arch:
3489                         myflags.add(arch)
3490
3491                 myflags.difference_update(self.usemask)
3492                 self.configlist[-1]["USE"]= " ".join(sorted(myflags))
3493
3494                 self.already_in_regenerate = 0
3495
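        # Illustrative sketch (hypothetical layer names and values): how
        # regenerate() stacks an incremental USE setting across configuration
        # layers, processed from lowest to highest priority:
        #
        #   profile:     USE="X alsa"
        #   make.conf:   USE="-alsa gtk"
        #   environment: USE="-* gnome"    # "-*" discards everything stacked so far
        #
        #   resulting flags (before USE_EXPAND, useforce and usemask are
        #   applied): set(['gnome'])
        #
        # USE_EXPAND variables are folded in as prefixed flags, e.g. a
        # hypothetical VIDEO_CARDS="radeon" contributes "video_cards_radeon".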
3496         def get_virts_p(self, myroot=None):
3497                 if self.virts_p:
3498                         return self.virts_p
3499                 virts = self.getvirtuals()
3500                 if virts:
3501                         for x in virts:
3502                                 vkeysplit = x.split("/")
3503                                 if vkeysplit[1] not in self.virts_p:
3504                                         self.virts_p[vkeysplit[1]] = virts[x]
3505                 return self.virts_p
3506
3507         def getvirtuals(self, myroot=None):
3508                 """myroot is now ignored because, due to caching, it has always been
3509                 broken for all but the first call."""
3510                 myroot = self["ROOT"]
3511                 if self.virtuals:
3512                         return self.virtuals
3513
3514                 virtuals_list = []
3515                 for x in self.profiles:
3516                         virtuals_file = os.path.join(x, "virtuals")
3517                         virtuals_dict = grabdict(virtuals_file)
3518                         atoms_dict = {}
3519                         for k, v in virtuals_dict.items():
3520                                 try:
3521                                         virt_atom = portage.dep.Atom(k)
3522                                 except portage.exception.InvalidAtom:
3523                                         virt_atom = None
3524                                 else:
3525                                         if virt_atom.blocker or \
3526                                                 str(virt_atom) != str(virt_atom.cp):
3527                                                 virt_atom = None
3528                                 if virt_atom is None:
3529                                         writemsg(_("--- Invalid virtuals atom in %s: %s\n") % \
3530                                                 (virtuals_file, k), noiselevel=-1)
3531                                         continue
3532                                 providers = []
3533                                 for atom in v:
3534                                         atom_orig = atom
3535                                         if atom[:1] == '-':
3536                                                 # allow incrementals
3537                                                 atom = atom[1:]
3538                                         try:
3539                                                 atom = portage.dep.Atom(atom)
3540                                         except portage.exception.InvalidAtom:
3541                                                 atom = None
3542                                         else:
3543                                                 if atom.blocker:
3544                                                         atom = None
3545                                         if atom is None:
3546                                                 writemsg(_("--- Invalid atom in %s: %s\n") % \
3547                                                         (virtuals_file, atom_orig), noiselevel=-1)
3548                                         else:
3549                                                 if atom_orig == str(atom):
3550                                                         # normal atom, so return as Atom instance
3551                                                         providers.append(atom)
3552                                                 else:
3553                                                         # atom has special prefix, so return as string
3554                                                         providers.append(atom_orig)
3555                                 if providers:
3556                                         atoms_dict[virt_atom] = providers
3557                         if atoms_dict:
3558                                 virtuals_list.append(atoms_dict)
3559
3560                 self.dirVirtuals = stack_dictlist(virtuals_list, incremental=True)
3561                 del virtuals_list
3562
3563                 for virt in self.dirVirtuals:
3564                         # Preference for virtuals decreases from left to right.
3565                         self.dirVirtuals[virt].reverse()
3566
3567                 # Repoman does not use user or tree virtuals.
3568                 if self.local_config and not self.treeVirtuals:
3569                         temp_vartree = vartree(myroot, None,
3570                                 categories=self.categories, settings=self)
3571                         self._populate_treeVirtuals(temp_vartree)
3572
3573                 self.virtuals = self.__getvirtuals_compile()
3574                 return self.virtuals
3575
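        # Illustrative sketch (hypothetical file contents): a profile
        # "virtuals" file maps a virtual to a space-separated list of preferred
        # providers, with preference decreasing from left to right.  A line
        # such as
        #
        #   virtual/editor  app-editors/nano app-editors/vim
        #
        # is read by grabdict() above as
        #
        #   {'virtual/editor': ['app-editors/nano', 'app-editors/vim']}
        #
        # and both the key and each provider are validated as portage.dep.Atom
        # instances before being stacked into self.dirVirtuals.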
3576         def _populate_treeVirtuals(self, vartree):
3577                 """Reduce the provides into a list by CP."""
3578                 for provide, cpv_list in vartree.get_all_provides().items():
3579                         try:
3580                                 provide = dep.Atom(provide)
3581                         except exception.InvalidAtom:
3582                                 continue
3583                         self.treeVirtuals[provide.cp] = \
3584                                 [dep.Atom(cpv_getkey(cpv)) for cpv in cpv_list]
3585
3586         def __getvirtuals_compile(self):
3587                 """Stack installed and profile virtuals.  Preference for virtuals
3588                 decreases from left to right.
3589                 Order of preference:
3590                 1. installed and in profile
3591                 2. installed only
3592                 3. profile only
3593                 """
3594
3595                 # Virtuals by profile+tree preferences.
3596                 ptVirtuals   = {}
3597
3598                 for virt, installed_list in self.treeVirtuals.items():
3599                         profile_list = self.dirVirtuals.get(virt, None)
3600                         if not profile_list:
3601                                 continue
3602                         for cp in installed_list:
3603                                 if cp in profile_list:
3604                                         ptVirtuals.setdefault(virt, [])
3605                                         ptVirtuals[virt].append(cp)
3606
3607                 virtuals = stack_dictlist([ptVirtuals, self.treeVirtuals,
3608                         self.dirVirtuals, self._depgraphVirtuals])
3609                 return virtuals
3610
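        # Illustrative sketch (hypothetical values) of the preference stacking
        # in __getvirtuals_compile(), assuming stack_dictlist() de-duplicates
        # while preserving first-seen order:
        #
        #   ptVirtuals   = {'virtual/editor': ['app-editors/vim']}    # installed and in profile
        #   treeVirtuals = {'virtual/editor': ['app-editors/vim', 'app-editors/emacs']}
        #   dirVirtuals  = {'virtual/editor': ['app-editors/nano', 'app-editors/vim']}
        #
        #   stack_dictlist([ptVirtuals, treeVirtuals, dirVirtuals, {}])
        #   # -> {'virtual/editor': ['app-editors/vim', 'app-editors/emacs',
        #   #                        'app-editors/nano']}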
3611         def __delitem__(self,mykey):
3612                 self.modifying()
3613                 for x in self.lookuplist:
3614                         if x != None:
3615                                 if mykey in x:
3616                                         del x[mykey]
3617
3618         def __getitem__(self,mykey):
3619                 for d in self.lookuplist:
3620                         if mykey in d:
3621                                 return d[mykey]
3622                 return '' # for backward compat, don't raise KeyError
3623
3624         def get(self, k, x=None):
3625                 for d in self.lookuplist:
3626                         if k in d:
3627                                 return d[k]
3628                 return x
3629
3630         def pop(self, key, *args):
3631                 if len(args) > 1:
3632                         raise TypeError(
3633                                 "pop expected at most 2 arguments, got " + \
3634                                 repr(1 + len(args)))
3635                 v = self
3636                 for d in reversed(self.lookuplist):
3637                         v = d.pop(key, v)
3638                 if v is self:
3639                         if args:
3640                                 return args[0]
3641                         raise KeyError(key)
3642                 return v
3643
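        # Illustrative sketch (hypothetical layers): lookup and pop semantics.
        # Assume lookuplist = [env, make_conf, defaults], highest priority first.
        #
        #   env       = {'CFLAGS': '-O2 -pipe'}
        #   make_conf = {'CFLAGS': '-O2', 'CHOST': 'x86_64-pc-linux-gnu'}
        #   defaults  = {'CHOST': 'i686-pc-linux-gnu'}
        #
        #   self['CFLAGS']      ->  '-O2 -pipe'            # first layer that has the key wins
        #   self.get('CHOST')   ->  'x86_64-pc-linux-gnu'
        #   self.pop('CFLAGS')  ->  '-O2 -pipe'            # removed from every layer,
        #                                                  # highest-priority value returned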
3644         def has_key(self,mykey):
3645                 warnings.warn("portage.config.has_key() is deprecated, "
3646                         "use the in operator instead",
3647                         DeprecationWarning)
3648                 return mykey in self
3649
3650         def __contains__(self, mykey):
3651                 """Called to implement membership test operators (in and not in)."""
3652                 for d in self.lookuplist:
3653                         if mykey in d:
3654                                 return True
3655                 return False
3656
3657         def setdefault(self, k, x=None):
3658                 v = self.get(k)
3659                 if v is not None:
3660                         return v
3661                 else:
3662                         self[k] = x
3663                         return x
3664
3665         def keys(self):
3666                 return list(self)
3667
3668         def __iter__(self):
3669                 keys = set()
3670                 for d in self.lookuplist:
3671                         keys.update(d)
3672                 return iter(keys)
3673
3674         def iterkeys(self):
3675                 return iter(self)
3676
3677         def iteritems(self):
3678                 for k in self:
3679                         yield (k, self[k])
3680
3681         def items(self):
3682                 return list(self.iteritems())
3683
3684         def __setitem__(self,mykey,myvalue):
3685                 "set a value; will be thrown away at reset() time"
3686                 if not isinstance(myvalue, basestring):
3687                         raise ValueError("Invalid type being used as a value: '%s': '%s'" % (str(mykey),str(myvalue)))
3688
3689                 # Avoid potential UnicodeDecodeError exceptions later.
3690                 mykey = _unicode_decode(mykey)
3691                 myvalue = _unicode_decode(myvalue)
3692
3693                 self.modifying()
3694                 self.modifiedkeys.append(mykey)
3695                 self.configdict["env"][mykey]=myvalue
3696
3697         def environ(self):
3698                 "return our locally-maintained environment"
3699                 mydict={}
3700                 environ_filter = self._environ_filter
3701
3702                 eapi = self.get('EAPI')
3703                 phase = self.get('EBUILD_PHASE')
3704                 filter_calling_env = False
3705                 if phase not in ('clean', 'cleanrm', 'depend'):
3706                         temp_dir = self.get('T')
3707                         if temp_dir is not None and \
3708                                 os.path.exists(os.path.join(temp_dir, 'environment')):
3709                                 filter_calling_env = True
3710
3711                 environ_whitelist = self._environ_whitelist
3712                 env_d = self.configdict["env.d"]
3713                 for x in self:
3714                         if x in environ_filter:
3715                                 continue
3716                         myvalue = self[x]
3717                         if not isinstance(myvalue, basestring):
3718                                 writemsg(_("!!! Non-string value in config: %s=%s\n") % \
3719                                         (x, myvalue), noiselevel=-1)
3720                                 continue
3721                         if filter_calling_env and \
3722                                 x not in environ_whitelist and \
3723                                 not self._environ_whitelist_re.match(x):
3724                                 # Do not allow anything to leak into the ebuild
3725                                 # environment unless it is explicitly whitelisted.
3726                                 # This ensures that variables unset by the ebuild
3727                                 # remain unset.
3728                                 continue
3729                         mydict[x] = myvalue
3730                 if "HOME" not in mydict and "BUILD_PREFIX" in mydict:
3731                         writemsg("*** HOME not set. Setting to "+mydict["BUILD_PREFIX"]+"\n")
3732                         mydict["HOME"]=mydict["BUILD_PREFIX"][:]
3733
3734                 if filter_calling_env:
3735                         if phase:
3736                                 whitelist = []
3737                                 if "rpm" == phase:
3738                                         whitelist.append("RPMDIR")
3739                                 for k in whitelist:
3740                                         v = self.get(k)
3741                                         if v is not None:
3742                                                 mydict[k] = v
3743
3744                 # Filtered by IUSE and implicit IUSE.
3745                 mydict["USE"] = self.get("PORTAGE_USE", "")
3746
3747                 # Don't export AA to the ebuild environment in EAPIs that forbid it
3748                 if eapi not in ("0", "1", "2"):
3749                         mydict.pop("AA", None)
3750
3751                 # sandbox's bashrc sources /etc/profile which unsets ROOTPATH,
3752                 # so we have to back it up and restore it.
3753                 rootpath = mydict.get("ROOTPATH")
3754                 if rootpath:
3755                         mydict["PORTAGE_ROOTPATH"] = rootpath
3756
3757                 return mydict
3758
3759         def thirdpartymirrors(self):
3760                 if getattr(self, "_thirdpartymirrors", None) is None:
3761                         profileroots = [os.path.join(self["PORTDIR"], "profiles")]
3762                         for x in self["PORTDIR_OVERLAY"].split():
3763                                 profileroots.insert(0, os.path.join(x, "profiles"))
3764                         thirdparty_lists = [grabdict(os.path.join(x, "thirdpartymirrors")) for x in profileroots]
3765                         self._thirdpartymirrors = stack_dictlist(thirdparty_lists, incremental=True)
3766                 return self._thirdpartymirrors
3767
3768         def archlist(self):
3769                 return flatten([[myarch, "~" + myarch] \
3770                         for myarch in self["PORTAGE_ARCHLIST"].split()])
3771
3772         def selinux_enabled(self):
3773                 if getattr(self, "_selinux_enabled", None) is None:
3774                         self._selinux_enabled = 0
3775                         if "selinux" in self["USE"].split():
3776                                 if selinux:
3777                                         if selinux.is_selinux_enabled() == 1:
3778                                                 self._selinux_enabled = 1
3779                                         else:
3780                                                 self._selinux_enabled = 0
3781                                 else:
3782                                         writemsg(_("!!! SELinux module not found. Please verify that it was installed.\n"),
3783                                                 noiselevel=-1)
3784                                         self._selinux_enabled = 0
3785
3786                 return self._selinux_enabled
3787
3788         if sys.hexversion >= 0x3000000:
3789                 keys = __iter__
3790                 items = iteritems
3791
3792 def _can_test_pty_eof():
3793         """
3794         The _test_pty_eof() function seems to hang on most
3795         kernels other than Linux.
3796         @rtype: bool
3797         @returns: True if _test_pty_eof() won't hang, False otherwise.
3798         """
3799         return platform.system() in ("Linux",)
3800
3801 def _test_pty_eof():
3802         """
3803         Returns True if this issue is fixed for the currently
3804         running version of python: http://bugs.python.org/issue5380
3805         Raises an EnvironmentError from openpty() if it fails.
3806         """
3807
3808         use_fork = False
3809
3810         import array, fcntl, pty, select, termios
3811         test_string = 2 * "blah blah blah\n"
3812         test_string = _unicode_decode(test_string,
3813                 encoding='utf_8', errors='strict')
3814
3815         # may raise EnvironmentError
3816         master_fd, slave_fd = pty.openpty()
3817
3818         # Non-blocking mode is required for Darwin kernel.
3819         fcntl.fcntl(master_fd, fcntl.F_SETFL,
3820                 fcntl.fcntl(master_fd, fcntl.F_GETFL) | os.O_NONBLOCK)
3821
3822         # Disable post-processing of output since otherwise weird
3823         # things like \n -> \r\n transformations may occur.
3824         mode = termios.tcgetattr(slave_fd)
3825         mode[1] &= ~termios.OPOST
3826         termios.tcsetattr(slave_fd, termios.TCSANOW, mode)
3827
3828         # Simulate a subprocess writing some data to the
3829         # slave end of the pipe, and then exiting.
3830         pid = None
3831         if use_fork:
3832                 pids = process.spawn_bash(_unicode_encode("echo -n '%s'" % test_string,
3833                         encoding='utf_8', errors='strict'), env=os.environ,
3834                         fd_pipes={0:sys.stdin.fileno(), 1:slave_fd, 2:slave_fd},
3835                         returnpid=True)
3836                 if isinstance(pids, int):
3837                         os.close(master_fd)
3838                         os.close(slave_fd)
3839                         raise EnvironmentError('spawn failed')
3840                 pid = pids[0]
3841         else:
3842                 os.write(slave_fd, _unicode_encode(test_string,
3843                         encoding='utf_8', errors='strict'))
3844         os.close(slave_fd)
3845
3846         # If using a fork, we must wait for the child here,
3847         # in order to avoid a race condition that would
3848         # lead to inconsistent results.
3849         if pid is not None:
3850                 os.waitpid(pid, 0)
3851
3852         master_file = os.fdopen(master_fd, 'rb')
3853         eof = False
3854         data = []
3855         iwtd = [master_file]
3856         owtd = []
3857         ewtd = []
3858
3859         while not eof:
3860
3861                 events = select.select(iwtd, owtd, ewtd)
3862                 if not events[0]:
3863                         eof = True
3864                         break
3865
3866                 buf = array.array('B')
3867                 try:
3868                         buf.fromfile(master_file, 1024)
3869                 except EOFError:
3870                         eof = True
3871                 except IOError:
3872                         # This is where data loss occurs.
3873                         eof = True
3874
3875                 if not buf:
3876                         eof = True
3877                 else:
3878                         data.append(_unicode_decode(buf.tostring(),
3879                                 encoding='utf_8', errors='strict'))
3880
3881         master_file.close()
3882
3883         return test_string == ''.join(data)
3884
3885 # If _test_pty_eof() can't be used for runtime detection of
3886 # http://bugs.python.org/issue5380, openpty can't safely be used
3887 # unless we can guarantee that the current version of python has
3888 # been fixed (affects all current versions of python3). When
3889 # this issue is fixed in python3, we can add another sys.hexversion
3890 # conditional to enable openpty support in the fixed versions.
3891 if sys.hexversion >= 0x3000000 and not _can_test_pty_eof():
3892         # Disable the use of openpty on Solaris, since Python's openpty
3893         # implementation doesn't play nicely with Portage's behaviour there,
3894         # causing hangs/deadlocks.
3895         # Disable it on Darwin as well: it used to work fine, but since the
3896         # introduction of _test_pty_eof Portage hangs (on the
3897         # slave_file.close() call), indicating some other problem with
3898         # openpty on Darwin.
3899         # On AIX, haubi reported that the openpty code likewise no longer
3900         # works since the introduction of _test_pty_eof.
3901         # Python's openpty module looks too fragile to rely on across UNIX
3902         # variants, so only use it on Linux.
3903         _disable_openpty = True
3904 else:
3905         _disable_openpty = False
3906 _tested_pty = False
3907
3908 if not _can_test_pty_eof():
3909         # Skip _test_pty_eof() on systems where it hangs.
3910         _tested_pty = True
3911
3912 def _create_pty_or_pipe(copy_term_size=None):
3913         """
3914         Try to create a pty and, if that fails, create a normal
3915         pipe instead.
3916
3917         @param copy_term_size: If a tty file descriptor is given
3918                 then the term size will be copied to the pty.
3919         @type copy_term_size: int
3920         @rtype: tuple
3921         @returns: A tuple of (is_pty, master_fd, slave_fd) where
3922                 is_pty is True if a pty was successfully allocated, and
3923                 False if a normal pipe was allocated.
3924         """
3925
3926         got_pty = False
3927
3928         global _disable_openpty, _tested_pty
3929         if not (_tested_pty or _disable_openpty):
3930                 try:
3931                         if not _test_pty_eof():
3932                                 _disable_openpty = True
3933                 except EnvironmentError as e:
3934                         _disable_openpty = True
3935                         writemsg("openpty failed: '%s'\n" % str(e),
3936                                 noiselevel=-1)
3937                         del e
3938                 _tested_pty = True
3939
3940         if _disable_openpty:
3941                 master_fd, slave_fd = os.pipe()
3942         else:
3943                 from pty import openpty
3944                 try:
3945                         master_fd, slave_fd = openpty()
3946                         got_pty = True
3947                 except EnvironmentError as e:
3948                         _disable_openpty = True
3949                         writemsg("openpty failed: '%s'\n" % str(e),
3950                                 noiselevel=-1)
3951                         del e
3952                         master_fd, slave_fd = os.pipe()
3953
3954         if got_pty:
3955                 # Disable post-processing of output since otherwise weird
3956                 # things like \n -> \r\n transformations may occur.
3957                 import termios
3958                 mode = termios.tcgetattr(slave_fd)
3959                 mode[1] &= ~termios.OPOST
3960                 termios.tcsetattr(slave_fd, termios.TCSANOW, mode)
3961
3962         if got_pty and \
3963                 copy_term_size is not None and \
3964                 os.isatty(copy_term_size):
3965                 from portage.output import get_term_size, set_term_size
3966                 rows, columns = get_term_size()
3967                 set_term_size(rows, columns, slave_fd)
3968
3969         return (got_pty, master_fd, slave_fd)
3970
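# Illustrative sketch (not from the original source) of how a caller might use
# _create_pty_or_pipe(); spawn() below does essentially this when a logfile is
# requested:
#
#   got_pty, master_fd, slave_fd = _create_pty_or_pipe(
#           copy_term_size=sys.stdout.fileno())
#   # Hand slave_fd to the child as its stdout/stderr, keep master_fd in the
#   # parent to tee output to the terminal and the log file, and close
#   # slave_fd in the parent once the child has been spawned.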
3971 # XXX This would be to replace getstatusoutput completely.
3972 # XXX Issue: cannot block execution. Deadlock condition.
3973 def spawn(mystring, mysettings, debug=0, free=0, droppriv=0, sesandbox=0, fakeroot=0, **keywords):
3974         """
3975         Spawn a subprocess with extra portage-specific options.
3976         Options include:
3977
3978         Sandbox: Sandbox means the spawned process will be limited in its ability to
3979         read and write files (normally this means it is restricted to ${IMAGE}/).
3980         SElinux Sandbox: Enables sandboxing on SElinux.
3981         Reduced Privileges: Drops privileges such that the process runs as portage:portage
3982         instead of as root.
3983
3984         Notes: os.system cannot be used because it messes with signal handling.  Instead we
3985         use the portage.process spawn* family of functions.
3986
3987         This function waits for the process to terminate.
3988
3989         @param mystring: Command to run
3990         @type mystring: String
3991         @param mysettings: Either a Dict of Key,Value pairs or an instance of portage.config
3992         @type mysettings: Dictionary or config instance
3993         @param debug: Ignored
3994         @type debug: Boolean
3995         @param free: Skip sandboxing for this process (run it without the sandbox wrapper)
3996         @type free: Boolean
3997         @param droppriv: Drop to portage:portage when running this command
3998         @type droppriv: Boolean
3999         @param sesandbox: Enable SELinux Sandboxing (toggles a context switch)
4000         @type sesandbox: Boolean
4001         @param fakeroot: Run this command with faked root privileges
4002         @type fakeroot: Boolean
4003         @param keywords: Extra options encoded as a dict, to be passed to spawn
4004         @type keywords: Dictionary
4005         @rtype: Integer
4006         @returns: The return code of the spawned process, or a list of pids
4007         if the returnpid keyword is enabled.
4008         """
4009
4010         if isinstance(mysettings, dict):
4011                 env=mysettings
4012                 keywords["opt_name"]="[ %s ]" % "portage"
4013         else:
4014                 check_config_instance(mysettings)
4015                 env=mysettings.environ()
4016                 if mysettings.mycpv is not None:
4017                         keywords["opt_name"] = "[%s]" % mysettings.mycpv
4018                 else:
4019                         keywords["opt_name"] = "[%s/%s]" % \
4020                                 (mysettings.get("CATEGORY",""), mysettings.get("PF",""))
4021
4022         fd_pipes = keywords.get("fd_pipes")
4023         if fd_pipes is None:
4024                 fd_pipes = {
4025                         0:sys.stdin.fileno(),
4026                         1:sys.stdout.fileno(),
4027                         2:sys.stderr.fileno(),
4028                 }
4029         # In some cases the above print statements don't flush stdout, so
4030         # it needs to be flushed before allowing a child process to use it
4031         # so that output always shows in the correct order.
4032         stdout_filenos = (sys.stdout.fileno(), sys.stderr.fileno())
4033         for fd in fd_pipes.values():
4034                 if fd in stdout_filenos:
4035                         sys.stdout.flush()
4036                         sys.stderr.flush()
4037                         break
4038
4039         # The default policy for the sesandbox domain only allows entry (via exec)
4040         # from shells and from binaries that belong to portage (the number of entry
4041         # points is minimized).  The "tee" binary is not among the allowed entry
4042         # points, so it is spawned outside of the sesandbox domain and reads from a
4043         # pseudo-terminal that connects two domains.
4044         logfile = keywords.get("logfile")
4045         mypids = []
4046         master_fd = None
4047         slave_fd = None
4048         fd_pipes_orig = None
4049         got_pty = False
4050         if logfile:
4051                 del keywords["logfile"]
4052                 if 1 not in fd_pipes or 2 not in fd_pipes:
4053                         raise ValueError(fd_pipes)
4054
4055                 fd_pipes.setdefault(0, sys.stdin.fileno())
4056                 fd_pipes_orig = fd_pipes.copy()
4057
4058                 got_pty, master_fd, slave_fd = \
4059                         _create_pty_or_pipe(copy_term_size=fd_pipes_orig[1])
4060
4061                 # We must set non-blocking mode before we close the slave_fd
4062                 # since otherwise the fcntl call can fail on FreeBSD (the child
4063                 # process might have already exited and closed slave_fd so we
4064                 # have to keep it open in order to avoid FreeBSD potentially
4065                 # generating an EAGAIN exception).
4066                 import fcntl
4067                 fcntl.fcntl(master_fd, fcntl.F_SETFL,
4068                         fcntl.fcntl(master_fd, fcntl.F_GETFL) | os.O_NONBLOCK)
4069
4070                 fd_pipes[0] = fd_pipes_orig[0]
4071                 fd_pipes[1] = slave_fd
4072                 fd_pipes[2] = slave_fd
4073                 keywords["fd_pipes"] = fd_pipes
4074
4075         features = mysettings.features
4076         # TODO: Enable fakeroot to be used together with droppriv.  The
4077         # fake ownership/permissions will have to be converted to real
4078         # permissions in the merge phase.
4079         fakeroot = fakeroot and uid != 0 and portage.process.fakeroot_capable
4080         if droppriv and not uid and portage_gid and portage_uid:
4081                 keywords.update({"uid":portage_uid,"gid":portage_gid,
4082                         "groups":userpriv_groups,"umask":0o02})
4083         if not free:
4084                 free=((droppriv and "usersandbox" not in features) or \
4085                         (not droppriv and "sandbox" not in features and \
4086                         "usersandbox" not in features and not fakeroot))
4087
4088         if free or "SANDBOX_ACTIVE" in os.environ:
4089                 keywords["opt_name"] += " bash"
4090                 spawn_func = portage.process.spawn_bash
4091         elif fakeroot:
4092                 keywords["opt_name"] += " fakeroot"
4093                 keywords["fakeroot_state"] = os.path.join(mysettings["T"], "fakeroot.state")
4094                 spawn_func = portage.process.spawn_fakeroot
4095         else:
4096                 keywords["opt_name"] += " sandbox"
4097                 spawn_func = portage.process.spawn_sandbox
4098
4099         if sesandbox:
4100                 spawn_func = selinux.spawn_wrapper(spawn_func,
4101                         mysettings["PORTAGE_SANDBOX_T"])
4102
4103         returnpid = keywords.get("returnpid")
4104         keywords["returnpid"] = True
4105         try:
4106                 mypids.extend(spawn_func(mystring, env=env, **keywords))
4107         finally:
4108                 if logfile:
4109                         os.close(slave_fd)
4110
4111         if returnpid:
4112                 return mypids
4113
4114         if logfile:
4115                 log_file = open(_unicode_encode(logfile), mode='ab')
4116                 stdout_file = os.fdopen(os.dup(fd_pipes_orig[1]), 'wb')
4117                 master_file = os.fdopen(master_fd, 'rb')
4118                 iwtd = [master_file]
4119                 owtd = []
4120                 ewtd = []
4121                 import array, select
4122                 buffsize = 65536
4123                 eof = False
4124                 while not eof:
4125                         events = select.select(iwtd, owtd, ewtd)
4126                         for f in events[0]:
4127                                 # Use non-blocking mode to prevent read
4128                                 # calls from blocking indefinitely.
4129                                 buf = array.array('B')
4130                                 try:
4131                                         buf.fromfile(f, buffsize)
4132                                 except EOFError:
4133                                         pass
4134                                 if not buf:
4135                                         eof = True
4136                                         break
4137                                 if f is master_file:
4138                                         buf.tofile(stdout_file)
4139                                         stdout_file.flush()
4140                                         buf.tofile(log_file)
4141                                         log_file.flush()
4142                 log_file.close()
4143                 stdout_file.close()
4144                 master_file.close()
4145         pid = mypids[-1]
4146         retval = os.waitpid(pid, 0)[1]
4147         portage.process.spawned_pids.remove(pid)
4148         if retval != os.EX_OK:
4149                 if retval & 0xff:
4150                         return (retval & 0xff) << 8
4151                 return retval >> 8
4152         return retval
4153
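# Illustrative sketch (hypothetical command, settings and log path): a typical
# call to spawn() above:
#
#   retval = spawn("echo hello", mysettings, droppriv=1,
#           logfile=mysettings.get("PORTAGE_LOG_FILE"))
#   if retval != os.EX_OK:
#           writemsg("!!! command failed with status %s\n" % retval,
#                   noiselevel=-1)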
4154 _userpriv_spawn_kwargs = (
4155         ("uid",    portage_uid),
4156         ("gid",    portage_gid),
4157         ("groups", userpriv_groups),
4158         ("umask",  0o02),
4159 )
4160
4161 def _spawn_fetch(settings, args, **kwargs):
4162         """
4163         Spawn a process with appropriate settings for fetching, including
4164         userfetch and selinux support.
4165         """
4166
4167         global _userpriv_spawn_kwargs
4168
4169         # Redirect all output to stdout since some fetchers like
4170         # wget pollute stderr (if portage detects a problem then it
4171         # can send its own message to stderr).
4172         if "fd_pipes" not in kwargs:
4173
4174                 kwargs["fd_pipes"] = {
4175                         0 : sys.stdin.fileno(),
4176                         1 : sys.stdout.fileno(),
4177                         2 : sys.stdout.fileno(),
4178                 }
4179
4180         if "userfetch" in settings.features and \
4181                 os.getuid() == 0 and portage_gid and portage_uid:
4182                 kwargs.update(_userpriv_spawn_kwargs)
4183
4184         spawn_func = portage.process.spawn
4185
4186         if settings.selinux_enabled():
4187                 spawn_func = selinux.spawn_wrapper(spawn_func,
4188                         settings["PORTAGE_FETCH_T"])
4189
4190                 # bash is an allowed entrypoint, while most binaries are not
4191                 if args[0] != BASH_BINARY:
4192                         args = [BASH_BINARY, "-c", "exec \"$@\"", args[0]] + args
4193
4194         rval = spawn_func(args, env=dict(iter(settings.items())), **kwargs)
4195
4196         return rval
4197
4198 _userpriv_test_write_file_cache = {}
4199 _userpriv_test_write_cmd_script = "touch %(file_path)s 2>/dev/null ; rval=$? ; " + \
4200         "rm -f  %(file_path)s ; exit $rval"
4201
4202 def _userpriv_test_write_file(settings, file_path):
4203         """
4204         Drop privileges and try to open a file for writing. The file may or
4205         may not exist, and the parent directory is assumed to exist. The file
4206         is removed before returning.
4207
4208         @param settings: A config instance which is passed to _spawn_fetch()
4209         @param file_path: A file path to open and write.
4210         @return: True if write succeeds, False otherwise.
4211         """
4212
4213         global _userpriv_test_write_file_cache, _userpriv_test_write_cmd_script
4214         rval = _userpriv_test_write_file_cache.get(file_path)
4215         if rval is not None:
4216                 return rval
4217
4218         args = [BASH_BINARY, "-c", _userpriv_test_write_cmd_script % \
4219                 {"file_path" : _shell_quote(file_path)}]
4220
4221         returncode = _spawn_fetch(settings, args)
4222
4223         rval = returncode == os.EX_OK
4224         _userpriv_test_write_file_cache[file_path] = rval
4225         return rval
4226
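# Illustrative sketch (hypothetical path): with file_path="/var/tmp/test",
# _userpriv_test_write_file() ends up spawning, with dropped privileges,
# roughly:
#
#   bash -c "touch /var/tmp/test 2>/dev/null ; rval=$? ; rm -f  /var/tmp/test ; exit $rval"
#
# (the path is passed through _shell_quote() first).  An exit status of 0
# (os.EX_OK) means the unprivileged user can write there; the result is
# cached per path in _userpriv_test_write_file_cache.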
4227 def _checksum_failure_temp_file(distdir, basename):
4228         """
4229         First try to find a duplicate temp file with the same checksum and return
4230         that filename if available. Otherwise, use mkstemp to create a new unique
4231         file named <basename>._checksum_failure_.<random>, rename the given file
4232         to it, and return the new filename. In any case, the given file will be
4233         renamed or removed before this function returns a temp filename.
4234         """
4235
4236         filename = os.path.join(distdir, basename)
4237         size = os.stat(filename).st_size
4238         checksum = None
4239         tempfile_re = re.compile(re.escape(basename) + r'\._checksum_failure_\..*')
4240         for temp_filename in os.listdir(distdir):
4241                 if not tempfile_re.match(temp_filename):
4242                         continue
4243                 temp_filename = os.path.join(distdir, temp_filename)
4244                 try:
4245                         if size != os.stat(temp_filename).st_size:
4246                                 continue
4247                 except OSError:
4248                         continue
4249                 try:
4250                         temp_checksum = portage.checksum.perform_md5(temp_filename)
4251                 except portage.exception.FileNotFound:
4252                         # Apparently the temp file disappeared. Let it go.
4253                         continue
4254                 if checksum is None:
4255                         checksum = portage.checksum.perform_md5(filename)
4256                 if checksum == temp_checksum:
4257                         os.unlink(filename)
4258                         return temp_filename
4259
4260         from tempfile import mkstemp
4261         fd, temp_filename = mkstemp("", basename + "._checksum_failure_.", distdir)
4262         os.close(fd)
4263         os.rename(filename, temp_filename)
4264         return temp_filename
4265
4266 def _check_digests(filename, digests, show_errors=1):
4267         """
4268         Check digests and display a message if an error occurs.
4269         @return True if all digests match, False otherwise.
4270         """
4271         verified_ok, reason = portage.checksum.verify_all(filename, digests)
4272         if not verified_ok:
4273                 if show_errors:
4274                         writemsg(_("!!! Previously fetched"
4275                                 " file: '%s'\n") % filename, noiselevel=-1)
4276                         writemsg(_("!!! Reason: %s\n") % reason[0],
4277                                 noiselevel=-1)
4278                         writemsg(_("!!! Got:      %s\n"
4279                                 "!!! Expected: %s\n") % \
4280                                 (reason[1], reason[2]), noiselevel=-1)
4281                 return False
4282         return True
4283
4284 def _check_distfile(filename, digests, eout, show_errors=1):
4285         """
4286         @return a tuple of (match, stat_obj) where match is True if filename
4287         matches all given digests (if any) and stat_obj is a stat result, or
4288         None if the file does not exist.
4289         """
4290         if digests is None:
4291                 digests = {}
4292         size = digests.get("size")
4293         if size is not None and len(digests) == 1:
4294                 digests = None
4295
4296         try:
4297                 st = os.stat(filename)
4298         except OSError:
4299                 return (False, None)
4300         if size is not None and size != st.st_size:
4301                 return (False, st)
4302         if not digests:
4303                 if size is not None:
4304                         eout.ebegin(_("%s size ;-)") % os.path.basename(filename))
4305                         eout.eend(0)
4306                 elif st.st_size == 0:
4307                         # Zero-byte distfiles are always invalid.
4308                         return (False, st)
4309         else:
4310                 if _check_digests(filename, digests, show_errors=show_errors):
4311                         eout.ebegin("%s %s ;-)" % (os.path.basename(filename),
4312                                 " ".join(sorted(digests))))
4313                         eout.eend(0)
4314                 else:
4315                         return (False, st)
4316         return (True, st)
4317
4318 _fetch_resume_size_re = re.compile(r'(^[\d]+)([KMGTPEZY]?$)')
4319
4320 _size_suffix_map = {
4321         ''  : 0,
4322         'K' : 10,
4323         'M' : 20,
4324         'G' : 30,
4325         'T' : 40,
4326         'P' : 50,
4327         'E' : 60,
4328         'Z' : 70,
4329         'Y' : 80,
4330 }
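
# Worked example of how the pattern and map above are combined inside fetch():
# a PORTAGE_FETCH_RESUME_MIN_SIZE value such as "350K" is split by
# _fetch_resume_size_re into ("350", "K") and converted to
# 350 * 2 ** _size_suffix_map["K"] == 350 * 2 ** 10 == 358400 bytes.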
4331
4332 def fetch(myuris, mysettings, listonly=0, fetchonly=0, locks_in_subdir=".locks",use_locks=1, try_mirrors=1):
4333         "fetch files.  Will use digest file if available."
4334
4335         if not myuris:
4336                 return 1
4337
4338         features = mysettings.features
4339         restrict = mysettings.get("PORTAGE_RESTRICT","").split()
4340
4341         from portage.data import secpass
4342         userfetch = secpass >= 2 and "userfetch" in features
4343         userpriv = secpass >= 2 and "userpriv" in features
4344
4345         # 'nomirror' is bad/negative logic: you restrict mirroring, you don't "no-mirror".
4346         if "mirror" in restrict or \
4347            "nomirror" in restrict:
4348                 if ("mirror" in features) and ("lmirror" not in features):
4349                         # lmirror should allow you to bypass mirror restrictions.
4350                         # XXX: This is not a good thing, and is temporary at best.
4351                         print(_(">>> \"mirror\" mode desired and \"mirror\" restriction found; skipping fetch."))
4352                         return 1
4353
4354         # Generally, downloading the same file repeatedly from
4355         # every single available mirror is a waste of bandwidth
4356         # and time, so there needs to be a cap.
4357         checksum_failure_max_tries = 5
4358         v = checksum_failure_max_tries
4359         try:
4360                 v = int(mysettings.get("PORTAGE_FETCH_CHECKSUM_TRY_MIRRORS",
4361                         checksum_failure_max_tries))
4362         except (ValueError, OverflowError):
4363                 writemsg(_("!!! Variable PORTAGE_FETCH_CHECKSUM_TRY_MIRRORS"
4364                         " contains non-integer value: '%s'\n") % \
4365                         mysettings["PORTAGE_FETCH_CHECKSUM_TRY_MIRRORS"], noiselevel=-1)
4366                 writemsg(_("!!! Using PORTAGE_FETCH_CHECKSUM_TRY_MIRRORS "
4367                         "default value: %s\n") % checksum_failure_max_tries,
4368                         noiselevel=-1)
4369                 v = checksum_failure_max_tries
4370         if v < 1:
4371                 writemsg(_("!!! Variable PORTAGE_FETCH_CHECKSUM_TRY_MIRRORS"
4372                         " contains value less than 1: '%s'\n") % v, noiselevel=-1)
4373                 writemsg(_("!!! Using PORTAGE_FETCH_CHECKSUM_TRY_MIRRORS "
4374                         "default value: %s\n") % checksum_failure_max_tries,
4375                         noiselevel=-1)
4376                 v = checksum_failure_max_tries
4377         checksum_failure_max_tries = v
4378         del v
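        # For example, setting PORTAGE_FETCH_CHECKSUM_TRY_MIRRORS="3" in make.conf
        # makes fetch() give up on a distfile after three checksum failures instead
        # of the default of five tries.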
4379
4380         fetch_resume_size_default = "350K"
4381         fetch_resume_size = mysettings.get("PORTAGE_FETCH_RESUME_MIN_SIZE")
4382         if fetch_resume_size is not None:
4383                 fetch_resume_size = "".join(fetch_resume_size.split())
4384                 if not fetch_resume_size:
4385                         # If it's empty, silently use the default.
4386                         fetch_resume_size = fetch_resume_size_default
4387                 match = _fetch_resume_size_re.match(fetch_resume_size)
4388                 if match is None or \
4389                         (match.group(2).upper() not in _size_suffix_map):
4390                         writemsg(_("!!! Variable PORTAGE_FETCH_RESUME_MIN_SIZE"
4391                                 " contains an unrecognized format: '%s'\n") % \
4392                                 mysettings["PORTAGE_FETCH_RESUME_MIN_SIZE"], noiselevel=-1)
4393                         writemsg(_("!!! Using PORTAGE_FETCH_RESUME_MIN_SIZE "
4394                                 "default value: %s\n") % fetch_resume_size_default,
4395                                 noiselevel=-1)
4396                         fetch_resume_size = None
4397         if fetch_resume_size is None:
4398                 fetch_resume_size = fetch_resume_size_default
4399                 match = _fetch_resume_size_re.match(fetch_resume_size)
4400         fetch_resume_size = int(match.group(1)) * \
4401                 2 ** _size_suffix_map[match.group(2).upper()]
4402
4403         # Behave like the package has RESTRICT="primaryuri" after a
4404         # couple of checksum failures, to increase the probability
4405         # of success before checksum_failure_max_tries is reached.
4406         checksum_failure_primaryuri = 2
4407         thirdpartymirrors = mysettings.thirdpartymirrors()
4408
4409         # In the background parallel-fetch process, it's safe to skip checksum
4410         # verification of pre-existing files in $DISTDIR that have the correct
4411         # file size. The parent process will verify their checksums prior to
4412         # the unpack phase.
4413
4414         parallel_fetchonly = "PORTAGE_PARALLEL_FETCHONLY" in mysettings
4415         if parallel_fetchonly:
4416                 fetchonly = 1
4417
4418         check_config_instance(mysettings)
4419
4420         custommirrors = grabdict(os.path.join(mysettings["PORTAGE_CONFIGROOT"],
4421                 CUSTOM_MIRRORS_FILE), recursive=1)
4422
4423         mymirrors=[]
4424
4425         if listonly or ("distlocks" not in features):
4426                 use_locks = 0
4427
4428         fetch_to_ro = 0
4429         if "skiprocheck" in features:
4430                 fetch_to_ro = 1
4431
4432         if not os.access(mysettings["DISTDIR"],os.W_OK) and fetch_to_ro:
4433                 if use_locks:
4434                         writemsg(colorize("BAD",
4435                                 _("!!! For fetching to a read-only filesystem, "
4436                                 "locking should be turned off.\n")), noiselevel=-1)
4437                         writemsg(_("!!! This can be done by adding -distlocks to "
4438                                 "FEATURES in /etc/make.conf\n"), noiselevel=-1)
4439 #                       use_locks = 0
4440
4441         # local mirrors are always added
4442         if "local" in custommirrors:
4443                 mymirrors += custommirrors["local"]
4444
4445         if "nomirror" in restrict or \
4446            "mirror" in restrict:
4447                 # We don't add any mirrors.
4448                 pass
4449         else:
4450                 if try_mirrors:
4451                         mymirrors += [x.rstrip("/") for x in mysettings["GENTOO_MIRRORS"].split() if x]
4452
4453         skip_manifest = mysettings.get("EBUILD_SKIP_MANIFEST") == "1"
4454         pkgdir = mysettings.get("O")
4455         if not (pkgdir is None or skip_manifest):
4456                 mydigests = Manifest(
4457                         pkgdir, mysettings["DISTDIR"]).getTypeDigests("DIST")
4458         else:
4459                 # no digests because fetch was not called for a specific package
4460                 mydigests = {}
4461
4462         ro_distdirs = [x for x in \
4463                 util.shlex_split(mysettings.get("PORTAGE_RO_DISTDIRS", "")) \
4464                 if os.path.isdir(x)]
4465
4466         fsmirrors = []
4467         for x in range(len(mymirrors)-1,-1,-1):
4468                 if mymirrors[x] and mymirrors[x][0]=='/':
4469                         fsmirrors += [mymirrors[x]]
4470                         del mymirrors[x]
4471
4472         restrict_fetch = "fetch" in restrict
4473         custom_local_mirrors = custommirrors.get("local", [])
4474         if restrict_fetch:
4475                 # With fetch restriction, a normal uri may only be fetched from
4476                 # custom local mirrors (if available).  A mirror:// uri may also
4477                 # be fetched from specific mirrors (effectively overriding fetch
4478                 # restriction, but only for specific mirrors).
4479                 locations = custom_local_mirrors
4480         else:
4481                 locations = mymirrors
4482
4483         file_uri_tuples = []
4484         if isinstance(myuris, dict):
4485                 for myfile, uri_set in myuris.items():
4486                         for myuri in uri_set:
4487                                 file_uri_tuples.append((myfile, myuri))
4488         else:
4489                 for myuri in myuris:
4490                         file_uri_tuples.append((os.path.basename(myuri), myuri))
4491
4492         filedict={}
4493         primaryuri_indexes={}
4494         primaryuri_dict = {}
4495         thirdpartymirror_uris = {}
4496         for myfile, myuri in file_uri_tuples:
4497                 if myfile not in filedict:
4498                         filedict[myfile]=[]
4499                         for y in range(0,len(locations)):
4500                                 filedict[myfile].append(locations[y]+"/distfiles/"+myfile)
4501                 if myuri[:9]=="mirror://":
4502                         eidx = myuri.find("/", 9)
4503                         if eidx != -1:
4504                                 mirrorname = myuri[9:eidx]
4505                                 path = myuri[eidx+1:]
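                                # For example, a SRC_URI entry such as
                                # mirror://gnu/bash/bash-4.0.tar.gz yields
                                # mirrorname == "gnu" and path == "bash/bash-4.0.tar.gz",
                                # which is appended below to each configured "gnu" mirror.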
4506
4507                                 # Try user-defined mirrors first
4508                                 if mirrorname in custommirrors:
4509                                         for cmirr in custommirrors[mirrorname]:
4510                                                 filedict[myfile].append(
4511                                                         cmirr.rstrip("/") + "/" + path)
4512
4513                                 # now try the official mirrors
4514                                 if mirrorname in thirdpartymirrors:
4515                                         shuffle(thirdpartymirrors[mirrorname])
4516
4517                                         uris = [locmirr.rstrip("/") + "/" + path \
4518                                                 for locmirr in thirdpartymirrors[mirrorname]]
4519                                         filedict[myfile].extend(uris)
4520                                         thirdpartymirror_uris.setdefault(myfile, []).extend(uris)
4521
4522                                 if not filedict[myfile]:
4523                                         writemsg(_("No known mirror by the name: %s\n") % (mirrorname))
4524                         else:
4525                                 writemsg(_("Invalid mirror definition in SRC_URI:\n"), noiselevel=-1)
4526                                 writemsg("  %s\n" % (myuri), noiselevel=-1)
4527                 else:
4528                         if restrict_fetch:
4529                                 # Only fetching from specific mirrors is allowed.
4530                                 continue
4531                         if "primaryuri" in restrict:
4532                                 # Use the source site first.
4533                                 if myfile in primaryuri_indexes:
4534                                         primaryuri_indexes[myfile] += 1
4535                                 else:
4536                                         primaryuri_indexes[myfile] = 0
4537                                 filedict[myfile].insert(primaryuri_indexes[myfile], myuri)
4538                         else:
4539                                 filedict[myfile].append(myuri)
4540                         primaryuris = primaryuri_dict.get(myfile)
4541                         if primaryuris is None:
4542                                 primaryuris = []
4543                                 primaryuri_dict[myfile] = primaryuris
4544                         primaryuris.append(myuri)
4545
4546         # Prefer thirdpartymirrors over normal mirrors in cases when
4547         # the file does not yet exist on the normal mirrors.
4548         for myfile, uris in thirdpartymirror_uris.items():
4549                 primaryuri_dict.setdefault(myfile, []).extend(uris)
4550
4551         can_fetch=True
4552
4553         if listonly:
4554                 can_fetch = False
4555
4556         if can_fetch and not fetch_to_ro:
4557                 global _userpriv_test_write_file_cache
4558                 dirmode  = 0o2070
4559                 filemode =   0o60
4560                 modemask =    0o2
4561                 dir_gid = portage_gid
4562                 if "FAKED_MODE" in mysettings:
4563                         # When inside fakeroot, directories with portage's gid appear
4564                         # to have root's gid. Therefore, use root's gid instead of
4565                         # portage's gid to avoid spurious permissions adjustments
4566                         # when inside fakeroot.
4567                         dir_gid = 0
4568                 distdir_dirs = [""]
4569                 if "distlocks" in features:
4570                         distdir_dirs.append(".locks")
4571                 try:
4572
4573                         for x in distdir_dirs:
4574                                 mydir = os.path.join(mysettings["DISTDIR"], x)
4575                                 write_test_file = os.path.join(
4576                                         mydir, ".__portage_test_write__")
4577
4578                                 try:
4579                                         st = os.stat(mydir)
4580                                 except OSError:
4581                                         st = None
4582
4583                                 if st is not None and stat.S_ISDIR(st.st_mode):
4584                                         if not (userfetch or userpriv):
4585                                                 continue
4586                                         if _userpriv_test_write_file(mysettings, write_test_file):
4587                                                 continue
4588
4589                                 _userpriv_test_write_file_cache.pop(write_test_file, None)
4590                                 if portage.util.ensure_dirs(mydir, gid=dir_gid, mode=dirmode, mask=modemask):
4591                                         if st is None:
4592                                                 # The directory has just been created
4593                                                 # and therefore it must be empty.
4594                                                 continue
4595                                         writemsg(_("Adjusting permissions recursively: '%s'\n") % mydir,
4596                                                 noiselevel=-1)
4597                                         def onerror(e):
4598                                                 raise # bail out on the first error that occurs during recursion
4599                                         if not apply_recursive_permissions(mydir,
4600                                                 gid=dir_gid, dirmode=dirmode, dirmask=modemask,
4601                                                 filemode=filemode, filemask=modemask, onerror=onerror):
4602                                                 raise portage.exception.OperationNotPermitted(
4603                                                         _("Failed to apply recursive permissions for the portage group."))
4604                 except portage.exception.PortageException as e:
4605                         if not os.path.isdir(mysettings["DISTDIR"]):
4606                                 writemsg("!!! %s\n" % str(e), noiselevel=-1)
4607                                 writemsg(_("!!! Directory Not Found: DISTDIR='%s'\n") % mysettings["DISTDIR"], noiselevel=-1)
4608                                 writemsg(_("!!! Fetching will fail!\n"), noiselevel=-1)
4609
4610         if can_fetch and \
4611                 not fetch_to_ro and \
4612                 not os.access(mysettings["DISTDIR"], os.W_OK):
4613                 writemsg(_("!!! No write access to '%s'\n") % mysettings["DISTDIR"],
4614                         noiselevel=-1)
4615                 can_fetch = False
4616
4617         if can_fetch and use_locks and locks_in_subdir:
4618                         distlocks_subdir = os.path.join(mysettings["DISTDIR"], locks_in_subdir)
4619                         if not os.access(distlocks_subdir, os.W_OK):
4620                                 writemsg(_("!!! No write access to %s.  Aborting.\n") % distlocks_subdir,
4621                                         noiselevel=-1)
4622                                 return 0
4623                         del distlocks_subdir
4624
4625         distdir_writable = can_fetch and not fetch_to_ro
4626         failed_files = set()
4627         restrict_fetch_msg = False
4628
4629         for myfile in filedict:
4630                 """
4631                 fetched  status
4632                 0        nonexistent
4633                 1        partially downloaded
4634                 2        completely downloaded
4635                 """
4636                 fetched = 0
4637
4638                 orig_digests = mydigests.get(myfile, {})
4639                 size = orig_digests.get("size")
4640                 if size == 0:
4641                         # Zero-byte distfiles are always invalid, so discard their digests.
4642                         del mydigests[myfile]
4643                         orig_digests.clear()
4644                         size = None
4645                 pruned_digests = orig_digests
4646                 if parallel_fetchonly:
4647                         pruned_digests = {}
4648                         if size is not None:
4649                                 pruned_digests["size"] = size
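                # With only a "size" entry, _check_distfile() falls back to a cheap size
                # comparison and skips checksum hashing entirely; the parent process
                # verifies the full digests later, before the unpack phase.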
4650
4651                 myfile_path = os.path.join(mysettings["DISTDIR"], myfile)
4652                 has_space = True
4653                 has_space_superuser = True
4654                 file_lock = None
4655                 if listonly:
4656                         writemsg_stdout("\n", noiselevel=-1)
4657                 else:
4658                         # check if there is enough space in DISTDIR to completely store myfile
4659                         # overestimate the filesize so we aren't bitten by FS overhead
4660                         if size is not None and hasattr(os, "statvfs"):
4661                                 vfs_stat = os.statvfs(mysettings["DISTDIR"])
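                                # statvfs() reports block counts: f_bavail is the space an
                                # unprivileged user may still allocate and f_bfree the space
                                # available to root; both are multiplied by f_bsize below to
                                # compare against the bytes still needed for this download.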
4662                                 try:
4663                                         mysize = os.stat(myfile_path).st_size
4664                                 except OSError as e:
4665                                         if e.errno not in (errno.ENOENT, errno.ESTALE):
4666                                                 raise
4667                                         del e
4668                                         mysize = 0
4669                                 if (size - mysize + vfs_stat.f_bsize) >= \
4670                                         (vfs_stat.f_bsize * vfs_stat.f_bavail):
4671
4672                                         if (size - mysize + vfs_stat.f_bsize) >= \
4673                                                 (vfs_stat.f_bsize * vfs_stat.f_bfree):
4674                                                 has_space_superuser = False
4675
4676                                         if not has_space_superuser:
4677                                                 has_space = False
4678                                         elif secpass < 2:
4679                                                 has_space = False
4680                                         elif userfetch:
4681                                                 has_space = False
4682
4683                         if not has_space:
4684                                 writemsg(_("!!! Insufficient space to store %s in %s\n") % \
4685                                         (myfile, mysettings["DISTDIR"]), noiselevel=-1)
4686
4687                                 if has_space_superuser:
4688                                         writemsg(_("!!! Insufficient privileges to use "
4689                                                 "remaining space.\n"), noiselevel=-1)
4690                                         if userfetch:
4691                                                 writemsg(_("!!! You may set FEATURES=\"-userfetch\""
4692                                                         " in /etc/make.conf in order to fetch with\n"
4693                                                         "!!! superuser privileges.\n"), noiselevel=-1)
4694
4695                         if distdir_writable and use_locks:
4696
4697                                 if locks_in_subdir:
4698                                         lock_file = os.path.join(mysettings["DISTDIR"],
4699                                                 locks_in_subdir, myfile)
4700                                 else:
4701                                         lock_file = myfile_path
4702
4703                                 lock_kwargs = {}
4704                                 if fetchonly:
4705                                         lock_kwargs["flags"] = os.O_NONBLOCK
4706
4707                                 try:
4708                                         file_lock = portage.locks.lockfile(myfile_path,
4709                                                 wantnewlockfile=1, **lock_kwargs)
4710                                 except portage.exception.TryAgain:
4711                                         writemsg(_(">>> File '%s' is already locked by "
4712                                                 "another fetcher. Continuing...\n") % myfile,
4713                                                 noiselevel=-1)
4714                                         continue
4715                 try:
4716                         if not listonly:
4717
4718                                 eout = portage.output.EOutput()
4719                                 eout.quiet = mysettings.get("PORTAGE_QUIET") == "1"
4720                                 match, mystat = _check_distfile(
4721                                         myfile_path, pruned_digests, eout)
4722                                 if match:
4723                                         if distdir_writable:
4724                                                 try:
4725                                                         apply_secpass_permissions(myfile_path,
4726                                                                 gid=portage_gid, mode=0o664, mask=0o2,
4727                                                                 stat_cached=mystat)
4728                                                 except portage.exception.PortageException as e:
4729                                                         if not os.access(myfile_path, os.R_OK):
4730                                                                 writemsg(_("!!! Failed to adjust permissions:"
4731                                                                         " %s\n") % str(e), noiselevel=-1)
4732                                                         del e
4733                                         continue
4734
4735                                 if distdir_writable and mystat is None:
4736                                         # Remove broken symlinks if necessary.
4737                                         try:
4738                                                 os.unlink(myfile_path)
4739                                         except OSError:
4740                                                 pass
4741
4742                                 if mystat is not None:
4743                                         if stat.S_ISDIR(mystat.st_mode):
4744                                                 portage.util.writemsg_level(
4745                                                         _("!!! Unable to fetch file since "
4746                                                         "a directory is in the way: \n"
4747                                                         "!!!   %s\n") % myfile_path,
4748                                                         level=logging.ERROR, noiselevel=-1)
4749                                                 return 0
4750
4751                                         if mystat.st_size == 0:
4752                                                 if distdir_writable:
4753                                                         try:
4754                                                                 os.unlink(myfile_path)
4755                                                         except OSError:
4756                                                                 pass
4757                                         elif distdir_writable:
4758                                                 if mystat.st_size < fetch_resume_size and \
4759                                                         mystat.st_size < size:
4760                                                         # If the file already exists and the size does not
4761                                                         # match the existing digests, it may be that the
4762                                                         # user is attempting to update the digest. In this
4763                                                         # case, the digestgen() function will advise the
4764                                                         # user to use `ebuild --force foo.ebuild manifest`
4765                                                         # in order to force the old digests to be replaced.
4766                                                         # Since the user may want to keep this file, rename
4767                                                         # it instead of deleting it.
4768                                                         writemsg(_(">>> Renaming distfile with size "
4769                                                                 "%d (smaller than " "PORTAGE_FETCH_RESU"
4770                                                                 "ME_MIN_SIZE)\n") % mystat.st_size)
4771                                                         temp_filename = \
4772                                                                 _checksum_failure_temp_file(
4773                                                                 mysettings["DISTDIR"], myfile)
4774                                                         writemsg_stdout(_("Refetching... "
4775                                                                 "File renamed to '%s'\n\n") % \
4776                                                                 temp_filename, noiselevel=-1)
4777                                                 elif mystat.st_size >= size:
4778                                                         temp_filename = \
4779                                                                 _checksum_failure_temp_file(
4780                                                                 mysettings["DISTDIR"], myfile)
4781                                                         writemsg_stdout(_("Refetching... "
4782                                                                 "File renamed to '%s'\n\n") % \
4783                                                                 temp_filename, noiselevel=-1)
4784
4785                                 if distdir_writable and ro_distdirs:
4786                                         readonly_file = None
4787                                         for x in ro_distdirs:
4788                                                 filename = os.path.join(x, myfile)
4789                                                 match, mystat = _check_distfile(
4790                                                         filename, pruned_digests, eout)
4791                                                 if match:
4792                                                         readonly_file = filename
4793                                                         break
4794                                         if readonly_file is not None:
4795                                                 try:
4796                                                         os.unlink(myfile_path)
4797                                                 except OSError as e:
4798                                                         if e.errno not in (errno.ENOENT, errno.ESTALE):
4799                                                                 raise
4800                                                         del e
4801                                                 os.symlink(readonly_file, myfile_path)
4802                                                 continue
4803
4804                                 if fsmirrors and not os.path.exists(myfile_path) and has_space:
4805                                         for mydir in fsmirrors:
4806                                                 mirror_file = os.path.join(mydir, myfile)
4807                                                 try:
4808                                                         shutil.copyfile(mirror_file, myfile_path)
4809                                                         writemsg(_("Local mirror has file: %s\n") % myfile)
4810                                                         break
4811                                                 except (IOError, OSError) as e:
4812                                                         if e.errno not in (errno.ENOENT, errno.ESTALE):
4813                                                                 raise
4814                                                         del e
4815
4816                                 try:
4817                                         mystat = os.stat(myfile_path)
4818                                 except OSError as e:
4819                                         if e.errno not in (errno.ENOENT, errno.ESTALE):
4820                                                 raise
4821                                         del e
4822                                 else:
4823                                         try:
4824                                                 apply_secpass_permissions(
4825                                                         myfile_path, gid=portage_gid, mode=0o664, mask=0o2,
4826                                                         stat_cached=mystat)
4827                                         except portage.exception.PortageException as e:
4828                                                 if not os.access(myfile_path, os.R_OK):
4829                                                         writemsg(_("!!! Failed to adjust permissions:"
4830                                                                 " %s\n") % str(e), noiselevel=-1)
4831
4832                                         # If the file is empty then it's obviously invalid. Remove
4833                                         # the empty file and try to download if possible.
4834                                         if mystat.st_size == 0:
4835                                                 if distdir_writable:
4836                                                         try:
4837                                                                 os.unlink(myfile_path)
4838                                                         except EnvironmentError:
4839                                                                 pass
4840                                         elif myfile not in mydigests:
4841                                                 # We don't have a digest, but the file exists.  We must
4842                                                 # assume that it is fully downloaded.
4843                                                 continue
4844                                         else:
4845                                                 if mystat.st_size < mydigests[myfile]["size"] and \
4846                                                         not restrict_fetch:
4847                                                         fetched = 1 # Try to resume this download.
4848                                                 elif parallel_fetchonly and \
4849                                                         mystat.st_size == mydigests[myfile]["size"]:
4850                                                         eout = portage.output.EOutput()
4851                                                         eout.quiet = \
4852                                                                 mysettings.get("PORTAGE_QUIET") == "1"
4853                                                         eout.ebegin(
4854                                                                 "%s size ;-)" % (myfile, ))
4855                                                         eout.eend(0)
4856                                                         continue
4857                                                 else:
4858                                                         verified_ok, reason = portage.checksum.verify_all(
4859                                                                 myfile_path, mydigests[myfile])
4860                                                         if not verified_ok:
4861                                                                 writemsg(_("!!! Previously fetched"
4862                                                                         " file: '%s'\n") % myfile, noiselevel=-1)
4863                                                                 writemsg(_("!!! Reason: %s\n") % reason[0],
4864                                                                         noiselevel=-1)
4865                                                                 writemsg(_("!!! Got:      %s\n"
4866                                                                         "!!! Expected: %s\n") % \
4867                                                                         (reason[1], reason[2]), noiselevel=-1)
4868                                                                 if reason[0] == _("Insufficient data for checksum verification"):
4869                                                                         return 0
4870                                                                 if distdir_writable:
4871                                                                         temp_filename = \
4872                                                                                 _checksum_failure_temp_file(
4873                                                                                 mysettings["DISTDIR"], myfile)
4874                                                                         writemsg_stdout(_("Refetching... "
4875                                                                                 "File renamed to '%s'\n\n") % \
4876                                                                                 temp_filename, noiselevel=-1)
4877                                                         else:
4878                                                                 eout = portage.output.EOutput()
4879                                                                 eout.quiet = \
4880                                                                         mysettings.get("PORTAGE_QUIET", None) == "1"
4881                                                                 digests = mydigests.get(myfile)
4882                                                                 if digests:
4883                                                                         digests = list(digests)
4884                                                                         digests.sort()
4885                                                                         eout.ebegin(
4886                                                                                 "%s %s ;-)" % (myfile, " ".join(digests)))
4887                                                                         eout.eend(0)
4888                                                                 continue # fetch any remaining files
4889
4890                         # Create a reversed list since that is optimal for list.pop().
4891                         uri_list = filedict[myfile][:]
4892                         uri_list.reverse()
4893                         checksum_failure_count = 0
4894                         tried_locations = set()
4895                         while uri_list:
4896                                 loc = uri_list.pop()
4897                                 # Eliminate duplicates here in case we've switched to
4898                                 # "primaryuri" mode on the fly due to a checksum failure.
4899                                 if loc in tried_locations:
4900                                         continue
4901                                 tried_locations.add(loc)
4902                                 if listonly:
4903                                         writemsg_stdout(loc+" ", noiselevel=-1)
4904                                         continue
4905                                 # allow different fetchcommands per protocol
4906                                 protocol = loc[0:loc.find("://")]
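                                # For example, with loc == "https://distfiles.example.org/foo.tar.gz"
                                # the protocol is "https", so a user-defined FETCHCOMMAND_HTTPS (or
                                # RESUMECOMMAND_HTTPS) takes precedence over the generic FETCHCOMMAND
                                # and RESUMECOMMAND settings looked up below.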
4907
4908                                 missing_file_param = False
4909                                 fetchcommand_var = "FETCHCOMMAND_" + protocol.upper()
4910                                 fetchcommand = mysettings.get(fetchcommand_var)
4911                                 if fetchcommand is None:
4912                                         fetchcommand_var = "FETCHCOMMAND"
4913                                         fetchcommand = mysettings.get(fetchcommand_var)
4914                                         if fetchcommand is None:
4915                                                 portage.util.writemsg_level(
4916                                                         _("!!! %s is unset. It should "
4917                                                         "have been defined in\n!!! %s/make.globals.\n") \
4918                                                         % (fetchcommand_var,
4919                                                         portage.const.GLOBAL_CONFIG_PATH),
4920                                                         level=logging.ERROR, noiselevel=-1)
4921                                                 return 0
4922                                 if "${FILE}" not in fetchcommand:
4923                                         portage.util.writemsg_level(
4924                                                 _("!!! %s does not contain the required ${FILE}"
4925                                                 " parameter.\n") % fetchcommand_var,
4926                                                 level=logging.ERROR, noiselevel=-1)
4927                                         missing_file_param = True
4928
4929                                 resumecommand_var = "RESUMECOMMAND_" + protocol.upper()
4930                                 resumecommand = mysettings.get(resumecommand_var)
4931                                 if resumecommand is None:
4932                                         resumecommand_var = "RESUMECOMMAND"
4933                                         resumecommand = mysettings.get(resumecommand_var)
4934                                         if resumecommand is None:
4935                                                 portage.util.writemsg_level(
4936                                                         _("!!! %s is unset. It should "
4937                                                         "have been defined in\n!!! %s/make.globals.\n") \
4938                                                         % (resumecommand_var,
4939                                                         portage.const.GLOBAL_CONFIG_PATH),
4940                                                         level=logging.ERROR, noiselevel=-1)
4941                                                 return 0
4942                                 if "${FILE}" not in resumecommand:
4943                                         portage.util.writemsg_level(
4944                                                 _("!!! %s does not contain the required ${FILE}"
4945                                                 " parameter.\n") % resumecommand_var,
4946                                                 level=logging.ERROR, noiselevel=-1)
4947                                         missing_file_param = True
4948
4949                                 if missing_file_param:
4950                                         portage.util.writemsg_level(
4951                                                 _("!!! Refer to the make.conf(5) man page for "
4952                                                 "information about how to\n!!! correctly specify "
4953                                                 "FETCHCOMMAND and RESUMECOMMAND.\n"),
4954                                                 level=logging.ERROR, noiselevel=-1)
4955                                         if myfile != os.path.basename(loc):
4956                                                 return 0
4957
4958                                 if not can_fetch:
4959                                         if fetched != 2:
4960                                                 try:
4961                                                         mysize = os.stat(myfile_path).st_size
4962                                                 except OSError as e:
4963                                                         if e.errno not in (errno.ENOENT, errno.ESTALE):
4964                                                                 raise
4965                                                         del e
4966                                                         mysize = 0
4967
4968                                                 if mysize == 0:
4969                                                         writemsg(_("!!! File %s isn't fetched and we are unable to get it.\n") % myfile,
4970                                                                 noiselevel=-1)
4971                                                 elif size is None or size > mysize:
4972                                                         writemsg(_("!!! File %s isn't fully fetched, and we are unable to complete it.\n") % myfile,
4973                                                                 noiselevel=-1)
4974                                                 else:
4975                                                         writemsg(_("!!! File %s has an incorrect size, "
4976                                                                 "but we are unable to retry.\n") % myfile, noiselevel=-1)
4977                                                 return 0
4978                                         else:
4979                                                 continue
4980
4981                                 if fetched != 2 and has_space:
4982                                         #we either need to resume or start the download
4983                                         if fetched == 1:
4984                                                 try:
4985                                                         mystat = os.stat(myfile_path)
4986                                                 except OSError as e:
4987                                                         if e.errno not in (errno.ENOENT, errno.ESTALE):
4988                                                                 raise
4989                                                         del e
4990                                                         fetched = 0
4991                                                 else:
4992                                                         if mystat.st_size < fetch_resume_size:
4993                                                                 writemsg(_(">>> Deleting distfile with size "
4994                                                                         "%d (smaller than " "PORTAGE_FETCH_RESU"
4995                                                                         "ME_MIN_SIZE)\n") % mystat.st_size)
4996                                                                 try:
4997                                                                         os.unlink(myfile_path)
4998                                                                 except OSError as e:
4999                                                                         if e.errno not in \
5000                                                                                 (errno.ENOENT, errno.ESTALE):
5001                                                                                 raise
5002                                                                         del e
5003                                                                 fetched = 0
5004                                         if fetched == 1:
5005                                                 #resume mode:
5006                                                 writemsg(_(">>> Resuming download...\n"))
5007                                                 locfetch=resumecommand
5008                                                 command_var = resumecommand_var
5009                                         else:
5010                                                 #normal mode:
5011                                                 locfetch=fetchcommand
5012                                                 command_var = fetchcommand_var
5013                                         writemsg_stdout(_(">>> Downloading '%s'\n") % \
5014                                                 re.sub(r'//(.+):.+@(.+)/',r'//\1:*password*@\2/', loc))
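                                        # The substitution above only masks credentials in the displayed
                                        # URI, e.g. "ftp://user:secret@host/f.tar.gz" is shown as
                                        # "ftp://user:*password*@host/f.tar.gz"; the unmodified URI is
                                        # still passed to the fetch command.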
5015                                         variables = {
5016                                                 "DISTDIR": mysettings["DISTDIR"],
5017                                                 "URI":     loc,
5018                                                 "FILE":    myfile
5019                                         }
5020
5021                                         myfetch = util.shlex_split(locfetch)
5022                                         myfetch = [varexpand(x, mydict=variables) for x in myfetch]
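                                        # FETCHCOMMAND/RESUMECOMMAND are shell-like templates; a typical
                                        # value looks roughly like
                                        #     wget -t 3 -T 60 --passive-ftp -O "${DISTDIR}/${FILE}" "${URI}"
                                        # and varexpand() above fills in DISTDIR, URI and FILE before the
                                        # argument list is handed to _spawn_fetch().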
5023                                         myret = -1
5024                                         try:
5025
5026                                                 myret = _spawn_fetch(mysettings, myfetch)
5027
5028                                         finally:
5029                                                 try:
5030                                                         apply_secpass_permissions(myfile_path,
5031                                                                 gid=portage_gid, mode=0o664, mask=0o2)
5032                                                 except portage.exception.FileNotFound as e:
5033                                                         pass
5034                                                 except portage.exception.PortageException as e:
5035                                                         if not os.access(myfile_path, os.R_OK):
5036                                                                 writemsg(_("!!! Failed to adjust permissions:"
5037                                                                         " %s\n") % str(e), noiselevel=-1)
5038
5039                                         # If the file is empty then it's obviously invalid.  Don't
5040                                         # trust the return value from the fetcher.  Remove the
5041                                         # empty file and try to download again.
5042                                         try:
5043                                                 if os.stat(myfile_path).st_size == 0:
5044                                                         os.unlink(myfile_path)
5045                                                         fetched = 0
5046                                                         continue
5047                                         except EnvironmentError:
5048                                                 pass
5049
5050                                         if mydigests is not None and myfile in mydigests:
5051                                                 try:
5052                                                         mystat = os.stat(myfile_path)
5053                                                 except OSError as e:
5054                                                         if e.errno not in (errno.ENOENT, errno.ESTALE):
5055                                                                 raise
5056                                                         del e
5057                                                         fetched = 0
5058                                                 else:
5059
5060                                                         if stat.S_ISDIR(mystat.st_mode):
5061                                                                 # This can happen if FETCHCOMMAND erroneously
5062                                                                 # contains wget's -P option where it should
5063                                                                 # instead have -O.
5064                                                                 portage.util.writemsg_level(
5065                                                                         _("!!! The command specified in the "
5066                                                                         "%s variable appears to have\n!!! "
5067                                                                         "created a directory instead of a "
5068                                                                         "normal file.\n") % command_var,
5069                                                                         level=logging.ERROR, noiselevel=-1)
5070                                                                 portage.util.writemsg_level(
5071                                                                         _("!!! Refer to the make.conf(5) "
5072                                                                         "man page for information about how "
5073                                                                         "to\n!!! correctly specify "
5074                                                                         "FETCHCOMMAND and RESUMECOMMAND.\n"),
5075                                                                         level=logging.ERROR, noiselevel=-1)
5076                                                                 return 0
5077
5078                                                         # no exception?  file exists. let digestcheck() report
5079                                                         # an appropriate error for size or checksum problems
5080
5081                                                         # If the fetcher reported success and the file is
5082                                                         # too small, it's probably because the digest is
5083                                                         # bad (upstream changed the distfile).  In this
5084                                                         # case we don't want to attempt to resume. Show a
5085                                                         # digest verification failure so that the user gets
5086                                                         # a clue about what just happened.
5087                                                         if myret != os.EX_OK and \
5088                                                                 mystat.st_size < mydigests[myfile]["size"]:
5089                                                                 # Fetch failed... Try the next one... Kill 404 files though.
5090                                                                 if (mystat[stat.ST_SIZE]<100000) and (len(myfile)>4) and not ((myfile[-5:]==".html") or (myfile[-4:]==".htm")):
5091                                                                         html404=re.compile("<title>.*(not found|404).*</title>",re.I|re.M)
5092                                                                         if html404.search(codecs.open(
5093                                                                                 _unicode_encode(myfile_path,
5094                                                                                 encoding=_encodings['fs'], errors='strict'),
5095                                                                                 mode='r', encoding=_encodings['content'], errors='replace'
5096                                                                                 ).read()):
5097                                                                                 try:
5098                                                                                         os.unlink(mysettings["DISTDIR"]+"/"+myfile)
5099                                                                                         writemsg(_(">>> Deleting invalid distfile. (Improper 404 redirect from server.)\n"))
5100                                                                                         fetched = 0
5101                                                                                         continue
5102                                                                                 except (IOError, OSError):
5103                                                                                         pass
5104                                                                 fetched = 1
5105                                                                 continue
5106                                                         if True:
5107                                                                 # File is the correct size--check the checksums for the fetched
5108                                                                 # file NOW, for those users who don't have a stable/continuous
5109                                                                 # net connection. This way we have a chance to try to download
5110                                                                 # from another mirror...
5111                                                                 verified_ok,reason = portage.checksum.verify_all(mysettings["DISTDIR"]+"/"+myfile, mydigests[myfile])
5112                                                                 if not verified_ok:
5113                                                                         print(reason)
5114                                                                         writemsg(_("!!! Fetched file: %s VERIFY FAILED!\n") % myfile,
5115                                                                                 noiselevel=-1)
5116                                                                         writemsg(_("!!! Reason: %s\n") % reason[0],
5117                                                                                 noiselevel=-1)
5118                                                                         writemsg(_("!!! Got:      %s\n!!! Expected: %s\n") % \
5119                                                                                 (reason[1], reason[2]), noiselevel=-1)
5120                                                                         if reason[0] == _("Insufficient data for checksum verification"):
5121                                                                                 return 0
5122                                                                         temp_filename = \
5123                                                                                 _checksum_failure_temp_file(
5124                                                                                 mysettings["DISTDIR"], myfile)
5125                                                                         writemsg_stdout(_("Refetching... "
5126                                                                                 "File renamed to '%s'\n\n") % \
5127                                                                                 temp_filename, noiselevel=-1)
5128                                                                         fetched=0
5129                                                                         checksum_failure_count += 1
5130                                                                         if checksum_failure_count == \
5131                                                                                 checksum_failure_primaryuri:
5132                                                                                 # Switch to "primaryuri" mode in order
5133                                                                                 # to increase the probability
5134                                                                                 # of success.
5135                                                                                 primaryuris = \
5136                                                                                         primaryuri_dict.get(myfile)
5137                                                                                 if primaryuris:
5138                                                                                         uri_list.extend(
5139                                                                                                 reversed(primaryuris))
5140                                                                         if checksum_failure_count >= \
5141                                                                                 checksum_failure_max_tries:
5142                                                                                 break
5143                                                                 else:
5144                                                                         eout = portage.output.EOutput()
5145                                                                         eout.quiet = mysettings.get("PORTAGE_QUIET", None) == "1"
5146                                                                         digests = mydigests.get(myfile)
5147                                                                         if digests:
5148                                                                                 eout.ebegin("%s %s ;-)" % \
5149                                                                                         (myfile, " ".join(sorted(digests))))
5150                                                                                 eout.eend(0)
5151                                                                         fetched=2
5152                                                                         break
5153                                         else:
5154                                                 if not myret:
5155                                                         fetched=2
5156                                                         break
5157                                                 elif mydigests!=None:
5158                                                         writemsg(_("No digest file available and download failed.\n\n"),
5159                                                                 noiselevel=-1)
5160                 finally:
5161                         if use_locks and file_lock:
5162                                 portage.locks.unlockfile(file_lock)
5163
5164                 if listonly:
5165                         writemsg_stdout("\n", noiselevel=-1)
5166                 if fetched != 2:
5167                         if restrict_fetch and not restrict_fetch_msg:
5168                                 restrict_fetch_msg = True
5169                                 msg = _("\n!!! %s/%s"
5170                                         " has fetch restriction turned on.\n"
5171                                         "!!! This probably means that this "
5172                                         "ebuild's files must be downloaded\n"
5173                                         "!!! manually.  See the comments in"
5174                                         " the ebuild for more information.\n\n") % \
5175                                         (mysettings["CATEGORY"], mysettings["PF"])
5176                                 portage.util.writemsg_level(msg,
5177                                         level=logging.ERROR, noiselevel=-1)
5178                                 have_builddir = "PORTAGE_BUILDDIR" in mysettings and \
5179                                         os.path.isdir(mysettings["PORTAGE_BUILDDIR"])
5180
5181                                 global_tmpdir = mysettings["PORTAGE_TMPDIR"]
5182                                 private_tmpdir = None
5183                                 if not parallel_fetchonly and not have_builddir:
5184                                         # When called by digestgen(), it's normal that
5185                                         # PORTAGE_BUILDDIR doesn't exist. It's helpful
5186                                         # to show the pkg_nofetch output though, so go
5187                                         # ahead and create a temporary PORTAGE_BUILDDIR.
5188                                         # Use a temporary config instance to avoid altering
5189                                         # the state of the one that's been passed in.
5190                                         mysettings = config(clone=mysettings)
5191                                         from tempfile import mkdtemp
5192                                         try:
5193                                                 private_tmpdir = mkdtemp("", "._portage_fetch_.",
5194                                                         global_tmpdir)
5195                                         except OSError as e:
5196                                                 if e.errno != portage.exception.PermissionDenied.errno:
5197                                                         raise
5198                                                 raise portage.exception.PermissionDenied(global_tmpdir)
5199                                         mysettings["PORTAGE_TMPDIR"] = private_tmpdir
5200                                         mysettings.backup_changes("PORTAGE_TMPDIR")
5201                                         debug = mysettings.get("PORTAGE_DEBUG") == "1"
5202                                         portage.doebuild_environment(mysettings["EBUILD"], "fetch",
5203                                                 mysettings["ROOT"], mysettings, debug, 1, None)
5204                                         prepare_build_dirs(mysettings["ROOT"], mysettings, 0)
5205                                         have_builddir = True
5206
5207                                 if not parallel_fetchonly and have_builddir:
5208                                         # Spawning pkg_nofetch requires PORTAGE_BUILDDIR for
5209                                         # ensuring sane $PWD (bug #239560) and storing elog
5210                                         # messages. Therefore, calling code needs to ensure that
5211                                         # PORTAGE_BUILDDIR is already clean and locked here.
5212
5213                                         # All the pkg_nofetch output goes to stderr since it's considered
5214                                         # to be an error message.
5215                                         fd_pipes = {
5216                                                 0 : sys.stdin.fileno(),
5217                                                 1 : sys.stderr.fileno(),
5218                                                 2 : sys.stderr.fileno(),
5219                                         }
5220
5221                                         ebuild_phase = mysettings.get("EBUILD_PHASE")
5222                                         try:
5223                                                 mysettings["EBUILD_PHASE"] = "nofetch"
5224                                                 spawn(_shell_quote(EBUILD_SH_BINARY) + \
5225                                                         " nofetch", mysettings, fd_pipes=fd_pipes)
5226                                         finally:
5227                                                 if ebuild_phase is None:
5228                                                         mysettings.pop("EBUILD_PHASE", None)
5229                                                 else:
5230                                                         mysettings["EBUILD_PHASE"] = ebuild_phase
5231                                                 if private_tmpdir is not None:
5232                                                         shutil.rmtree(private_tmpdir)
5233
5234                         elif restrict_fetch:
5235                                 pass
5236                         elif listonly:
5237                                 pass
5238                         elif not filedict[myfile]:
5239                                 writemsg(_("Warning: No mirrors available for file"
5240                                         " '%s'\n") % (myfile), noiselevel=-1)
5241                         else:
5242                                 writemsg(_("!!! Couldn't download '%s'. Aborting.\n") % myfile,
5243                                         noiselevel=-1)
5244
5245                         if listonly:
5246                                 continue
5247                         elif fetchonly:
5248                                 failed_files.add(myfile)
5249                                 continue
5250                         return 0
5251         if failed_files:
5252                 return 0
5253         return 1
5254
5255 def digestgen(myarchives, mysettings, overwrite=1, manifestonly=0, myportdb=None):
5256         """
5257         Generates a digest file if missing.  Assumes all files are available.
5258         DEPRECATED: this is now only a compatibility wrapper for
5259                     portage.manifest.Manifest()
5260         NOTE: manifestonly and overwrite are useless with manifest2 and
5261               are therefore ignored."""
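        # Illustrative usage (a sketch, not one of the original call paths);
        # the portdbapi instance and the archive list are assumed to be
        # supplied by the caller, e.g. via myportdb.getFetchMap():
        #
        #     if not digestgen(myarchives, mysettings, myportdb=myportdb):
        #         writemsg("!!! Manifest update failed\n", noiselevel=-1)
        #
        # A return value of 1 indicates success, 0 indicates failure.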
5262         if myportdb is None:
5263                 writemsg("Warning: myportdb not specified to digestgen\n")
5264                 global portdb
5265                 myportdb = portdb
5266         global _doebuild_manifest_exempt_depend
5267         try:
5268                 _doebuild_manifest_exempt_depend += 1
5269                 distfiles_map = {}
5270                 fetchlist_dict = FetchlistDict(mysettings["O"], mysettings, myportdb)
5271                 for cpv in fetchlist_dict:
5272                         try:
5273                                 for myfile in fetchlist_dict[cpv]:
5274                                         distfiles_map.setdefault(myfile, []).append(cpv)
5275                         except portage.exception.InvalidDependString as e:
5276                                 writemsg("!!! %s\n" % str(e), noiselevel=-1)
5277                                 del e
5278                                 return 0
5279                 mytree = os.path.dirname(os.path.dirname(mysettings["O"]))
5280                 manifest1_compat = False
5281                 mf = Manifest(mysettings["O"], mysettings["DISTDIR"],
5282                         fetchlist_dict=fetchlist_dict, manifest1_compat=manifest1_compat)
5283                 # Don't require all hashes since that can trigger excessive
5284                 # fetches when sufficient digests already exist.  To ease transition
5285                 # while Manifest 1 is being removed, only require hashes that will
5286                 # exist before and after the transition.
5287                 required_hash_types = set()
5288                 required_hash_types.add("size")
5289                 required_hash_types.add(portage.const.MANIFEST2_REQUIRED_HASH)
5290                 dist_hashes = mf.fhashdict.get("DIST", {})
5291
5292                 # To avoid accidental regeneration of digests with the incorrect
5293                 # files (such as partially downloaded files), trigger the fetch
5294                 # code if the file exists and its size doesn't match the current
5295                 # manifest entry. If there really is a legitimate reason for the
5296                 # digest to change, `ebuild --force digest` can be used to avoid
5297                 # triggering this code (or else the old digests can be manually
5298                 # removed from the Manifest).
5299                 missing_files = []
5300                 for myfile in distfiles_map:
5301                         myhashes = dist_hashes.get(myfile)
5302                         if not myhashes:
5303                                 try:
5304                                         st = os.stat(os.path.join(mysettings["DISTDIR"], myfile))
5305                                 except OSError:
5306                                         st = None
5307                                 if st is None or st.st_size == 0:
5308                                         missing_files.append(myfile)
5309                                 continue
5310                         size = myhashes.get("size")
5311
5312                         try:
5313                                 st = os.stat(os.path.join(mysettings["DISTDIR"], myfile))
5314                         except OSError as e:
5315                                 if e.errno != errno.ENOENT:
5316                                         raise
5317                                 del e
5318                                 if size == 0:
5319                                         missing_files.append(myfile)
5320                                         continue
5321                                 if required_hash_types.difference(myhashes):
5322                                         missing_files.append(myfile)
5323                                         continue
5324                         else:
5325                                 if st.st_size == 0 or size is not None and size != st.st_size:
5326                                         missing_files.append(myfile)
5327                                         continue
5328
5329                 if missing_files:
5330                                 mytree = os.path.realpath(os.path.dirname(
5331                                         os.path.dirname(mysettings["O"])))
5332                                 fetch_settings = config(clone=mysettings)
5333                                 debug = mysettings.get("PORTAGE_DEBUG") == "1"
5334                                 for myfile in missing_files:
5335                                         uris = set()
5336                                         for cpv in distfiles_map[myfile]:
5337                                                 myebuild = os.path.join(mysettings["O"],
5338                                                         catsplit(cpv)[1] + ".ebuild")
5339                                                 # for RESTRICT=fetch, mirror, etc...
5340                                                 doebuild_environment(myebuild, "fetch",
5341                                                         mysettings["ROOT"], fetch_settings,
5342                                                         debug, 1, myportdb)
5343                                                 uris.update(myportdb.getFetchMap(
5344                                                         cpv, mytree=mytree)[myfile])
5345
5346                                         fetch_settings["A"] = myfile # for use by pkg_nofetch()
5347
5348                                         try:
5349                                                 st = os.stat(os.path.join(
5350                                                         mysettings["DISTDIR"],myfile))
5351                                         except OSError:
5352                                                 st = None
5353
5354                                         if not fetch({myfile : uris}, fetch_settings):
5355                                                 writemsg(_("!!! Fetch failed for %s, can't update "
5356                                                         "Manifest\n") % myfile, noiselevel=-1)
5357                                                 if myfile in dist_hashes and \
5358                                                         st is not None and st.st_size > 0:
5359                                                         # stat result is obtained before calling fetch(),
5360                                                         # since fetch may rename the existing file if the
5361                                                         # digest does not match.
5362                                                         writemsg(_("!!! If you would like to "
5363                                                                 "forcefully replace the existing "
5364                                                                 "Manifest entry\n!!! for %s, use "
5365                                                                 "the following command:\n") % myfile + \
5366                                                                 "!!!    " + colorize("INFORM",
5367                                                                 "ebuild --force %s manifest" % \
5368                                                                 os.path.basename(myebuild)) + "\n",
5369                                                                 noiselevel=-1)
5370                                                 return 0
5371                 writemsg_stdout(_(">>> Creating Manifest for %s\n") % mysettings["O"])
5372                 try:
5373                         mf.create(requiredDistfiles=myarchives,
5374                                 assumeDistHashesSometimes=True,
5375                                 assumeDistHashesAlways=(
5376                                 "assume-digests" in mysettings.features))
5377                 except portage.exception.FileNotFound as e:
5378                         writemsg(_("!!! File %s doesn't exist, can't update "
5379                                 "Manifest\n") % e, noiselevel=-1)
5380                         return 0
5381                 except portage.exception.PortagePackageException as e:
5382                         writemsg(("!!! %s\n") % (e,), noiselevel=-1)
5383                         return 0
5384                 try:
5385                         mf.write(sign=False)
5386                 except portage.exception.PermissionDenied as e:
5387                         writemsg(_("!!! Permission Denied: %s\n") % (e,), noiselevel=-1)
5388                         return 0
5389                 if "assume-digests" not in mysettings.features:
5390                         distlist = list(mf.fhashdict.get("DIST", {}))
5391                         distlist.sort()
5392                         auto_assumed = []
5393                         for filename in distlist:
5394                                 if not os.path.exists(
5395                                         os.path.join(mysettings["DISTDIR"], filename)):
5396                                         auto_assumed.append(filename)
5397                         if auto_assumed:
5398                                 mytree = os.path.realpath(
5399                                         os.path.dirname(os.path.dirname(mysettings["O"])))
5400                                 cp = os.path.sep.join(mysettings["O"].split(os.path.sep)[-2:])
5401                                 pkgs = myportdb.cp_list(cp, mytree=mytree)
5402                                 pkgs.sort()
5403                                 writemsg_stdout("  digest.assumed" + portage.output.colorize("WARN",
5404                                         str(len(auto_assumed)).rjust(18)) + "\n")
5405                                 for pkg_key in pkgs:
5406                                         fetchlist = myportdb.getFetchMap(pkg_key, mytree=mytree)
5407                                         pv = pkg_key.split("/")[1]
5408                                         for filename in auto_assumed:
5409                                                 if filename in fetchlist:
5410                                                         writemsg_stdout(
5411                                                                 "   %s::%s\n" % (pv, filename))
5412                 return 1
5413         finally:
5414                 _doebuild_manifest_exempt_depend -= 1
5415
5416 def digestParseFile(myfilename, mysettings=None):
5417         """(filename) -- Parses a given file for entries matching:
5418         <checksumkey> <checksum_hex_string> <filename> <filesize>
5419         Ignores lines that don't start with a valid checksum identifier
5420         and returns a dict with the filenames as keys and {checksumkey:checksum}
5421         as the values.
5422         DEPRECATED: this function is now only a compatibility wrapper for
5423                     portage.manifest.Manifest()."""
5424
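        # The shape of myfilename determines the package directory that the
        # Manifest is read from. Illustrative (hypothetical) inputs:
        #
        #     .../app-misc/foo/files/digest-foo-1.0  ->  pkgdir .../app-misc/foo
        #     .../app-misc/foo/Manifest              ->  pkgdir .../app-misc/foo
        #
        # Any other form leaves pkgdir unassigned, and the Manifest call below
        # would then fail with an UnboundLocalError.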
5425         mysplit = myfilename.split(os.sep)
5426         if mysplit[-2] == "files" and mysplit[-1].startswith("digest-"):
5427                 pkgdir = os.sep + os.sep.join(mysplit[:-2]).strip(os.sep)
5428         elif mysplit[-1] == "Manifest":
5429                 pkgdir = os.sep + os.sep.join(mysplit[:-1]).strip(os.sep)
5430
5431         if mysettings is None:
5432                 global settings
5433                 mysettings = config(clone=settings)
5434
5435         return Manifest(pkgdir, mysettings["DISTDIR"]).getDigests()
5436
5437 def digestcheck(myfiles, mysettings, strict=0, justmanifest=0):
5438         """Verifies checksums.  Assumes all files have been downloaded.
5439         DEPRECATED: this is now only a compatibility wrapper for
5440                     portage.manifest.Manifest()."""
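        # Illustrative call (a sketch; the file name is hypothetical):
        #
        #     if not digestcheck(["foo-1.0.tar.gz"], mysettings, strict=1):
        #         ...  # caller treats the package as failing verification
        #
        # Returns 1 when all requested digests verify (or when checking is
        # skipped), and 0 otherwise.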
5441         if mysettings.get("EBUILD_SKIP_MANIFEST") == "1":
5442                 return 1
5443         pkgdir = mysettings["O"]
5444         manifest_path = os.path.join(pkgdir, "Manifest")
5445         if not os.path.exists(manifest_path):
5446                 writemsg(_("!!! Manifest file not found: '%s'\n") % manifest_path,
5447                         noiselevel=-1)
5448                 if strict:
5449                         return 0
5450                 else:
5451                         return 1
5452         mf = Manifest(pkgdir, mysettings["DISTDIR"])
5453         manifest_empty = True
5454         for d in mf.fhashdict.values():
5455                 if d:
5456                         manifest_empty = False
5457                         break
5458         if manifest_empty:
5459                 writemsg(_("!!! Manifest is empty: '%s'\n") % manifest_path,
5460                         noiselevel=-1)
5461                 if strict:
5462                         return 0
5463                 else:
5464                         return 1
5465         eout = portage.output.EOutput()
5466         eout.quiet = mysettings.get("PORTAGE_QUIET", None) == "1"
5467         try:
5468                 if strict and "PORTAGE_PARALLEL_FETCHONLY" not in mysettings:
5469                         eout.ebegin(_("checking ebuild checksums ;-)"))
5470                         mf.checkTypeHashes("EBUILD")
5471                         eout.eend(0)
5472                         eout.ebegin(_("checking auxfile checksums ;-)"))
5473                         mf.checkTypeHashes("AUX")
5474                         eout.eend(0)
5475                         eout.ebegin(_("checking miscfile checksums ;-)"))
5476                         mf.checkTypeHashes("MISC", ignoreMissingFiles=True)
5477                         eout.eend(0)
5478                 for f in myfiles:
5479                         eout.ebegin(_("checking %s ;-)") % f)
5480                         ftype = mf.findFile(f)
5481                         if ftype is None:
5482                                 raise KeyError(f)
5483                         mf.checkFileHashes(ftype, f)
5484                         eout.eend(0)
5485         except KeyError as e:
5486                 eout.eend(1)
5487                 writemsg(_("\n!!! Missing digest for %s\n") % str(e), noiselevel=-1)
5488                 return 0
5489         except portage.exception.FileNotFound as e:
5490                 eout.eend(1)
5491                 writemsg(_("\n!!! A file listed in the Manifest could not be found: %s\n") % str(e),
5492                         noiselevel=-1)
5493                 return 0
5494         except portage.exception.DigestException as e:
5495                 eout.eend(1)
5496                 writemsg(_("\n!!! Digest verification failed:\n"), noiselevel=-1)
5497                 writemsg("!!! %s\n" % e.value[0], noiselevel=-1)
5498                 writemsg(_("!!! Reason: %s\n") % e.value[1], noiselevel=-1)
5499                 writemsg(_("!!! Got: %s\n") % e.value[2], noiselevel=-1)
5500                 writemsg(_("!!! Expected: %s\n") % e.value[3], noiselevel=-1)
5501                 return 0
5502         # Make sure that all of the ebuilds are actually listed in the Manifest.
5503         glep55 = 'parse-eapi-glep-55' in mysettings.features
5504         for f in os.listdir(pkgdir):
5505                 pf = None
5506                 if glep55:
5507                         pf, eapi = _split_ebuild_name_glep55(f)
5508                 elif f[-7:] == '.ebuild':
5509                         pf = f[:-7]
5510                 if pf is not None and not mf.hasFile("EBUILD", f):
5511                         writemsg(_("!!! A file is not listed in the Manifest: '%s'\n") % \
5512                                 os.path.join(pkgdir, f), noiselevel=-1)
5513                         if strict:
5514                                 return 0
5515         """ epatch will just grab all the patches out of a directory, so we have to
5516         make sure there aren't any foreign files that it might grab."""
5517         filesdir = os.path.join(pkgdir, "files")
5518
5519         for parent, dirs, files in os.walk(filesdir):
5520                 try:
5521                         parent = _unicode_decode(parent,
5522                                 encoding=_encodings['fs'], errors='strict')
5523                 except UnicodeDecodeError:
5524                         parent = _unicode_decode(parent,
5525                                 encoding=_encodings['fs'], errors='replace')
5526                         writemsg(_("!!! Path contains invalid "
5527                                 "character(s) for encoding '%s': '%s'") \
5528                                 % (_encodings['fs'], parent), noiselevel=-1)
5529                         if strict:
5530                                 return 0
5531                         continue
5532                 for d in dirs:
5533                         d_bytes = d
5534                         try:
5535                                 d = _unicode_decode(d,
5536                                         encoding=_encodings['fs'], errors='strict')
5537                         except UnicodeDecodeError:
5538                                 d = _unicode_decode(d,
5539                                         encoding=_encodings['fs'], errors='replace')
5540                                 writemsg(_("!!! Path contains invalid "
5541                                         "character(s) for encoding '%s': '%s'") \
5542                                         % (_encodings['fs'], os.path.join(parent, d)),
5543                                         noiselevel=-1)
5544                                 if strict:
5545                                         return 0
5546                                 dirs.remove(d_bytes)
5547                                 continue
5548                         if d.startswith(".") or d == "CVS":
5549                                 dirs.remove(d_bytes)
5550                 for f in files:
5551                         try:
5552                                 f = _unicode_decode(f,
5553                                         encoding=_encodings['fs'], errors='strict')
5554                         except UnicodeDecodeError:
5555                                 f = _unicode_decode(f,
5556                                         encoding=_encodings['fs'], errors='replace')
5557                                 if f.startswith("."):
5558                                         continue
5559                                 f = os.path.join(parent, f)[len(filesdir) + 1:]
5560                                 writemsg(_("!!! File name contains invalid "
5561                                         "character(s) for encoding '%s': '%s'") \
5562                                         % (_encodings['fs'], f), noiselevel=-1)
5563                                 if strict:
5564                                         return 0
5565                                 continue
5566                         if f.startswith("."):
5567                                 continue
5568                         f = os.path.join(parent, f)[len(filesdir) + 1:]
5569                         file_type = mf.findFile(f)
5570                         if file_type != "AUX" and not f.startswith("digest-"):
5571                                 writemsg(_("!!! A file is not listed in the Manifest: '%s'\n") % \
5572                                         os.path.join(filesdir, f), noiselevel=-1)
5573                                 if strict:
5574                                         return 0
5575         return 1
5576
5577 # parse actionmap to spawn ebuild with the appropriate args
5578 def spawnebuild(mydo, actionmap, mysettings, debug, alwaysdep=0,
5579         logfile=None, fd_pipes=None, returnpid=False):
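        # actionmap is expected to map each phase name to a dict containing
        # the keys used below: an optional "dep" (a phase to run first),
        # "cmd" (a command template with a %s slot for the phase name) and
        # "args" (keyword arguments passed through to spawn()).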
5580         if not returnpid and \
5581                 (alwaysdep or "noauto" not in mysettings.features):
5582                 # process dependency first
5583                 if "dep" in actionmap[mydo]:
5584                         retval = spawnebuild(actionmap[mydo]["dep"], actionmap,
5585                                 mysettings, debug, alwaysdep=alwaysdep, logfile=logfile,
5586                                 fd_pipes=fd_pipes, returnpid=returnpid)
5587                         if retval:
5588                                 return retval
5589
5590         eapi = mysettings["EAPI"]
5591
5592         if mydo == "configure" and eapi in ("0", "1"):
5593                 return os.EX_OK
5594
5595         if mydo == "prepare" and eapi in ("0", "1"):
5596                 return os.EX_OK
5597
5598         if mydo == "pretend" and eapi in ("0", "1", "2"):
5599                 return os.EX_OK
5600
5601         kwargs = actionmap[mydo]["args"]
5602         mysettings["EBUILD_PHASE"] = mydo
5603         _doebuild_exit_status_unlink(
5604                 mysettings.get("EBUILD_EXIT_STATUS_FILE"))
5605
5606         try:
5607                 phase_retval = spawn(actionmap[mydo]["cmd"] % mydo,
5608                         mysettings, debug=debug, logfile=logfile,
5609                         fd_pipes=fd_pipes, returnpid=returnpid, **kwargs)
5610         finally:
5611                 mysettings["EBUILD_PHASE"] = ""
5612
5613         if returnpid:
5614                 return phase_retval
5615
5616         msg = _doebuild_exit_status_check(mydo, mysettings)
5617         if msg:
5618                 if phase_retval == os.EX_OK:
5619                         phase_retval = 1
5620                 from textwrap import wrap
5621                 from portage.elog.messages import eerror
5622                 for l in wrap(msg, 72):
5623                         eerror(l, phase=mydo, key=mysettings.mycpv)
5624
5625         _post_phase_userpriv_perms(mysettings)
5626         if mydo == "install":
5627                 _check_build_log(mysettings)
5628                 if phase_retval == os.EX_OK:
5629                         _post_src_install_chost_fix(mysettings)
5630                         phase_retval = _post_src_install_checks(mysettings)
5631
5632         if mydo == "test" and phase_retval != os.EX_OK and \
5633                 "test-fail-continue" in mysettings.features:
5634                 phase_retval = os.EX_OK
5635
5636         return phase_retval
5637
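# Maps an ebuild phase name to the misc-functions.sh helpers that are run
# after that phase; see _spawn_misc_sh() and the _post_pkg_*_cmd() helpers
# below for how these lists are consumed.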
5638 _post_phase_cmds = {
5639
5640         "install" : [
5641                 "install_qa_check",
5642                 "install_symlink_html_docs"],
5643
5644         "preinst" : [
5645                 "preinst_bsdflags",
5646                 "preinst_sfperms",
5647                 "preinst_selinux_labels",
5648                 "preinst_suid_scan",
5649                 "preinst_mask"],
5650
5651         "postinst" : [
5652                 "postinst_bsdflags"]
5653 }
5654
5655 def _post_phase_userpriv_perms(mysettings):
5656         if "userpriv" in mysettings.features and secpass >= 2:
5657                 """ Privileged phases may have left files that need to be made
5658                 writable to a less privileged user."""
5659                 apply_recursive_permissions(mysettings["T"],
5660                         uid=portage_uid, gid=portage_gid, dirmode=0o70, dirmask=0,
5661                         filemode=0o60, filemask=0)
5662
5663 def _post_src_install_checks(mysettings):
5664         _post_src_install_uid_fix(mysettings)
5665         global _post_phase_cmds
5666         retval = _spawn_misc_sh(mysettings, _post_phase_cmds["install"])
5667         if retval != os.EX_OK:
5668                 writemsg(_("!!! install_qa_check failed; exiting.\n"),
5669                         noiselevel=-1)
5670         return retval
5671
5672 def _check_build_log(mysettings, out=None):
5673         """
5674         Search the content of $PORTAGE_LOG_FILE if it exists
5675         and generate the following QA Notices when appropriate:
5676
5677           * Automake "maintainer mode"
5678           * command not found
5679           * Unrecognized configure options
5680         """
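        # Illustrative log lines (paths and package names are hypothetical)
        # that the regular expressions below are intended to catch:
        #
        #     ${SHELL} /var/tmp/portage/.../missing --run automake-1.10
        #     ./prepare.sh: line 12: xmlto: command not found
        #     !!! newins: foo.conf does not exist
        #     configure: WARNING: unrecognized options: --with-foo
        #     make[1]: warning: jobserver unavailable: using -j1.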
5681         logfile = mysettings.get("PORTAGE_LOG_FILE")
5682         if logfile is None:
5683                 return
5684         try:
5685                 f = codecs.open(_unicode_encode(logfile,
5686                         encoding=_encodings['fs'], errors='strict'),
5687                         mode='r', encoding=_encodings['content'], errors='replace')
5688         except EnvironmentError:
5689                 return
5690
5691         am_maintainer_mode = []
5692         bash_command_not_found = []
5693         bash_command_not_found_re = re.compile(
5694                 r'(.*): line (\d*): (.*): command not found$')
5695         command_not_found_exclude_re = re.compile(r'/configure: line ')
5696         helper_missing_file = []
5697         helper_missing_file_re = re.compile(
5698                 r'^!!! (do|new).*: .* does not exist$')
5699
5700         configure_opts_warn = []
5701         configure_opts_warn_re = re.compile(
5702                 r'^configure: WARNING: [Uu]nrecognized options: ')
5703
5704         # Exclude output from dev-libs/yaz-3.0.47 which looks like this:
5705         #
5706         #Configuration:
5707         #  Automake:                   ${SHELL} /var/tmp/portage/dev-libs/yaz-3.0.47/work/yaz-3.0.47/config/missing --run automake-1.10
5708         am_maintainer_mode_re = re.compile(r'/missing --run ')
5709         am_maintainer_mode_exclude_re = \
5710                 re.compile(r'(/missing --run (autoheader|makeinfo)|^\s*Automake:\s)')
5711
5712         make_jobserver_re = \
5713                 re.compile(r'g?make\[\d+\]: warning: jobserver unavailable:')
5714         make_jobserver = []
5715
5716         try:
5717                 for line in f:
5718                         if am_maintainer_mode_re.search(line) is not None and \
5719                                 am_maintainer_mode_exclude_re.search(line) is None:
5720                                 am_maintainer_mode.append(line.rstrip("\n"))
5721
5722                         if bash_command_not_found_re.match(line) is not None and \
5723                                 command_not_found_exclude_re.search(line) is None:
5724                                 bash_command_not_found.append(line.rstrip("\n"))
5725
5726                         if helper_missing_file_re.match(line) is not None:
5727                                 helper_missing_file.append(line.rstrip("\n"))
5728
5729                         if configure_opts_warn_re.match(line) is not None:
5730                                 configure_opts_warn.append(line.rstrip("\n"))
5731
5732                         if make_jobserver_re.match(line) is not None:
5733                                 make_jobserver.append(line.rstrip("\n"))
5734
5735         finally:
5736                 f.close()
5737
5738         from portage.elog.messages import eqawarn
5739         def _eqawarn(lines):
5740                 for line in lines:
5741                         eqawarn(line, phase="install", key=mysettings.mycpv, out=out)
5742         from textwrap import wrap
5743         wrap_width = 70
5744
5745         if am_maintainer_mode:
5746                 msg = [_("QA Notice: Automake \"maintainer mode\" detected:")]
5747                 msg.append("")
5748                 msg.extend("\t" + line for line in am_maintainer_mode)
5749                 msg.append("")
5750                 msg.extend(wrap(_(
5751                         "If you patch Makefile.am, "
5752                         "configure.in, or configure.ac then you "
5753                         "should use autotools.eclass and "
5754                         "eautomake or eautoreconf. Exceptions "
5755                         "are limited to system packages "
5756                         "for which it is impossible to run "
5757                         "autotools during stage building. "
5758                         "See http://www.gentoo.org/p"
5759                         "roj/en/qa/autofailure.xml for more information."),
5760                         wrap_width))
5761                 _eqawarn(msg)
5762
5763         if bash_command_not_found:
5764                 msg = [_("QA Notice: command not found:")]
5765                 msg.append("")
5766                 msg.extend("\t" + line for line in bash_command_not_found)
5767                 _eqawarn(msg)
5768
5769         if helper_missing_file:
5770                 msg = [_("QA Notice: file does not exist:")]
5771                 msg.append("")
5772                 msg.extend("\t" + line[4:] for line in helper_missing_file)
5773                 _eqawarn(msg)
5774
5775         if configure_opts_warn:
5776                 msg = [_("QA Notice: Unrecognized configure options:")]
5777                 msg.append("")
5778                 msg.extend("\t" + line for line in configure_opts_warn)
5779                 _eqawarn(msg)
5780
5781         if make_jobserver:
5782                 msg = [_("QA Notice: make jobserver unavailable:")]
5783                 msg.append("")
5784                 msg.extend("\t" + line for line in make_jobserver)
5785                 _eqawarn(msg)
5786
5787 def _post_src_install_chost_fix(settings):
5788         """
5789         It's possible that the ebuild has changed the
5790         CHOST variable, so revert it to the initial
5791         setting.
5792         """
5793         chost = settings.get('CHOST')
5794         if chost:
5795                 write_atomic(os.path.join(settings['PORTAGE_BUILDDIR'],
5796                         'build-info', 'CHOST'), chost + '\n')
5797
5798 def _post_src_install_uid_fix(mysettings, out=None):
5799         """
5800         Files in $D with user and group bits that match the "portage"
5801         user or group are automatically mapped to PORTAGE_INST_UID and
5802         PORTAGE_INST_GID if necessary. The chown system call may clear
5803         S_ISUID and S_ISGID bits, so those bits are restored if
5804         necessary.
5805         """
5806
5807         os = _os_merge
5808
5809         inst_uid = int(mysettings["PORTAGE_INST_UID"])
5810         inst_gid = int(mysettings["PORTAGE_INST_GID"])
5811
5812         if bsd_chflags:
5813                 # Temporarily remove all of the flags in order to avoid EPERM errors.
5814                 os.system("mtree -c -p %s -k flags > %s" % \
5815                         (_shell_quote(mysettings["D"]),
5816                         _shell_quote(os.path.join(mysettings["T"], "bsdflags.mtree"))))
5817                 os.system("chflags -R noschg,nouchg,nosappnd,nouappnd %s" % \
5818                         (_shell_quote(mysettings["D"]),))
5819                 os.system("chflags -R nosunlnk,nouunlnk %s 2>/dev/null" % \
5820                         (_shell_quote(mysettings["D"]),))
5821
5822         destdir = mysettings["D"]
5823         unicode_errors = []
5824
5825         while True:
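        # The walk below is restarted from the top of ${D} whenever a path
        # whose name cannot be decoded with the 'merge' encoding is renamed,
        # since the os.walk() results become stale after the rename.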
5826
5827                 unicode_error = False
5828                 size = 0
5829                 counted_inodes = set()
5830
5831                 for parent, dirs, files in os.walk(destdir):
5832                         try:
5833                                 parent = _unicode_decode(parent,
5834                                         encoding=_encodings['merge'], errors='strict')
5835                         except UnicodeDecodeError:
5836                                 new_parent = _unicode_decode(parent,
5837                                         encoding=_encodings['merge'], errors='replace')
5838                                 new_parent = _unicode_encode(new_parent,
5839                                         encoding=_encodings['merge'], errors='backslashreplace')
5840                                 new_parent = _unicode_decode(new_parent,
5841                                         encoding=_encodings['merge'], errors='replace')
5842                                 os.rename(parent, new_parent)
5843                                 unicode_error = True
5844                                 unicode_errors.append(new_parent[len(destdir):])
5845                                 break
5846
5847                         for fname in chain(dirs, files):
5848                                 try:
5849                                         fname = _unicode_decode(fname,
5850                                                 encoding=_encodings['merge'], errors='strict')
5851                                 except UnicodeDecodeError:
5852                                         fpath = _os.path.join(
5853                                                 parent.encode(_encodings['merge']), fname)
5854                                         new_fname = _unicode_decode(fname,
5855                                                 encoding=_encodings['merge'], errors='replace')
5856                                         new_fname = _unicode_encode(new_fname,
5857                                                 encoding=_encodings['merge'], errors='backslashreplace')
5858                                         new_fname = _unicode_decode(new_fname,
5859                                                 encoding=_encodings['merge'], errors='replace')
5860                                         new_fpath = os.path.join(parent, new_fname)
5861                                         os.rename(fpath, new_fpath)
5862                                         unicode_error = True
5863                                         unicode_errors.append(new_fpath[len(destdir):])
5864                                         fname = new_fname
5865                                         fpath = new_fpath
5866                                 else:
5867                                         fpath = os.path.join(parent, fname)
5868
5869                                 mystat = os.lstat(fpath)
5870                                 if stat.S_ISREG(mystat.st_mode) and \
5871                                         mystat.st_ino not in counted_inodes:
5872                                         counted_inodes.add(mystat.st_ino)
5873                                         size += mystat.st_size
5874                                 if mystat.st_uid != portage_uid and \
5875                                         mystat.st_gid != portage_gid:
5876                                         continue
5877                                 myuid = -1
5878                                 mygid = -1
5879                                 if mystat.st_uid == portage_uid:
5880                                         myuid = inst_uid
5881                                 if mystat.st_gid == portage_gid:
5882                                         mygid = inst_gid
5883                                 apply_secpass_permissions(
5884                                         _unicode_encode(fpath, encoding=_encodings['merge']),
5885                                         uid=myuid, gid=mygid,
5886                                         mode=mystat.st_mode, stat_cached=mystat,
5887                                         follow_links=False)
5888
5889                         if unicode_error:
5890                                 break
5891
5892                 if not unicode_error:
5893                         break
5894
5895         if unicode_errors:
5896                 from portage.elog.messages import eerror
5897                 for l in _merge_unicode_error(unicode_errors):
5898                         eerror(l, phase='install', key=mysettings.mycpv, out=out)
5899
5900         open(_unicode_encode(os.path.join(mysettings['PORTAGE_BUILDDIR'],
5901                 'build-info', 'SIZE')), 'w').write(str(size) + '\n')
5902
5903         if bsd_chflags:
5904                 # Restore all of the flags saved above.
5905                 os.system("mtree -e -p %s -U -k flags < %s > /dev/null" % \
5906                         (_shell_quote(mysettings["D"]),
5907                         _shell_quote(os.path.join(mysettings["T"], "bsdflags.mtree"))))
5908
5909 def _merge_unicode_error(errors):
5910         from textwrap import wrap
5911         lines = []
5912
5913         msg = _("This package installs one or more file names containing "
5914                 "characters that do not match your current locale "
5915                 "settings. The current setting for filesystem encoding is '%s'.") \
5916                 % _encodings['merge']
5917         lines.extend(wrap(msg, 72))
5918
5919         lines.append("")
5920         errors.sort()
5921         lines.extend("\t" + x for x in errors)
5922         lines.append("")
5923
5924         if _encodings['merge'].lower().replace('_', '').replace('-', '') != 'utf8':
5925                 msg = _("For best results, UTF-8 encoding is recommended. See "
5926                         "the Gentoo Linux Localization Guide for instructions "
5927                         "about how to configure your locale for UTF-8 encoding:")
5928                 lines.extend(wrap(msg, 72))
5929                 lines.append("")
5930                 lines.append("\t" + \
5931                         "http://www.gentoo.org/doc/en/guide-localization.xml")
5932                 lines.append("")
5933
5934         return lines
5935
5936 def _post_pkg_preinst_cmd(mysettings):
5937         """
5938         Post phase logic and tasks that have been factored out of
5939         ebuild.sh. Call preinst_mask last so that INSTALL_MASK can
5940         be used to wipe out any gmon.out files created during
5941         previous functions (in case any tools were built with -pg
5942         in CFLAGS).
5943         """
5944
5945         portage_bin_path = mysettings["PORTAGE_BIN_PATH"]
5946         misc_sh_binary = os.path.join(portage_bin_path,
5947                 os.path.basename(MISC_SH_BINARY))
5948
5949         mysettings["EBUILD_PHASE"] = ""
5950         global _post_phase_cmds
5951         myargs = [_shell_quote(misc_sh_binary)] + _post_phase_cmds["preinst"]
5952
5953         return myargs
5954
5955 def _post_pkg_postinst_cmd(mysettings):
5956         """
5957         Post phase logic and tasks that have been factored out of
5958         ebuild.sh.
5959         """
5960
5961         portage_bin_path = mysettings["PORTAGE_BIN_PATH"]
5962         misc_sh_binary = os.path.join(portage_bin_path,
5963                 os.path.basename(MISC_SH_BINARY))
5964
5965         mysettings["EBUILD_PHASE"] = ""
5966         global _post_phase_cmds
5967         myargs = [_shell_quote(misc_sh_binary)] + _post_phase_cmds["postinst"]
5968
5969         return myargs
5970
5971 def _spawn_misc_sh(mysettings, commands, **kwargs):
5972         """
5973         @param mysettings: the ebuild config
5974         @type mysettings: config
5975         @param commands: a list of function names to call in misc-functions.sh
5976         @type commands: list
5977         @rtype: int
5978         @returns: the return value from the spawn() call
5979         """
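        # Illustrative call (a sketch; mirrors _post_src_install_checks()):
        #
        #     rval = _spawn_misc_sh(mysettings, ["install_qa_check"])
        #     if rval != os.EX_OK:
        #         ...  # treat the phase as failed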
5980
5981         # Note: PORTAGE_BIN_PATH may differ from the global
5982         # constant when portage is reinstalling itself.
5983         portage_bin_path = mysettings["PORTAGE_BIN_PATH"]
5984         misc_sh_binary = os.path.join(portage_bin_path,
5985                 os.path.basename(MISC_SH_BINARY))
5986         mycommand = " ".join([_shell_quote(misc_sh_binary)] + commands)
5987         _doebuild_exit_status_unlink(
5988                 mysettings.get("EBUILD_EXIT_STATUS_FILE"))
5989         debug = mysettings.get("PORTAGE_DEBUG") == "1"
5990         logfile = mysettings.get("PORTAGE_LOG_FILE")
5991         mydo = mysettings["EBUILD_PHASE"]
5992         try:
5993                 rval = spawn(mycommand, mysettings, debug=debug,
5994                         logfile=logfile, **kwargs)
5995         finally:
5996                 pass
5997
5998         msg = _doebuild_exit_status_check(mydo, mysettings)
5999         if msg:
6000                 if rval == os.EX_OK:
6001                         rval = 1
6002                 from textwrap import wrap
6003                 from portage.elog.messages import eerror
6004                 for l in wrap(msg, 72):
6005                         eerror(l, phase=mydo, key=mysettings.mycpv)
6006
6007         return rval
6008
6009 _testing_eapis = frozenset()
6010 _deprecated_eapis = frozenset(["2_pre3", "2_pre2", "2_pre1"])
6011
6012 def _eapi_is_deprecated(eapi):
6013         return eapi in _deprecated_eapis
6014
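# Illustrative behavior of eapi_is_supported() (assuming, for the sake of
# example, that portage.const.EAPI is 2): "0", "1", "2" and the deprecated
# "2_pre*" values are supported, while "3" or an unparseable value such as
# "glep55" is not.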
6015 def eapi_is_supported(eapi):
6016         eapi = str(eapi).strip()
6017
6018         if _eapi_is_deprecated(eapi):
6019                 return True
6020
6021         if eapi in _testing_eapis:
6022                 return True
6023
6024         try:
6025                 eapi = int(eapi)
6026         except ValueError:
6027                 eapi = -1
6028         if eapi < 0:
6029                 return False
6030         return eapi <= portage.const.EAPI
6031
6032 # Generally, it's best not to assume that cache entries for unsupported EAPIs
6033 # can be validated. However, the current package manager specification does not
6034 # guarantee that the EAPI can be parsed without sourcing the ebuild, so
6035 # it's too costly to discard existing cache entries for unsupported EAPIs.
6036 # Therefore, by default, assume that cache entries for unsupported EAPIs can be
6037 # validated. If FEATURES=parse-eapi-* is enabled, this assumption is discarded
6038 # since the EAPI can be determined without incurring the cost of sourcing
6039 # the ebuild.
6040 _validate_cache_for_unsupported_eapis = True
6041
6042 _parse_eapi_ebuild_head_re = re.compile(r'^EAPI=[\'"]?([^\'"#]*)')
6043 _parse_eapi_ebuild_head_max_lines = 30
6044
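# Illustrative behavior (hypothetical input): given a file object whose first
# 30 lines include a line such as
#
#     EAPI="2"
#
# _parse_eapi_ebuild_head() returns "2"; if no EAPI assignment is found within
# that window it falls back to '0'.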
6045 def _parse_eapi_ebuild_head(f):
6046         count = 0
6047         for line in f:
6048                 m = _parse_eapi_ebuild_head_re.match(line)
6049                 if m is not None:
6050                         return m.group(1).strip()
6051                 count += 1
6052                 if count >= _parse_eapi_ebuild_head_max_lines:
6053                         break
6054         return '0'
6055
6056 # True when FEATURES=parse-eapi-glep-55 is enabled.
6057 _glep_55_enabled = False
6058
6059 _split_ebuild_name_glep55_re = re.compile(r'^(.*)\.ebuild(-([^.]+))?$')
6060
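# Illustrative behavior (hypothetical file names):
#
#     "foo-1.0.ebuild"    -> ("foo-1.0", None)
#     "foo-1.0.ebuild-2"  -> ("foo-1.0", "2")
#     "ChangeLog"         -> (None, None)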
6061 def _split_ebuild_name_glep55(name):
6062         """
6063         @returns: (pkg-ver-rev, eapi)
6064         """
6065         m = _split_ebuild_name_glep55_re.match(name)
6066         if m is None:
6067                 return (None, None)
6068         return (m.group(1), m.group(3))
6069
6070 def doebuild_environment(myebuild, mydo, myroot, mysettings, debug, use_cache, mydbapi):
6071
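        # Populate mysettings with the per-ebuild variables (CATEGORY, PF, P,
        # PV, PR, EBUILD, PORTAGE_BUILDDIR and friends) that ebuild.sh expects
        # for the requested phase, and reject ebuilds whose EAPI is not
        # supported by this version of portage.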
6072         ebuild_path = os.path.abspath(myebuild)
6073         pkg_dir     = os.path.dirname(ebuild_path)
6074
6075         if "CATEGORY" in mysettings.configdict["pkg"]:
6076                 cat = mysettings.configdict["pkg"]["CATEGORY"]
6077         else:
6078                 cat = os.path.basename(normalize_path(os.path.join(pkg_dir, "..")))
6079
6080         eapi = None
6081         if 'parse-eapi-glep-55' in mysettings.features:
6082                 mypv, eapi = portage._split_ebuild_name_glep55(
6083                         os.path.basename(myebuild))
6084         else:
6085                 mypv = os.path.basename(ebuild_path)[:-7]
6086
6087         mycpv = cat+"/"+mypv
6088         mysplit = versions._pkgsplit(mypv)
6089         if mysplit is None:
6090                 raise portage.exception.IncorrectParameter(
6091                         _("Invalid ebuild path: '%s'") % myebuild)
6092
6093         # Make a backup of PORTAGE_TMPDIR prior to calling config.reset()
6094         # so that the caller can override it.
6095         tmpdir = mysettings["PORTAGE_TMPDIR"]
6096
6097         if mydo == 'depend':
6098                 if mycpv != mysettings.mycpv:
6099                         # Don't pass in mydbapi here since the resulting aux_get
6100                         # call would lead to infinite 'depend' phase recursion.
6101                         mysettings.setcpv(mycpv)
6102         else:
6103                 # If IUSE isn't in configdict['pkg'], it means that setcpv()
6104                 # hasn't been called with the mydb argument, so we have to
6105                 # call it here (portage code always calls setcpv properly,
6106                 # but api consumers might not).
6107                 if mycpv != mysettings.mycpv or \
6108                         'IUSE' not in mysettings.configdict['pkg']:
6109                         # Reload env.d variables and reset any previous settings.
6110                         mysettings.reload()
6111                         mysettings.reset()
6112                         mysettings.setcpv(mycpv, mydb=mydbapi)
6113
6114         # config.reset() might have reverted a change made by the caller,
6115         # so restore it to its original value.
6116         mysettings["PORTAGE_TMPDIR"] = tmpdir
6117
6118         mysettings.pop("EBUILD_PHASE", None) # remove from backupenv
6119         mysettings["EBUILD_PHASE"] = mydo
6120
6121         mysettings["PORTAGE_MASTER_PID"] = str(os.getpid())
6122
6123         # We are disabling user-specific bashrc files.
6124         mysettings["BASH_ENV"] = INVALID_ENV_FILE
6125
6126         if debug: # Otherwise it overrides emerge's settings.
6127                 # We have no other way to set debug... debug can't be passed in
6128                 # due to how it's coded... Don't overwrite this so we can use it.
6129                 mysettings["PORTAGE_DEBUG"] = "1"
6130
6131         mysettings["ROOT"]     = myroot
6132         mysettings["STARTDIR"] = getcwd()
6133         mysettings["EBUILD"]   = ebuild_path
6134         mysettings["O"]        = pkg_dir
6135         mysettings.configdict["pkg"]["CATEGORY"] = cat
6136         mysettings["FILESDIR"] = pkg_dir+"/files"
6137         mysettings["PF"]       = mypv
6138
6139         if hasattr(mydbapi, '_repo_info'):
6140                 mytree = os.path.dirname(os.path.dirname(pkg_dir))
6141                 repo_info = mydbapi._repo_info[mytree]
6142                 mysettings['PORTDIR'] = repo_info.portdir
6143                 mysettings['PORTDIR_OVERLAY'] = repo_info.portdir_overlay
6144
6145         mysettings["PORTDIR"] = os.path.realpath(mysettings["PORTDIR"])
6146         mysettings["DISTDIR"] = os.path.realpath(mysettings["DISTDIR"])
6147         mysettings["RPMDIR"]  = os.path.realpath(mysettings["RPMDIR"])
6148
6149         mysettings["ECLASSDIR"]   = mysettings["PORTDIR"]+"/eclass"
6150         mysettings["SANDBOX_LOG"] = mycpv.replace("/", "_-_")
6151
6152         mysettings["PROFILE_PATHS"] = "\n".join(mysettings.profiles)
6153         mysettings["P"]  = mysplit[0]+"-"+mysplit[1]
6154         mysettings["PN"] = mysplit[0]
6155         mysettings["PV"] = mysplit[1]
6156         mysettings["PR"] = mysplit[2]
6157
6158         if portage.util.noiselimit < 0:
6159                 mysettings["PORTAGE_QUIET"] = "1"
6160
6161         if mydo == 'depend' and \
6162                 'EAPI' not in mysettings.configdict['pkg']:
6163
6164                 if eapi is not None:
6165                         # From parse-eapi-glep-55 above.
6166                         pass
6167                 elif 'parse-eapi-ebuild-head' in mysettings.features:
6168                         eapi = _parse_eapi_ebuild_head(
6169                                 codecs.open(_unicode_encode(ebuild_path,
6170                                 encoding=_encodings['fs'], errors='strict'),
6171                                 mode='r', encoding=_encodings['content'], errors='replace'))
6172
6173                 if eapi is not None:
6174                         if not eapi_is_supported(eapi):
6175                                 raise portage.exception.UnsupportedAPIException(mycpv, eapi)
6176                         mysettings.configdict['pkg']['EAPI'] = eapi
6177
6178         if mydo != "depend":
6179                 # Metadata vars such as EAPI and RESTRICT are
6180                 # set by the above config.setcpv() call.
6181                 eapi = mysettings["EAPI"]
6182                 if not eapi_is_supported(eapi):
6183                         # can't do anything with this.
6184                         raise portage.exception.UnsupportedAPIException(mycpv, eapi)
6185
6186         if mysplit[2] == "r0":
6187                 mysettings["PVR"]=mysplit[1]
6188         else:
6189                 mysettings["PVR"]=mysplit[1]+"-"+mysplit[2]
6190
6191         if "PATH" in mysettings:
6192                 mysplit=mysettings["PATH"].split(":")
6193         else:
6194                 mysplit=[]
6195         # Note: PORTAGE_BIN_PATH may differ from the global constant
6196         # when portage is reinstalling itself.
6197         portage_bin_path = mysettings["PORTAGE_BIN_PATH"]
6198         if portage_bin_path not in mysplit:
6199                 mysettings["PATH"] = portage_bin_path + ":" + mysettings["PATH"]
6200
6201         # Sandbox needs canonical paths.
6202         mysettings["PORTAGE_TMPDIR"] = os.path.realpath(
6203                 mysettings["PORTAGE_TMPDIR"])
6204         mysettings["BUILD_PREFIX"] = mysettings["PORTAGE_TMPDIR"]+"/portage"
6205         mysettings["PKG_TMPDIR"]   = mysettings["PORTAGE_TMPDIR"]+"/binpkgs"
6206         
6207         # Package {pre,post}inst and {pre,post}rm may overlap, so they must have separate
6208         # locations in order to prevent interference.
6209         if mydo in ("unmerge", "prerm", "postrm", "cleanrm"):
6210                 mysettings["PORTAGE_BUILDDIR"] = os.path.join(
6211                         mysettings["PKG_TMPDIR"],
6212                         mysettings["CATEGORY"], mysettings["PF"])
6213         else:
6214                 mysettings["PORTAGE_BUILDDIR"] = os.path.join(
6215                         mysettings["BUILD_PREFIX"],
6216                         mysettings["CATEGORY"], mysettings["PF"])
6217
6218         mysettings["HOME"] = os.path.join(mysettings["PORTAGE_BUILDDIR"], "homedir")
6219         mysettings["WORKDIR"] = os.path.join(mysettings["PORTAGE_BUILDDIR"], "work")
6220         mysettings["D"] = os.path.join(mysettings["PORTAGE_BUILDDIR"], "image") + os.sep
6221         mysettings["T"] = os.path.join(mysettings["PORTAGE_BUILDDIR"], "temp")
6222
6223         mysettings["PORTAGE_BASHRC"] = os.path.join(
6224                 mysettings["PORTAGE_CONFIGROOT"], EBUILD_SH_ENV_FILE)
6225         mysettings["EBUILD_EXIT_STATUS_FILE"] = os.path.join(
6226                 mysettings["PORTAGE_BUILDDIR"], ".exit_status")
6227
6228         # Set up the KV variable -- DEP SPEEDUP :: Don't waste time. Keep var persistent.
6229         if eapi not in ('0', '1', '2'):
6230                 # Discard KV for EAPIs that don't support it. The cached KV is restored
6231                 # from the backupenv whenever config.reset() is called.
6232                 mysettings.pop('KV', None)
6233         elif mydo != 'depend' and 'KV' not in mysettings and \
6234                 mydo in ('compile', 'config', 'configure', 'info',
6235                 'install', 'nofetch', 'postinst', 'postrm', 'preinst',
6236                 'prepare', 'prerm', 'setup', 'test', 'unpack'):
6237                 mykv,err1=ExtractKernelVersion(os.path.join(myroot, "usr/src/linux"))
6238                 if mykv:
6239                         # Regular source tree
6240                         mysettings["KV"]=mykv
6241                 else:
6242                         mysettings["KV"]=""
6243                 mysettings.backup_changes("KV")
6244
6245         # Allow color.map to control colors associated with einfo, ewarn, etc...
6246         mycolors = []
6247         for c in ("GOOD", "WARN", "BAD", "HILITE", "BRACKET"):
6248                 mycolors.append("%s=$'%s'" % \
6249                         (c, portage.output.style_to_ansi_code(c)))
6250         mysettings["PORTAGE_COLORMAP"] = "\n".join(mycolors)
6251
6252 def prepare_build_dirs(myroot, mysettings, cleanup):
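	"""
	Remove stale build state (HOME, plus T when cleanup is enabled), then
	(re)create the build directories (PORTAGE_BUILDDIR, HOME, PKG_LOGDIR,
	T) with portage group permissions so that unprivileged phases can
	still write to them, and delegate log and FEATURES directory setup to
	_prepare_workdir() and _prepare_features_dirs(). Returns 1 on
	permission problems, otherwise None.
	"""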
6253
6254         clean_dirs = [mysettings["HOME"]]
6255
6256         # We enable cleanup when we want to make sure old cruft (such as the old
6257         # environment) doesn't interfere with the current phase.
6258         if cleanup:
6259                 clean_dirs.append(mysettings["T"])
6260
6261         for clean_dir in clean_dirs:
6262                 try:
6263                         shutil.rmtree(clean_dir)
6264                 except OSError as oe:
6265                         if errno.ENOENT == oe.errno:
6266                                 pass
6267                         elif errno.EPERM == oe.errno:
6268                                 writemsg("%s\n" % oe, noiselevel=-1)
6269                                 writemsg(_("Operation Not Permitted: rmtree('%s')\n") % \
6270                                         clean_dir, noiselevel=-1)
6271                                 return 1
6272                         else:
6273                                 raise
6274
6275         def makedirs(dir_path):
6276                 try:
6277                         os.makedirs(dir_path)
6278                 except OSError as oe:
6279                         if errno.EEXIST == oe.errno:
6280                                 pass
6281                         elif errno.EPERM == oe.errno:
6282                                 writemsg("%s\n" % oe, noiselevel=-1)
6283                                 writemsg(_("Operation Not Permitted: makedirs('%s')\n") % \
6284                                         dir_path, noiselevel=-1)
6285                                 return False
6286                         else:
6287                                 raise
6288                 return True
6289
6290         mysettings["PKG_LOGDIR"] = os.path.join(mysettings["T"], "logging")
6291
6292         mydirs = [os.path.dirname(mysettings["PORTAGE_BUILDDIR"])]
6293         mydirs.append(os.path.dirname(mydirs[-1]))
6294
6295         try:
6296                 for mydir in mydirs:
6297                         portage.util.ensure_dirs(mydir)
6298                         portage.util.apply_secpass_permissions(mydir,
6299                                 gid=portage_gid, uid=portage_uid, mode=0o70, mask=0)
6300                 for dir_key in ("PORTAGE_BUILDDIR", "HOME", "PKG_LOGDIR", "T"):
6301                         """These directories don't necessarily need to be group writable.
6302                         However, the setup phase is commonly run as a privileged user prior
6303                         to the other phases being run by an unprivileged user.  Currently,
6304                         we use the portage group to ensure that the unprivileged user still
6305                         has write access to these directories in any case."""
6306                         portage.util.ensure_dirs(mysettings[dir_key], mode=0o775)
6307                         portage.util.apply_secpass_permissions(mysettings[dir_key],
6308                                 uid=portage_uid, gid=portage_gid)
6309         except portage.exception.PermissionDenied as e:
6310                 writemsg(_("Permission Denied: %s\n") % str(e), noiselevel=-1)
6311                 return 1
6312         except portage.exception.OperationNotPermitted as e:
6313                 writemsg(_("Operation Not Permitted: %s\n") % str(e), noiselevel=-1)
6314                 return 1
6315         except portage.exception.FileNotFound as e:
6316                 writemsg(_("File Not Found: '%s'\n") % str(e), noiselevel=-1)
6317                 return 1
6318
6319         _prepare_workdir(mysettings)
6320         if mysettings.get('EBUILD_PHASE') != 'fetch':
6321                 # Avoid spurious permissions adjustments when fetching with
6322                 # a temporary PORTAGE_TMPDIR setting (for fetchonly).
6323                 _prepare_features_dirs(mysettings)
6324
6325 def _adjust_perms_msg(settings, msg):
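	"""
	Report a permissions adjustment: when running in the background
	(PORTAGE_BACKGROUND=1) with a PORTAGE_LOG_FILE, append msg to the log
	file; otherwise write it to stderr via writemsg().
	"""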
6326
6327         def write(msg):
6328                 writemsg(msg, noiselevel=-1)
6329
6330         background = settings.get("PORTAGE_BACKGROUND") == "1"
6331         log_path = settings.get("PORTAGE_LOG_FILE")
6332         log_file = None
6333
6334         if background and log_path is not None:
6335                 try:
6336                         log_file = codecs.open(_unicode_encode(log_path,
6337                                 encoding=_encodings['fs'], errors='strict'),
6338                                 mode='a', encoding=_encodings['content'], errors='replace')
6339                 except IOError:
6340                         def write(msg):
6341                                 pass
6342                 else:
6343                         def write(msg):
6344                                 log_file.write(_unicode_decode(msg))
6345                                 log_file.flush()
6346
6347         try:
6348                 write(msg)
6349         finally:
6350                 if log_file is not None:
6351                         log_file.close()
6352
6353 def _prepare_features_dirs(mysettings):
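	"""
	For FEATURES that need support directories (currently ccache and
	distcc), ensure the directories referenced by CCACHE_DIR / DISTCC_DIR
	exist and, when userpriv is in effect, that the portage group can
	write to them. A feature is removed from FEATURES if its directories
	cannot be prepared.
	"""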
6354
6355         features_dirs = {
6356                 "ccache":{
6357                         "path_dir": "/usr/lib/ccache/bin",
6358                         "basedir_var":"CCACHE_DIR",
6359                         "default_dir":os.path.join(mysettings["PORTAGE_TMPDIR"], "ccache"),
6360                         "always_recurse":False},
6361                 "distcc":{
6362                         "path_dir": "/usr/lib/distcc/bin",
6363                         "basedir_var":"DISTCC_DIR",
6364                         "default_dir":os.path.join(mysettings["BUILD_PREFIX"], ".distcc"),
6365                         "subdirs":("lock", "state"),
6366                         "always_recurse":True}
6367         }
6368         dirmode  = 0o2070
6369         filemode =   0o60
6370         modemask =    0o2
6371         restrict = mysettings.get("PORTAGE_RESTRICT","").split()
6372         from portage.data import secpass
6373         droppriv = secpass >= 2 and \
6374                 "userpriv" in mysettings.features and \
6375                 "userpriv" not in restrict
6376         for myfeature, kwargs in features_dirs.items():
6377                 if myfeature in mysettings.features:
6378                         failure = False
6379                         basedir = mysettings.get(kwargs["basedir_var"])
6380                         if basedir is None or not basedir.strip():
6381                                 basedir = kwargs["default_dir"]
6382                                 mysettings[kwargs["basedir_var"]] = basedir
6383                         try:
6384                                 path_dir = kwargs["path_dir"]
6385                                 if not os.path.isdir(path_dir):
6386                                         raise portage.exception.DirectoryNotFound(path_dir)
6387
6388                                 mydirs = [mysettings[kwargs["basedir_var"]]]
6389                                 if "subdirs" in kwargs:
6390                                         for subdir in kwargs["subdirs"]:
6391                                                 mydirs.append(os.path.join(basedir, subdir))
6392                                 for mydir in mydirs:
6393                                         modified = portage.util.ensure_dirs(mydir)
6394                                         # Generally, we only want to apply permissions for
6395                                         # initial creation.  Otherwise, we don't know exactly what
6396                                         # permissions the user wants, so we should leave them as-is.
6397                                         droppriv_fix = False
6398                                         if droppriv:
6399                                                 st = os.stat(mydir)
6400                                                 if st.st_gid != portage_gid or \
6401                                                         not dirmode == (stat.S_IMODE(st.st_mode) & dirmode):
6402                                                         droppriv_fix = True
6403                                                 if not droppriv_fix:
6404                                                         # Check permissions of files in the directory.
6405                                                         for filename in os.listdir(mydir):
6406                                                                 try:
6407                                                                         subdir_st = os.lstat(
6408                                                                                 os.path.join(mydir, filename))
6409                                                                 except OSError:
6410                                                                         continue
6411                                                                 if subdir_st.st_gid != portage_gid or \
6412                                                                         ((stat.S_ISDIR(subdir_st.st_mode) and \
6413                                                                         not dirmode == (stat.S_IMODE(subdir_st.st_mode) & dirmode))):
6414                                                                         droppriv_fix = True
6415                                                                         break
6416
6417                                         if droppriv_fix:
6418                                                 _adjust_perms_msg(mysettings,
6419                                                         colorize("WARN", " * ") + \
6420                                                         _("Adjusting permissions "
6421                                                         "for FEATURES=userpriv: '%s'\n") % mydir)
6422                                         elif modified:
6423                                                 _adjust_perms_msg(mysettings,
6424                                                         colorize("WARN", " * ") + \
6425                                                         _("Adjusting permissions "
6426                                                         "for FEATURES=%s: '%s'\n") % (myfeature, mydir))
6427
6428                                         if modified or kwargs["always_recurse"] or droppriv_fix:
6429                                                 def onerror(e):
6430                                                         raise   # The feature is disabled if a single error
6431                                                                         # occurs during permissions adjustment.
6432                                                 if not apply_recursive_permissions(mydir,
6433                                                 gid=portage_gid, dirmode=dirmode, dirmask=modemask,
6434                                                 filemode=filemode, filemask=modemask, onerror=onerror):
6435                                                         raise portage.exception.OperationNotPermitted(
6436                                                                 _("Failed to apply recursive permissions for the portage group."))
6437
6438                         except portage.exception.DirectoryNotFound as e:
6439                                 failure = True
6440                                 writemsg(_("\n!!! Directory does not exist: '%s'\n") % \
6441                                         (e,), noiselevel=-1)
6442                                 writemsg(_("!!! Disabled FEATURES='%s'\n") % myfeature,
6443                                         noiselevel=-1)
6444
6445                         except portage.exception.PortageException as e:
6446                                 failure = True
6447                                 writemsg("\n!!! %s\n" % str(e), noiselevel=-1)
6448                                 writemsg(_("!!! Failed resetting perms on %s='%s'\n") % \
6449                                         (kwargs["basedir_var"], basedir), noiselevel=-1)
6450                                 writemsg(_("!!! Disabled FEATURES='%s'\n") % myfeature,
6451                                         noiselevel=-1)
6452
6453                         if failure:
6454                                 mysettings.features.remove(myfeature)
6455                                 mysettings['FEATURES'] = ' '.join(sorted(mysettings.features))
6456                                 time.sleep(5)
6457
6458 def _prepare_workdir(mysettings):
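	"""
	Apply PORTAGE_WORKDIR_MODE permissions to WORKDIR (falling back to
	0700 when the variable is unset or unparseable) and choose a
	PORTAGE_LOG_FILE location: under PORT_LOGDIR when it is set and
	writable, otherwise ${T}/build.log.
	"""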
6459         workdir_mode = 0o700
6460         try:
6461                 mode = mysettings["PORTAGE_WORKDIR_MODE"]
6462                 if mode.isdigit():
6463                         parsed_mode = int(mode, 8)
6464                 elif mode == "":
6465                         raise KeyError()
6466                 else:
6467                         raise ValueError()
6468                 if parsed_mode & 0o7777 != parsed_mode:
6469                         raise ValueError("Invalid file mode: %s" % mode)
6470                 else:
6471                         workdir_mode = parsed_mode
6472         except KeyError as e:
6473                 writemsg(_("!!! PORTAGE_WORKDIR_MODE is unset, using %s.\n") % oct(workdir_mode))
6474         except ValueError as e:
6475                 if len(str(e)) > 0:
6476                         writemsg("%s\n" % e)
6477                 writemsg(_("!!! Unable to parse PORTAGE_WORKDIR_MODE='%s', using %s.\n") % \
6478                 (mysettings["PORTAGE_WORKDIR_MODE"], oct(workdir_mode)))
6479         mysettings["PORTAGE_WORKDIR_MODE"] = oct(workdir_mode).replace('o', '')
6480         try:
6481                 apply_secpass_permissions(mysettings["WORKDIR"],
6482                 uid=portage_uid, gid=portage_gid, mode=workdir_mode)
6483         except portage.exception.FileNotFound:
6484                 pass # ebuild.sh will create it
6485
6486         if mysettings.get("PORT_LOGDIR", "") == "":
6487                 while "PORT_LOGDIR" in mysettings:
6488                         del mysettings["PORT_LOGDIR"]
6489         if "PORT_LOGDIR" in mysettings:
6490                 try:
6491                         modified = portage.util.ensure_dirs(mysettings["PORT_LOGDIR"])
6492                         if modified:
6493                                 apply_secpass_permissions(mysettings["PORT_LOGDIR"],
6494                                         uid=portage_uid, gid=portage_gid, mode=0o2770)
6495                 except portage.exception.PortageException as e:
6496                         writemsg("!!! %s\n" % str(e), noiselevel=-1)
6497                         writemsg(_("!!! Permission issues with PORT_LOGDIR='%s'\n") % \
6498                                 mysettings["PORT_LOGDIR"], noiselevel=-1)
6499                         writemsg(_("!!! Disabling logging.\n"), noiselevel=-1)
6500                         while "PORT_LOGDIR" in mysettings:
6501                                 del mysettings["PORT_LOGDIR"]
6502         if "PORT_LOGDIR" in mysettings and \
6503                 os.access(mysettings["PORT_LOGDIR"], os.W_OK):
6504                 logid_path = os.path.join(mysettings["PORTAGE_BUILDDIR"], ".logid")
6505                 if not os.path.exists(logid_path):
6506                         open(_unicode_encode(logid_path), 'w').close()
6507                 logid_time = _unicode_decode(time.strftime("%Y%m%d-%H%M%S",
6508                         time.gmtime(os.stat(logid_path).st_mtime)),
6509                         encoding=_encodings['content'], errors='replace')
6510
6511                 if "split-log" in mysettings.features:
6512                         mysettings["PORTAGE_LOG_FILE"] = os.path.join(
6513                                 mysettings["PORT_LOGDIR"], "build", "%s/%s:%s.log" % \
6514                                 (mysettings["CATEGORY"], mysettings["PF"], logid_time))
6515                 else:
6516                         mysettings["PORTAGE_LOG_FILE"] = os.path.join(
6517                                 mysettings["PORT_LOGDIR"], "%s:%s:%s.log" % \
6518                                 (mysettings["CATEGORY"], mysettings["PF"], logid_time))
6519
6520                 util.ensure_dirs(os.path.dirname(mysettings["PORTAGE_LOG_FILE"]))
6521
6522         else:
6523                 # NOTE: When sesandbox is enabled, the local SELinux security policies
6524                 # may not allow output to be piped out of the sesandbox domain. The
6525                 # current policy will allow it to work when a pty is available, but
6526                 # not through a normal pipe. See bug #162404.
6527                 mysettings["PORTAGE_LOG_FILE"] = os.path.join(
6528                         mysettings["T"], "build.log")
6529
6530 def _doebuild_exit_status_check(mydo, settings):
6531         """
6532         Returns an error string if the shell appeared
6533         to exit unsuccessfully, None otherwise.
6534         """
6535         exit_status_file = settings.get("EBUILD_EXIT_STATUS_FILE")
6536         if not exit_status_file or \
6537                 os.path.exists(exit_status_file):
6538                 return None
6539         msg = _("The ebuild phase '%s' has exited "
6540         "unexpectedly. This type of behavior "
6541         "is known to be triggered "
6542         "by things such as failed variable "
6543         "assignments (bug #190128) or bad substitution "
6544         "errors (bug #200313). Normally, before exiting, bash should "
6545         "have displayed an error message above. If bash did not "
6546         "produce an error message above, it's possible "
6547         "that the ebuild has called `exit` when it "
6548         "should have called `die` instead. This behavior may also "
6549         "be triggered by a corrupt bash binary or a hardware "
6550         "problem such as memory or cpu malfunction. If the problem is not "
6551         "reproducible or it appears to occur randomly, then it is likely "
6552         "to be triggered by a hardware problem. "
6553         "If you suspect a hardware problem then you should "
6554         "try some basic hardware diagnostics such as memtest. "
6555         "Please do not report this as a bug unless it is consistently "
6556         "reproducible and you are sure that your bash binary and hardware "
6557         "are functioning properly.") % mydo
6558         return msg
6559
6560 def _doebuild_exit_status_check_and_log(settings, mydo, retval):
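	"""
	Wrapper around _doebuild_exit_status_check(): if the phase exited
	without writing its exit status file, log the explanation via eerror
	and make sure a nonzero value is returned.
	"""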
6561         msg = _doebuild_exit_status_check(mydo, settings)
6562         if msg:
6563                 if retval == os.EX_OK:
6564                         retval = 1
6565                 from textwrap import wrap
6566                 from portage.elog.messages import eerror
6567                 for l in wrap(msg, 72):
6568                         eerror(l, phase=mydo, key=settings.mycpv)
6569         return retval
6570
6571 def _doebuild_exit_status_unlink(exit_status_file):
6572         """
6573         Unlink the exit status file, then double check that it really
6574         doesn't exist; if it still does (it shouldn't), the second
6575         os.unlink() call will raise OSError.
6576         """
6577         if not exit_status_file:
6578                 return
6579         try:
6580                 os.unlink(exit_status_file)
6581         except OSError:
6582                 pass
6583         if os.path.exists(exit_status_file):
6584                 os.unlink(exit_status_file)
6585
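# Module-level state shared by doebuild(): a counter used to temporarily
# exempt metadata generation from Manifest checks, a cache of the most
# recently verified Manifest, and sets of ebuilds/manifests that already
# failed verification so they are not re-checked.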
6586 _doebuild_manifest_exempt_depend = 0
6587 _doebuild_manifest_cache = None
6588 _doebuild_broken_ebuilds = set()
6589 _doebuild_broken_manifests = set()
6590
6591 def doebuild(myebuild, mydo, myroot, mysettings, debug=0, listonly=0,
6592         fetchonly=0, cleanup=0, dbkey=None, use_cache=1, fetchall=0, tree=None,
6593         mydbapi=None, vartree=None, prev_mtimes=None,
6594         fd_pipes=None, returnpid=False):
6595
6596         """
6597         Wrapper function that invokes specific ebuild phases through the spawning
6598         of ebuild.sh
6599         
6600         @param myebuild: path to the ebuild file to invoke the phase on
6601         @type myebuild: String
6602         @param mydo: Phase to run
6603         @type mydo: String
6604         @param myroot: $ROOT (usually '/', see man make.conf)
6605         @type myroot: String
6606         @param mysettings: Portage Configuration
6607         @type mysettings: instance of portage.config
6608         @param debug: Turns on various debug information (eg, debug for spawn)
6609         @type debug: Boolean
6610         @param listonly: Used to wrap fetch(); passed such that fetch only lists files required.
6611         @type listonly: Boolean
6612         @param fetchonly: Used to wrap fetch(); passed such that files are only fetched (no other actions)
6613         @type fetchonly: Boolean
6614         @param cleanup: Passed to prepare_build_dirs; when enabled, stale build state (such as the old environment and temp dir) is removed before the phase runs
6615         @type cleanup: Boolean
6616         @param dbkey: A dict (usually keys and values from the depend phase, such as KEYWORDS, USE, etc..)
6617         @type dbkey: Dict or String
6618         @param use_cache: Enables the cache
6619         @type use_cache: Boolean
6620         @param fetchall: Used to wrap fetch(), fetches all URIs (even ones invalid due to USE conditionals)
6621         @type fetchall: Boolean
6622         @param tree: Which tree to use ('vartree','porttree','bintree', etc..), defaults to 'porttree'
6623         @type tree: String
6624         @param mydbapi: a dbapi instance to pass to various functions; this should be a portdbapi instance.
6625         @type mydbapi: portdbapi instance
6626         @param vartree: An instance of vartree; used for aux_get calls, defaults to db[myroot]['vartree']
6627         @type vartree: vartree instance
6628         @param prev_mtimes: A dict of { filename:mtime } keys used by merge() to do config_protection
6629         @type prev_mtimes: dictionary
6630         @param fd_pipes: A dict mapping file descriptors to pipes, { 0: stdin, 1: stdout }
6631                 for example.
6632         @type fd_pipes: Dictionary
6633         @param returnpid: Return a list of process IDs for a successful spawn, or
6634                 an integer value if spawn is unsuccessful. NOTE: This requires that the
6635                 caller clean up all returned PIDs.
6636         @type returnpid: Boolean
6637         @rtype: Integer
6638         @returns:
6639         1. 0 for success
6640         2. 1 for error
6641         
6642         Most errors have an accompanying error message.
6643         
6644         listonly and fetchonly are only really necessary for operations involving 'fetch';
6645         prev_mtimes is only necessary for merge operations.
6646         Other variables may not be strictly required; many have defaults that are set inside of doebuild.
6647         
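	A minimal usage sketch (illustrative only; it assumes an already
	initialized portage environment, and the ebuild path below is a
	hypothetical example):

		import portage
		myroot = portage.settings["ROOT"]
		settings = portage.config(clone=portage.settings)
		portdb = portage.db[myroot]["porttree"].dbapi
		rval = portage.doebuild(
			"/usr/portage/app-misc/foo/foo-1.0.ebuild", "fetch",
			myroot, settings, tree="porttree", mydbapi=portdb)
		# rval == 0 indicates success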
6648         """
6649         
6650         if not tree:
6651                 writemsg("Warning: tree not specified to doebuild\n")
6652                 tree = "porttree"
6653         global db
6654         
6655         # Dependencies chunked out for each phase, so that the ebuild binary
6656         # can use them to collapse targets down.
6657         actionmap_deps={
6658         "setup":  [],
6659         "unpack": ["setup"],
6660         "prepare": ["unpack"],
6661         "configure": ["prepare"],
6662         "compile":["configure"],
6663         "test":   ["compile"],
6664         "install":["test"],
6665         "rpm":    ["install"],
6666         "package":["install"],
6667         }
6668         
6669         if mydbapi is None:
6670                 mydbapi = db[myroot][tree].dbapi
6671
6672         if vartree is None and mydo in ("merge", "qmerge", "unmerge"):
6673                 vartree = db[myroot]["vartree"]
6674
6675         features = mysettings.features
6676         noauto = "noauto" in features
6677         from portage.data import secpass
6678
6679         clean_phases = ("clean", "cleanrm")
6680         validcommands = ["help","clean","prerm","postrm","cleanrm","preinst","postinst",
6681                         "config", "info", "setup", "depend", "pretend",
6682                         "fetch", "fetchall", "digest",
6683                         "unpack", "prepare", "configure", "compile", "test",
6684                         "install", "rpm", "qmerge", "merge",
6685                         "package","unmerge", "manifest"]
6686
6687         if mydo not in validcommands:
6688                 validcommands.sort()
6689                 writemsg("!!! doebuild: '%s' is not one of the following valid commands:" % mydo,
6690                         noiselevel=-1)
6691                 for vcount in range(len(validcommands)):
6692                         if vcount%6 == 0:
6693                                 writemsg("\n!!! ", noiselevel=-1)
6694                         writemsg(validcommands[vcount].ljust(11), noiselevel=-1)
6695                 writemsg("\n", noiselevel=-1)
6696                 return 1
6697
6698         if mydo == "fetchall":
6699                 fetchall = 1
6700                 mydo = "fetch"
6701
6702         parallel_fetchonly = mydo in ("fetch", "fetchall") and \
6703                 "PORTAGE_PARALLEL_FETCHONLY" in mysettings
6704
6705         if mydo not in clean_phases and not os.path.exists(myebuild):
6706                 writemsg("!!! doebuild: %s not found for %s\n" % (myebuild, mydo),
6707                         noiselevel=-1)
6708                 return 1
6709
6710         global _doebuild_manifest_exempt_depend
6711
6712         if "strict" in features and \
6713                 "digest" not in features and \
6714                 tree == "porttree" and \
6715                 mydo not in ("digest", "manifest", "help") and \
6716                 not _doebuild_manifest_exempt_depend:
6717                 # Always verify the ebuild checksums before executing it.
6718                 global _doebuild_manifest_cache, _doebuild_broken_ebuilds, \
6719                         _doebuild_broken_manifests
6720
6721                 if myebuild in _doebuild_broken_ebuilds:
6722                         return 1
6723
6724                 pkgdir = os.path.dirname(myebuild)
6725                 manifest_path = os.path.join(pkgdir, "Manifest")
6726
6727                 # Avoid checking the same Manifest several times in a row during a
6728                 # regen with an empty cache.
6729                 if _doebuild_manifest_cache is None or \
6730                         _doebuild_manifest_cache.getFullname() != manifest_path:
6731                         _doebuild_manifest_cache = None
6732                         if not os.path.exists(manifest_path):
6733                                 out = portage.output.EOutput()
6734                                 out.eerror(_("Manifest not found for '%s'") % (myebuild,))
6735                                 _doebuild_broken_ebuilds.add(myebuild)
6736                                 return 1
6737                         mf = Manifest(pkgdir, mysettings["DISTDIR"])
6738
6739                 else:
6740                         mf = _doebuild_manifest_cache
6741
6742                 try:
6743                         mf.checkFileHashes("EBUILD", os.path.basename(myebuild))
6744                 except KeyError:
6745                         out = portage.output.EOutput()
6746                         out.eerror(_("Missing digest for '%s'") % (myebuild,))
6747                         _doebuild_broken_ebuilds.add(myebuild)
6748                         return 1
6749                 except portage.exception.FileNotFound:
6750                         out = portage.output.EOutput()
6751                         out.eerror(_("A file listed in the Manifest "
6752                                 "could not be found: '%s'") % (myebuild,))
6753                         _doebuild_broken_ebuilds.add(myebuild)
6754                         return 1
6755                 except portage.exception.DigestException as e:
6756                         out = portage.output.EOutput()
6757                         out.eerror(_("Digest verification failed:"))
6758                         out.eerror("%s" % e.value[0])
6759                         out.eerror(_("Reason: %s") % e.value[1])
6760                         out.eerror(_("Got: %s") % e.value[2])
6761                         out.eerror(_("Expected: %s") % e.value[3])
6762                         _doebuild_broken_ebuilds.add(myebuild)
6763                         return 1
6764
6765                 if mf.getFullname() in _doebuild_broken_manifests:
6766                         return 1
6767
6768                 if mf is not _doebuild_manifest_cache:
6769
6770                         # Make sure that all of the ebuilds are
6771                         # actually listed in the Manifest.
6772                         glep55 = 'parse-eapi-glep-55' in mysettings.features
6773                         for f in os.listdir(pkgdir):
6774                                 pf = None
6775                                 if glep55:
6776                                         pf, eapi = _split_ebuild_name_glep55(f)
6777                                 elif f[-7:] == '.ebuild':
6778                                         pf = f[:-7]
6779                                 if pf is not None and not mf.hasFile("EBUILD", f):
6780                                         f = os.path.join(pkgdir, f)
6781                                         if f not in _doebuild_broken_ebuilds:
6782                                                 out = portage.output.EOutput()
6783                                                 out.eerror(_("A file is not listed in the "
6784                                                         "Manifest: '%s'") % (f,))
6785                                         _doebuild_broken_manifests.add(manifest_path)
6786                                         return 1
6787
6788                         # Only cache it if the above stray files test succeeds.
6789                         _doebuild_manifest_cache = mf
6790
6791         def exit_status_check(retval):
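		# Same check as _doebuild_exit_status_check_and_log(): if the phase
		# exited without writing EBUILD_EXIT_STATUS_FILE, log an error and
		# force a nonzero return value.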
6792                 msg = _doebuild_exit_status_check(mydo, mysettings)
6793                 if msg:
6794                         if retval == os.EX_OK:
6795                                 retval = 1
6796                         from textwrap import wrap
6797                         from portage.elog.messages import eerror
6798                         for l in wrap(msg, 72):
6799                                 eerror(l, phase=mydo, key=mysettings.mycpv)
6800                 return retval
6801
6802         # Note: PORTAGE_BIN_PATH may differ from the global
6803         # constant when portage is reinstalling itself.
6804         portage_bin_path = mysettings["PORTAGE_BIN_PATH"]
6805         ebuild_sh_binary = os.path.join(portage_bin_path,
6806                 os.path.basename(EBUILD_SH_BINARY))
6807         misc_sh_binary = os.path.join(portage_bin_path,
6808                 os.path.basename(MISC_SH_BINARY))
6809
6810         logfile=None
6811         builddir_lock = None
6812         tmpdir = None
6813         tmpdir_orig = None
6814
6815         try:
6816                 if mydo in ("digest", "manifest", "help"):
6817                         # Temporarily exempt the depend phase from manifest checks, in case
6818                         # aux_get calls trigger cache generation.
6819                         _doebuild_manifest_exempt_depend += 1
6820
6821                 # If we don't need much space and we don't need a constant location,
6822                 # we can temporarily override PORTAGE_TMPDIR with a random temp dir
6823                 # so that there's no need for locking and it can be used even if the
6824                 # user isn't in the portage group.
6825                 if mydo in ("info",):
6826                         from tempfile import mkdtemp
6827                         tmpdir = mkdtemp()
6828                         tmpdir_orig = mysettings["PORTAGE_TMPDIR"]
6829                         mysettings["PORTAGE_TMPDIR"] = tmpdir
6830
6831                 doebuild_environment(myebuild, mydo, myroot, mysettings, debug,
6832                         use_cache, mydbapi)
6833
6834                 if mydo in clean_phases:
6835                         retval = spawn(_shell_quote(ebuild_sh_binary) + " clean",
6836                                 mysettings, debug=debug, fd_pipes=fd_pipes, free=1,
6837                                 logfile=None, returnpid=returnpid)
6838                         return retval
6839
6840                 restrict = set(mysettings.get('PORTAGE_RESTRICT', '').split())
6841                 # get possible slot information from the deps file
6842                 if mydo == "depend":
6843                         writemsg("!!! DEBUG: dbkey: %s\n" % str(dbkey), 2)
6844                         droppriv = "userpriv" in mysettings.features
6845                         if returnpid:
6846                                 mypids = spawn(_shell_quote(ebuild_sh_binary) + " depend",
6847                                         mysettings, fd_pipes=fd_pipes, returnpid=True,
6848                                         droppriv=droppriv)
6849                                 return mypids
6850                         elif isinstance(dbkey, dict):
6851                                 mysettings["dbkey"] = ""
6852                                 pr, pw = os.pipe()
6853                                 fd_pipes = {
6854                                         0:sys.stdin.fileno(),
6855                                         1:sys.stdout.fileno(),
6856                                         2:sys.stderr.fileno(),
6857                                         9:pw}
6858                                 mypids = spawn(_shell_quote(ebuild_sh_binary) + " depend",
6859                                         mysettings,
6860                                         fd_pipes=fd_pipes, returnpid=True, droppriv=droppriv)
6861                                 os.close(pw) # belongs exclusively to the child process now
6862                                 f = os.fdopen(pr, 'rb')
6863                                 for k, v in zip(auxdbkeys,
6864                                         (_unicode_decode(line).rstrip('\n') for line in f)):
6865                                         dbkey[k] = v
6866                                 f.close()
6867                                 retval = os.waitpid(mypids[0], 0)[1]
6868                                 portage.process.spawned_pids.remove(mypids[0])
6869                                 # If it got a signal, return the signal that was sent, but
6870                                 # shift in order to distinguish it from a return value (just
6871                                 # like portage.process.spawn() would do).
6872                                 if retval & 0xff:
6873                                         retval = (retval & 0xff) << 8
6874                                 else:
6875                                         # Otherwise, return its exit code.
6876                                         retval = retval >> 8
6877                                 if retval == os.EX_OK and len(dbkey) != len(auxdbkeys):
6878                                         # Don't trust bash's returncode if the
6879                                         # number of lines is incorrect.
6880                                         retval = 1
6881                                 return retval
6882                         elif dbkey:
6883                                 mysettings["dbkey"] = dbkey
6884                         else:
6885                                 mysettings["dbkey"] = \
6886                                         os.path.join(mysettings.depcachedir, "aux_db_key_temp")
6887
6888                         return spawn(_shell_quote(ebuild_sh_binary) + " depend",
6889                                 mysettings,
6890                                 droppriv=droppriv)
6891
6892                 # Validate dependency metadata here to ensure that ebuilds with invalid
6893                 # data are never installed via the ebuild command. Don't bother when
6894                 # returnpid == True since there's no need to do this every time emerge
6895                 # executes a phase.
6896                 if not returnpid:
6897                         rval = _validate_deps(mysettings, myroot, mydo, mydbapi)
6898                         if rval != os.EX_OK:
6899                                 return rval
6900
6901                 if "PORTAGE_TMPDIR" not in mysettings or \
6902                         not os.path.isdir(mysettings["PORTAGE_TMPDIR"]):
6903                         writemsg(_("The directory specified in your "
6904                                 "PORTAGE_TMPDIR variable, '%s',\n"
6905                                 "does not exist.  Please create this directory or "
6906                                 "correct your PORTAGE_TMPDIR setting.\n") % mysettings.get("PORTAGE_TMPDIR", ""), noiselevel=-1)
6907                         return 1
6908                 
6909                 # Some people use a separate PORTAGE_TMPDIR mount; prefer that
6910                 # location, since the checks below would otherwise be pointless
6911                 # for those people.
6912                 if os.path.exists(os.path.join(mysettings["PORTAGE_TMPDIR"], "portage")):
6913                         checkdir = os.path.join(mysettings["PORTAGE_TMPDIR"], "portage")
6914                 else:
6915                         checkdir = mysettings["PORTAGE_TMPDIR"]
6916
6917                 if not os.access(checkdir, os.W_OK):
6918                         writemsg(_("%s is not writable.\n"
6919                                 "Likely cause is that you've mounted it as readonly.\n") % checkdir,
6920                                 noiselevel=-1)
6921                         return 1
6922                 else:
6923                         from tempfile import NamedTemporaryFile
6924                         fd = NamedTemporaryFile(prefix="exectest-", dir=checkdir)
6925                         os.chmod(fd.name, 0o755)
6926                         if not os.access(fd.name, os.X_OK):
6927                                 writemsg(_("Can not execute files in %s\n"
6928                                         "Likely cause is that you've mounted it with one of the\n"
6929                                         "following mount options: 'noexec', 'user', 'users'\n\n"
6930                                         "Please make sure that portage can execute files in this directory.\n") % checkdir,
6931                                         noiselevel=-1)
6932                                 fd.close()
6933                                 return 1
6934                         fd.close()
6935                 del checkdir
6936
6937                 if mydo == "unmerge":
6938                         return unmerge(mysettings["CATEGORY"],
6939                                 mysettings["PF"], myroot, mysettings, vartree=vartree)
6940
6941                 # Build directory creation isn't required for any of these.
6942                 # In the fetch phase, the directory is needed only for RESTRICT=fetch
6943                 # in order to satisfy the sane $PWD requirement (from bug #239560)
6944                 # when pkg_nofetch is spawned.
6945                 have_build_dirs = False
6946                 if not parallel_fetchonly and \
6947                         mydo not in ('digest', 'help', 'manifest') and \
6948                         not (mydo == 'fetch' and 'fetch' not in restrict):
6949                         mystatus = prepare_build_dirs(myroot, mysettings, cleanup)
6950                         if mystatus:
6951                                 return mystatus
6952                         have_build_dirs = True
6953
6954                         # emerge handles logging externally
6955                         if not returnpid:
6956                                 # PORTAGE_LOG_FILE is set by the
6957                                 # above prepare_build_dirs() call.
6958                                 logfile = mysettings.get("PORTAGE_LOG_FILE")
6959
6960                 if have_build_dirs:
6961                         env_file = os.path.join(mysettings["T"], "environment")
6962                         env_stat = None
6963                         saved_env = None
6964                         try:
6965                                 env_stat = os.stat(env_file)
6966                         except OSError as e:
6967                                 if e.errno != errno.ENOENT:
6968                                         raise
6969                                 del e
6970                         if not env_stat:
6971                                 saved_env = os.path.join(
6972                                         os.path.dirname(myebuild), "environment.bz2")
6973                                 if not os.path.isfile(saved_env):
6974                                         saved_env = None
6975                         if saved_env:
6976                                 retval = os.system(
6977                                         "bzip2 -dc %s > %s" % \
6978                                         (_shell_quote(saved_env),
6979                                         _shell_quote(env_file)))
6980                                 try:
6981                                         env_stat = os.stat(env_file)
6982                                 except OSError as e:
6983                                         if e.errno != errno.ENOENT:
6984                                                 raise
6985                                         del e
6986                                 if os.WIFEXITED(retval) and \
6987                                         os.WEXITSTATUS(retval) == os.EX_OK and \
6988                                         env_stat and env_stat.st_size > 0:
6989                                         # This is a signal to ebuild.sh, so that it knows to filter
6990                                         # out things like SANDBOX_{DENY,PREDICT,READ,WRITE} that
6991                                         # would be preserved between normal phases.
6992                                         open(_unicode_encode(env_file + '.raw'), 'w').close()
6993                                 else:
6994                                         writemsg(_("!!! Error extracting saved "
6995                                                 "environment: '%s'\n") % \
6996                                                 saved_env, noiselevel=-1)
6997                                         try:
6998                                                 os.unlink(env_file)
6999                                         except OSError as e:
7000                                                 if e.errno != errno.ENOENT:
7001                                                         raise
7002                                                 del e
7003                                         env_stat = None
7004                         if env_stat:
7005                                 pass
7006                         else:
7007                                 for var in ("ARCH", ):
7008                                         value = mysettings.get(var)
7009                                         if value and value.strip():
7010                                                 continue
7011                                         msg = _("%(var)s is not set... "
7012                                                 "Are you missing the '%(configroot)setc/make.profile' symlink? "
7013                                                 "Is the symlink correct? "
7014                                                 "Is your portage tree complete?") % \
7015                                                 {"var": var, "configroot": mysettings["PORTAGE_CONFIGROOT"]}
7016                                         from portage.elog.messages import eerror
7017                                         from textwrap import wrap
7018                                         for line in wrap(msg, 70):
7019                                                 eerror(line, phase="setup", key=mysettings.mycpv)
7020                                         from portage.elog import elog_process
7021                                         elog_process(mysettings.mycpv, mysettings)
7022                                         return 1
7023                         del env_file, env_stat, saved_env
7024                         _doebuild_exit_status_unlink(
7025                                 mysettings.get("EBUILD_EXIT_STATUS_FILE"))
7026                 else:
7027                         mysettings.pop("EBUILD_EXIT_STATUS_FILE", None)
7028
7029                 # if any of these are being called, handle them -- running them out of
7030                 # the sandbox -- and stop now.
7031                 if mydo == "help":
7032                         return spawn(_shell_quote(ebuild_sh_binary) + " " + mydo,
7033                                 mysettings, debug=debug, free=1, logfile=logfile)
7034                 elif mydo == "setup":
7035                         retval = spawn(
7036                                 _shell_quote(ebuild_sh_binary) + " " + mydo, mysettings,
7037                                 debug=debug, free=1, logfile=logfile, fd_pipes=fd_pipes,
7038                                 returnpid=returnpid)
7039                         if returnpid:
7040                                 return retval
7041                         retval = exit_status_check(retval)
7042                         if secpass >= 2:
7043                                 """ Privileged phases may have left files that need to be made
7044                                 writable to a less privileged user."""
7045                                 apply_recursive_permissions(mysettings["T"],
7046                                         uid=portage_uid, gid=portage_gid, dirmode=0o70, dirmask=0,
7047                                         filemode=0o60, filemask=0)
7048                         return retval
7049                 elif mydo == "preinst":
7050                         phase_retval = spawn(
7051                                 _shell_quote(ebuild_sh_binary) + " " + mydo,
7052                                 mysettings, debug=debug, free=1, logfile=logfile,
7053                                 fd_pipes=fd_pipes, returnpid=returnpid)
7054
7055                         if returnpid:
7056                                 return phase_retval
7057
7058                         phase_retval = exit_status_check(phase_retval)
7059                         if phase_retval == os.EX_OK:
7060                                 _doebuild_exit_status_unlink(
7061                                         mysettings.get("EBUILD_EXIT_STATUS_FILE"))
7062                                 mysettings.pop("EBUILD_PHASE", None)
7063                                 phase_retval = spawn(
7064                                         " ".join(_post_pkg_preinst_cmd(mysettings)),
7065                                         mysettings, debug=debug, free=1, logfile=logfile)
7066                                 phase_retval = exit_status_check(phase_retval)
7067                                 if phase_retval != os.EX_OK:
7068                                         writemsg(_("!!! post preinst failed; exiting.\n"),
7069                                                 noiselevel=-1)
7070                         return phase_retval
7071                 elif mydo == "postinst":
7072                         phase_retval = spawn(
7073                                 _shell_quote(ebuild_sh_binary) + " " + mydo,
7074                                 mysettings, debug=debug, free=1, logfile=logfile,
7075                                 fd_pipes=fd_pipes, returnpid=returnpid)
7076
7077                         if returnpid:
7078                                 return phase_retval
7079
7080                         phase_retval = exit_status_check(phase_retval)
7081                         if phase_retval == os.EX_OK:
7082                                 _doebuild_exit_status_unlink(
7083                                         mysettings.get("EBUILD_EXIT_STATUS_FILE"))
7084                                 mysettings.pop("EBUILD_PHASE", None)
7085                                 phase_retval = spawn(" ".join(_post_pkg_postinst_cmd(mysettings)),
7086                                         mysettings, debug=debug, free=1, logfile=logfile)
7087                                 phase_retval = exit_status_check(phase_retval)
7088                                 if phase_retval != os.EX_OK:
7089                                         writemsg(_("!!! post postinst failed; exiting.\n"),
7090                                                 noiselevel=-1)
7091                         return phase_retval
7092                 elif mydo in ("prerm", "postrm", "config", "info"):
7093                         retval =  spawn(
7094                                 _shell_quote(ebuild_sh_binary) + " " + mydo,
7095                                 mysettings, debug=debug, free=1, logfile=logfile,
7096                                 fd_pipes=fd_pipes, returnpid=returnpid)
7097
7098                         if returnpid:
7099                                 return retval
7100
7101                         retval = exit_status_check(retval)
7102                         return retval
7103
7104                 mycpv = "/".join((mysettings["CATEGORY"], mysettings["PF"]))
7105
7106                 emerge_skip_distfiles = returnpid
7107                 emerge_skip_digest = returnpid
7108                 # Only try and fetch the files if we are going to need them ...
7109                 # otherwise, if user has FEATURES=noauto and they run `ebuild clean
7110                 # unpack compile install`, we will try and fetch 4 times :/
7111                 need_distfiles = not emerge_skip_distfiles and \
7112                         (mydo in ("fetch", "unpack") or \
7113                         mydo not in ("digest", "manifest") and "noauto" not in features)
7114                 alist = mysettings.configdict["pkg"].get("A")
7115                 aalist = mysettings.configdict["pkg"].get("AA")
7116                 if need_distfiles or alist is None or aalist is None:
7117                         # Make sure we get the correct tree in case there are overlays.
7118                         mytree = os.path.realpath(
7119                                 os.path.dirname(os.path.dirname(mysettings["O"])))
7120                         useflags = mysettings["PORTAGE_USE"].split()
7121                         try:
7122                                 alist = mydbapi.getFetchMap(mycpv, useflags=useflags,
7123                                         mytree=mytree)
7124                                 aalist = mydbapi.getFetchMap(mycpv, mytree=mytree)
7125                         except portage.exception.InvalidDependString as e:
7126                                 writemsg("!!! %s\n" % str(e), noiselevel=-1)
7127                                 writemsg(_("!!! Invalid SRC_URI for '%s'.\n") % mycpv,
7128                                         noiselevel=-1)
7129                                 del e
7130                                 return 1
7131                         mysettings.configdict["pkg"]["A"] = " ".join(alist)
7132                         mysettings.configdict["pkg"]["AA"] = " ".join(aalist)
7133                 else:
7134                         alist = set(alist.split())
7135                         aalist = set(aalist.split())
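                     # Explanatory note: A holds only the distfiles needed for the current
                     # USE configuration, while AA holds every file listed in SRC_URI; with
                     # FEATURES=mirror or the fetchall flag the full AA set is fetched and checked.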
7136                 if ("mirror" in features) or fetchall:
7137                         fetchme = aalist
7138                         checkme = aalist
7139                 else:
7140                         fetchme = alist
7141                         checkme = alist
7142
7143                 if mydo == "fetch":
7144                         # Files are already checked inside fetch(),
7145                         # so do not check them again.
7146                         checkme = []
7147
7148                 if not emerge_skip_distfiles and \
7149                         need_distfiles and not fetch(
7150                         fetchme, mysettings, listonly=listonly, fetchonly=fetchonly):
7151                         return 1
7152
7153                 if mydo == "fetch" and listonly:
7154                         return 0
7155
7156                 try:
7157                         if mydo == "manifest":
7158                                 return not digestgen(aalist, mysettings, overwrite=1,
7159                                         manifestonly=1, myportdb=mydbapi)
7160                         elif mydo == "digest":
7161                                 return not digestgen(aalist, mysettings, overwrite=1,
7162                                         myportdb=mydbapi)
7163                         elif mydo != 'fetch' and not emerge_skip_digest and \
7164                                 "digest" in mysettings.features:
7165                                 # Don't do this when called by emerge or when called just
7166                                 # for fetch (especially parallel-fetch) since it's not needed
7167                                 # and it can interfere with parallel tasks.
7168                                 digestgen(aalist, mysettings, overwrite=0, myportdb=mydbapi)
7169                 except portage.exception.PermissionDenied as e:
7170                         writemsg(_("!!! Permission Denied: %s\n") % (e,), noiselevel=-1)
7171                         if mydo in ("digest", "manifest"):
7172                                 return 1
7173
7174                 # See above comment about fetching only when needed
7175                 if not emerge_skip_distfiles and \
7176                         not digestcheck(checkme, mysettings, "strict" in features):
7177                         return 1
7178
7179                 if mydo == "fetch":
7180                         return 0
7181
7182                 # remove PORTAGE_ACTUAL_DISTDIR once cvs/svn is supported via SRC_URI
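                     # Explanatory note: DISTDIR is temporarily pointed at a per-build
                     # "distdir" of symlinks into the real distfiles directory, so the ebuild
                     # only sees the files listed in A; the real location is kept in
                     # PORTAGE_ACTUAL_DISTDIR and restored in the finally block below.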
7183                 if (mydo != "setup" and "noauto" not in features) or mydo == "unpack":
7184                         orig_distdir = mysettings["DISTDIR"]
7185                         mysettings["PORTAGE_ACTUAL_DISTDIR"] = orig_distdir
7186                         edpath = mysettings["DISTDIR"] = \
7187                                 os.path.join(mysettings["PORTAGE_BUILDDIR"], "distdir")
7188                         portage.util.ensure_dirs(edpath, gid=portage_gid, mode=0o755)
7189
7190                         # Remove any unexpected files or directories.
7191                         for x in os.listdir(edpath):
7192                                 symlink_path = os.path.join(edpath, x)
7193                                 st = os.lstat(symlink_path)
7194                                 if x in alist and stat.S_ISLNK(st.st_mode):
7195                                         continue
7196                                 if stat.S_ISDIR(st.st_mode):
7197                                         shutil.rmtree(symlink_path)
7198                                 else:
7199                                         os.unlink(symlink_path)
7200
7201                         # Check for existing symlinks and recreate if necessary.
7202                         for x in alist:
7203                                 symlink_path = os.path.join(edpath, x)
7204                                 target = os.path.join(orig_distdir, x)
7205                                 try:
7206                                         link_target = os.readlink(symlink_path)
7207                                 except OSError:
7208                                         os.symlink(target, symlink_path)
7209                                 else:
7210                                         if link_target != target:
7211                                                 os.unlink(symlink_path)
7212                                                 os.symlink(target, symlink_path)
7213
7214                 #initial dep checks complete; time to process main commands
7215
7216                 restrict = mysettings["PORTAGE_RESTRICT"].split()
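                     # Explanatory note on the flags computed below: userpriv drops root
                     # privileges to the portage user, usersandbox keeps the sandbox active
                     # even for unprivileged builds, sesandbox wraps phases in an SELinux
                     # sandbox context, and fakeroot runs them under fakeroot; RESTRICT can
                     # veto userpriv, and secpass >= 2 means we actually have root to drop.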
7217                 nosandbox = (("userpriv" in features) and \
7218                         ("usersandbox" not in features) and \
7219                         "userpriv" not in restrict and \
7220                         "nouserpriv" not in restrict)
7221                 if nosandbox and ("userpriv" not in features or \
7222                         "userpriv" in restrict or \
7223                         "nouserpriv" in restrict):
7224                         nosandbox = ("sandbox" not in features and \
7225                                 "usersandbox" not in features)
7226
7227                 sesandbox = mysettings.selinux_enabled() and \
7228                         "sesandbox" in mysettings.features
7229
7230                 droppriv = "userpriv" in mysettings.features and \
7231                         "userpriv" not in restrict and \
7232                         secpass >= 2
7233
7234                 fakeroot = "fakeroot" in mysettings.features
7235
7236                 ebuild_sh = _shell_quote(ebuild_sh_binary) + " %s"
7237                 misc_sh = _shell_quote(misc_sh_binary) + " dyn_%s"
7238
7239                 # args are passed through to the spawn function
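                     # Explanatory note: ebuild_sh invokes ebuild.sh with the phase name,
                     # while misc_sh invokes the misc-functions.sh helper (dyn_rpm and
                     # dyn_package here); the per-phase "args" dicts below become keyword
                     # arguments for spawn() via spawnebuild().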
7240                 actionmap = {
7241 "pretend":  {"cmd":ebuild_sh, "args":{"droppriv":0,        "free":1,         "sesandbox":0,         "fakeroot":0}},
7242 "setup":    {"cmd":ebuild_sh, "args":{"droppriv":0,        "free":1,         "sesandbox":0,         "fakeroot":0}},
7243 "unpack":   {"cmd":ebuild_sh, "args":{"droppriv":droppriv, "free":0,         "sesandbox":sesandbox, "fakeroot":0}},
7244 "prepare":  {"cmd":ebuild_sh, "args":{"droppriv":droppriv, "free":0,         "sesandbox":sesandbox, "fakeroot":0}},
7245 "configure":{"cmd":ebuild_sh, "args":{"droppriv":droppriv, "free":nosandbox, "sesandbox":sesandbox, "fakeroot":0}},
7246 "compile":  {"cmd":ebuild_sh, "args":{"droppriv":droppriv, "free":nosandbox, "sesandbox":sesandbox, "fakeroot":0}},
7247 "test":     {"cmd":ebuild_sh, "args":{"droppriv":droppriv, "free":nosandbox, "sesandbox":sesandbox, "fakeroot":0}},
7248 "install":  {"cmd":ebuild_sh, "args":{"droppriv":0,        "free":0,         "sesandbox":sesandbox, "fakeroot":fakeroot}},
7249 "rpm":      {"cmd":misc_sh,   "args":{"droppriv":0,        "free":0,         "sesandbox":0,         "fakeroot":fakeroot}},
7250 "package":  {"cmd":misc_sh,   "args":{"droppriv":0,        "free":0,         "sesandbox":0,         "fakeroot":fakeroot}},
7251                 }
7252
7253                 # merge the deps in so we have again a 'full' actionmap
7254                 # be glad when this can die.
7255                 for x in actionmap:
7256                         if len(actionmap_deps.get(x, [])):
7257                                 actionmap[x]["dep"] = ' '.join(actionmap_deps[x])
7258
7259                 if mydo in actionmap:
7260                         if mydo == "package":
7261                                 # Make sure the package directory exists before executing
7262                                 # this phase. This can raise PermissionDenied if
7263                                 # the current user doesn't have write access to $PKGDIR.
7264                                 parent_dir = os.path.join(mysettings["PKGDIR"],
7265                                         mysettings["CATEGORY"])
7266                                 portage.util.ensure_dirs(parent_dir)
7267                                 if not os.access(parent_dir, os.W_OK):
7268                                         raise portage.exception.PermissionDenied(
7269                                                 "access('%s', os.W_OK)" % parent_dir)
7270                         retval = spawnebuild(mydo,
7271                                 actionmap, mysettings, debug, logfile=logfile,
7272                                 fd_pipes=fd_pipes, returnpid=returnpid)
7273                 elif mydo=="qmerge":
7274                         # check to ensure the install phase was run.  this *only* comes up
7275                         # when users forget it while running ebuild(1) directly
7276                         if not os.path.exists(
7277                                 os.path.join(mysettings["PORTAGE_BUILDDIR"], ".installed")):
7278                                 writemsg(_("!!! mydo=qmerge, but the install phase has not been run\n"),
7279                                         noiselevel=-1)
7280                                 return 1
7281                         # qmerge is a special phase that implies noclean.
7282                         if "noclean" not in mysettings.features:
7283                                 mysettings.features.add("noclean")
7284                         #qmerge is specifically not supposed to do a runtime dep check
7285                         retval = merge(
7286                                 mysettings["CATEGORY"], mysettings["PF"], mysettings["D"],
7287                                 os.path.join(mysettings["PORTAGE_BUILDDIR"], "build-info"),
7288                                 myroot, mysettings, myebuild=mysettings["EBUILD"], mytree=tree,
7289                                 mydbapi=mydbapi, vartree=vartree, prev_mtimes=prev_mtimes)
7290                 elif mydo=="merge":
7291                         retval = spawnebuild("install", actionmap, mysettings, debug,
7292                                 alwaysdep=1, logfile=logfile, fd_pipes=fd_pipes,
7293                                 returnpid=returnpid)
7294                         retval = exit_status_check(retval)
7295                         if retval != os.EX_OK:
7296                                 # The merge phase handles this already.  Callers don't know how
7297                                 # far this function got, so we have to call elog_process() here
7298                                 # so that it's only called once.
7299                                 from portage.elog import elog_process
7300                                 elog_process(mysettings.mycpv, mysettings)
7301                         if retval == os.EX_OK:
7302                                 retval = merge(mysettings["CATEGORY"], mysettings["PF"],
7303                                         mysettings["D"], os.path.join(mysettings["PORTAGE_BUILDDIR"],
7304                                         "build-info"), myroot, mysettings,
7305                                         myebuild=mysettings["EBUILD"], mytree=tree, mydbapi=mydbapi,
7306                                         vartree=vartree, prev_mtimes=prev_mtimes)
7307                 else:
7308                         print(_("!!! Unknown mydo: %s") % mydo)
7309                         return 1
7310
7311                 return retval
7312
7313         finally:
7314
7315                 if tmpdir:
7316                         mysettings["PORTAGE_TMPDIR"] = tmpdir_orig
7317                         shutil.rmtree(tmpdir)
7318                 if builddir_lock:
7319                         portage.locks.unlockdir(builddir_lock)
7320
7321                 # Make sure that DISTDIR is restored to its normal value before we return!
7322                 if "PORTAGE_ACTUAL_DISTDIR" in mysettings:
7323                         mysettings["DISTDIR"] = mysettings["PORTAGE_ACTUAL_DISTDIR"]
7324                         del mysettings["PORTAGE_ACTUAL_DISTDIR"]
7325
7326                 if logfile:
7327                         try:
7328                                 if os.stat(logfile).st_size == 0:
7329                                         os.unlink(logfile)
7330                         except OSError:
7331                                 pass
7332
7333                 if mydo in ("digest", "manifest", "help"):
7334                 # If necessary, the depend phase has been triggered by aux_get calls
7335                 # and the exemption is no longer needed.
7336                         _doebuild_manifest_exempt_depend -= 1
7337
7338 def _validate_deps(mysettings, myroot, mydo, mydbapi):
7339
7340         invalid_dep_exempt_phases = \
7341                 set(["clean", "cleanrm", "help", "prerm", "postrm"])
7342         dep_keys = ["DEPEND", "RDEPEND", "PDEPEND"]
7343         misc_keys = ["LICENSE", "PROPERTIES", "PROVIDE", "RESTRICT", "SRC_URI"]
7344         other_keys = ["SLOT"]
7345         all_keys = dep_keys + misc_keys + other_keys
7346         metadata = dict(zip(all_keys,
7347                 mydbapi.aux_get(mysettings.mycpv, all_keys)))
7348
7349         class FakeTree(object):
7350                 def __init__(self, mydb):
7351                         self.dbapi = mydb
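             # Explanatory note: an empty fakedbapi-backed "porttree" is enough here,
             # since this function only cares about syntax errors in the dependency
             # strings, not about whether any matching packages actually exist.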
7352         dep_check_trees = {myroot:{}}
7353         dep_check_trees[myroot]["porttree"] = \
7354                 FakeTree(fakedbapi(settings=mysettings))
7355
7356         msgs = []
7357         for dep_type in dep_keys:
7358                 mycheck = dep_check(metadata[dep_type], None, mysettings,
7359                         myuse="all", myroot=myroot, trees=dep_check_trees)
7360                 if not mycheck[0]:
7361                         msgs.append("  %s: %s\n    %s\n" % (
7362                                 dep_type, metadata[dep_type], mycheck[1]))
7363
7364         for k in misc_keys:
7365                 try:
7366                         portage.dep.use_reduce(
7367                                 portage.dep.paren_reduce(metadata[k]), matchall=True)
7368                 except portage.exception.InvalidDependString as e:
7369                         msgs.append("  %s: %s\n    %s\n" % (
7370                                 k, metadata[k], str(e)))
7371
7372         if not metadata["SLOT"]:
7373                 msgs.append(_("  SLOT is undefined\n"))
7374
7375         if msgs:
7376                 portage.util.writemsg_level(_("Error(s) in metadata for '%s':\n") % \
7377                         (mysettings.mycpv,), level=logging.ERROR, noiselevel=-1)
7378                 for x in msgs:
7379                         portage.util.writemsg_level(x,
7380                                 level=logging.ERROR, noiselevel=-1)
7381                 if mydo not in invalid_dep_exempt_phases:
7382                         return 1
7383
7384         return os.EX_OK
7385
7386 expandcache={}
7387
7388 def _movefile(src, dest, **kwargs):
7389         """Calls movefile and raises a PortageException if an error occurs."""
7390         if movefile(src, dest, **kwargs) is None:
7391                 raise portage.exception.PortageException(
7392                         "mv '%s' '%s'" % (src, dest))
7393
7394 def movefile(src, dest, newmtime=None, sstat=None, mysettings=None,
7395                 hardlink_candidates=None, encoding=_encodings['fs']):
7396         """Moves a file from src to dest, preserving all permissions and attributes; mtime will
7397         be preserved even when moving across filesystems.  Returns the new mtime on success
7398         and None on failure.  The move is atomic."""
7399         #print "movefile("+str(src)+","+str(dest)+","+str(newmtime)+","+str(sstat)+")"
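             # Illustrative usage (hypothetical paths):
             #   mtime = movefile("/var/tmp/portage/image/usr/bin/foo", "/usr/bin/foo",
             #           mysettings=settings)
             #   if mtime is None:
             #           ... the move failed and an error was already printed ...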
7400
7401         if mysettings is None:
7402                 global settings
7403                 mysettings = settings
7404
7405         selinux_enabled = mysettings.selinux_enabled()
7406         if selinux_enabled:
7407                 selinux = _unicode_module_wrapper(_selinux, encoding=encoding)
7408
7409         lchown = _unicode_func_wrapper(data.lchown, encoding=encoding)
7410         os = _unicode_module_wrapper(_os,
7411                 encoding=encoding, overrides=_os_overrides)
7412         shutil = _unicode_module_wrapper(_shutil, encoding=encoding)
7413
7414         try:
7415                 if not sstat:
7416                         sstat=os.lstat(src)
7417
7418         except SystemExit as e:
7419                 raise
7420         except Exception as e:
7421                 print(_("!!! Stating source file failed... movefile()"))
7422                 print("!!!",e)
7423                 return None
7424
7425         destexists=1
7426         try:
7427                 dstat=os.lstat(dest)
7428         except (OSError, IOError):
7429                 dstat=os.lstat(os.path.dirname(dest))
7430                 destexists=0
7431
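             # Explanatory note: on *BSD, immutable/append-only file flags would block
             # the move, so they are cleared here; the parent directory's flags are
             # saved in pflags and restored near the end of this function.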
7432         if bsd_chflags:
7433                 if destexists and dstat.st_flags != 0:
7434                         bsd_chflags.lchflags(dest, 0)
7435                 # Use normal stat/chflags for the parent since we want to
7436                 # follow any symlinks to the real parent directory.
7437                 pflags = os.stat(os.path.dirname(dest)).st_flags
7438                 if pflags != 0:
7439                         bsd_chflags.chflags(os.path.dirname(dest), 0)
7440
7441         if destexists:
7442                 if stat.S_ISLNK(dstat[stat.ST_MODE]):
7443                         try:
7444                                 os.unlink(dest)
7445                                 destexists=0
7446                         except SystemExit as e:
7447                                 raise
7448                         except Exception as e:
7449                                 pass
7450
7451         if stat.S_ISLNK(sstat[stat.ST_MODE]):
7452                 try:
7453                         target=os.readlink(src)
7454                         if mysettings and mysettings["D"]:
7455                                 if target.find(mysettings["D"])==0:
7456                                         target=target[len(mysettings["D"]):]
7457                         if destexists and not stat.S_ISDIR(dstat[stat.ST_MODE]):
7458                                 os.unlink(dest)
7459                         if selinux_enabled:
7460                                 selinux.symlink(target, dest, src)
7461                         else:
7462                                 os.symlink(target,dest)
7463                         lchown(dest,sstat[stat.ST_UID],sstat[stat.ST_GID])
7464                         # utime() only works on the target of a symlink, so it's not
7465                         # possible to preserve mtime on symlinks.
7466                         return os.lstat(dest)[stat.ST_MTIME]
7467                 except SystemExit as e:
7468                         raise
7469                 except Exception as e:
7470                         print(_("!!! failed to properly create symlink:"))
7471                         print("!!!",dest,"->",target)
7472                         print("!!!",e)
7473                         return None
7474
7475         hardlinked = False
7476         # Since identical files might be merged to multiple filesystems,
7477         # os.link() calls might fail for some paths, so try them all.
7478         # For atomic replacement, first create the link as a temp file
7479         # and then use os.rename() to replace the destination.
7480         if hardlink_candidates:
7481                 head, tail = os.path.split(dest)
7482                 hardlink_tmp = os.path.join(head, ".%s._portage_merge_.%s" % \
7483                         (tail, os.getpid()))
7484                 try:
7485                         os.unlink(hardlink_tmp)
7486                 except OSError as e:
7487                         if e.errno != errno.ENOENT:
7488                                 writemsg(_("!!! Failed to remove hardlink temp file: %s\n") % \
7489                                         (hardlink_tmp,), noiselevel=-1)
7490                                 writemsg("!!! %s\n" % (e,), noiselevel=-1)
7491                                 return None
7492                         del e
7493                 for hardlink_src in hardlink_candidates:
7494                         try:
7495                                 os.link(hardlink_src, hardlink_tmp)
7496                         except OSError:
7497                                 continue
7498                         else:
7499                                 try:
7500                                         os.rename(hardlink_tmp, dest)
7501                                 except OSError as e:
7502                                         writemsg(_("!!! Failed to rename %s to %s\n") % \
7503                                                 (hardlink_tmp, dest), noiselevel=-1)
7504                                         writemsg("!!! %s\n" % (e,), noiselevel=-1)
7505                                         return None
7506                                 hardlinked = True
7507                                 break
7508
7509         renamefailed=1
7510         if hardlinked:
7511                 renamefailed = False
7512         if not hardlinked and (selinux_enabled or sstat.st_dev == dstat.st_dev):
7513                 try:
7514                         if selinux_enabled:
7515                                 ret = selinux.rename(src, dest)
7516                         else:
7517                                 ret=os.rename(src,dest)
7518                         renamefailed=0
7519                 except SystemExit as e:
7520                         raise
7521                 except Exception as e:
7522                         if getattr(e, "errno", None) != errno.EXDEV:
7523                                 # Some random error.
7524                                 print(_("!!! Failed to move %(src)s to %(dest)s") % {"src": src, "dest": dest})
7525                                 print("!!!",e)
7526                                 return None
7527                         # EXDEV: 'bind' mount or actual cross-device rename; fall back to copying.
7528         if renamefailed:
7529                 didcopy=0
7530                 if stat.S_ISREG(sstat[stat.ST_MODE]):
7531                         try: # For safety copy then move it over.
7532                                 if selinux_enabled:
7533                                         selinux.copyfile(src, dest + "#new")
7534                                         selinux.rename(dest + "#new", dest)
7535                                 else:
7536                                         shutil.copyfile(src,dest+"#new")
7537                                         os.rename(dest+"#new",dest)
7538                                 didcopy=1
7539                         except SystemExit as e:
7540                                 raise
7541                         except Exception as e:
7542                                 print(_('!!! copy %(src)s -> %(dest)s failed.') % {"src": src, "dest": dest})
7543                                 print("!!!",e)
7544                                 return None
7545                 else:
7546                         #we don't yet handle special files, so we need to fall back to /bin/mv
7547                         a = process.spawn([MOVE_BINARY, '-f', src, dest], env=os.environ)
7548                         if a != os.EX_OK:
7549                                 writemsg(_("!!! Failed to move special file:\n"), noiselevel=-1)
7550                                 writemsg(_("!!! '%(src)s' to '%(dest)s'\n") % \
7551                                         {"src": _unicode_decode(src, encoding=encoding),
7552                                         "dest": _unicode_decode(dest, encoding=encoding)}, noiselevel=-1)
7553                                 writemsg("!!! %s\n" % a, noiselevel=-1)
7554                                 return None # failure
7555                 try:
7556                         if didcopy:
7557                                 if stat.S_ISLNK(sstat[stat.ST_MODE]):
7558                                         lchown(dest,sstat[stat.ST_UID],sstat[stat.ST_GID])
7559                                 else:
7560                                         os.chown(dest,sstat[stat.ST_UID],sstat[stat.ST_GID])
7561                                 os.chmod(dest, stat.S_IMODE(sstat[stat.ST_MODE])) # Sticky is reset on chown
7562                                 os.unlink(src)
7563                 except SystemExit as e:
7564                         raise
7565                 except Exception as e:
7566                         print(_("!!! Failed to chown/chmod/unlink in movefile()"))
7567                         print("!!!",dest)
7568                         print("!!!",e)
7569                         return None
7570
7571         try:
7572                 if hardlinked:
7573                         newmtime = long(os.stat(dest).st_mtime)
7574                 else:
7575                         if newmtime is not None:
7576                                 os.utime(dest, (newmtime, newmtime))
7577                         else:
7578                                 os.utime(dest, (sstat.st_atime, sstat.st_mtime))
7579                                 newmtime = long(sstat.st_mtime)
7580         except OSError:
7581                 # The utime can fail here with EPERM even though the move succeeded.
7582                 # Instead of failing, use stat to return the mtime if possible.
7583                 try:
7584                         newmtime = long(os.stat(dest).st_mtime)
7585                 except OSError as e:
7586                         writemsg(_("!!! Failed to stat in movefile()\n"), noiselevel=-1)
7587                         writemsg("!!! %s\n" % dest, noiselevel=-1)
7588                         writemsg("!!! %s\n" % str(e), noiselevel=-1)
7589                         return None
7590
7591         if bsd_chflags:
7592                 # Restore the flags we saved before moving
7593                 if pflags:
7594                         bsd_chflags.chflags(os.path.dirname(dest), pflags)
7595
7596         return newmtime
7597
7598 def merge(mycat, mypkg, pkgloc, infloc, myroot, mysettings, myebuild=None,
7599         mytree=None, mydbapi=None, vartree=None, prev_mtimes=None, blockers=None,
7600         scheduler=None):
7601         if not os.access(myroot, os.W_OK):
7602                 writemsg(_("Permission denied: access('%s', W_OK)\n") % myroot,
7603                         noiselevel=-1)
7604                 return errno.EACCES
7605         mylink = dblink(mycat, mypkg, myroot, mysettings, treetype=mytree,
7606                 vartree=vartree, blockers=blockers, scheduler=scheduler)
7607         return mylink.merge(pkgloc, infloc, myroot, myebuild,
7608                 mydbapi=mydbapi, prev_mtimes=prev_mtimes)
7609
7610 def unmerge(cat, pkg, myroot, mysettings, mytrimworld=1, vartree=None,
7611         ldpath_mtimes=None, scheduler=None):
7612         mylink = dblink(cat, pkg, myroot, mysettings, treetype="vartree",
7613                 vartree=vartree, scheduler=scheduler)
7614         vartree = mylink.vartree
7615         try:
7616                 mylink.lockdb()
7617                 if mylink.exists():
7618                         retval = mylink.unmerge(trimworld=mytrimworld, cleanup=1,
7619                                 ldpath_mtimes=ldpath_mtimes)
7620                         if retval == os.EX_OK:
7621                                 mylink.delete()
7622                         return retval
7623                 return os.EX_OK
7624         finally:
7625                 mylink.unlockdb()
7626
7627 def dep_virtual(mysplit, mysettings):
7628         "Does virtual dependency conversion"
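             # Explanatory note: old-style virtuals are rewritten using the profile's
             # virtuals mapping.  With several providers the atom becomes an any-of
             # group, e.g. (illustrative providers) virtual/editor ->
             # ['||', 'app-editors/nano', 'app-editors/vim'], while a blocker like
             # !virtual/editor expands to a plain list so that every provider is blocked.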
7629         newsplit=[]
7630         myvirtuals = mysettings.getvirtuals()
7631         for x in mysplit:
7632                 if isinstance(x, list):
7633                         newsplit.append(dep_virtual(x, mysettings))
7634                 else:
7635                         mykey=dep_getkey(x)
7636                         mychoices = myvirtuals.get(mykey, None)
7637                         if mychoices:
7638                                 if len(mychoices) == 1:
7639                                         a = x.replace(mykey, dep_getkey(mychoices[0]), 1)
7640                                 else:
7641                                         if x[0]=="!":
7642                                                 # blocker needs "and" not "or(||)".
7643                                                 a=[]
7644                                         else:
7645                                                 a=['||']
7646                                         for y in mychoices:
7647                                                 a.append(x.replace(mykey, dep_getkey(y), 1))
7648                                 newsplit.append(a)
7649                         else:
7650                                 newsplit.append(x)
7651         return newsplit
7652
7653 def _expand_new_virtuals(mysplit, edebug, mydbapi, mysettings, myroot="/",
7654         trees=None, use_mask=None, use_force=None, **kwargs):
7655         """Recursively expand new-style virtuals so as to collapse one or more
7656         levels of indirection.  In dep_zapdeps, new-style virtuals will be assigned
7657         zero cost regardless of whether or not they are currently installed. Virtual
7658         blockers are supported but only when the virtual expands to a single
7659         atom because it wouldn't necessarily make sense to block all the components
7660         of a compound virtual.  When more than one new-style virtual is matched,
7661         the matches are sorted from highest to lowest versions and the atom is
7662         expanded to || ( highest match ... lowest match )."""
7663         newsplit = []
7664         mytrees = trees[myroot]
7665         portdb = mytrees["porttree"].dbapi
7666         atom_graph = mytrees.get("atom_graph")
7667         parent = mytrees.get("parent")
7668         virt_parent = mytrees.get("virt_parent")
7669         graph_parent = None
7670         eapi = None
7671         if parent is not None:
7672                 if virt_parent is not None:
7673                         graph_parent = virt_parent
7674                         eapi = virt_parent[0].metadata['EAPI']
7675                 else:
7676                         graph_parent = parent
7677                         eapi = parent.metadata["EAPI"]
7678         repoman = not mysettings.local_config
7679         if kwargs["use_binaries"]:
7680                 portdb = trees[myroot]["bintree"].dbapi
7681         myvirtuals = mysettings.getvirtuals()
7682         pprovideddict = mysettings.pprovideddict
7683         myuse = kwargs["myuse"]
7684         for x in mysplit:
7685                 if x == "||":
7686                         newsplit.append(x)
7687                         continue
7688                 elif isinstance(x, list):
7689                         newsplit.append(_expand_new_virtuals(x, edebug, mydbapi,
7690                                 mysettings, myroot=myroot, trees=trees, use_mask=use_mask,
7691                                 use_force=use_force, **kwargs))
7692                         continue
7693
7694                 if not isinstance(x, portage.dep.Atom):
7695                         try:
7696                                 x = portage.dep.Atom(x)
7697                         except portage.exception.InvalidAtom:
7698                                 if portage.dep._dep_check_strict:
7699                                         raise portage.exception.ParseError(
7700                                                 _("invalid atom: '%s'") % x)
7701                                 else:
7702                                         # Only real Atom instances are allowed past this point.
7703                                         continue
7704                         else:
7705                                 if x.blocker and x.blocker.overlap.forbid and \
7706                                         eapi in ("0", "1") and portage.dep._dep_check_strict:
7707                                         raise portage.exception.ParseError(
7708                                                 _("invalid atom: '%s'") % (x,))
7709                                 if x.use and eapi in ("0", "1") and \
7710                                         portage.dep._dep_check_strict:
7711                                         raise portage.exception.ParseError(
7712                                                 _("invalid atom: '%s'") % (x,))
7713
7714                 if repoman and x.use and x.use.conditional:
7715                         evaluated_atom = portage.dep.remove_slot(x)
7716                         if x.slot:
7717                                 evaluated_atom += ":%s" % x.slot
7718                         evaluated_atom += str(x.use._eval_qa_conditionals(
7719                                 use_mask, use_force))
7720                         x = portage.dep.Atom(evaluated_atom)
7721
7722                 if not repoman and \
7723                         myuse is not None and isinstance(x, portage.dep.Atom) and x.use:
7724                         if x.use.conditional:
7725                                 evaluated_atom = portage.dep.remove_slot(x)
7726                                 if x.slot:
7727                                         evaluated_atom += ":%s" % x.slot
7728                                 evaluated_atom += str(x.use.evaluate_conditionals(myuse))
7729                                 x = portage.dep.Atom(evaluated_atom)
7730
7731                 mykey = x.cp
7732                 if not mykey.startswith("virtual/"):
7733                         newsplit.append(x)
7734                         if atom_graph is not None:
7735                                 atom_graph.add(x, graph_parent)
7736                         continue
7737                 mychoices = myvirtuals.get(mykey, [])
7738                 if x.blocker:
7739                         # Virtual blockers are no longer expanded here since
7740                         # the un-expanded virtual atom is more useful for
7741                         # maintaining a cache of blocker atoms.
7742                         newsplit.append(x)
7743                         if atom_graph is not None:
7744                                 atom_graph.add(x, graph_parent)
7745                         continue
7746
7747                 if repoman or not hasattr(portdb, 'match_pkgs'):
7748                         if portdb.cp_list(x.cp):
7749                                 newsplit.append(x)
7750                         else:
7751                                 # TODO: Add PROVIDE check for repoman.
7752                                 a = []
7753                                 for y in mychoices:
7754                                         a.append(dep.Atom(x.replace(x.cp, y.cp, 1)))
7755                                 if not a:
7756                                         newsplit.append(x)
7757                                 elif len(a) == 1:
7758                                         newsplit.append(a[0])
7759                                 else:
7760                                         newsplit.append(['||'] + a)
7761                         continue
7762
7763                 pkgs = []
7764                 # Ignore USE deps here, since otherwise we might not
7765                 # get any matches. Choices with correct USE settings
7766                 # will be preferred in dep_zapdeps().
7767                 matches = portdb.match_pkgs(x.without_use)
7768                 # Use descending order to prefer higher versions.
7769                 matches.reverse()
7770                 for pkg in matches:
7771                         # only use new-style matches
7772                         if pkg.cp.startswith("virtual/"):
7773                                 pkgs.append(pkg)
7774                 if not (pkgs or mychoices):
7775                         # This one couldn't be expanded as a new-style virtual.  Old-style
7776                         # virtuals have already been expanded by dep_virtual, so this one
7777                         # is unavailable and dep_zapdeps will identify it as such.  The
7778                         # atom is not eliminated here since it may still represent a
7779                         # dependency that needs to be satisfied.
7780                         newsplit.append(x)
7781                         if atom_graph is not None:
7782                                 atom_graph.add(x, graph_parent)
7783                         continue
7784
7785                 a = []
7786                 for pkg in pkgs:
7787                         virt_atom = '=' + pkg.cpv
7788                         if x.use:
7789                                 virt_atom += str(x.use)
7790                         virt_atom = dep.Atom(virt_atom)
7791                         # According to GLEP 37, RDEPEND is the only dependency
7792                         # type that is valid for new-style virtuals. Repoman
7793                         # should enforce this.
7794                         depstring = pkg.metadata['RDEPEND']
7795                         pkg_kwargs = kwargs.copy()
7796                         pkg_kwargs["myuse"] = pkg.use.enabled
7797                         if edebug:
7798                                 util.writemsg_level(_("Virtual Parent:      %s\n") \
7799                                         % (pkg,), noiselevel=-1, level=logging.DEBUG)
7800                                 util.writemsg_level(_("Virtual Depstring:   %s\n") \
7801                                         % (depstring,), noiselevel=-1, level=logging.DEBUG)
7802
7803                         # Set EAPI used for validation in dep_check() recursion.
7804                         mytrees["virt_parent"] = (pkg, virt_atom)
7805
7806                         try:
7807                                 mycheck = dep_check(depstring, mydbapi, mysettings,
7808                                         myroot=myroot, trees=trees, **pkg_kwargs)
7809                         finally:
7810                                 # Restore previous EAPI after recursion.
7811                                 if virt_parent is not None:
7812                                         mytrees["virt_parent"] = virt_parent
7813                                 else:
7814                                         del mytrees["virt_parent"]
7815
7816                         if not mycheck[0]:
7817                                 raise portage.exception.ParseError(
7818                                         "%s: %s '%s'" % (pkg, mycheck[1], depstring))
7819
7820                         # pull in the new-style virtual
7821                         mycheck[1].append(virt_atom)
7822                         a.append(mycheck[1])
7823                         if atom_graph is not None:
7824                                 atom_graph.add(virt_atom, graph_parent)
7825                 # Plain old-style virtuals.  New-style virtuals are preferred.
7826                 if not pkgs:
7827                         for y in mychoices:
7828                                 new_atom = dep.Atom(x.replace(x.cp, y.cp, 1))
7829                                 matches = portdb.match(new_atom)
7830                                 # portdb is an instance of depgraph._dep_check_composite_db, so
7831                                 # USE conditionals are already evaluated.
7832                                 if matches and mykey in \
7833                                         portdb.aux_get(matches[-1], ['PROVIDE'])[0].split():
7834                                         a.append(new_atom)
7835                                         if atom_graph is not None:
7836                                                 atom_graph.add(new_atom, graph_parent)
7837
7838                 if not a and mychoices:
7839                         # Check for a virtual package.provided match.
7840                         for y in mychoices:
7841                                 new_atom = dep.Atom(x.replace(x.cp, y.cp, 1))
7842                                 if match_from_list(new_atom,
7843                                         pprovideddict.get(new_atom.cp, [])):
7844                                         a.append(new_atom)
7845                                         if atom_graph is not None:
7846                                                 atom_graph.add(new_atom, graph_parent)
7847
7848                 if not a:
7849                         newsplit.append(x)
7850                         if atom_graph is not None:
7851                                 atom_graph.add(x, graph_parent)
7852                 elif len(a) == 1:
7853                         newsplit.append(a[0])
7854                 else:
7855                         newsplit.append(['||'] + a)
7856
7857         return newsplit
7858
7859 def dep_eval(deplist):
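             # Explanatory note: deplist mirrors the dependency string as nested lists,
             # with "||" introducing an any-of group; leaf values are the truth codes
             # produced by dep_wordreduce (1 means satisfied).  Returns 1 if the whole
             # list evaluates as satisfied, otherwise 0.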
7860         if not deplist:
7861                 return 1
7862         if deplist[0]=="||":
7863                 #or list; we just need one "1"
7864                 for x in deplist[1:]:
7865                         if isinstance(x, list):
7866                                 if dep_eval(x)==1:
7867                                         return 1
7868                         elif x==1:
7869                                 return 1
7870                 #XXX: unless there are no available atoms in the list,
7871                 #in which case we need to assume that everything is
7872                 #okay, as some ebuilds rely on an old bug.
7873                 if len(deplist) == 1:
7874                         return 1
7875                 return 0
7876         else:
7877                 for x in deplist:
7878                         if isinstance(x, list):
7879                                 if dep_eval(x)==0:
7880                                         return 0
7881                         elif x==0 or x==2:
7882                                 return 0
7883                 return 1
7884
7885 def dep_zapdeps(unreduced, reduced, myroot, use_binaries=0, trees=None):
7886         """Takes an unreduced and reduced deplist and removes satisfied dependencies.
7887         Returned deplist contains steps that must be taken to satisfy dependencies."""
7888         if trees is None:
7889                 global db
7890                 trees = db
7891         writemsg("ZapDeps -- %s\n" % (use_binaries), 2)
7892         if not reduced or unreduced == ["||"] or dep_eval(reduced):
7893                 return []
7894
7895         if unreduced[0] != "||":
7896                 unresolved = []
7897                 for x, satisfied in zip(unreduced, reduced):
7898                         if isinstance(x, list):
7899                                 unresolved += dep_zapdeps(x, satisfied, myroot,
7900                                         use_binaries=use_binaries, trees=trees)
7901                         elif not satisfied:
7902                                 unresolved.append(x)
7903                 return unresolved
7904
7905         # We're at a ( || atom ... ) type level and need to make a choice
7906         deps = unreduced[1:]
7907         satisfieds = reduced[1:]
7908
7909         # Our preference order is for the first item that:
7910         # a) contains all unmasked packages with the same key as installed packages
7911         # b) contains all unmasked packages
7912         # c) contains masked installed packages
7913         # d) is the first item
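             # In practice the choices are bucketed below into preferred_in_graph,
             # preferred_installed, preferred_any_slot, preferred_non_installed, the
             # unsat_use_* buckets and finally "other"; the first fully available
             # choice from that concatenated ordering wins, falling back to the first
             # choice overall if none is fully available.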
7914
7915         preferred_installed = []
7916         preferred_in_graph = []
7917         preferred_any_slot = []
7918         preferred_non_installed = []
7919         unsat_use_in_graph = []
7920         unsat_use_installed = []
7921         unsat_use_non_installed = []
7922         other = []
7923
7924         # Alias the trees we'll be checking availability against
7925         parent   = trees[myroot].get("parent")
7926         priority = trees[myroot].get("priority")
7927         graph_db = trees[myroot].get("graph_db")
7928         vardb = None
7929         if "vartree" in trees[myroot]:
7930                 vardb = trees[myroot]["vartree"].dbapi
7931         if use_binaries:
7932                 mydbapi = trees[myroot]["bintree"].dbapi
7933         else:
7934                 mydbapi = trees[myroot]["porttree"].dbapi
7935
7936         # Sort the deps into choices that are installed, not installed but
7937         # already in the graph, and other (neither installed nor in the graph);
7938         # each choice is recorded as a tuple of (atoms, versions, availability).
7939         for x, satisfied in zip(deps, satisfieds):
7940                 if isinstance(x, list):
7941                         atoms = dep_zapdeps(x, satisfied, myroot,
7942                                 use_binaries=use_binaries, trees=trees)
7943                 else:
7944                         atoms = [x]
7945                 if not vardb:
7946                         # called by repoman
7947                         other.append((atoms, None, False))
7948                         continue
7949
7950                 all_available = True
7951                 all_use_satisfied = True
7952                 versions = {}
7953                 for atom in atoms:
7954                         if atom.blocker:
7955                                 continue
7956                         # Ignore USE dependencies here since we don't want USE
7957                         # settings to adversely affect || preference evaluation.
7958                         avail_pkg = mydbapi.match(atom.without_use)
7959                         if avail_pkg:
7960                                 avail_pkg = avail_pkg[-1] # highest (ascending order)
7961                                 avail_slot = dep.Atom("%s:%s" % (atom.cp,
7962                                         mydbapi.aux_get(avail_pkg, ["SLOT"])[0]))
7963                         if not avail_pkg:
7964                                 all_available = False
7965                                 all_use_satisfied = False
7966                                 break
7967
7968                         if atom.use:
7969                                 avail_pkg_use = mydbapi.match(atom)
7970                                 if not avail_pkg_use:
7971                                         all_use_satisfied = False
7972                                 else:
7973                                         # highest (ascending order)
7974                                         avail_pkg_use = avail_pkg_use[-1]
7975                                         if avail_pkg_use != avail_pkg:
7976                                                 avail_pkg = avail_pkg_use
7977                                                 avail_slot = dep.Atom("%s:%s" % (atom.cp,
7978                                                         mydbapi.aux_get(avail_pkg, ["SLOT"])[0]))
7979
7980                         versions[avail_slot] = avail_pkg
7981
7982                 this_choice = (atoms, versions, all_available)
7983                 if all_available:
7984                         # The "all installed" criterion is not version or slot specific.
7985                         # If any version of a package is already in the graph then we
7986                         # assume that it is preferred over other possible package choices.
7987                         all_installed = True
7988                         for atom in set(dep.Atom(atom.cp) for atom in atoms \
7989                                 if not atom.blocker):
7990                                 # New-style virtuals have zero cost to install.
7991                                 if not vardb.match(atom) and not atom.startswith("virtual/"):
7992                                         all_installed = False
7993                                         break
7994                         all_installed_slots = False
7995                         if all_installed:
7996                                 all_installed_slots = True
7997                                 for slot_atom in versions:
7998                                         # New-style virtuals have zero cost to install.
7999                                         if not vardb.match(slot_atom) and \
8000                                                 not slot_atom.startswith("virtual/"):
8001                                                 all_installed_slots = False
8002                                                 break
8003                         if graph_db is None:
8004                                 if all_use_satisfied:
8005                                         if all_installed:
8006                                                 if all_installed_slots:
8007                                                         preferred_installed.append(this_choice)
8008                                                 else:
8009                                                         preferred_any_slot.append(this_choice)
8010                                         else:
8011                                                 preferred_non_installed.append(this_choice)
8012                                 else:
8013                                         if all_installed_slots:
8014                                                 unsat_use_installed.append(this_choice)
8015                                         else:
8016                                                 unsat_use_non_installed.append(this_choice)
8017                         else:
8018                                 all_in_graph = True
8019                                 for slot_atom in versions:
8020                                         # New-style virtuals have zero cost to install.
8021                                         if not graph_db.match(slot_atom) and \
8022                                                 not slot_atom.startswith("virtual/"):
8023                                                 all_in_graph = False
8024                                                 break
8025                                 circular_atom = None
8026                                 if all_in_graph:
8027                                         if parent is None or priority is None:
8028                                                 pass
8029                                         elif priority.buildtime:
8030                                                 # Check if the atom would result in a direct circular
8031                                                 # dependency and try to avoid that if it seems likely
8032                                                 # to be unresolvable. This is only relevant for
8033                                                 # buildtime deps that aren't already satisfied by an
8034                                                 # installed package.
8035                                                 cpv_slot_list = [parent]
8036                                                 for atom in atoms:
8037                                                         if atom.blocker:
8038                                                                 continue
8039                                                         if vardb.match(atom):
8040                                                                 # If the atom is satisfied by an installed
8041                                                                 # version then it's not a circular dep.
8042                                                                 continue
8043                                                         if atom.cp != parent.cp:
8044                                                                 continue
8045                                                         if match_from_list(atom, cpv_slot_list):
8046                                                                 circular_atom = atom
8047                                                                 break
8048                                 if circular_atom is not None:
8049                                         other.append(this_choice)
8050                                 else:
8051                                         if all_use_satisfied:
8052                                                 if all_in_graph:
8053                                                         preferred_in_graph.append(this_choice)
8054                                                 elif all_installed:
8055                                                         if all_installed_slots:
8056                                                                 preferred_installed.append(this_choice)
8057                                                         else:
8058                                                                 preferred_any_slot.append(this_choice)
8059                                                 else:
8060                                                         preferred_non_installed.append(this_choice)
8061                                         else:
8062                                                 if all_in_graph:
8063                                                         unsat_use_in_graph.append(this_choice)
8064                                                 elif all_installed_slots:
8065                                                         unsat_use_installed.append(this_choice)
8066                                                 else:
8067                                                         unsat_use_non_installed.append(this_choice)
8068                 else:
8069                         other.append(this_choice)
8070
8071         # unsat_use_* must come after preferred_non_installed
8072         # for correct ordering in cases like || ( foo[a] foo[b] ).
8073         preferred = preferred_in_graph + preferred_installed + \
8074                 preferred_any_slot + preferred_non_installed + \
8075                 unsat_use_in_graph + unsat_use_installed + unsat_use_non_installed + \
8076                 other
8077
8078         for allow_masked in (False, True):
8079                 for atoms, versions, all_available in preferred:
8080                         if all_available or allow_masked:
8081                                 return atoms
8082
8083         assert(False) # This point should not be reachable
8084
8085 def dep_expand(mydep, mydb=None, use_cache=1, settings=None):
8086         '''
8087         @rtype: Atom
8088         '''
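             # Explanatory note: expands a possibly category-less dependency into a full
             # atom via cpv_expand, e.g. (illustrative) "nano" may become
             # "app-editors/nano" when the database knows only one matching category;
             # a leading "*" is stripped and a missing "=" prefix is tolerated for
             # backward compatibility.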
8089         if not len(mydep):
8090                 return mydep
8091         if mydep[0]=="*":
8092                 mydep=mydep[1:]
8093         orig_dep = mydep
8094         if isinstance(orig_dep, dep.Atom):
8095                 mydep = orig_dep.cp
8096         else:
8097                 mydep = orig_dep
8098                 has_cat = '/' in orig_dep
8099                 if not has_cat:
8100                         alphanum = re.search(r'\w', orig_dep)
8101                         if alphanum:
8102                                 mydep = orig_dep[:alphanum.start()] + "null/" + \
8103                                         orig_dep[alphanum.start():]
8104                 try:
8105                         mydep = dep.Atom(mydep)
8106                 except exception.InvalidAtom:
8107                         # Missing '=' prefix is allowed for backward compatibility.
8108                         if not dep.isvalidatom("=" + mydep):
8109                                 raise
8110                         mydep = dep.Atom('=' + mydep)
8111                         orig_dep = '=' + orig_dep
8112                 if not has_cat:
8113                         null_cat, pn = catsplit(mydep.cp)
8114                         mydep = pn
8115                 else:
8116                         mydep = mydep.cp
8117         expanded = cpv_expand(mydep, mydb=mydb,
8118                 use_cache=use_cache, settings=settings)
8119         return portage.dep.Atom(orig_dep.replace(mydep, expanded, 1))
8120
8121 def dep_check(depstring, mydbapi, mysettings, use="yes", mode=None, myuse=None,
8122         use_cache=1, use_binaries=0, myroot="/", trees=None):
8123         """Takes a depend string and parses the condition."""
8124         edebug = mysettings.get("PORTAGE_DEBUG", None) == "1"
8125         #check_config_instance(mysettings)
8126         if trees is None:
8127                 trees = globals()["db"]
8128         if use=="yes":
8129                 if myuse is None:
8130                         #default behavior
8131                         myusesplit = mysettings["PORTAGE_USE"].split()
8132                 else:
8133                         myusesplit = myuse
8134                         # We've been given useflags to use.
8135                         #print "USE FLAGS PASSED IN."
8136                         #print myuse
8137                         #if "bindist" in myusesplit:
8138                         #       print "BINDIST is set!"
8139                         #else:
8140                         #       print "BINDIST NOT set."
8141         else:
8142                 #we are being run by autouse(), don't consult USE vars yet.
8143                 # WE ALSO CANNOT USE SETTINGS
8144                 myusesplit=[]
8145
8146         #convert parentheses to sublists
8147         try:
8148                 mysplit = portage.dep.paren_reduce(depstring)
8149         except portage.exception.InvalidDependString as e:
8150                 return [0, str(e)]
8151
8152         mymasks = set()
8153         useforce = set()
8154         useforce.add(mysettings["ARCH"])
8155         if use == "all":
8156                 # This masking/forcing is only for repoman.  In other cases, relevant
8157                 # masking/forcing should have already been applied via
8158                 # config.regenerate().  Also, binary or installed packages may have
8159                 # been built with flags that are now masked, and it would be
8160                 # inconsistent to mask them now.  Additionally, myuse may consist of
8161                 # flags from a parent package that is being merged to a $ROOT that is
8162                 # different from the one that mysettings represents.
8163                 mymasks.update(mysettings.usemask)
8164                 mymasks.update(mysettings.archlist())
8165                 mymasks.discard(mysettings["ARCH"])
8166                 useforce.update(mysettings.useforce)
8167                 useforce.difference_update(mymasks)
8168         try:
8169                 mysplit = portage.dep.use_reduce(mysplit, uselist=myusesplit,
8170                         masklist=mymasks, matchall=(use=="all"), excludeall=useforce)
8171         except portage.exception.InvalidDependString as e:
8172                 return [0, str(e)]
8173
8174         # Do the || conversions
8175         mysplit=portage.dep.dep_opconvert(mysplit)
8176
8177         if mysplit == []:
8178                 #dependencies were reduced to nothing
8179                 return [1,[]]
8180
8181         # Recursively expand new-style virtuals so as to
8182         # collapse one or more levels of indirection.
8183         try:
8184                 mysplit = _expand_new_virtuals(mysplit, edebug, mydbapi, mysettings,
8185                         use=use, mode=mode, myuse=myuse,
8186                         use_force=useforce, use_mask=mymasks, use_cache=use_cache,
8187                         use_binaries=use_binaries, myroot=myroot, trees=trees)
8188         except portage.exception.ParseError as e:
8189                 return [0, str(e)]
8190
8191         mysplit2=mysplit[:]
8192         mysplit2=dep_wordreduce(mysplit2,mysettings,mydbapi,mode,use_cache=use_cache)
8193         if mysplit2 is None:
8194                 return [0, _("Invalid token")]
8195
8196         writemsg("\n\n\n", 1)
8197         writemsg("mysplit:  %s\n" % (mysplit), 1)
8198         writemsg("mysplit2: %s\n" % (mysplit2), 1)
8199
8200         try:
8201                 selected_atoms = dep_zapdeps(mysplit, mysplit2, myroot,
8202                         use_binaries=use_binaries, trees=trees)
8203         except portage.exception.InvalidAtom as e:
8204                 if portage.dep._dep_check_strict:
8205                         raise # This shouldn't happen.
8206                 # dbapi.match() failed due to an invalid atom in
8207                 # the dependencies of an installed package.
8208                 return [0, _("Invalid atom: '%s'") % (e,)]
8209
8210         return [1, selected_atoms]
8211
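# Illustrative usage sketch for dep_check(): evaluating a DEPEND-style string
# against a dbapi.  The dependency string is hypothetical, and the legacy
# "db" global is assumed to be initialized since no trees are passed in.
def _dep_check_example(mydbapi, mysettings):
        success, result = dep_check("|| ( app-shells/bash app-shells/zsh )",
                mydbapi, mysettings)
        if not success:
                # result is the parse/validation error message
                return result
        # result is the list of atoms chosen from the dependency string
        return result
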
8212 def dep_wordreduce(mydeplist,mysettings,mydbapi,mode,use_cache=1):
8213         "Reduces the deplist to True and False values"
8214         deplist=mydeplist[:]
8215         for mypos, token in enumerate(deplist):
8216                 if isinstance(deplist[mypos], list):
8217                         #recurse
8218                         deplist[mypos]=dep_wordreduce(deplist[mypos],mysettings,mydbapi,mode,use_cache=use_cache)
8219                 elif deplist[mypos]=="||":
8220                         pass
8221                 elif token[:1] == "!":
8222                         deplist[mypos] = False
8223                 else:
8224                         mykey = deplist[mypos].cp
8225                         if mysettings and mykey in mysettings.pprovideddict and \
8226                                 match_from_list(deplist[mypos], mysettings.pprovideddict[mykey]):
8227                                 deplist[mypos]=True
8228                         elif mydbapi is None:
8229                                 # Assume nothing is satisfied.  This forces dep_zapdeps to
8230                                 # return all of the deps that have been selected
8231                                 # (excluding those satisfied by package.provided).
8232                                 deplist[mypos] = False
8233                         else:
8234                                 if mode:
8235                                         x = mydbapi.xmatch(mode, deplist[mypos])
8236                                         if mode.startswith("minimum-"):
8237                                                 mydep = []
8238                                                 if x:
8239                                                         mydep.append(x)
8240                                         else:
8241                                                 mydep = x
8242                                 else:
8243                                         mydep=mydbapi.match(deplist[mypos],use_cache=use_cache)
8244                                 if mydep is not None:
8245                                         tmp=(len(mydep)>=1)
8246                                         if deplist[mypos][0]=="!":
8247                                                 tmp=False
8248                                         deplist[mypos]=tmp
8249                                 else:
8250                                         #encountered invalid string
8251                                         return None
8252         return deplist
8253
8254 _cpv_key_re = re.compile('^' + versions._cpv + '$', re.VERBOSE)
8255 def cpv_getkey(mycpv):
8256         """Calls pkgsplit on a cpv and returns only the cp."""
8257         m = _cpv_key_re.match(mycpv)
8258         if m is not None:
8259                 return m.group(2)
8260
8261         warnings.warn("portage.cpv_getkey() called with invalid cpv: '%s'" \
8262                 % (mycpv,), DeprecationWarning)
8263
8264         myslash = mycpv.split("/", 1)
8265         mysplit = versions._pkgsplit(myslash[-1])
8266         if mysplit is None:
8267                 return None
8268         mylen=len(myslash)
8269         if mylen==2:
8270                 return myslash[0]+"/"+mysplit[0]
8271         else:
8272                 return mysplit[0]
8273
8274 getCPFromCPV = cpv_getkey
8275
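# Illustrative usage sketch for cpv_getkey(): reducing a cpv to its cp
# (category/package) part.  The version used here is hypothetical.
def _cpv_getkey_example():
        # "sys-apps/portage-2.1.6" -> "sys-apps/portage"
        return cpv_getkey("sys-apps/portage-2.1.6")
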
8276 def key_expand(mykey, mydb=None, use_cache=1, settings=None):
8277         """This is deprecated because it just returns the first match instead of
8278         raising AmbiguousPackageName like cpv_expand does."""
8279         warnings.warn("portage.key_expand() is deprecated", DeprecationWarning)
8280         mysplit=mykey.split("/")
8281         if settings is None:
8282                 settings = globals()["settings"]
8283         virts = settings.getvirtuals("/")
8284         virts_p = settings.get_virts_p("/")
8285         if len(mysplit)==1:
8286                 if hasattr(mydb, "cp_list"):
8287                         for x in mydb.categories:
8288                                 if mydb.cp_list(x+"/"+mykey,use_cache=use_cache):
8289                                         return dep.Atom(x + "/" + mykey)
8290                         if mykey in virts_p:
8291                                 return(virts_p[mykey][0])
8292                 return dep.Atom("null/" + mykey)
8293         elif mydb:
8294                 if hasattr(mydb, "cp_list"):
8295                         if not mydb.cp_list(mykey, use_cache=use_cache) and \
8296                                 virts and mykey in virts:
8297                                 return virts[mykey][0]
8298                 if not isinstance(mykey, dep.Atom):
8299                         mykey = dep.Atom(mykey)
8300                 return mykey
8301
8302 def cpv_expand(mycpv, mydb=None, use_cache=1, settings=None):
8303         """Given a string (packagename or virtual), expand it into a valid
8304         cat/package string. Virtual expansion uses mydb to determine which
8305         provided virtual is a valid choice, defaulting to the first element
8306         when there are no installed/available candidates."""
8307         myslash=mycpv.split("/")
8308         mysplit = versions._pkgsplit(myslash[-1])
8309         if settings is None:
8310                 settings = globals()["settings"]
8311         virts = settings.getvirtuals("/")
8312         virts_p = settings.get_virts_p("/")
8313         if len(myslash)>2:
8314                 # this is an illegal case.
8315                 mysplit=[]
8316                 mykey=mycpv
8317         elif len(myslash)==2:
8318                 if mysplit:
8319                         mykey=myslash[0]+"/"+mysplit[0]
8320                 else:
8321                         mykey=mycpv
8322                 if mydb and virts and mykey in virts:
8323                         writemsg("mydb.__class__: %s\n" % (mydb.__class__), 1)
8324                         if hasattr(mydb, "cp_list"):
8325                                 if not mydb.cp_list(mykey, use_cache=use_cache):
8326                                         writemsg("virts[%s]: %s\n" % (str(mykey),virts[mykey]), 1)
8327                                         mykey_orig = mykey[:]
8328                                         for vkey in virts[mykey]:
8329                                                 # The virtuals file can contain a versioned atom, so
8330                                                 # it may be necessary to remove the operator and
8331                                                 # version from the atom before it is passed into
8332                                                 # dbapi.cp_list().
8333                                                 if mydb.cp_list(dep_getkey(vkey), use_cache=use_cache):
8334                                                         mykey = str(vkey)
8335                                                         writemsg(_("virts chosen: %s\n") % (mykey), 1)
8336                                                         break
8337                                         if mykey == mykey_orig:
8338                                                 mykey = str(virts[mykey][0])
8339                                                 writemsg(_("virts defaulted: %s\n") % (mykey), 1)
8340                         #we only perform virtual expansion if we are passed a dbapi
8341         else:
8342                 #specific cpv, no category, ie. "foo-1.0"
8343                 if mysplit:
8344                         myp=mysplit[0]
8345                 else:
8346                         # "foo" ?
8347                         myp=mycpv
8348                 mykey=None
8349                 matches=[]
8350                 if mydb and hasattr(mydb, "categories"):
8351                         for x in mydb.categories:
8352                                 if mydb.cp_list(x+"/"+myp,use_cache=use_cache):
8353                                         matches.append(x+"/"+myp)
8354                 if len(matches) > 1:
8355                         virtual_name_collision = False
8356                         if len(matches) == 2:
8357                                 for x in matches:
8358                                         if not x.startswith("virtual/"):
8359                                                 # Assume that the non-virtual is desired.  This helps
8360                                                 # avoid the ValueError for invalid deps that come from
8361                                                 # installed packages (during reverse blocker detection,
8362                                                 # for example).
8363                                                 mykey = x
8364                                         else:
8365                                                 virtual_name_collision = True
8366                         if not virtual_name_collision:
8367                                 # AmbiguousPackageName inherits from ValueError,
8368                                 # for backward compatibility with calling code
8369                                 # that already handles ValueError.
8370                                 raise portage.exception.AmbiguousPackageName(matches)
8371                 elif matches:
8372                         mykey=matches[0]
8373
8374                 if not mykey and not isinstance(mydb, list):
8375                         if myp in virts_p:
8376                                 mykey=virts_p[myp][0]
8377                         #again, we only perform virtual expansion if we have a dbapi (not a list)
8378                 if not mykey:
8379                         mykey="null/"+myp
8380         if mysplit:
8381                 if mysplit[2]=="r0":
8382                         return mykey+"-"+mysplit[1]
8383                 else:
8384                         return mykey+"-"+mysplit[1]+"-"+mysplit[2]
8385         else:
8386                 return mykey
8387
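# Illustrative usage sketch for cpv_expand(): resolving a category-less cpv
# against a dbapi.  The package name is hypothetical and a live dbapi plus
# config instance are assumed.
def _cpv_expand_example(mydb, settings):
        try:
                # e.g. "bash-4.0" -> "app-shells/bash-4.0" when exactly one
                # category provides the package.
                return cpv_expand("bash-4.0", mydb=mydb, settings=settings)
        except portage.exception.AmbiguousPackageName as e:
                # More than one category matched; the exception lists the
                # candidate cat/package strings.
                return str(e)
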
8388 def getmaskingreason(mycpv, metadata=None, settings=None, portdb=None, return_location=False):
8389         from portage.util import grablines
8390         if settings is None:
8391                 settings = globals()["settings"]
8392         if portdb is None:
8393                 portdb = globals()["portdb"]
8394         mysplit = catpkgsplit(mycpv)
8395         if not mysplit:
8396                 raise ValueError(_("invalid CPV: %s") % mycpv)
8397         if metadata is None:
8398                 db_keys = list(portdb._aux_cache_keys)
8399                 try:
8400                         metadata = dict(zip(db_keys, portdb.aux_get(mycpv, db_keys)))
8401                 except KeyError:
8402                         if not portdb.cpv_exists(mycpv):
8403                                 raise
8404         if metadata is None:
8405                 # Can't access SLOT due to corruption.
8406                 cpv_slot_list = [mycpv]
8407         else:
8408                 cpv_slot_list = ["%s:%s" % (mycpv, metadata["SLOT"])]
8409         mycp=mysplit[0]+"/"+mysplit[1]
8410
8411         # XXX- This is a temporary duplicate of code from the config constructor.
8412         locations = [os.path.join(settings["PORTDIR"], "profiles")]
8413         locations.extend(settings.profiles)
8414         for ov in settings["PORTDIR_OVERLAY"].split():
8415                 profdir = os.path.join(normalize_path(ov), "profiles")
8416                 if os.path.isdir(profdir):
8417                         locations.append(profdir)
8418         locations.append(os.path.join(settings["PORTAGE_CONFIGROOT"],
8419                 USER_CONFIG_PATH))
8420         locations.reverse()
8421         pmasklists = [(x, grablines(os.path.join(x, "package.mask"), recursive=1)) for x in locations]
8422
8423         if mycp in settings.pmaskdict:
8424                 for x in settings.pmaskdict[mycp]:
8425                         if match_from_list(x, cpv_slot_list):
8426                                 comment = ""
8427                                 l = "\n"
8428                                 comment_valid = -1
8429                                 for pmask in pmasklists:
8430                                         pmask_filename = os.path.join(pmask[0], "package.mask")
8431                                         for i in range(len(pmask[1])):
8432                                                 l = pmask[1][i].strip()
8433                                                 if l == "":
8434                                                         comment = ""
8435                                                         comment_valid = -1
8436                                                 elif l[0] == "#":
8437                                                         comment += (l+"\n")
8438                                                         comment_valid = i + 1
8439                                                 elif l == x:
8440                                                         if comment_valid != i:
8441                                                                 comment = ""
8442                                                         if return_location:
8443                                                                 return (comment, pmask_filename)
8444                                                         else:
8445                                                                 return comment
8446                                                 elif comment_valid != -1:
8447                                                         # Apparently this comment applies to multiple masks, so
8448                                                         # it remains valid until a blank line is encountered.
8449                                                         comment_valid += 1
8450         if return_location:
8451                 return (None, None)
8452         else:
8453                 return None
8454
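# Illustrative usage sketch for getmaskingreason(): fetching the package.mask
# comment (and the file it came from) for a masked package.  The cpv is
# hypothetical and the legacy settings/portdb globals are assumed when the
# keyword arguments are omitted.
def _getmaskingreason_example(cpv="sys-apps/portage-2.1.6"):
        comment, filename = getmaskingreason(cpv, return_location=True)
        # Both values are None when no package.mask entry matches the cpv.
        return comment, filename
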
8455 def getmaskingstatus(mycpv, settings=None, portdb=None):
8456         if settings is None:
8457                 settings = config(clone=globals()["settings"])
8458         if portdb is None:
8459                 portdb = globals()["portdb"]
8460
8461         metadata = None
8462         installed = False
8463         if not isinstance(mycpv, basestring):
8464                 # emerge passed in a Package instance
8465                 pkg = mycpv
8466                 mycpv = pkg.cpv
8467                 metadata = pkg.metadata
8468                 installed = pkg.installed
8469
8470         mysplit = catpkgsplit(mycpv)
8471         if not mysplit:
8472                 raise ValueError(_("invalid CPV: %s") % mycpv)
8473         if metadata is None:
8474                 db_keys = list(portdb._aux_cache_keys)
8475                 try:
8476                         metadata = dict(zip(db_keys, portdb.aux_get(mycpv, db_keys)))
8477                 except KeyError:
8478                         if not portdb.cpv_exists(mycpv):
8479                                 raise
8480                         return ["corruption"]
8481                 if "?" in metadata["LICENSE"]:
8482                         settings.setcpv(mycpv, mydb=metadata)
8483                         metadata["USE"] = settings["PORTAGE_USE"]
8484                 else:
8485                         metadata["USE"] = ""
8486         mycp=mysplit[0]+"/"+mysplit[1]
8487
8488         rValue = []
8489
8490         # profile checking
8491         if settings._getProfileMaskAtom(mycpv, metadata):
8492                 rValue.append("profile")
8493
8494         # package.mask checking
8495         if settings._getMaskAtom(mycpv, metadata):
8496                 rValue.append("package.mask")
8497
8498         # keywords checking
8499         eapi = metadata["EAPI"]
8500         mygroups = settings._getKeywords(mycpv, metadata)
8501         licenses = metadata["LICENSE"]
8502         properties = metadata["PROPERTIES"]
8503         slot = metadata["SLOT"]
8504         if eapi.startswith("-"):
8505                 eapi = eapi[1:]
8506         if not eapi_is_supported(eapi):
8507                 return ["EAPI %s" % eapi]
8508         elif _eapi_is_deprecated(eapi) and not installed:
8509                 return ["EAPI %s" % eapi]
8510         egroups = settings.configdict["backupenv"].get(
8511                 "ACCEPT_KEYWORDS", "").split()
8512         pgroups = settings["ACCEPT_KEYWORDS"].split()
8513         myarch = settings["ARCH"]
8514         if pgroups and myarch not in pgroups:
8515                 """For operating systems other than Linux, ARCH is not necessarily a
8516                 valid keyword."""
8517                 myarch = pgroups[0].lstrip("~")
8518
8519         cp = dep_getkey(mycpv)
8520         pkgdict = settings.pkeywordsdict.get(cp)
8521         matches = False
8522         if pkgdict:
8523                 cpv_slot_list = ["%s:%s" % (mycpv, metadata["SLOT"])]
8524                 for atom, pkgkeywords in pkgdict.items():
8525                         if match_from_list(atom, cpv_slot_list):
8526                                 matches = True
8527                                 pgroups.extend(pkgkeywords)
8528         if matches or egroups:
8529                 pgroups.extend(egroups)
8530                 inc_pgroups = set()
8531                 for x in pgroups:
8532                         if x.startswith("-"):
8533                                 if x == "-*":
8534                                         inc_pgroups.clear()
8535                                 else:
8536                                         inc_pgroups.discard(x[1:])
8537                         else:
8538                                 inc_pgroups.add(x)
8539                 pgroups = inc_pgroups
8540                 del inc_pgroups
8541
8542         kmask = "missing"
8543
8544         if '**' in pgroups:
8545                 kmask = None
8546         else:
8547                 for keyword in pgroups:
8548                         if keyword in mygroups:
8549                                 kmask = None
8550                                 break
8551
8552         if kmask:
8553                 fallback = None
8554                 for gp in mygroups:
8555                         if gp=="*":
8556                                 kmask=None
8557                                 break
8558                         elif gp=="-"+myarch and myarch in pgroups:
8559                                 kmask="-"+myarch
8560                                 break
8561                         elif gp=="~"+myarch and myarch in pgroups:
8562                                 kmask="~"+myarch
8563                                 break
8564
8565         try:
8566                 missing_licenses = settings._getMissingLicenses(mycpv, metadata)
8567                 if missing_licenses:
8568                         allowed_tokens = set(["||", "(", ")"])
8569                         allowed_tokens.update(missing_licenses)
8570                         license_split = licenses.split()
8571                         license_split = [x for x in license_split \
8572                                 if x in allowed_tokens]
8573                         msg = license_split[:]
8574                         msg.append("license(s)")
8575                         rValue.append(" ".join(msg))
8576         except portage.exception.InvalidDependString as e:
8577                 rValue.append("LICENSE: "+str(e))
8578
8579         try:
8580                 missing_properties = settings._getMissingProperties(mycpv, metadata)
8581                 if missing_properties:
8582                         allowed_tokens = set(["||", "(", ")"])
8583                         allowed_tokens.update(missing_properties)
8584                         properties_split = properties.split()
8585                         properties_split = [x for x in properties_split \
8586                                         if x in allowed_tokens]
8587                         msg = properties_split[:]
8588                         msg.append("properties")
8589                         rValue.append(" ".join(msg))
8590         except portage.exception.InvalidDependString as e:
8591                 rValue.append("PROPERTIES: "+str(e))
8592
8593         # Only show KEYWORDS masks for installed packages
8594         # if they're not masked for any other reason.
8595         if kmask and (not installed or not rValue):
8596                 rValue.append(kmask+" keyword")
8597
8598         return rValue
8599
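# Illustrative usage sketch for getmaskingstatus(): listing the reasons a
# package is masked.  The cpv is hypothetical and the legacy settings/portdb
# globals are assumed when the keyword arguments are omitted.
def _getmaskingstatus_example(cpv="sys-apps/portage-2.1.6"):
        # Returns an empty list when the package is visible, otherwise
        # entries such as "package.mask", "~x86 keyword" or "EAPI 99".
        return getmaskingstatus(cpv)
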
8600 auxdbkeys=[
8601   'DEPEND',    'RDEPEND',   'SLOT',      'SRC_URI',
8602         'RESTRICT',  'HOMEPAGE',  'LICENSE',   'DESCRIPTION',
8603         'KEYWORDS',  'INHERITED', 'IUSE', 'UNUSED_00',
8604         'PDEPEND',   'PROVIDE', 'EAPI',
8605         'PROPERTIES', 'DEFINED_PHASES', 'UNUSED_05', 'UNUSED_04',
8606         'UNUSED_03', 'UNUSED_02', 'UNUSED_01',
8607         ]
8608 auxdbkeylen=len(auxdbkeys)
8609
8610 from portage.dbapi import dbapi
8611 from portage.dbapi.virtual import fakedbapi
8612 from portage.dbapi.bintree import bindbapi, binarytree
8613 from portage.dbapi.vartree import vardbapi, vartree, dblink
8614 from portage.dbapi.porttree import close_portdbapi_caches, portdbapi, portagetree
8615
8616 class FetchlistDict(portage.cache.mappings.Mapping):
8617         """This provides a mapping interface to retrieve fetch lists.  It's used
8618         to allow portage.manifest.Manifest to access fetch lists via a standard
8619         mapping interface rather than using the dbapi directly."""
8620         def __init__(self, pkgdir, settings, mydbapi):
8621                 """pkgdir is a directory containing ebuilds and settings is passed into
8622                 portdbapi.getfetchlist for __getitem__ calls."""
8623                 self.pkgdir = pkgdir
8624                 self.cp = os.sep.join(pkgdir.split(os.sep)[-2:])
8625                 self.settings = settings
8626                 self.mytree = os.path.realpath(os.path.dirname(os.path.dirname(pkgdir)))
8627                 self.portdb = mydbapi
8628         def __getitem__(self, pkg_key):
8629                 """Returns the complete fetch list for a given package."""
8630                 return list(self.portdb.getFetchMap(pkg_key, mytree=self.mytree))
8631         def __contains__(self, cpv):
8632                 return cpv in self.__iter__()
8633         def has_key(self, pkg_key):
8634                 """Returns true if the given package exists within pkgdir."""
8635                 return pkg_key in self
8636
8637         def __iter__(self):
8638                 return iter(self.portdb.cp_list(self.cp, mytree=self.mytree))
8639
8640         def __len__(self):
8641                 """This needs to be implemented in order to avoid
8642                 infinite recursion in some cases."""
8643                 return len(self.portdb.cp_list(self.cp, mytree=self.mytree))
8644
8645         def keys(self):
8646                 """Returns keys for all packages within pkgdir"""
8647                 return self.portdb.cp_list(self.cp, mytree=self.mytree)
8648
8649         if sys.hexversion >= 0x3000000:
8650                 keys = __iter__
8651
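# Illustrative usage sketch for FetchlistDict: mapping every cpv in a package
# directory to the distfiles it would fetch.  The category/package path is
# hypothetical and a portdbapi instance is assumed.
def _fetchlist_dict_example(settings, portdb):
        pkgdir = os.path.join(settings["PORTDIR"], "app-shells", "bash")
        fetchlists = FetchlistDict(pkgdir, settings, portdb)
        # Iteration yields the cpvs available in pkgdir; indexing returns
        # the list of distfile names for that cpv.
        return dict((cpv, fetchlists[cpv]) for cpv in fetchlists)
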
8652 def pkgmerge(mytbz2, myroot, mysettings, mydbapi=None,
8653         vartree=None, prev_mtimes=None, blockers=None):
8654         """Merge a .tbz2 binary package into myroot, returning os.EX_OK on
8655                 success, or a nonzero status if the setup, extract or merge
8656                 phase fails.  This code assumes the package exists."""
8657         global db
8658         if mydbapi is None:
8659                 mydbapi = db[myroot]["bintree"].dbapi
8660         if vartree is None:
8661                 vartree = db[myroot]["vartree"]
8662         if mytbz2[-5:]!=".tbz2":
8663                 print(_("!!! Not a .tbz2 file"))
8664                 return 1
8665
8666         tbz2_lock = None
8667         mycat = None
8668         mypkg = None
8669         did_merge_phase = False
8670         success = False
8671         try:
8672                 """ Don't lock the tbz2 file because the filesystem could be readonly or
8673                 shared by a cluster."""
8674                 #tbz2_lock = portage.locks.lockfile(mytbz2, wantnewlockfile=1)
8675
8676                 mypkg = os.path.basename(mytbz2)[:-5]
8677                 xptbz2 = portage.xpak.tbz2(mytbz2)
8678                 mycat = xptbz2.getfile(_unicode_encode("CATEGORY",
8679                         encoding=_encodings['repo.content']))
8680                 if not mycat:
8681                         writemsg(_("!!! CATEGORY info missing from info chunk, aborting...\n"),
8682                                 noiselevel=-1)
8683                         return 1
8684                 mycat = _unicode_decode(mycat,
8685                         encoding=_encodings['repo.content'], errors='replace')
8686                 mycat = mycat.strip()
8687
8688                 # These are the same directories that would be used at build time.
8689                 builddir = os.path.join(
8690                         mysettings["PORTAGE_TMPDIR"], "portage", mycat, mypkg)
8691                 catdir = os.path.dirname(builddir)
8692                 pkgloc = os.path.join(builddir, "image")
8693                 infloc = os.path.join(builddir, "build-info")
8694                 myebuild = os.path.join(
8695                         infloc, os.path.basename(mytbz2)[:-4] + "ebuild")
8696                 portage.util.ensure_dirs(os.path.dirname(catdir),
8697                         uid=portage_uid, gid=portage_gid, mode=0o70, mask=0)
8698                 catdir_lock = portage.locks.lockdir(catdir)
8699                 portage.util.ensure_dirs(catdir,
8700                         uid=portage_uid, gid=portage_gid, mode=0o70, mask=0)
8701                 try:
8702                         shutil.rmtree(builddir)
8703                 except (IOError, OSError) as e:
8704                         if e.errno != errno.ENOENT:
8705                                 raise
8706                         del e
8707                 for mydir in (builddir, pkgloc, infloc):
8708                         portage.util.ensure_dirs(mydir, uid=portage_uid,
8709                                 gid=portage_gid, mode=0o755)
8710                 writemsg_stdout(_(">>> Extracting info\n"))
8711                 xptbz2.unpackinfo(infloc)
8712                 mysettings.setcpv(mycat + "/" + mypkg, mydb=mydbapi)
8713                 # Store the md5sum in the vdb.
8714                 fp = open(_unicode_encode(os.path.join(infloc, 'BINPKGMD5')), 'w')
8715                 fp.write(str(portage.checksum.perform_md5(mytbz2))+"\n")
8716                 fp.close()
8717
8718                 # This gives bashrc users an opportunity to do various things
8719                 # such as remove binary packages after they're installed.
8720                 mysettings["PORTAGE_BINPKG_FILE"] = mytbz2
8721                 mysettings.backup_changes("PORTAGE_BINPKG_FILE")
8722                 debug = mysettings.get("PORTAGE_DEBUG", "") == "1"
8723
8724                 # Eventually we'd like to pass in the saved ebuild env here.
8725                 retval = doebuild(myebuild, "setup", myroot, mysettings, debug=debug,
8726                         tree="bintree", mydbapi=mydbapi, vartree=vartree)
8727                 if retval != os.EX_OK:
8728                         writemsg(_("!!! Setup failed: %s\n") % retval, noiselevel=-1)
8729                         return retval
8730
8731                 writemsg_stdout(_(">>> Extracting %s\n") % mypkg)
8732                 retval = portage.process.spawn_bash(
8733                         "bzip2 -dqc -- '%s' | tar -xp -C '%s' -f -" % (mytbz2, pkgloc),
8734                         env=mysettings.environ())
8735                 if retval != os.EX_OK:
8736                         writemsg(_("!!! Error Extracting '%s'\n") % mytbz2, noiselevel=-1)
8737                         return retval
8738                 #portage.locks.unlockfile(tbz2_lock)
8739                 #tbz2_lock = None
8740
8741                 mylink = dblink(mycat, mypkg, myroot, mysettings, vartree=vartree,
8742                         treetype="bintree", blockers=blockers)
8743                 retval = mylink.merge(pkgloc, infloc, myroot, myebuild, cleanup=0,
8744                         mydbapi=mydbapi, prev_mtimes=prev_mtimes)
8745                 did_merge_phase = True
8746                 success = retval == os.EX_OK
8747                 return retval
8748         finally:
8749                 mysettings.pop("PORTAGE_BINPKG_FILE", None)
8750                 if tbz2_lock:
8751                         portage.locks.unlockfile(tbz2_lock)
8752                 if True:
8753                         if not did_merge_phase:
8754                                 # The merge phase handles this already.  Callers don't know how
8755                                 # far this function got, so we have to call elog_process() here
8756                                 # so that it's only called once.
8757                                 from portage.elog import elog_process
8758                                 elog_process(mycat + "/" + mypkg, mysettings)
8759                         try:
8760                                 if success:
8761                                         shutil.rmtree(builddir)
8762                         except (IOError, OSError) as e:
8763                                 if e.errno != errno.ENOENT:
8764                                         raise
8765                                 del e
8766
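# Illustrative usage sketch for pkgmerge(): merging a prebuilt binary package
# into a root.  The .tbz2 path is hypothetical, and root privileges plus a
# configured environment (legacy "db" global) are assumed.
def _pkgmerge_example(mysettings, tbz2="/usr/portage/packages/All/bash-4.0.tbz2"):
        retval = pkgmerge(tbz2, "/", mysettings)
        # os.EX_OK indicates a successful merge; any other value is an error
        # status from the setup, extract or merge phase.
        return retval == os.EX_OK
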
8767 def deprecated_profile_check(settings=None):
8768         config_root = "/"
8769         if settings is not None:
8770                 config_root = settings["PORTAGE_CONFIGROOT"]
8771         deprecated_profile_file = os.path.join(config_root,
8772                 DEPRECATED_PROFILE_FILE)
8773         if not os.access(deprecated_profile_file, os.R_OK):
8774                 return False
8775         dcontent = codecs.open(_unicode_encode(deprecated_profile_file,
8776                 encoding=_encodings['fs'], errors='strict'), 
8777                 mode='r', encoding=_encodings['content'], errors='replace').readlines()
8778         writemsg(colorize("BAD", _("\n!!! Your current profile is "
8779                 "deprecated and not supported anymore.")) + "\n", noiselevel=-1)
8780         writemsg(colorize("BAD", _("!!! Use eselect profile to update your "
8781                 "profile.")) + "\n", noiselevel=-1)
8782         if not dcontent:
8783                 writemsg(colorize("BAD", _("!!! Please refer to the "
8784                         "Gentoo Upgrading Guide.")) + "\n", noiselevel=-1)
8785                 return True
8786         newprofile = dcontent[0]
8787         writemsg(colorize("BAD", _("!!! Please upgrade to the "
8788                 "following profile if possible:")) + "\n", noiselevel=-1)
8789         writemsg(8*" " + colorize("GOOD", newprofile) + "\n", noiselevel=-1)
8790         if len(dcontent) > 1:
8791                 writemsg(_("To upgrade do the following steps:\n"), noiselevel=-1)
8792                 for myline in dcontent[1:]:
8793                         writemsg(myline, noiselevel=-1)
8794                 writemsg("\n\n", noiselevel=-1)
8795         return True
8796
8797 # gets virtual package settings
8798 def getvirtuals(myroot):
8799         global settings
8800         writemsg("--- DEPRECATED call to getvirtuals\n")
8801         return settings.getvirtuals(myroot)
8802
8803 def commit_mtimedb(mydict=None, filename=None):
8804         if mydict is None:
8805                 global mtimedb
8806                 if "mtimedb" not in globals() or mtimedb is None:
8807                         return
8808                 mtimedb.commit()
8809                 return
8810         if filename is None:
8811                 global mtimedbfile
8812                 filename = mtimedbfile
8813         mydict["version"] = VERSION
8814         d = {} # for full backward compat, pickle it as a plain dict object.
8815         d.update(mydict)
8816         try:
8817                 f = atomic_ofstream(filename, mode='wb')
8818                 pickle.dump(d, f, protocol=2)
8819                 f.close()
8820                 portage.util.apply_secpass_permissions(filename,
8821                         uid=uid, gid=portage_gid, mode=0o644)
8822         except (IOError, OSError) as e:
8823                 pass
8824
8825 def portageexit():
8826         global uid,portage_gid,portdb,db
8827         if secpass and os.environ.get("SANDBOX_ON") != "1":
8828                 close_portdbapi_caches()
8829                 commit_mtimedb()
8830
8831 atexit_register(portageexit)
8832
8833 def _global_updates(trees, prev_mtimes):
8834         """
8835         Perform new global updates if they exist in $PORTDIR/profiles/updates/.
8836
8837         @param trees: A dictionary containing portage trees.
8838         @type trees: dict
8839         @param prev_mtimes: A dictionary containing mtimes of files located in
8840                 $PORTDIR/profiles/updates/.
8841         @type prev_mtimes: dict
8842         @rtype: None or List
8843         @return: None if there were no updates, otherwise a list of update commands
8844                 that have been performed.
8845         """
8846         # only do this if we're root and not running repoman/ebuild digest
8847         global secpass
8848         if secpass < 2 or "SANDBOX_ACTIVE" in os.environ:
8849                 return
8850         root = "/"
8851         mysettings = trees["/"]["vartree"].settings
8852         updpath = os.path.join(mysettings["PORTDIR"], "profiles", "updates")
8853
8854         try:
8855                 if mysettings["PORTAGE_CALLER"] == "fixpackages":
8856                         update_data = grab_updates(updpath)
8857                 else:
8858                         update_data = grab_updates(updpath, prev_mtimes)
8859         except portage.exception.DirectoryNotFound:
8860                 writemsg(_("--- 'profiles/updates' is empty or "
8861                         "not available. Empty portage tree?\n"), noiselevel=1)
8862                 return
8863         myupd = None
8864         if len(update_data) > 0:
8865                 do_upgrade_packagesmessage = 0
8866                 myupd = []
8867                 timestamps = {}
8868                 for mykey, mystat, mycontent in update_data:
8869                         writemsg_stdout("\n\n")
8870                         writemsg_stdout(colorize("GOOD",
8871                                 _("Performing Global Updates: "))+bold(mykey)+"\n")
8872                         writemsg_stdout(_("(Could take a couple of minutes if you have a lot of binary packages.)\n"))
8873                         writemsg_stdout(_("  %s='update pass'  %s='binary update'  "
8874                                 "%s='/var/db update'  %s='/var/db move'\n"
8875                                 "  %s='/var/db SLOT move'  %s='binary move'  "
8876                                 "%s='binary SLOT move'\n  %s='update /etc/portage/package.*'\n") % \
8877                                 (bold("."), bold("*"), bold("#"), bold("@"), bold("s"), bold("%"), bold("S"), bold("p")))
8878                         valid_updates, errors = parse_updates(mycontent)
8879                         myupd.extend(valid_updates)
8880                         writemsg_stdout(len(valid_updates) * "." + "\n")
8881                         if len(errors) == 0:
8882                                 # Update our internal mtime since we
8883                                 # processed all of our directives.
8884                                 timestamps[mykey] = long(mystat.st_mtime)
8885                         else:
8886                                 for msg in errors:
8887                                         writemsg("%s\n" % msg, noiselevel=-1)
8888
8889                 world_file = os.path.join(root, WORLD_FILE)
8890                 world_list = grabfile(world_file)
8891                 world_modified = False
8892                 for update_cmd in myupd:
8893                         for pos, atom in enumerate(world_list):
8894                                 new_atom = update_dbentry(update_cmd, atom)
8895                                 if atom != new_atom:
8896                                         world_list[pos] = new_atom
8897                                         world_modified = True
8898                 if world_modified:
8899                         world_list.sort()
8900                         write_atomic(world_file,
8901                                 "".join("%s\n" % (x,) for x in world_list))
8902
8903                 update_config_files("/",
8904                         mysettings.get("CONFIG_PROTECT","").split(),
8905                         mysettings.get("CONFIG_PROTECT_MASK","").split(),
8906                         myupd)
8907
8908                 trees["/"]["bintree"] = binarytree("/", mysettings["PKGDIR"],
8909                         settings=mysettings)
8910                 vardb = trees["/"]["vartree"].dbapi
8911                 bindb = trees["/"]["bintree"].dbapi
8912                 if not os.access(bindb.bintree.pkgdir, os.W_OK):
8913                         bindb = None
8914                 for update_cmd in myupd:
8915                         if update_cmd[0] == "move":
8916                                 moves = vardb.move_ent(update_cmd)
8917                                 if moves:
8918                                         writemsg_stdout(moves * "@")
8919                                 if bindb:
8920                                         moves = bindb.move_ent(update_cmd)
8921                                         if moves:
8922                                                 writemsg_stdout(moves * "%")
8923                         elif update_cmd[0] == "slotmove":
8924                                 moves = vardb.move_slot_ent(update_cmd)
8925                                 if moves:
8926                                         writemsg_stdout(moves * "s")
8927                                 if bindb:
8928                                         moves = bindb.move_slot_ent(update_cmd)
8929                                         if moves:
8930                                                 writemsg_stdout(moves * "S")
8931
8932                 # The above global updates proceed quickly, so they
8933                 # are considered a single mtimedb transaction.
8934                 if len(timestamps) > 0:
8935                         # We do not update the mtime in the mtimedb
8936                         # until after _all_ of the above updates have
8937                         # been processed because the mtimedb will
8938                         # automatically commit when killed by ctrl C.
8939                         for mykey, mtime in timestamps.items():
8940                                 prev_mtimes[mykey] = mtime
8941
8942                 # We gotta do the brute force updates for these now.
8943                 if mysettings["PORTAGE_CALLER"] == "fixpackages" or \
8944                 "fixpackages" in mysettings.features:
8945                         def onUpdate(maxval, curval):
8946                                 if curval > 0:
8947                                         writemsg_stdout("#")
8948                         vardb.update_ents(myupd, onUpdate=onUpdate)
8949                         if bindb:
8950                                 def onUpdate(maxval, curval):
8951                                         if curval > 0:
8952                                                 writemsg_stdout("*")
8953                                 bindb.update_ents(myupd, onUpdate=onUpdate)
8954                 else:
8955                         do_upgrade_packagesmessage = 1
8956
8957                 # Update progress above is indicated by characters written to stdout so
8958                 # we print a couple new lines here to separate the progress output from
8959                 # what follows.
8960                 print()
8961                 print()
8962
8963                 if do_upgrade_packagesmessage and bindb and \
8964                         bindb.cpv_all():
8965                         writemsg_stdout(_(" ** Skipping packages. Run 'fixpackages' or set it in FEATURES to fix the tbz2's in the packages directory.\n"))
8966                         writemsg_stdout(bold(_("Note: This can take a very long time.")))
8967                         writemsg_stdout("\n")
8968         if myupd:
8969                 return myupd
8970
8971 #continue setting up other trees
8972
8973 class MtimeDB(dict):
8974         def __init__(self, filename):
8975                 dict.__init__(self)
8976                 self.filename = filename
8977                 self._load(filename)
8978
8979         def _load(self, filename):
8980                 try:
8981                         f = open(_unicode_encode(filename), 'rb')
8982                         mypickle = pickle.Unpickler(f)
8983                         try:
8984                                 mypickle.find_global = None
8985                         except AttributeError:
8986                                 # TODO: If py3k, override Unpickler.find_class().
8987                                 pass
8988                         d = mypickle.load()
8989                         f.close()
8990                         del f
8991                 except (IOError, OSError, EOFError, ValueError, pickle.UnpicklingError) as e:
8992                         if isinstance(e, pickle.UnpicklingError):
8993                                 writemsg(_("!!! Error loading '%s': %s\n") % \
8994                                         (filename, str(e)), noiselevel=-1)
8995                         del e
8996                         d = {}
8997
8998                 if "old" in d:
8999                         d["updates"] = d["old"]
9000                         del d["old"]
9001                 if "cur" in d:
9002                         del d["cur"]
9003
9004                 d.setdefault("starttime", 0)
9005                 d.setdefault("version", "")
9006                 for k in ("info", "ldpath", "updates"):
9007                         d.setdefault(k, {})
9008
9009                 mtimedbkeys = set(("info", "ldpath", "resume", "resume_backup",
9010                         "starttime", "updates", "version"))
9011
9012                 for k in list(d):
9013                         if k not in mtimedbkeys:
9014                                 writemsg(_("Deleting invalid mtimedb key: %s\n") % str(k))
9015                                 del d[k]
9016                 self.update(d)
9017                 self._clean_data = copy.deepcopy(d)
9018
9019         def commit(self):
9020                 if not self.filename:
9021                         return
9022                 d = {}
9023                 d.update(self)
9024                 # Only commit if the internal state has changed.
9025                 if d != self._clean_data:
9026                         commit_mtimedb(mydict=d, filename=self.filename)
9027                         self._clean_data = copy.deepcopy(d)
9028
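# Illustrative usage sketch for MtimeDB: loading the pickled mtime database,
# touching a key and committing.  Write access to the file is assumed; the
# path mirrors the one used by _MtimedbProxy below (CACHE_PATH comes from
# portage.const).
def _mtimedb_example():
        path = os.path.join(os.path.sep, CACHE_PATH, "mtimedb")
        d = MtimeDB(path)
        d["starttime"] = int(time.time())
        # commit() only writes the file when the in-memory contents differ
        # from what was originally loaded.
        d.commit()
        return d
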
9029 def create_trees(config_root=None, target_root=None, trees=None):
9030         if trees is None:
9031                 trees = {}
9032         else:
9033                 # clean up any existing portdbapi instances
9034                 for myroot in trees:
9035                         portdb = trees[myroot]["porttree"].dbapi
9036                         portdb.close_caches()
9037                         portdbapi.portdbapi_instances.remove(portdb)
9038                         del trees[myroot]["porttree"], myroot, portdb
9039
9040         settings = config(config_root=config_root, target_root=target_root,
9041                 config_incrementals=portage.const.INCREMENTALS)
9042         settings.lock()
9043
9044         myroots = [(settings["ROOT"], settings)]
9045         if settings["ROOT"] != "/":
9046
9047                 # When ROOT != "/" we only want overrides from the calling
9048                 # environment to apply to the config that's associated
9049                 # with ROOT != "/", so pass an empty dict for the env parameter.
9050                 settings = config(config_root=None, target_root="/", env={})
9051                 settings.lock()
9052                 myroots.append((settings["ROOT"], settings))
9053
9054         for myroot, mysettings in myroots:
9055                 trees[myroot] = portage.util.LazyItemsDict(trees.get(myroot, {}))
9056                 trees[myroot].addLazySingleton("virtuals", mysettings.getvirtuals, myroot)
9057                 trees[myroot].addLazySingleton(
9058                         "vartree", vartree, myroot, categories=mysettings.categories,
9059                                 settings=mysettings)
9060                 trees[myroot].addLazySingleton("porttree",
9061                         portagetree, myroot, settings=mysettings)
9062                 trees[myroot].addLazySingleton("bintree",
9063                         binarytree, myroot, mysettings["PKGDIR"], settings=mysettings)
9064         return trees
9065
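# Illustrative usage sketch for create_trees(): building the tree dictionary
# for a ROOT other than "/".  The target root is hypothetical.
def _create_trees_example(target_root="/mnt/gentoo"):
        trees = create_trees(config_root="/", target_root=target_root)
        # Each root maps to a LazyItemsDict whose vartree/porttree/bintree
        # entries are only constructed on first access.
        return [trees[myroot]["porttree"].dbapi for myroot in trees]
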
9066 class _LegacyGlobalProxy(proxy.objectproxy.ObjectProxy):
9067         """
9068         Instances of these serve as proxies to global variables
9069         that are initialized on demand.
9070         """
9071
9072         __slots__ = ('_name',)
9073
9074         def __init__(self, name):
9075                 proxy.objectproxy.ObjectProxy.__init__(self)
9076                 object.__setattr__(self, '_name', name)
9077
9078         def _get_target(self):
9079                 init_legacy_globals()
9080                 name = object.__getattribute__(self, '_name')
9081                 return globals()[name]
9082
9083 class _PortdbProxy(proxy.objectproxy.ObjectProxy):
9084         """
9085         The portdb is initialized separately from the rest
9086         of the variables, since sometimes the other variables
9087         are needed while the portdb is not.
9088         """
9089
9090         __slots__ = ()
9091
9092         def _get_target(self):
9093                 init_legacy_globals()
9094                 global db, portdb, root, _portdb_initialized
9095                 if not _portdb_initialized:
9096                         portdb = db[root]["porttree"].dbapi
9097                         _portdb_initialized = True
9098                 return portdb
9099
9100 class _MtimedbProxy(proxy.objectproxy.ObjectProxy):
9101         """
9102         The mtimedb is independent from the portdb and other globals.
9103         """
9104
9105         __slots__ = ('_name',)
9106
9107         def __init__(self, name):
9108                 proxy.objectproxy.ObjectProxy.__init__(self)
9109                 object.__setattr__(self, '_name', name)
9110
9111         def _get_target(self):
9112                 global mtimedb, mtimedbfile, _mtimedb_initialized
9113                 if not _mtimedb_initialized:
9114                         mtimedbfile = os.path.join(os.path.sep,
9115                                 CACHE_PATH, "mtimedb")
9116                         mtimedb = MtimeDB(mtimedbfile)
9117                         _mtimedb_initialized = True
9118                 name = object.__getattribute__(self, '_name')
9119                 return globals()[name]
9120
9121 _legacy_global_var_names = ("archlist", "db", "features",
9122         "groups", "mtimedb", "mtimedbfile", "pkglines",
9123         "portdb", "profiledir", "root", "selinux_enabled",
9124         "settings", "thirdpartymirrors", "usedefaults")
9125
9126 def _disable_legacy_globals():
9127         """
9128         This deletes the ObjectProxy instances that are used
9129         for lazy initialization of legacy global variables.
9130         The purpose of deleting them is to prevent new code
9131         from referencing these deprecated variables.
9132         """
9133         global _legacy_global_var_names
9134         for k in _legacy_global_var_names:
9135                 globals().pop(k, None)
9136
9137 # Initialization of legacy globals.  No functions/classes below this point
9138 # please!  When the above functions and classes become independent of the
9139 # below global variables, it will be possible to make the below code
9140 # conditional on a backward compatibility flag (backward compatibility could
9141 # be disabled via an environment variable, for example).  This will enable new
9142 # code that is aware of this flag to import portage without the unnecessary
9143 # overhead (and other issues!) of initializing the legacy globals.
9144
9145 def init_legacy_globals():
9146         global _globals_initialized
9147         if _globals_initialized:
9148                 return
9149         _globals_initialized = True
9150
9151         global db, settings, root, portdb, selinux_enabled, mtimedbfile, mtimedb, \
9152         archlist, features, groups, pkglines, thirdpartymirrors, usedefaults, \
9153         profiledir, flushmtimedb
9154
9155         # Portage needs to ensure a sane umask for the files it creates.
9156         os.umask(0o22)
9157
9158         kwargs = {}
9159         for k, envvar in (("config_root", "PORTAGE_CONFIGROOT"), ("target_root", "ROOT")):
9160                 kwargs[k] = os.environ.get(envvar, "/")
9161
9162         global _initializing_globals
9163         _initializing_globals = True
9164         db = create_trees(**kwargs)
9165         del _initializing_globals
9166
9167         settings = db["/"]["vartree"].settings
9168
9169         for myroot in db:
9170                 if myroot != "/":
9171                         settings = db[myroot]["vartree"].settings
9172                         break
9173
9174         root = settings["ROOT"]
9175         output._init(config_root=settings['PORTAGE_CONFIGROOT'])
9176
9177         # ========================================================================
9178         # COMPATIBILITY
9179         # These attributes should not be used
9180         # within Portage under any circumstances.
9181         # ========================================================================
9182         archlist    = settings.archlist()
9183         features    = settings.features
9184         groups      = settings["ACCEPT_KEYWORDS"].split()
9185         pkglines    = settings.packages
9186         selinux_enabled   = settings.selinux_enabled()
9187         thirdpartymirrors = settings.thirdpartymirrors()
9188         usedefaults       = settings.use_defs
9189         profiledir  = os.path.join(settings["PORTAGE_CONFIGROOT"], PROFILE_PATH)
9190         if not os.path.isdir(profiledir):
9191                 profiledir = None
9192         def flushmtimedb(record):
9193                 writemsg("portage.flushmtimedb() is DEPRECATED\n")
9194         # ========================================================================
9195         # COMPATIBILITY
9196         # These attributes should not be used
9197         # within Portage under any circumstances.
9198         # ========================================================================
9199
9200 if True:
9201
9202         _mtimedb_initialized = False
9203         mtimedb     = _MtimedbProxy("mtimedb")
9204         mtimedbfile = _MtimedbProxy("mtimedbfile")
9205
9206         _portdb_initialized  = False
9207         portdb = _PortdbProxy()
9208
9209         _globals_initialized = False
9210
9211         for k in ("db", "settings", "root", "selinux_enabled",
9212                 "archlist", "features", "groups",
9213                 "pkglines", "thirdpartymirrors", "usedefaults", "profiledir",
9214                 "flushmtimedb"):
9215                 globals()[k] = _LegacyGlobalProxy(k)
9216
9217 # Clear the cache
9218 dircache={}
9219
9220 # ============================================================================
9221 # ============================================================================
9222