1 # Copyright 2004-2011 Gentoo Foundation
2 # Distributed under the terms of the GNU General Public License v2
# Public API of this module, kept in rough alphabetical order.
__all__ = ['apply_permissions', 'apply_recursive_permissions',
	'apply_secpass_permissions', 'apply_stat_permissions', 'atomic_ofstream',
	'cmp_sort_key', 'ConfigProtect', 'dump_traceback', 'ensure_dirs',
	'find_updated_config_files', 'getconfig', 'getlibpaths', 'grabdict',
	'grabdict_package', 'grabfile', 'grabfile_package', 'grablines',
	'initialize_logger', 'LazyItemsDict', 'map_dictlist_vals',
	'new_protect_filename', 'normalize_path', 'pickle_read', 'stack_dictlist',
	'stack_dicts', 'stack_lists', 'unique_array', 'unique_everseen', 'varexpand',
	'write_atomic', 'writedict', 'writemsg', 'writemsg_level', 'writemsg_stdout']
14 from copy import deepcopy
18 from itertools import filterfalse
20 from itertools import ifilterfalse as filterfalse
31 portage.proxy.lazyimport.lazyimport(globals(),
34 'portage.util.listdir:_ignorecvs_dirs'
37 from portage import os
38 from portage import subprocess_getstatusoutput
39 from portage import _encodings
40 from portage import _os_merge
41 from portage import _unicode_encode
42 from portage import _unicode_decode
43 from portage.exception import InvalidAtom, PortageException, FileNotFound, \
44 OperationNotPermitted, PermissionDenied, ReadOnlyFileSystem
45 from portage.localization import _
46 from portage.proxy.objectproxy import ObjectProxy
47 from portage.cache.mappings import UserDict
def initialize_logger(level=logging.WARN):
	"""Sets up basic logging of portage activities
	level: the level to emit messages at ('info', 'debug', 'warning' ...)

	@param level: the logging level to configure the root logger with
	@return: None
	"""
	# Bug fix: 'level' was previously ignored -- basicConfig hardcoded
	# logging.WARN regardless of the caller's argument. Honor it instead.
	logging.basicConfig(level=level, format='[%(levelname)-4s] %(message)s')
def writemsg(mystr,noiselevel=0,fd=None):
	"""Prints out warning and debug messages based on the noiselimit setting"""
	# Suppress the message entirely when its noiselevel exceeds the
	# module-global noiselimit (higher noiselevel == less important).
	if noiselevel <= noiselimit:
		# avoid potential UnicodeEncodeError
		if isinstance(fd, io.StringIO):
			# A StringIO sink needs text, so decode byte input.
			mystr = _unicode_decode(mystr,
				encoding=_encodings['content'], errors='replace')
			# NOTE(review): an else-branch for non-StringIO sinks appears to
			# be elided between these two calls -- confirm against upstream.
			mystr = _unicode_encode(mystr,
				encoding=_encodings['stdio'], errors='backslashreplace')
		# On python3, encoded bytes presumably need the stream's buffer
		# attribute -- the continuation of this branch is not visible here.
		if sys.hexversion >= 0x3000000 and fd in (sys.stdout, sys.stderr):
def writemsg_stdout(mystr,noiselevel=0):
	"""Emit a message on stdout, honoring the global noise limit.

	Thin convenience wrapper: identical to writemsg() except that the
	output stream is fixed to sys.stdout.
	"""
	writemsg(mystr, fd=sys.stdout, noiselevel=noiselevel)
def writemsg_level(msg, level=0, noiselevel=0):
	Show a message for the given level as defined by the logging module
	(default is 0). When level >= logging.WARNING then the message is
	sent to stderr, otherwise it is sent to stdout. The noiselevel is
	passed directly to writemsg().

	@param msg: a message string, including newline if appropriate

	@param level: a numeric logging level (see the logging module)

	@param noiselevel: passed directly to writemsg
	# Select the output stream by severity; fd is presumably bound to
	# sys.stderr / sys.stdout in lines elided here.
	if level >= logging.WARNING:
	writemsg(msg, noiselevel=noiselevel, fd=fd)
def normalize_path(mypath):
	os.path.normpath("//foo") returns "//foo" instead of "/foo"
	We dislike this behavior so we create our own normpath func
	# Pick a path separator of the same type (bytes vs str) as the input.
	if sys.hexversion >= 0x3000000 and isinstance(mypath, bytes):
		path_sep = os.path.sep.encode()
		path_sep = os.path.sep
	if mypath.startswith(path_sep):
		# posixpath.normpath collapses 3 or more leading slashes to just 1.
		# Prefixing two extra separators forces the POSIX "//" special case
		# to collapse down to a single leading slash.
		return os.path.normpath(2*path_sep + mypath)
	return os.path.normpath(mypath)
def grabfile(myfilename, compat_level=0, recursive=0, remember_source_file=False):
	"""This function grabs the lines in a file, normalizes whitespace and returns lines in a list; if a line
	begins with a #, it is ignored, as are empty lines"""
	# Always request (line, source_file) pairs; the pair is unpacked below
	# and the source file is only kept when remember_source_file is set.
	mylines=grablines(myfilename, recursive, remember_source_file=True)
	for x, source_file in mylines:
		#the split/join thing removes leading and trailing whitespace, and converts any whitespace in the line
		# Skip comment lines (leading '#') and empty lines.
		if x and x[0] != "#":
				mylinetemp.append(item)
			myline = " ".join(myline)
			# Check if we have a compat-level string. BC-integration data.
			# '##COMPAT==>N<==' 'some string attached to it'
			mylinetest = myline.split("<==",1)
			if len(mylinetest) == 2:
				myline_potential = mylinetest[1]
				mylinetest = mylinetest[0].split("##COMPAT==>")
				if len(mylinetest) == 2:
					if compat_level >= int(mylinetest[1]):
						# It's a compat line, and the key matches.
						newlines.append(myline_potential)
			if remember_source_file:
				newlines.append((myline, source_file))
				newlines.append(myline)
def map_dictlist_vals(func,myDict):
	"""Performs a function on each value of each key in a dictlist.
	Returns a new dictlist."""
	# Apply func element-wise to each key's value list, building new_dl
	# (the surrounding loop over keys is elided in this view).
		new_dl[key] = [func(x) for x in myDict[key]]
def stack_dictlist(original_dicts, incremental=0, incrementals=[], ignore_none=0):
	Stacks an array of dict-types into one array. Optionally merging or
	overwriting matching key/value pairs for the dict[key]->list.
	Returns a single dict. Higher index in lists is preferenced.

	>>> from portage.util import stack_dictlist
	>>> print stack_dictlist( [{'a':'b'},{'x':'y'}])
	>>> {'a':'b','x':'y'}
	>>> print stack_dictlist( [{'a':'b'},{'a':'c'}], incremental = True )

	>>> a = {'KEYWORDS':['x86','alpha']}
	>>> b = {'KEYWORDS':['-x86']}
	>>> print stack_dictlist( [a,b] )
	>>> { 'KEYWORDS':['x86','alpha','-x86']}
	>>> print stack_dictlist( [a,b], incremental=True)
	>>> { 'KEYWORDS':['alpha'] }
	>>> print stack_dictlist( [a,b], incrementals=['KEYWORDS'])
	>>> { 'KEYWORDS':['alpha'] }

	@param original_dicts a list of (dictionary objects or None)

	@param incremental True or false depending on whether new keys should overwrite
	keys which already exist.

	@param incrementals A list of items that should be incremental (-foo removes

	@param ignore_none Appears to be ignored, but probably was used long long ago.
	# NOTE(review): 'incrementals=[]' is a mutable default argument; it is
	# apparently only read here, but a tuple default would be safer.
	for mydict in original_dicts:
			if not y in final_dict:
			for thing in mydict[y]:
				# Incremental mode: a '-foo' token removes a prior 'foo'.
				if incremental or y in incrementals:
					elif thing[:1] == '-':
							final_dict[y].remove(thing[1:])
					if thing not in final_dict[y]:
						final_dict[y].append(thing)
			# Drop keys whose value lists ended up empty.
			if y in final_dict and not final_dict[y]:
def stack_dicts(dicts, incremental=0, incrementals=[], ignore_none=0):
	"""Stacks an array of dict-types into one array. Optionally merging or
	overwriting matching key/value pairs for the dict[key]->string.
	Returns a single dict."""
	# NOTE(review): 'incrementals=[]' is a mutable default argument; only
	# read here, but consider a tuple default upstream.
		for k, v in mydict.items():
			# Incremental keys accumulate space-separated values instead
			# of being overwritten by higher-index dicts.
			if k in final_dict and (incremental or (k in incrementals)):
				final_dict[k] += " " + v
def append_repo(atom_list, repo_name, remember_source_file=False):
	Takes a list of valid atoms without repo spec and appends ::repo_name.
	if remember_source_file:
		# Preserve the (atom, source_file) pairing while rewriting each atom.
		return [(Atom(atom + "::" + repo_name, allow_wildcard=True, allow_repo=True), source) \
			for atom, source in atom_list]
		return [Atom(atom + "::" + repo_name, allow_wildcard=True, allow_repo=True) \
			for atom in atom_list]
def stack_lists(lists, incremental=1, remember_source_file=False,
	warn_for_unmatched_removal=False, strict_warn_for_unmatched_removal=False, ignore_repo=False):
	"""Stacks an array of list-types into one array. Optionally removing
	distinct values using '-value' notation. Higher index is preferenced.

	all elements must be hashable."""
	# Removal tokens that actually matched something.
	matched_removals = set()
	# Unmatched removal tokens, grouped by the file they came from.
	unmatched_removals = {}
	for sub_list in lists:
		for token in sub_list:
			if remember_source_file:
				token, source_file = token
			elif token[:1] == '-':
				if ignore_repo and not "::" in token:
					#Let -cat/pkg remove cat/pkg::repo.
					token_slice = token[1:]
					for atom in new_list:
						atom_without_repo = atom
						if atom.repo is not None:
							# Atom.without_repo instantiates a new Atom,
							# which is unnecessary here, so use string
							# replacement instead.
							atom_without_repo = \
								atom.replace("::" + atom.repo, "", 1)
						if atom_without_repo == token_slice:
							to_be_removed.append(atom)
					for atom in to_be_removed:
					new_list.pop(token[1:])
					(strict_warn_for_unmatched_removal or \
					token_key not in matched_removals):
					unmatched_removals.setdefault(source_file, set()).add(token)
					matched_removals.add(token_key)
				new_list[token] = source_file
				new_list[token] = source_file
	if warn_for_unmatched_removal:
		for source_file, tokens in unmatched_removals.items():
			# With many unmatched tokens, show only three and a count.
			selected = [tokens.pop(), tokens.pop(), tokens.pop()]
			writemsg(_("--- Unmatched removal atoms in %s: %s and %s more\n") % \
				(source_file, ", ".join(selected), len(tokens)),
			writemsg(_("--- Unmatched removal atom(s) in %s: %s\n") % (source_file, ", ".join(tokens)),
	if remember_source_file:
		# new_list maps token -> source_file in this mode.
		return list(new_list.items())
	return list(new_list)
def grabdict(myfilename, juststrings=0, empty=0, recursive=0, incremental=1):
	This function grabs the lines in a file, normalizes whitespace and returns lines in a dictionary

	@param myfilename: file to process
	@type myfilename: string (path)
	@param juststrings: only return strings
	@type juststrings: Boolean (integer)
	@param empty: Ignore certain lines
	@type empty: Boolean (integer)
	@param recursive: Recursively grab ( support for /etc/portage/package.keywords/* and friends )
	@type recursive: Boolean (integer)
	@param incremental: Append to the return list, don't overwrite
	@type incremental: Boolean (integer)

	1. Returns the lines in a file in a dictionary, for example:
	'sys-apps/portage x86 amd64 ppc'

	{ "sys-apps/portage" : [ 'x86', 'amd64', 'ppc' ]
	the line syntax is key : [list of values]
	for x in grablines(myfilename, recursive):
		#the split/join thing removes leading and trailing whitespace, and converts any whitespace in the line
			mylinetemp.append(item)
		# Skip lines without values (or without even a key) unless 'empty'
		# asks for them to be kept.
		if len(myline) < 2 and empty == 0:
		if len(myline) < 1 and empty == 1:
			newdict.setdefault(myline[0], []).extend(myline[1:])
			newdict[myline[0]] = myline[1:]
	# juststrings mode: flatten each value list to one space-joined string.
	for k, v in newdict.items():
		newdict[k] = " ".join(v)
def read_corresponding_eapi_file(filename):
	Read the 'eapi' file from the directory 'filename' is in.
	Returns "0" if the file is not present or invalid.
	eapi_file = os.path.join(os.path.dirname(filename), "eapi")
	# NOTE(review): no 'with' or try/finally around this handle is visible
	# here -- verify the file is closed on all paths.
	f = open(eapi_file, "r")
	lines = f.readlines()
	eapi = lines[0].rstrip("\n")
	# A valid eapi file must contain exactly one line.
	writemsg(_("--- Invalid 'eapi' file (doesn't contain exactly one line): %s\n") % (eapi_file),
def grabdict_package(myfilename, juststrings=0, recursive=0, allow_wildcard=False, allow_repo=False,
	verify_eapi=False, eapi=None):
	""" Does the same thing as grabdict except it validates keys
	with isvalidatom()"""
	pkgs=grabdict(myfilename, juststrings, empty=1, recursive=recursive)
	# EAPI may be taken from a sibling 'eapi' file when not given explicitly.
	if verify_eapi and eapi is None:
		eapi = read_corresponding_eapi_file(myfilename)
	# We need to call keys() here in order to avoid the possibility of
	# "RuntimeError: dictionary changed size during iteration"
	# when an invalid atom is deleted.
	# NOTE(review): the comment above mentions keys() but the visible loop
	# iterates items() directly -- confirm deletion during iteration is safe.
	for k, v in pkgs.items():
			k = Atom(k, allow_wildcard=allow_wildcard, allow_repo=allow_repo, eapi=eapi)
		except InvalidAtom as e:
			writemsg(_("--- Invalid atom in %s: %s\n") % (myfilename, e),
def grabfile_package(myfilename, compatlevel=0, recursive=0, allow_wildcard=False, allow_repo=False,
	remember_source_file=False, verify_eapi=False, eapi=None):
	# Like grabfile(), but each line is validated as a package atom.
	pkgs=grabfile(myfilename, compatlevel, recursive=recursive, remember_source_file=True)
	if verify_eapi and eapi is None:
		eapi = read_corresponding_eapi_file(myfilename)
	mybasename = os.path.basename(myfilename)
	for pkg, source_file in pkgs:
		# for packages and package.mask files
		# A leading '*' is a special system-set marker in 'packages' files.
		if pkg[:1] == '*' and mybasename == 'packages':
			pkg = Atom(pkg, allow_wildcard=allow_wildcard, allow_repo=allow_repo, eapi=eapi)
		except InvalidAtom as e:
			writemsg(_("--- Invalid atom in %s: %s\n") % (myfilename, e),
		if pkg_orig == str(pkg):
			# normal atom, so return as Atom instance
			if remember_source_file:
				atoms.append((pkg, source_file))
			# atom has special prefix, so return as string
			if remember_source_file:
				atoms.append((pkg_orig, source_file))
				atoms.append(pkg_orig)
def grablines(myfilename, recursive=0, remember_source_file=False):
	# Directory mode: recurse into each entry, skipping VCS dirs,
	# hidden files, and backup files.
	if recursive and os.path.isdir(myfilename):
		if os.path.basename(myfilename) in _ignorecvs_dirs:
		dirlist = os.listdir(myfilename)
			if not f.startswith(".") and not f.endswith("~"):
				mylines.extend(grablines(
					os.path.join(myfilename, f), recursive, remember_source_file))
		myfile = io.open(_unicode_encode(myfilename,
			encoding=_encodings['fs'], errors='strict'),
			mode='r', encoding=_encodings['content'], errors='replace')
		if remember_source_file:
			# Pair every line with the file it came from.
			mylines = [(line, myfilename) for line in myfile.readlines()]
			mylines = myfile.readlines()
		if e.errno == PermissionDenied.errno:
			raise PermissionDenied(myfilename)
def writedict(mydict,myfilename,writekey=True):
	"""Writes out a dict to a file; writekey=0 mode doesn't write out
	the key and assumes all values are strings, not lists."""
		# writekey=0 branch: values only, one per line.
		for v in mydict.values():
			lines.append(v + "\n")
		# writekey branch: 'key value1 value2 ...' per line.
		for k, v in mydict.items():
			lines.append("%s %s\n" % (k, " ".join(v)))
	write_atomic(myfilename, "".join(lines))
	This is equivalent to shlex.split, but if the current interpreter is
	python2, it temporarily encodes unicode strings to bytes since python2's
	shlex.split() doesn't handle unicode strings.
	convert_to_bytes = sys.hexversion < 0x3000000 and not isinstance(s, bytes)
	# Round-trip through bytes only under python2 (see docstring above).
		s = _unicode_encode(s)
	rval = shlex.split(s)
	# Decode each token back to unicode when the input was encoded above.
		rval = [_unicode_decode(x) for x in rval]
class _tolerant_shlex(shlex.shlex):
	# A shlex subclass whose sourcehook() reports failures of the 'source'
	# command instead of propagating them, so parsing can continue.
	def sourcehook(self, newfile):
			return shlex.shlex.sourcehook(self, newfile)
		except EnvironmentError as e:
			# Warn and substitute an empty stream for the unreadable file.
			writemsg(_("!!! Parse error in '%s': source command failed: %s\n") % \
				(self.infile, str(e)), noiselevel=-1)
			return (newfile, io.StringIO())
528 _invalid_var_name_re = re.compile(r'^\d|\W')
def getconfig(mycfg, tolerant=0, allow_sourcing=False, expand=True):
	# Parse a bash-style KEY=value config file with shlex, optionally
	# expanding variable references via varexpand().
	if isinstance(expand, dict):
		# Some existing variable definitions have been
		# passed in, for use in substitutions.
	# NOTE: shlex doesn't support unicode objects with Python 2
	# (produces spurious \0 characters).
	if sys.hexversion < 0x3000000:
		f = open(_unicode_encode(mycfg,
			encoding=_encodings['fs'], errors='strict'), 'rb')
		f = open(_unicode_encode(mycfg,
			encoding=_encodings['fs'], errors='strict'), mode='r',
			encoding=_encodings['content'], errors='replace')
		if e.errno == PermissionDenied.errno:
			raise PermissionDenied(mycfg)
		if e.errno != errno.ENOENT:
			writemsg("open('%s', 'r'): %s\n" % (mycfg, e), noiselevel=-1)
			if e.errno not in (errno.EISDIR,):
		# Workaround for avoiding a silent error in shlex that is
		# triggered by a source statement at the end of the file
		# without a trailing newline after the source statement.
		if content and content[-1] != '\n':
		# Warn about dos-style line endings since that prevents
		# people from being able to source them with bash.
			writemsg(("!!! " + _("Please use dos2unix to convert line endings " + \
				"in config file: '%s'") + "\n") % mycfg, noiselevel=-1)
			# Tolerant mode swaps in the error-reporting shlex subclass.
			shlex_class = _tolerant_shlex
			shlex_class = shlex.shlex
		# The default shlex.sourcehook() implementation
		# only joins relative paths when the infile
		# attribute is properly set.
		lex = shlex_class(content, infile=mycfg, posix=True)
		lex.wordchars = string.digits + string.ascii_letters + \
			"~!@#$%*_\:;?,./-+{}"
			# Token loop: expect KEY '=' VALUE triples until EOF.
			key = lex.get_token()
				#unexpected end of file
				#lex.error_leader(self.filename,lex.lineno)
				writemsg(_("!!! Unexpected end of config file: variable %s\n") % key,
				raise Exception(_("ParseError: Unexpected EOF: %s: on/before line %s") % (mycfg, lex.lineno))
				#lex.error_leader(self.filename,lex.lineno)
				raise Exception(_("ParseError: Invalid token "
					"'%s' (not '='): %s: line %s") % \
					(equ, mycfg, lex.lineno))
				#unexpected end of file
				#lex.error_leader(self.filename,lex.lineno)
				writemsg(_("!!! Unexpected end of config file: variable %s\n") % key,
				raise portage.exception.CorruptionError(_("ParseError: Unexpected EOF: %s: line %s") % (mycfg, lex.lineno))
			key = _unicode_decode(key)
			val = _unicode_decode(val)
			# Reject keys that are not legal shell variable names.
			if _invalid_var_name_re.search(key) is not None:
					"ParseError: Invalid variable name '%s': line %s") % \
					(key, lex.lineno - 1))
				writemsg(_("!!! Invalid variable name '%s': line %s in %s\n") \
					% (key, lex.lineno - 1, mycfg), noiselevel=-1)
			# Record the (optionally expanded) value and make it available
			# for expansion of subsequent assignments.
			mykeys[key] = varexpand(val, expand_map)
			expand_map[key] = mykeys[key]
	except SystemExit as e:
	except Exception as e:
		raise portage.exception.ParseError(str(e)+" in "+mycfg)
648 #cache expansions of constant strings
def varexpand(mystring, mydict=None):
	# Fast path: reuse a previously computed expansion from the module
	# level cexpand cache (keyed with a leading-space sentinel).
	newstring = cexpand.get(" "+mystring, None)
	if newstring is not None:
	new variable expansion code. Preserves quotes, handles \n, etc.
	This code is used by the configfile code, as well as others (parser)
	This would be a good bunch of code to port to C.
	mystring=" "+mystring
	#in single, double quotes
	# Character-by-character scan; 'pos' is the cursor, 'newstring'
	# accumulates the output.
	while (pos<len(mystring)):
		if (mystring[pos]=="'") and (mystring[pos-1]!="\\"):
				newstring=newstring+"'"
				newstring += "'" # Quote removal is handled by shlex.
		elif (mystring[pos]=='"') and (mystring[pos-1]!="\\"):
				newstring=newstring+'"'
				newstring += '"' # Quote removal is handled by shlex.
			if (mystring[pos]=="\n"):
				#convert newlines to spaces
				newstring=newstring+" "
			elif (mystring[pos]=="\\"):
				# For backslash expansion, this function used to behave like
				# echo -e, but that's not needed for our purposes. We want to
				# behave like bash does when expanding a variable assignment
				# in a sourced file, in which case it performs backslash
				# removal for \\ and \$ but nothing more. It also removes
				# escaped newline characters. Note that we don't handle
				# escaped quotes here, since getconfig() uses shlex
				# to handle that earlier.
				if (pos+1>=len(mystring)):
					newstring=newstring+mystring[pos]
					a = mystring[pos + 1]
						newstring = newstring + a
						newstring = newstring + mystring[pos-2:pos]
			elif (mystring[pos]=="$") and (mystring[pos-1]!="\\"):
				# Variable reference: ${NAME} or bare $NAME.
				if mystring[pos]=="{":
					validchars=string.ascii_letters+string.digits+"_"
					while mystring[pos] in validchars:
						if (pos+1)>=len(mystring):
					myvarname=mystring[myvstart:pos]
					if mystring[pos]!="}":
					if len(myvarname)==0:
					# Substitute from mydict; unknown names presumably
					# expand to nothing in elided lines -- confirm upstream.
					if myvarname in mydict:
						newstring=newstring+mydict[myvarname]
						newstring=newstring+mystring[pos]
			newstring=newstring+mystring[pos]
	# Cache the result, stripping the leading-space sentinel.
	cexpand[mystring]=newstring[1:]
755 # broken and removed, but can still be imported
def pickle_read(filename,default=None,debug=0):
	# Best-effort unpickle: returns 'default' when the file is unreadable
	# or unpicklable (error paths are partially elided in this view).
	if not os.access(filename, os.R_OK):
		writemsg(_("pickle_read(): File not readable. '")+filename+"'\n",1)
		myf = open(_unicode_encode(filename,
			encoding=_encodings['fs'], errors='strict'), 'rb')
		# SECURITY NOTE: pickle.load executes arbitrary code if the file
		# is attacker-controlled; only use on trusted local state files.
		mypickle = pickle.Unpickler(myf)
		data = mypickle.load()
		writemsg(_("pickle_read(): Loaded pickle. '")+filename+"'\n",1)
	except SystemExit as e:
	except Exception as e:
		writemsg(_("!!! Failed to load pickle: ")+str(e)+"\n",1)
def dump_traceback(msg, noiselevel=1):
	# Write 'msg' plus a formatted traceback via writemsg(). Uses the
	# active exception's traceback when one exists, otherwise the
	# current call stack (selection logic partially elided here).
	info = sys.exc_info()
	stack = traceback.extract_stack()[:-1]
		stack = traceback.extract_tb(info[2])
	writemsg("\n====================================\n", noiselevel=noiselevel)
	writemsg("%s\n\n" % msg, noiselevel=noiselevel)
	for line in traceback.format_list(stack):
		writemsg(line, noiselevel=noiselevel)
	writemsg(error+"\n", noiselevel=noiselevel)
	writemsg("====================================\n\n", noiselevel=noiselevel)
class cmp_sort_key(object):
	In python-3.0 the list.sort() method no longer has a "cmp" keyword
	argument. This class acts as an adapter which converts a cmp function
	into one that's suitable for use as the "key" keyword argument to
	list.sort(), making it easier to port code for python-3.0 compatibility.
	It works by generating key objects which use the given cmp function to
	implement their __lt__ method.

	# NOTE(review): functools.cmp_to_key provides equivalent functionality
	# in the standard library (python >= 2.7/3.2).
	__slots__ = ("_cmp_func",)

	def __init__(self, cmp_func):
		@type cmp_func: callable which takes 2 positional arguments
		@param cmp_func: A cmp function.
		self._cmp_func = cmp_func

	def __call__(self, lhs):
		# Wrap the element in a _cmp_key so sort() can compare via __lt__.
		return self._cmp_key(self._cmp_func, lhs)

	class _cmp_key(object):
		__slots__ = ("_cmp_func", "_obj")

		def __init__(self, cmp_func, obj):
			self._cmp_func = cmp_func

		def __lt__(self, other):
			# Only keys produced from the same adapter type are comparable.
			if other.__class__ is not self.__class__:
				raise TypeError("Expected type %s, got %s" % \
					(self.__class__, other.__class__))
			return self._cmp_func(self._obj, other._obj) < 0
	"""lifted from python cookbook, credit: Tim Peters
	Return a list of the elements in s in arbitrary order, sans duplicates"""
	# assume all elements are hashable, if so, it's linear
	# so much for linear. abuse sort.
	# Sorted-dedup fallback: copy forward only elements that differ from
	# the previous kept element (surrounding loop elided in this view).
			t[lasti] = last = t[i]
	# blah. back to original portage.unique_array
def unique_everseen(iterable, key=None):
	List unique elements, preserving order. Remember all elements ever seen.
	Taken from itertools documentation.
	# unique_everseen('AAAABBBCCDAABBB') --> A B C D
	# unique_everseen('ABBCcAD', str.lower) --> A B C D
	# Fast path (key is None): filterfalse skips already-seen elements.
	for element in filterfalse(seen.__contains__, iterable):
		# key-function path: membership is tracked on key(element) instead.
		for element in iterable:
def apply_permissions(filename, uid=-1, gid=-1, mode=-1, mask=-1,
	stat_cached=None, follow_links=True):
	"""Apply user, group, and mode bits to a file if the existing bits do not
	already match. The default behavior is to force an exact match of mode
	bits. When mask=0 is specified, mode bits on the target file are allowed
	to be a superset of the mode argument (via logical OR). When mask>0, the
	mode bits that the target file is allowed to have are restricted via

	Returns True if the permissions were modified and False otherwise."""
	# Stat the file once (lstat when not following symlinks), translating
	# OSError into portage's exception types.
	if stat_cached is None:
			stat_cached = os.stat(filename)
			stat_cached = os.lstat(filename)
		except OSError as oe:
			func_call = "stat('%s')" % filename
			if oe.errno == errno.EPERM:
				raise OperationNotPermitted(func_call)
			elif oe.errno == errno.EACCES:
				raise PermissionDenied(func_call)
			elif oe.errno == errno.ENOENT:
				raise FileNotFound(filename)
	# Ownership: only chown when the requested uid/gid differ from the
	# current ones (-1 means "leave unchanged").
	if (uid != -1 and uid != stat_cached.st_uid) or \
		(gid != -1 and gid != stat_cached.st_gid):
			os.chown(filename, uid, gid)
			portage.data.lchown(filename, uid, gid)
		except OSError as oe:
			func_call = "chown('%s', %i, %i)" % (filename, uid, gid)
			if oe.errno == errno.EPERM:
				raise OperationNotPermitted(func_call)
			elif oe.errno == errno.EACCES:
				raise PermissionDenied(func_call)
			elif oe.errno == errno.EROFS:
				raise ReadOnlyFileSystem(func_call)
			elif oe.errno == errno.ENOENT:
				raise FileNotFound(filename)
	st_mode = stat_cached.st_mode & 0o7777 # protect from unwanted bits
		mode = 0 # Don't add any mode bits when mode is unspecified.
	# mask >= 0 branch: required bits must be present and masked bits
	# must be absent; compute the combined target mode.
	if (mode & st_mode != mode) or \
		((mask ^ st_mode) & st_mode != st_mode):
		new_mode = mode | st_mode
		new_mode = (mask ^ new_mode) & new_mode
		mode = mode & 0o7777 # protect from unwanted bits
	# The chown system call may clear S_ISUID and S_ISGID
	# bits, so those bits are restored if necessary.
	if modified and new_mode == -1 and \
		(st_mode & stat.S_ISUID or st_mode & stat.S_ISGID):
			new_mode = mode | st_mode
			new_mode = (mask ^ new_mode) & new_mode
			if not (new_mode & stat.S_ISUID or new_mode & stat.S_ISGID):
	if not follow_links and stat.S_ISLNK(stat_cached.st_mode):
		# Mode doesn't matter for symlinks.
			os.chmod(filename, new_mode)
		except OSError as oe:
			func_call = "chmod('%s', %s)" % (filename, oct(new_mode))
			if oe.errno == errno.EPERM:
				raise OperationNotPermitted(func_call)
			elif oe.errno == errno.EACCES:
				raise PermissionDenied(func_call)
			elif oe.errno == errno.EROFS:
				raise ReadOnlyFileSystem(func_call)
			elif oe.errno == errno.ENOENT:
				raise FileNotFound(filename)
def apply_stat_permissions(filename, newstat, **kwargs):
	"""Apply the ownership and mode bits recorded in a stat result.

	Extracts st_uid, st_gid and st_mode from newstat and forwards them,
	along with any extra keyword arguments, to apply_secpass_permissions.
	"""
	owner = newstat.st_uid
	group = newstat.st_gid
	return apply_secpass_permissions(filename, uid=owner, gid=group,
		mode=newstat.st_mode, **kwargs)
def apply_recursive_permissions(top, uid=-1, gid=-1,
	dirmode=-1, dirmask=-1, filemode=-1, filemask=-1, onerror=None):
	"""A wrapper around apply_secpass_permissions that applies permissions
	recursively. If optional argument onerror is specified, it should be a
	function; it will be called with one argument, a PortageException instance.
	Returns True if all permissions are applied and False if some are left
	# Avoid issues with circular symbolic links, as in bug #339670.
	follow_links = False
		# Default behavior is to dump errors to stderr so they won't
		# go unnoticed. Callers can pass in a quiet instance.
			if isinstance(e, OperationNotPermitted):
				writemsg(_("Operation Not Permitted: %s\n") % str(e),
			elif isinstance(e, FileNotFound):
				writemsg(_("File Not Found: '%s'\n") % str(e), noiselevel=-1)
	# Walk the tree, applying dir permissions to each directory and file
	# permissions to each regular file; errors go through onerror.
	for dirpath, dirnames, filenames in os.walk(top):
			applied = apply_secpass_permissions(dirpath,
				uid=uid, gid=gid, mode=dirmode, mask=dirmask,
				follow_links=follow_links)
		except PortageException as e:
		for name in filenames:
				applied = apply_secpass_permissions(os.path.join(dirpath, name),
					uid=uid, gid=gid, mode=filemode, mask=filemask,
					follow_links=follow_links)
			except PortageException as e:
				# Ignore InvalidLocation exceptions such as FileNotFound
				# and DirectoryNotFound since sometimes things disappear,
				# like when adjusting permissions on DISTCC_DIR.
				if not isinstance(e, portage.exception.InvalidLocation):
def apply_secpass_permissions(filename, uid=-1, gid=-1, mode=-1, mask=-1,
	stat_cached=None, follow_links=True):
	"""A wrapper around apply_permissions that uses secpass and simple
	logic to apply as much of the permissions as possible without
	generating an obviously avoidable permission exception. Despite
	attempts to avoid an exception, it's possible that one will be raised
	anyway, so be prepared.
	Returns True if all permissions are applied and False if some are left
	if stat_cached is None:
			stat_cached = os.stat(filename)
			stat_cached = os.lstat(filename)
		except OSError as oe:
			func_call = "stat('%s')" % filename
			if oe.errno == errno.EPERM:
				raise OperationNotPermitted(func_call)
			elif oe.errno == errno.EACCES:
				raise PermissionDenied(func_call)
			elif oe.errno == errno.ENOENT:
				raise FileNotFound(filename)
	# Without full privileges (secpass < 2) some ownership changes are
	# impossible; skip those that would fail (details partially elided).
	if portage.data.secpass < 2:
		uid != stat_cached.st_uid:
		gid != stat_cached.st_gid and \
		gid not in os.getgroups():
	apply_permissions(filename, uid=uid, gid=gid, mode=mode, mask=mask,
		stat_cached=stat_cached, follow_links=follow_links)
class atomic_ofstream(ObjectProxy):
	"""Write a file atomically via os.rename(). Atomic replacement prevents
	interprocess interference and prevents corruption of the target
	file when the write is interrupted (for example, when an 'out of space'

	def __init__(self, filename, mode='w', follow_links=True, **kargs):
		"""Opens a temporary filename.pid in the same directory as filename."""
		ObjectProxy.__init__(self)
		# _aborted is flipped by abort(); close() consults it to decide
		# between rename-into-place and cleanup.
		object.__setattr__(self, '_aborted', False)
			kargs.setdefault('encoding', _encodings['content'])
			kargs.setdefault('errors', 'backslashreplace')
			# Resolve symlinks so the temp file lands next to the real target.
			canonical_path = os.path.realpath(filename)
			object.__setattr__(self, '_real_name', canonical_path)
			tmp_name = "%s.%i" % (canonical_path, os.getpid())
				object.__setattr__(self, '_file',
					open_func(_unicode_encode(tmp_name,
						encoding=_encodings['fs'], errors='strict'),
						mode=mode, **kargs))
		except IOError as e:
			if canonical_path == filename:
			# Ignore this error, since it's irrelevant
			# and the below open call will produce a
			# new error if necessary.
			# Fallback: retry with the unresolved filename.
			object.__setattr__(self, '_real_name', filename)
			tmp_name = "%s.%i" % (filename, os.getpid())
			object.__setattr__(self, '_file',
				open_func(_unicode_encode(tmp_name,
					encoding=_encodings['fs'], errors='strict'),
					mode=mode, **kargs))

	def _get_target(self):
		# The proxied object is the underlying temp-file handle.
		return object.__getattribute__(self, '_file')

	if sys.hexversion >= 0x3000000:

		def __getattribute__(self, attr):
			# Intercept lifecycle methods; delegate everything else
			# to the wrapped file object.
			if attr in ('close', 'abort', '__del__'):
				return object.__getattribute__(self, attr)
			return getattr(object.__getattribute__(self, '_file'), attr)

		# For TextIOWrapper, automatically coerce write calls to
		# unicode, in order to avoid TypeError when writing raw
		# bytes with python2.

		def __getattribute__(self, attr):
			if attr in ('close', 'abort', 'write', '__del__'):
				return object.__getattribute__(self, attr)
			return getattr(object.__getattribute__(self, '_file'), attr)

			f = object.__getattribute__(self, '_file')
			if isinstance(f, io.TextIOWrapper):
				s = _unicode_decode(s)

		"""Closes the temporary file, copies permissions (if possible),
		and performs the atomic replacement via os.rename(). If the abort()
		method has been called, then the temp file is closed and removed."""
		f = object.__getattribute__(self, '_file')
		real_name = object.__getattribute__(self, '_real_name')
			if not object.__getattribute__(self, '_aborted'):
					# Mirror the existing target's permissions onto the
					# temp file before renaming it into place.
					apply_stat_permissions(f.name, os.stat(real_name))
				except OperationNotPermitted:
				except FileNotFound:
				except OSError as oe: # from the above os.stat call
					if oe.errno in (errno.ENOENT, errno.EPERM):
				os.rename(f.name, real_name)
			# Make sure we cleanup the temp file
			# even if an exception is raised.
			except OSError as oe:

		"""If an error occurs while writing the file, the user should
		call this method in order to leave the target file unchanged.
		This will call close() automatically."""
		if not object.__getattribute__(self, '_aborted'):
			object.__setattr__(self, '_aborted', True)

		"""If the user does not explicitely call close(), it is
		assumed that an error has occurred, so we abort()."""
			f = object.__getattribute__(self, '_file')
		except AttributeError:
		# ensure destructor from the base class is called
		base_destructor = getattr(ObjectProxy, '__del__', None)
		if base_destructor is not None:
			base_destructor(self)
def write_atomic(file_path, content, **kwargs):
	# Convenience wrapper: write 'content' to file_path through an
	# atomic_ofstream, translating OSError/IOError into portage exceptions.
	f = atomic_ofstream(file_path, **kwargs)
	except (IOError, OSError) as e:
		# Abort handling appears in elided lines; map errno to the
		# corresponding portage exception type here.
		func_call = "write_atomic('%s')" % file_path
		if e.errno == errno.EPERM:
			raise OperationNotPermitted(func_call)
		elif e.errno == errno.EACCES:
			raise PermissionDenied(func_call)
		elif e.errno == errno.EROFS:
			raise ReadOnlyFileSystem(func_call)
		elif e.errno == errno.ENOENT:
			raise FileNotFound(file_path)
def ensure_dirs(dir_path, **kwargs):
	"""Create a directory and call apply_permissions.
	Returns True if a directory is created or the permissions needed to be
	modified, and False otherwise.

	This function's handling of EEXIST errors makes it useful for atomic
	directory creation, in which multiple processes may be competing to
	create the same directory.
		os.makedirs(dir_path)
	except OSError as oe:
		func_call = "makedirs('%s')" % dir_path
		# EEXIST from a competing process is expected and benign.
		if oe.errno in (errno.EEXIST,):
			if os.path.isdir(dir_path):
			# NOTE: DragonFly raises EPERM for makedir('/')
			# and that is supposed to be ignored here.
			# Also, sometimes mkdir raises EISDIR on FreeBSD
			# and we want to ignore that too (bug #187518).
		elif oe.errno == errno.EPERM:
			raise OperationNotPermitted(func_call)
		elif oe.errno == errno.EACCES:
			raise PermissionDenied(func_call)
		elif oe.errno == errno.EROFS:
			raise ReadOnlyFileSystem(func_call)
		# Permissions are applied only when kwargs were supplied
		# (condition partially elided in this view).
		perms_modified = apply_permissions(dir_path, **kwargs)
		perms_modified = False
	return created_dir or perms_modified
class LazyItemsDict(UserDict):
	"""A mapping object that behaves like a standard dict except that it allows
	for lazy initialization of values via callable objects. Lazy items can be
	overwritten and deleted just as normal items."""

	__slots__ = ('lazy_items',)

	def __init__(self, *args, **kwargs):
		self.lazy_items = {}
		UserDict.__init__(self, *args, **kwargs)

	def addLazyItem(self, item_key, value_callable, *pargs, **kwargs):
		"""Add a lazy item for the given key. When the item is requested,
		value_callable will be called with *pargs and **kwargs arguments."""
		self.lazy_items[item_key] = \
			self._LazyItem(value_callable, pargs, kwargs, False)
		# make it show up in self.keys(), etc...
		UserDict.__setitem__(self, item_key, None)

	def addLazySingleton(self, item_key, value_callable, *pargs, **kwargs):
		"""This is like addLazyItem except value_callable will only be called
		a maximum of 1 time and the result will be cached for future requests."""
		self.lazy_items[item_key] = \
			self._LazyItem(value_callable, pargs, kwargs, True)
		# make it show up in self.keys(), etc...
		UserDict.__setitem__(self, item_key, None)

	def update(self, *args, **kwargs):
		if len(args) > 1:
			raise TypeError(
				"expected at most 1 positional argument, got " + \
				repr(len(args)))
		if args:
			map_obj = args[0]
		else:
			map_obj = None
		if map_obj is None:
			pass
		elif isinstance(map_obj, LazyItemsDict):
			# Preserve laziness: copy lazy items as lazy items instead of
			# evaluating them through normal __getitem__ access.
			for k in map_obj:
				if k in map_obj.lazy_items:
					UserDict.__setitem__(self, k, None)
				else:
					UserDict.__setitem__(self, k, map_obj[k])
			self.lazy_items.update(map_obj.lazy_items)
		else:
			UserDict.update(self, map_obj)
		if kwargs:
			UserDict.update(self, kwargs)

	def __getitem__(self, item_key):
		if item_key in self.lazy_items:
			lazy_item = self.lazy_items[item_key]
			pargs = lazy_item.pargs
			if pargs is None:
				pargs = ()
			kwargs = lazy_item.kwargs
			if kwargs is None:
				kwargs = {}
			result = lazy_item.func(*pargs, **kwargs)
			if lazy_item.singleton:
				# Cache the computed value; the assignment below also
				# removes the lazy item via __setitem__.
				self[item_key] = result
			return result
		else:
			return UserDict.__getitem__(self, item_key)

	def __setitem__(self, item_key, value):
		# A plain assignment always supersedes a pending lazy item.
		if item_key in self.lazy_items:
			del self.lazy_items[item_key]
		UserDict.__setitem__(self, item_key, value)

	def __delitem__(self, item_key):
		if item_key in self.lazy_items:
			del self.lazy_items[item_key]
		UserDict.__delitem__(self, item_key)

	def clear(self):
		self.lazy_items.clear()
		UserDict.clear(self)

	def copy(self):
		return self.__copy__()

	def __copy__(self):
		return self.__class__(self)

	def __deepcopy__(self, memo=None):
		"""
		This forces evaluation of each contained lazy item, and deepcopy of
		the result. A TypeError is raised if any contained lazy item is not
		a singleton, since it is not necessarily possible for the behavior
		of this type of item to be safely preserved.
		"""
		if memo is None:
			memo = {}
		result = self.__class__()
		memo[id(self)] = result
		for k in self:
			k_copy = deepcopy(k, memo)
			lazy_item = self.lazy_items.get(k)
			if lazy_item is not None:
				if not lazy_item.singleton:
					raise TypeError(_unicode_decode("LazyItemsDict " + \
						"deepcopy is unsafe with lazy items that are " + \
						"not singletons: key=%s value=%s") % (k, lazy_item,))
			UserDict.__setitem__(result, k_copy, deepcopy(self[k], memo))
		return result

	class _LazyItem(object):
		"""Internal record holding the callable and call arguments for one
		lazily-initialized value."""

		__slots__ = ('func', 'pargs', 'kwargs', 'singleton')

		def __init__(self, func, pargs, kwargs, singleton):
			# Normalize empty argument containers to None so copies are
			# cheap and the stored state is canonical.
			if not pargs:
				pargs = None
			if not kwargs:
				kwargs = None
			self.func = func
			self.pargs = pargs
			self.kwargs = kwargs
			self.singleton = singleton

		def __copy__(self):
			return self.__class__(self.func, self.pargs,
				self.kwargs, self.singleton)

		def __deepcopy__(self, memo=None):
			"""
			Override this since the default implementation can fail silently,
			leaving some attributes unset.
			"""
			if memo is None:
				memo = {}
			result = self.__copy__()
			memo[id(self)] = result
			result.func = deepcopy(self.func, memo)
			result.pargs = deepcopy(self.pargs, memo)
			result.kwargs = deepcopy(self.kwargs, memo)
			result.singleton = deepcopy(self.singleton, memo)
			return result
class ConfigProtect(object):
	"""Tracks the CONFIG_PROTECT and CONFIG_PROTECT_MASK path sets rooted
	at myroot and answers isprotected() queries against them."""

	def __init__(self, myroot, protect_list, mask_list):
		self.myroot = myroot
		self.protect_list = protect_list
		self.mask_list = mask_list
		self.updateprotect()

	def updateprotect(self):
		"""Update internal state for isprotected() calls. Nonexistent paths
		are ignored."""

		os = _os_merge

		self.protect = []
		self._dirs = set()
		for x in self.protect_list:
			ppath = normalize_path(
				os.path.join(self.myroot, x.lstrip(os.path.sep)))
			try:
				if stat.S_ISDIR(os.stat(ppath).st_mode):
					self._dirs.add(ppath)
				self.protect.append(ppath)
			except OSError:
				# If it doesn't exist, there's no need to protect it.
				pass

		self.protectmask = []
		for x in self.mask_list:
			ppath = normalize_path(
				os.path.join(self.myroot, x.lstrip(os.path.sep)))
			try:
				# Use lstat so that anything, even a broken symlink, can be
				# protected.
				if stat.S_ISDIR(os.lstat(ppath).st_mode):
					self._dirs.add(ppath)
				self.protectmask.append(ppath)
				# Now use stat in case this is a symlink to a directory.
				if stat.S_ISDIR(os.stat(ppath).st_mode):
					self._dirs.add(ppath)
			except OSError:
				# If it doesn't exist, there's no need to mask it.
				pass

	def isprotected(self, obj):
		"""Returns True if obj is protected, False otherwise. The caller must
		ensure that obj is normalized with a single leading slash. A trailing
		slash is optional for directories."""
		masked = 0
		protected = 0
		sep = os.path.sep
		for ppath in self.protect:
			if len(ppath) > masked and obj.startswith(ppath):
				if ppath in self._dirs:
					if obj != ppath and not obj.startswith(ppath + sep):
						# /etc/foo does not match /etc/foobaz
						continue
				elif obj != ppath:
					# force exact match when CONFIG_PROTECT lists a
					# non-directory
					continue
				protected = len(ppath)
				# config file management
				for pmpath in self.protectmask:
					if len(pmpath) >= protected and obj.startswith(pmpath):
						if pmpath in self._dirs:
							if obj != pmpath and \
								not obj.startswith(pmpath + sep):
								# /etc/foo does not match /etc/foobaz
								continue
						elif obj != pmpath:
							# force exact match when CONFIG_PROTECT_MASK lists
							# a non-directory
							continue
						# skip, it's in the mask
						masked = len(pmpath)
		return protected > masked
def new_protect_filename(mydest, newmd5=None, force=False):
	"""Resolves a config-protect filename for merging, optionally
	using the last filename if the md5 matches. If force is True,
	then a new filename will be generated even if mydest does not
	exist yet.
	(dest,md5) ==> 'string'            --- path_to_target_filename
	(dest)     ==> ('next', 'highest') --- next_target and most-recent_target
	"""

	# config protection filename format:
	# ._cfg0000_foo
	# 0123456789012

	os = _os_merge

	prot_num = -1
	last_pfile = ""

	if not force and \
		not os.path.exists(mydest):
		# Nothing to protect yet; merge straight to the destination.
		return mydest

	real_filename = os.path.basename(mydest)
	real_dirname = os.path.dirname(mydest)
	# Scan for the highest existing ._cfgNNNN_ counter for this filename.
	for pfile in os.listdir(real_dirname):
		if pfile[0:5] != "._cfg":
			continue
		if pfile[10:] != real_filename:
			continue
		try:
			new_prot_num = int(pfile[5:9])
			if new_prot_num > prot_num:
				prot_num = new_prot_num
				last_pfile = pfile
		except ValueError:
			continue
	prot_num = prot_num + 1

	new_pfile = normalize_path(os.path.join(real_dirname,
		"._cfg" + str(prot_num).zfill(4) + "_" + real_filename))
	old_pfile = normalize_path(os.path.join(real_dirname, last_pfile))
	if last_pfile and newmd5:
		try:
			last_pfile_md5 = portage.checksum._perform_md5_merge(old_pfile)
		except FileNotFound:
			# The file suddenly disappeared or it's a broken symlink.
			pass
		else:
			if last_pfile_md5 == newmd5:
				# The most recent protect file already has the same
				# content, so reuse it instead of creating another.
				return old_pfile
	return new_pfile
def find_updated_config_files(target_root, config_protect):
	"""
	Generate tuples of configuration files that need to be updated.
	The tuples are organized like this:
	( protected_dir, file_list )
	If the protected config isn't a protected_dir but a protected_file:
	( protected_file, None )
	If no configuration files need to be updated, nothing is yielded.
	"""

	os = _os_merge

	if config_protect:
		# directories with some protect files in them
		for x in config_protect:
			files = []

			x = os.path.join(target_root, x.lstrip(os.path.sep))
			if not os.access(x, os.W_OK):
				continue
			try:
				mymode = os.lstat(x).st_mode
			except OSError:
				continue

			if stat.S_ISLNK(mymode):
				# We want to treat it like a directory if it
				# is a symlink to an existing directory.
				try:
					real_mode = os.stat(x).st_mode
					if stat.S_ISDIR(real_mode):
						mymode = real_mode
				except OSError:
					pass

			if stat.S_ISDIR(mymode):
				mycommand = \
					"find '%s' -name '.*' -type d -prune -o -name '._cfg????_*'" % x
			else:
				mycommand = "find '%s' -maxdepth 1 -name '._cfg????_%s'" % \
					os.path.split(x.rstrip(os.path.sep))
			mycommand += " ! -name '.*~' ! -iname '.*.bak' -print0"
			# NOTE(review): x is interpolated into a shell command string;
			# a path containing a single quote would break the command.
			# Inherited behavior -- left unchanged here.
			a = subprocess_getstatusoutput(mycommand)

			if a[0] == 0:
				files = a[1].split('\0')
				# split always produces an empty string as the last element
				if files and not files[-1]:
					del files[-1]
				if files:
					if stat.S_ISDIR(mymode):
						yield (x, files)
					else:
						yield (x, None)
def getlibpaths(root, env=None):
	"""Return a list of paths that are used for library lookups: the
	LD_LIBRARY_PATH entries from env (defaulting to os.environ), the
	contents of <root>/etc/ld.so.conf with 'include' directives expanded
	recursively, and the standard /usr/lib and /lib fallbacks; based on
	the information from ld.so(8)."""

	def read_ld_so_conf(path):
		# Yield every library path listed in the given ld.so.conf file,
		# recursing into glob-expanded 'include' directives.
		for l in grabfile(path):
			if l.startswith('include '):
				subpath = os.path.join(os.path.dirname(path), l[8:].strip())
				for p in glob.glob(subpath):
					for r in read_ld_so_conf(p):
						yield r
			else:
				yield l

	if env is None:
		env = os.environ
	rval = env.get("LD_LIBRARY_PATH", "").split(":")
	rval.extend(read_ld_so_conf(os.path.join(root, "etc", "ld.so.conf")))
	rval.append("/usr/lib")
	rval.append("/lib")

	# Drop empty entries (e.g. from a leading/trailing ':') and normalize.
	return [normalize_path(x) for x in rval if x]