1 # Copyright 1998-2009 Gentoo Foundation
2 # Distributed under the terms of the GNU General Public License v2
4 __all__ = ["PreservedLibsRegistry", "LinkageMap",
5 "vardbapi", "vartree", "dblink"] + \
6 ["write_contents", "tar_contents"]
9 portage.proxy.lazyimport.lazyimport(globals(),
10 'portage.checksum:_perform_md5_merge@perform_md5',
11 'portage.data:portage_gid,portage_uid,secpass',
12 'portage.dbapi.dep_expand:dep_expand',
13 'portage.dep:dep_getkey,isjustname,flatten,match_from_list,' + \
14 'use_reduce,paren_reduce,_slot_re',
15 'portage.elog:elog_process',
16 'portage.locks:lockdir,unlockdir',
17 'portage.output:bold,colorize',
18 'portage.package.ebuild.doebuild:doebuild,doebuild_environment,' + \
20 'portage.package.ebuild.prepare_build_dirs:prepare_build_dirs',
21 'portage.update:fixdbentries',
22 'portage.util:apply_secpass_permissions,ConfigProtect,ensure_dirs,' + \
23 'writemsg,writemsg_level,write_atomic,atomic_ofstream,writedict,' + \
24 'grabfile,grabdict,normalize_path,new_protect_filename,getlibpaths',
25 'portage.util.digraph:digraph',
26 'portage.util.env_update:env_update',
27 'portage.util.listdir:dircache,listdir',
28 'portage.versions:best,catpkgsplit,catsplit,cpv_getkey,pkgcmp,' + \
32 from portage.const import CACHE_PATH, CONFIG_MEMORY_FILE, \
33 PORTAGE_PACKAGE_ATOM, PRIVATE_PATH, VDB_PATH
34 from portage.dbapi import dbapi
35 from portage.exception import CommandNotFound, \
36 InvalidData, InvalidPackageName, \
37 FileNotFound, PermissionDenied, UnsupportedAPIException
38 from portage.localization import _
39 from portage.util.movefile import movefile
41 from portage import abssymlink, _movefile, bsd_chflags
43 # This is a special version of the os module, wrapped for unicode support.
44 from portage import os
45 from portage import _encodings
46 from portage import _os_merge
47 from portage import _selinux_merge
48 from portage import _unicode_decode
49 from portage import _unicode_encode
51 from portage.cache.mappings import slot_dict_class
55 import re, shutil, stat, errno, copy, subprocess
65 import cPickle as pickle
69 if sys.hexversion >= 0x3000000:
73 class PreservedLibsRegistry(object):
74 """ This class handles the tracking of preserved library objects """
75 def __init__(self, root, filename, autocommit=True):
77 @param root: root used to check existence of paths in pruneNonExisting
79 @param filename: absolute path for saving the preserved libs records
80 @type filename: String
81 @param autocommit: determines if the file is written after every update
82 @type autocommit: Boolean
85 self._filename = filename
86 self._autocommit = autocommit
88 self.pruneNonExisting()
91 """ Reload the registry data from file """
94 self._data = pickle.load(
95 open(_unicode_encode(self._filename,
96 encoding=_encodings['fs'], errors='strict'), 'rb'))
97 except (ValueError, pickle.UnpicklingError) as e:
98 writemsg_level(_("!!! Error loading '%s': %s\n") % \
99 (self._filename, e), level=logging.ERROR, noiselevel=-1)
100 except (EOFError, IOError) as e:
101 if isinstance(e, EOFError) or e.errno == errno.ENOENT:
103 elif e.errno == PermissionDenied.errno:
104 raise PermissionDenied(self._filename)
107 if self._data is None:
109 self._data_orig = self._data.copy()
111 """ Store the registry data to file. No need to call this if autocommit
114 if os.environ.get("SANDBOX_ON") == "1" or \
115 self._data == self._data_orig:
118 f = atomic_ofstream(self._filename, 'wb')
119 pickle.dump(self._data, f, protocol=2)
121 except EnvironmentError as e:
122 if e.errno != PermissionDenied.errno:
123 writemsg("!!! %s %s\n" % (e, self._filename), noiselevel=-1)
125 self._data_orig = self._data.copy()
127 def register(self, cpv, slot, counter, paths):
128 """ Register new objects in the registry. If there is a record with the
129 same packagename (internally derived from cpv) and slot it is
130 overwritten with the new data.
131 @param cpv: package instance that owns the objects
132 @type cpv: CPV (as String)
133 @param slot: the value of SLOT of the given package instance
135 @param counter: vdb counter value for the package instace
136 @type counter: Integer
137 @param paths: absolute paths of objects that got preserved during an update
140 cp = "/".join(catpkgsplit(cpv)[:2])
142 if len(paths) == 0 and cps in self._data \
143 and self._data[cps][0] == cpv and int(self._data[cps][1]) == int(counter):
146 self._data[cps] = (cpv, counter, paths)
150 def unregister(self, cpv, slot, counter):
151 """ Remove a previous registration of preserved objects for the given package.
152 @param cpv: package instance whose records should be removed
153 @type cpv: CPV (as String)
154 @param slot: the value of SLOT of the given package instance
157 self.register(cpv, slot, counter, [])
159 def pruneNonExisting(self):
160 """ Remove all records for objects that no longer exist on the filesystem. """
164 for cps in list(self._data):
165 cpv, counter, paths = self._data[cps]
166 paths = [f for f in paths \
167 if os.path.exists(os.path.join(self._root, f.lstrip(os.sep)))]
169 self._data[cps] = (cpv, counter, paths)
175 def hasEntries(self):
176 """ Check if this registry contains any records. """
177 return len(self._data) > 0
179 def getPreservedLibs(self):
180 """ Return a mapping of packages->preserved objects.
181 @returns mapping of package instances to preserved objects
182 @rtype Dict cpv->list-of-paths
185 for cps in self._data:
186 rValue[self._data[cps][0]] = self._data[cps][2]
189 class LinkageMap(object):
191 """Models dynamic linker dependencies."""
193 _needed_aux_key = "NEEDED.ELF.2"
194 _soname_map_class = slot_dict_class(
195 ("consumers", "providers"), prefix="")
197 def __init__(self, vardbapi):
198 self._dbapi = vardbapi
199 self._root = self._dbapi.root
201 self._obj_properties = {}
202 self._obj_key_cache = {}
203 self._defpath = set()
204 self._path_key_cache = {}
206 def _clear_cache(self):
208 self._obj_properties.clear()
209 self._obj_key_cache.clear()
210 self._defpath.clear()
211 self._path_key_cache.clear()
213 def _path_key(self, path):
214 key = self._path_key_cache.get(path)
216 key = self._ObjectKey(path, self._root)
217 self._path_key_cache[path] = key
220 def _obj_key(self, path):
221 key = self._obj_key_cache.get(path)
223 key = self._ObjectKey(path, self._root)
224 self._obj_key_cache[path] = key
227 class _ObjectKey(object):
229 """Helper class used as _obj_properties keys for objects."""
231 __slots__ = ("__weakref__", "_key")
233 def __init__(self, obj, root):
235 This takes a path to an object.
237 @param object: path to a file
238 @type object: string (example: '/usr/bin/bar')
241 self._key = self._generate_object_key(obj, root)
244 return hash(self._key)
246 def __eq__(self, other):
247 return self._key == other._key
249 def _generate_object_key(self, obj, root):
251 Generate object key for a given object.
253 @param object: path to a file
254 @type object: string (example: '/usr/bin/bar')
255 @rtype: 2-tuple of types (long, int) if object exists. string if
256 object does not exist.
258 1. 2-tuple of object's inode and device from a stat call, if object
260 2. realpath of object if object does not exist.
268 encoding=_encodings['merge'], errors='strict')
269 except UnicodeEncodeError:
270 # The package appears to have been merged with a
271 # different value of sys.getfilesystemencoding(),
272 # so fall back to utf_8 if appropriate.
275 encoding=_encodings['fs'], errors='strict')
276 except UnicodeEncodeError:
281 abs_path = os.path.join(root, obj.lstrip(os.sep))
283 object_stat = os.stat(abs_path)
285 # Use the realpath as the key if the file does not exists on the
287 return os.path.realpath(abs_path)
288 # Return a tuple of the device and inode.
289 return (object_stat.st_dev, object_stat.st_ino)
291 def file_exists(self):
293 Determine if the file for this key exists on the filesystem.
297 1. True if the file exists.
298 2. False if the file does not exist or is a broken symlink.
301 return isinstance(self._key, tuple)
303 class _LibGraphNode(_ObjectKey):
304 __slots__ = ("alt_paths",)
		def __init__(self, obj, root):
			# Build this node's identity key (device/inode tuple, or realpath
			# for a missing file) via the base _ObjectKey constructor, then
			# start with an empty set of alternate paths observed for the
			# same underlying object.
			LinkageMap._ObjectKey.__init__(self, obj, root)
			self.alt_paths = set()
311 return str(sorted(self.alt_paths))
313 def rebuild(self, exclude_pkgs=None, include_file=None):
315 Raises CommandNotFound if there are preserved libs
316 and the scanelf binary is not available.
321 root_len = len(root) - 1
323 self._defpath.update(getlibpaths(self._root))
325 obj_key_cache = self._obj_key_cache
326 obj_properties = self._obj_properties
330 # Data from include_file is processed first so that it
331 # overrides any data from previously installed files.
332 if include_file is not None:
333 lines += grabfile(include_file)
335 aux_keys = [self._needed_aux_key]
336 for cpv in self._dbapi.cpv_all():
337 if exclude_pkgs is not None and cpv in exclude_pkgs:
339 lines += self._dbapi.aux_get(cpv, aux_keys)[0].split('\n')
340 # Cache NEEDED.* files avoid doing excessive IO for every rebuild.
341 self._dbapi.flush_cache()
343 # have to call scanelf for preserved libs here as they aren't
344 # registered in NEEDED.ELF.2 files
346 if self._dbapi.plib_registry and self._dbapi.plib_registry.getPreservedLibs():
347 args = ["/usr/bin/scanelf", "-qF", "%a;%F;%S;%r;%n"]
348 for items in self._dbapi.plib_registry.getPreservedLibs().values():
350 args.extend(os.path.join(root, x.lstrip("." + os.sep)) \
353 proc = subprocess.Popen(args, stdout=subprocess.PIPE)
354 except EnvironmentError as e:
355 if e.errno != errno.ENOENT:
357 raise CommandNotFound(args[0])
359 for l in proc.stdout:
361 l = _unicode_decode(l,
362 encoding=_encodings['content'], errors='strict')
363 except UnicodeDecodeError:
364 l = _unicode_decode(l,
365 encoding=_encodings['content'], errors='replace')
366 writemsg_level(_("\nError decoding characters " \
367 "returned from scanelf: %s\n\n") % (l,),
368 level=logging.ERROR, noiselevel=-1)
369 l = l[3:].rstrip("\n")
372 fields = l.split(";")
374 writemsg_level(_("\nWrong number of fields " \
375 "returned from scanelf: %s\n\n") % (l,),
376 level=logging.ERROR, noiselevel=-1)
378 fields[1] = fields[1][root_len:]
379 plibs.discard(fields[1])
380 lines.append(";".join(fields))
384 # Preserved libraries that did not appear in the scanelf output.
385 # This is known to happen with statically linked libraries.
386 # Generate dummy lines for these, so we can assume that every
387 # preserved library has an entry in self._obj_properties. This
388 # is important in order to prevent findConsumers from raising
389 # an unwanted KeyError.
391 lines.append(";".join(['', x, '', '', '']))
397 fields = l.split(";")
399 writemsg_level(_("\nWrong number of fields " \
400 "in %s: %s\n\n") % (self._needed_aux_key, l),
401 level=logging.ERROR, noiselevel=-1)
406 path = set([normalize_path(x) \
407 for x in filter(None, fields[3].replace(
408 "${ORIGIN}", os.path.dirname(obj)).replace(
409 "$ORIGIN", os.path.dirname(obj)).split(":"))])
410 needed = [x for x in fields[4].split(",") if x]
412 obj_key = self._obj_key(obj)
414 myprops = obj_properties.get(obj_key)
417 myprops = (arch, needed, path, soname, set())
418 obj_properties[obj_key] = myprops
419 # All object paths are added into the obj_properties tuple.
422 # Don't index the same file more that once since only one
423 # set of data can be correct and therefore mixing data
424 # may corrupt the index (include_file overrides previously
429 arch_map = libs.get(arch)
432 libs[arch] = arch_map
434 soname_map = arch_map.get(soname)
435 if soname_map is None:
436 soname_map = self._soname_map_class(
437 providers=set(), consumers=set())
438 arch_map[soname] = soname_map
439 soname_map.providers.add(obj_key)
440 for needed_soname in needed:
441 soname_map = arch_map.get(needed_soname)
442 if soname_map is None:
443 soname_map = self._soname_map_class(
444 providers=set(), consumers=set())
445 arch_map[needed_soname] = soname_map
446 soname_map.consumers.add(obj_key)
448 def listBrokenBinaries(self, debug=False):
450 Find binaries and their needed sonames, which have no providers.
452 @param debug: Boolean to enable debug output
454 @rtype: dict (example: {'/usr/bin/foo': set(['libbar.so'])})
455 @return: The return value is an object -> set-of-sonames mapping, where
456 object is a broken binary and the set consists of sonames needed by
457 object that have no corresponding libraries to fulfill the dependency.
463 class _LibraryCache(object):
466 Caches properties associated with paths.
468 The purpose of this class is to prevent multiple instances of
469 _ObjectKey for the same paths.
473 def __init__(cache_self):
474 cache_self.cache = {}
476 def get(cache_self, obj):
478 Caches and returns properties associated with an object.
480 @param obj: absolute path (can be symlink)
481 @type obj: string (example: '/usr/lib/libfoo.so')
482 @rtype: 4-tuple with types
483 (string or None, string or None, 2-tuple, Boolean)
484 @return: 4-tuple with the following components:
485 1. arch as a string or None if it does not exist,
486 2. soname as a string or None if it does not exist,
487 3. obj_key as 2-tuple,
488 4. Boolean representing whether the object exists.
489 (example: ('libfoo.so.1', (123L, 456L), True))
492 if obj in cache_self.cache:
493 return cache_self.cache[obj]
495 obj_key = self._obj_key(obj)
496 # Check that the library exists on the filesystem.
497 if obj_key.file_exists():
498 # Get the arch and soname from LinkageMap._obj_properties if
499 # it exists. Otherwise, None.
500 arch, _needed, _path, soname, _objs = \
501 self._obj_properties.get(obj_key, (None,)*5)
502 return cache_self.cache.setdefault(obj, \
503 (arch, soname, obj_key, True))
505 return cache_self.cache.setdefault(obj, \
506 (None, None, obj_key, False))
509 cache = _LibraryCache()
510 providers = self.listProviders()
512 # Iterate over all obj_keys and their providers.
513 for obj_key, sonames in providers.items():
514 arch, _needed, path, _soname, objs = self._obj_properties[obj_key]
515 path = path.union(self._defpath)
516 # Iterate over each needed soname and the set of library paths that
517 # fulfill the soname to determine if the dependency is broken.
518 for soname, libraries in sonames.items():
519 # validLibraries is used to store libraries, which satisfy soname,
520 # so if no valid libraries are found, the soname is not satisfied
521 # for obj_key. If unsatisfied, objects associated with obj_key
523 validLibraries = set()
524 # It could be the case that the library to satisfy the soname is
525 # not in the obj's runpath, but a symlink to the library is (eg
526 # libnvidia-tls.so.1 in nvidia-drivers). Also, since LinkageMap
527 # does not catalog symlinks, broken or missing symlinks may go
528 # unnoticed. As a result of these cases, check that a file with
529 # the same name as the soname exists in obj's runpath.
530 # XXX If we catalog symlinks in LinkageMap, this could be improved.
531 for directory in path:
532 cachedArch, cachedSoname, cachedKey, cachedExists = \
533 cache.get(os.path.join(directory, soname))
534 # Check that this library provides the needed soname. Doing
535 # this, however, will cause consumers of libraries missing
536 # sonames to be unnecessarily emerged. (eg libmix.so)
537 if cachedSoname == soname and cachedArch == arch:
538 validLibraries.add(cachedKey)
539 if debug and cachedKey not in \
540 set(map(self._obj_key_cache.get, libraries)):
541 # XXX This is most often due to soname symlinks not in
542 # a library's directory. We could catalog symlinks in
543 # LinkageMap to avoid checking for this edge case here.
545 _("Found provider outside of findProviders:") + \
546 (" %s -> %s %s\n" % (os.path.join(directory, soname),
547 self._obj_properties[cachedKey][4], libraries)),
549 # A valid library has been found, so there is no need to
552 if debug and cachedArch == arch and \
553 cachedKey in self._obj_properties:
554 writemsg((_("Broken symlink or missing/bad soname: " + \
555 "%(dir_soname)s -> %(cachedKey)s " + \
556 "with soname %(cachedSoname)s but expecting %(soname)s") % \
557 {"dir_soname":os.path.join(directory, soname),
558 "cachedKey": self._obj_properties[cachedKey],
559 "cachedSoname": cachedSoname, "soname":soname}) + "\n",
561 # This conditional checks if there are no libraries to satisfy the
562 # soname (empty set).
563 if not validLibraries:
565 rValue.setdefault(obj, set()).add(soname)
566 # If no valid libraries have been found by this point, then
567 # there are no files named with the soname within obj's runpath,
568 # but if there are libraries (from the providers mapping), it is
569 # likely that soname symlinks or the actual libraries are
570 # missing or broken. Thus those libraries are added to rValue
571 # in order to emerge corrupt library packages.
572 for lib in libraries:
573 rValue.setdefault(lib, set()).add(soname)
575 if not os.path.isfile(lib):
576 writemsg(_("Missing library:") + " %s\n" % (lib,),
579 writemsg(_("Possibly missing symlink:") + \
580 "%s\n" % (os.path.join(os.path.dirname(lib), soname)),
584 def listProviders(self):
586 Find the providers for all object keys in LinkageMap.
588 @rtype: dict (example:
589 {(123L, 456L): {'libbar.so': set(['/lib/libbar.so.1.5'])}})
590 @return: The return value is an object key -> providers mapping, where
591 providers is a mapping of soname -> set-of-library-paths returned
592 from the findProviders method.
598 # Iterate over all object keys within LinkageMap.
599 for obj_key in self._obj_properties:
600 rValue.setdefault(obj_key, self.findProviders(obj_key))
603 def isMasterLink(self, obj):
605 Determine whether an object is a master link.
607 @param obj: absolute path to an object
608 @type obj: string (example: '/usr/bin/foo')
611 1. True if obj is a master link
612 2. False if obj is not a master link
616 basename = os.path.basename(obj)
617 obj_key = self._obj_key(obj)
618 if obj_key not in self._obj_properties:
619 raise KeyError("%s (%s) not in object list" % (obj_key, obj))
620 soname = self._obj_properties[obj_key][3]
621 return (len(basename) < len(soname))
623 def listLibraryObjects(self):
625 Return a list of library objects.
627 Known limitation: library objects lacking an soname are not included.
629 @rtype: list of strings
630 @return: list of paths to all providers
636 for arch_map in self._libs.values():
637 for soname_map in arch_map.values():
638 for obj_key in soname_map.providers:
639 rValue.extend(self._obj_properties[obj_key][4])
642 def getSoname(self, obj):
644 Return the soname associated with an object.
646 @param obj: absolute path to an object
647 @type obj: string (example: '/usr/bin/bar')
649 @return: soname as a string
654 if isinstance(obj, self._ObjectKey):
656 if obj_key not in self._obj_properties:
657 raise KeyError("%s not in object list" % obj_key)
658 return self._obj_properties[obj_key][3]
659 if obj not in self._obj_key_cache:
660 raise KeyError("%s not in object list" % obj)
661 return self._obj_properties[self._obj_key_cache[obj]][3]
663 def findProviders(self, obj):
665 Find providers for an object or object key.
667 This method may be called with a key from _obj_properties.
669 In some cases, not all valid libraries are returned. This may occur when
670 an soname symlink referencing a library is in an object's runpath while
671 the actual library is not. We should consider cataloging symlinks within
672 LinkageMap as this would avoid those cases and would be a better model of
673 library dependencies (since the dynamic linker actually searches for
674 files named with the soname in the runpaths).
676 @param obj: absolute path to an object or a key from _obj_properties
677 @type obj: string (example: '/usr/bin/bar') or _ObjectKey
678 @rtype: dict (example: {'libbar.so': set(['/lib/libbar.so.1.5'])})
679 @return: The return value is a soname -> set-of-library-paths, where
680 set-of-library-paths satisfy soname.
691 # Determine the obj_key from the arguments.
692 if isinstance(obj, self._ObjectKey):
694 if obj_key not in self._obj_properties:
695 raise KeyError("%s not in object list" % obj_key)
697 obj_key = self._obj_key(obj)
698 if obj_key not in self._obj_properties:
699 raise KeyError("%s (%s) not in object list" % (obj_key, obj))
701 arch, needed, path, _soname, _objs = self._obj_properties[obj_key]
702 path_keys = set(self._path_key(x) for x in path.union(self._defpath))
703 for soname in needed:
704 rValue[soname] = set()
705 if arch not in self._libs or soname not in self._libs[arch]:
707 # For each potential provider of the soname, add it to rValue if it
708 # resides in the obj's runpath.
709 for provider_key in self._libs[arch][soname].providers:
710 providers = self._obj_properties[provider_key][4]
711 for provider in providers:
712 if self._path_key(os.path.dirname(provider)) in path_keys:
713 rValue[soname].add(provider)
716 def findConsumers(self, obj):
718 Find consumers of an object or object key.
720 This method may be called with a key from _obj_properties. If this
721 method is going to be called with an object key, to avoid not catching
722 shadowed libraries, do not pass new _ObjectKey instances to this method.
723 Instead pass the obj as a string.
725 In some cases, not all consumers are returned. This may occur when
726 an soname symlink referencing a library is in an object's runpath while
727 the actual library is not. For example, this problem is noticeable for
728 binutils since it's libraries are added to the path via symlinks that
729 are gemerated in the /usr/$CHOST/lib/ directory by binutils-config.
730 Failure to recognize consumers of these symlinks makes preserve-libs
731 fail to preserve binutils libs that are needed by these unrecognized
734 Note that library consumption via dlopen (common for kde plugins) is
735 currently undetected. However, it is possible to use the
736 corresponding libtool archive (*.la) files to detect such consumers
737 (revdep-rebuild is able to detect them).
739 @param obj: absolute path to an object or a key from _obj_properties
740 @type obj: string (example: '/usr/bin/bar') or _ObjectKey
741 @rtype: set of strings (example: set(['/bin/foo', '/usr/bin/bar']))
742 @return: The return value is a soname -> set-of-library-paths, where
743 set-of-library-paths satisfy soname.
754 # Determine the obj_key and the set of objects matching the arguments.
755 if isinstance(obj, self._ObjectKey):
757 if obj_key not in self._obj_properties:
758 raise KeyError("%s not in object list" % obj_key)
759 objs = self._obj_properties[obj_key][4]
762 obj_key = self._obj_key(obj)
763 if obj_key not in self._obj_properties:
764 raise KeyError("%s (%s) not in object list" % (obj_key, obj))
766 # If there is another version of this lib with the
767 # same soname and the master link points to that
768 # other version, this lib will be shadowed and won't
769 # have any consumers.
770 if not isinstance(obj, self._ObjectKey):
771 soname = self._obj_properties[obj_key][3]
772 master_link = os.path.join(self._root,
773 os.path.dirname(obj).lstrip(os.path.sep), soname)
775 master_st = os.stat(master_link)
776 obj_st = os.stat(obj)
780 if (obj_st.st_dev, obj_st.st_ino) != \
781 (master_st.st_dev, master_st.st_ino):
784 # Determine the directory(ies) from the set of objects.
785 objs_dir_keys = set(self._path_key(os.path.dirname(x)) for x in objs)
786 defpath_keys = set(self._path_key(x) for x in self._defpath)
788 arch, _needed, _path, soname, _objs = self._obj_properties[obj_key]
789 if arch in self._libs and soname in self._libs[arch]:
790 # For each potential consumer, add it to rValue if an object from the
791 # arguments resides in the consumer's runpath.
792 for consumer_key in self._libs[arch][soname].consumers:
793 _arch, _needed, path, _soname, consumer_objs = \
794 self._obj_properties[consumer_key]
795 path_keys = defpath_keys.union(self._path_key(x) for x in path)
796 if objs_dir_keys.intersection(path_keys):
797 rValue.update(consumer_objs)
800 class vardbapi(dbapi):
802 _excluded_dirs = ["CVS", "lost+found"]
803 _excluded_dirs = [re.escape(x) for x in _excluded_dirs]
804 _excluded_dirs = re.compile(r'^(\..*|-MERGING-.*|' + \
805 "|".join(_excluded_dirs) + r')$')
807 _aux_cache_version = "1"
808 _owners_cache_version = "1"
810 # Number of uncached packages to trigger cache update, since
811 # it's wasteful to update it for every vdb change.
812 _aux_cache_threshold = 5
814 _aux_cache_keys_re = re.compile(r'^NEEDED\..*$')
815 _aux_multi_line_re = re.compile(r'^(CONTENTS|NEEDED\..*)$')
817 def __init__(self, root, categories=None, settings=None, vartree=None):
819 The categories parameter is unused since the dbapi class
820 now has a categories property that is generated from the
823 self.root = _unicode_decode(root,
824 encoding=_encodings['content'], errors='strict')
825 if self.root[-1] != '/':
828 # Used by emerge to check whether any packages
829 # have been added or removed.
830 self._pkgs_changed = False
832 #cache for category directory mtimes
835 #cache for dependency checks
838 #cache for cp_list results
843 from portage import settings
844 self.settings = settings
846 from portage import db
847 vartree = db[root]["vartree"]
848 self.vartree = vartree
849 self._aux_cache_keys = set(
850 ["BUILD_TIME", "CHOST", "COUNTER", "DEPEND", "DESCRIPTION",
851 "EAPI", "HOMEPAGE", "IUSE", "KEYWORDS",
852 "LICENSE", "PDEPEND", "PROPERTIES", "PROVIDE", "RDEPEND",
853 "repository", "RESTRICT" , "SLOT", "USE", "DEFINED_PHASES",
855 self._aux_cache_obj = None
856 self._aux_cache_filename = os.path.join(self.root,
857 CACHE_PATH, "vdb_metadata.pickle")
858 self._counter_path = os.path.join(root,
859 CACHE_PATH, "counter")
862 self.plib_registry = PreservedLibsRegistry(self.root,
863 os.path.join(self.root, PRIVATE_PATH, "preserved_libs_registry"))
864 except PermissionDenied:
865 # apparently this user isn't allowed to access PRIVATE_PATH
866 self.plib_registry = None
868 self.linkmap = LinkageMap(self)
869 self._owners = self._owners_db(self)
871 def getpath(self, mykey, filename=None):
872 # This is an optimized hotspot, so don't use unicode-wrapped
873 # os module and don't use os.path.join().
874 rValue = self.root + VDB_PATH + _os.sep + mykey
875 if filename is not None:
876 # If filename is always relative, we can do just
877 # rValue += _os.sep + filename
878 rValue = _os.path.join(rValue, filename)
881 def _bump_mtime(self, cpv):
883 This is called before an after any modifications, so that consumers
884 can use directory mtimes to validate caches. See bug #290428.
886 base = self.root + VDB_PATH
887 cat = catsplit(cpv)[0]
888 catdir = base + _os.sep + cat
892 for x in (catdir, base):
897 def cpv_exists(self, mykey):
898 "Tells us whether an actual ebuild exists on disk (no masking)"
899 return os.path.exists(self.getpath(mykey))
901 def cpv_counter(self, mycpv):
902 "This method will grab the COUNTER. Returns a counter value."
904 return long(self.aux_get(mycpv, ["COUNTER"])[0])
905 except (KeyError, ValueError):
907 writemsg_level(_("portage: COUNTER for %s was corrupted; " \
908 "resetting to value of 0\n") % (mycpv,),
909 level=logging.ERROR, noiselevel=-1)
912 def cpv_inject(self, mycpv):
913 "injects a real package into our on-disk database; assumes mycpv is valid and doesn't already exist"
914 os.makedirs(self.getpath(mycpv))
915 counter = self.counter_tick(self.root, mycpv=mycpv)
916 # write local package counter so that emerge clean does the right thing
917 write_atomic(self.getpath(mycpv, filename="COUNTER"), str(counter))
919 def isInjected(self, mycpv):
920 if self.cpv_exists(mycpv):
921 if os.path.exists(self.getpath(mycpv, filename="INJECTED")):
923 if not os.path.exists(self.getpath(mycpv, filename="CONTENTS")):
927 def move_ent(self, mylist, repo_match=None):
932 for atom in (origcp, newcp):
933 if not isjustname(atom):
934 raise InvalidPackageName(str(atom))
935 origmatches = self.match(origcp, use_cache=0)
939 for mycpv in origmatches:
940 mycpv_cp = cpv_getkey(mycpv)
941 if mycpv_cp != origcp:
942 # Ignore PROVIDE virtual match.
944 if repo_match is not None \
945 and not repo_match(self.aux_get(mycpv, ['repository'])[0]):
947 mynewcpv = mycpv.replace(mycpv_cp, str(newcp), 1)
948 mynewcat = catsplit(newcp)[0]
949 origpath = self.getpath(mycpv)
950 if not os.path.exists(origpath):
953 if not os.path.exists(self.getpath(mynewcat)):
954 #create the directory
955 os.makedirs(self.getpath(mynewcat))
956 newpath = self.getpath(mynewcpv)
957 if os.path.exists(newpath):
958 #dest already exists; keep this puppy where it is.
960 _movefile(origpath, newpath, mysettings=self.settings)
961 self._clear_pkg_cache(self._dblink(mycpv))
962 self._clear_pkg_cache(self._dblink(mynewcpv))
964 # We need to rename the ebuild now.
965 old_pf = catsplit(mycpv)[1]
966 new_pf = catsplit(mynewcpv)[1]
969 os.rename(os.path.join(newpath, old_pf + ".ebuild"),
970 os.path.join(newpath, new_pf + ".ebuild"))
971 except EnvironmentError as e:
972 if e.errno != errno.ENOENT:
975 write_atomic(os.path.join(newpath, "PF"), new_pf+"\n")
976 write_atomic(os.path.join(newpath, "CATEGORY"), mynewcat+"\n")
977 fixdbentries([mylist], newpath)
980 def cp_list(self, mycp, use_cache=1):
981 mysplit=catsplit(mycp)
982 if mysplit[0] == '*':
983 mysplit[0] = mysplit[0][1:]
985 mystat = os.stat(self.getpath(mysplit[0])).st_mtime
988 if use_cache and mycp in self.cpcache:
989 cpc = self.cpcache[mycp]
992 cat_dir = self.getpath(mysplit[0])
994 dir_list = os.listdir(cat_dir)
995 except EnvironmentError as e:
996 if e.errno == PermissionDenied.errno:
997 raise PermissionDenied(cat_dir)
1003 if self._excluded_dirs.match(x) is not None:
1007 self.invalidentry(os.path.join(self.getpath(mysplit[0]), x))
1009 if len(mysplit) > 1:
1010 if ps[0] == mysplit[1]:
1011 returnme.append(mysplit[0]+"/"+x)
1012 self._cpv_sort_ascending(returnme)
1014 self.cpcache[mycp] = [mystat, returnme[:]]
1015 elif mycp in self.cpcache:
1016 del self.cpcache[mycp]
1019 def cpv_all(self, use_cache=1):
1021 Set use_cache=0 to bypass the portage.cachedir() cache in cases
1022 when the accuracy of mtime staleness checks should not be trusted
1023 (generally this is only necessary in critical sections that
1024 involve merge or unmerge of packages).
1027 basepath = os.path.join(self.root, VDB_PATH) + os.path.sep
1030 from portage import listdir
1032 def listdir(p, **kwargs):
1034 return [x for x in os.listdir(p) \
1035 if os.path.isdir(os.path.join(p, x))]
1036 except EnvironmentError as e:
1037 if e.errno == PermissionDenied.errno:
1038 raise PermissionDenied(p)
1042 for x in listdir(basepath, EmptyOnError=1, ignorecvs=1, dirsonly=1):
1043 if self._excluded_dirs.match(x) is not None:
1045 if not self._category_re.match(x):
1047 for y in listdir(basepath + x, EmptyOnError=1, dirsonly=1):
1048 if self._excluded_dirs.match(y) is not None:
1050 subpath = x + "/" + y
1051 # -MERGING- should never be a cpv, nor should files.
1053 if catpkgsplit(subpath) is None:
1054 self.invalidentry(self.getpath(subpath))
1057 self.invalidentry(self.getpath(subpath))
1059 returnme.append(subpath)
1063 def cp_all(self, use_cache=1):
1064 mylist = self.cpv_all(use_cache=use_cache)
1070 mysplit = catpkgsplit(y)
1072 self.invalidentry(self.getpath(y))
1075 self.invalidentry(self.getpath(y))
1077 d[mysplit[0]+"/"+mysplit[1]] = None
1080 def checkblockers(self, origdep):
1083 def _clear_cache(self):
1084 self.mtdircache.clear()
1085 self.matchcache.clear()
1086 self.cpcache.clear()
1087 self._aux_cache_obj = None
	def _add(self, pkg_dblink):
		# Hook invoked when a package is merged: flag the vdb as changed
		# (consumed by emerge) and invalidate caches for that package.
		self._pkgs_changed = True
		self._clear_pkg_cache(pkg_dblink)
	def _remove(self, pkg_dblink):
		# Hook invoked when a package is unmerged: flag the vdb as changed
		# (consumed by emerge) and invalidate caches for that package.
		self._pkgs_changed = True
		self._clear_pkg_cache(pkg_dblink)
def _clear_pkg_cache(self, pkg_dblink):
	"""Actively evict cache entries touched by one package.

	Due to 1 second mtime granularity in <python-2.5, mtime checks
	are not always sufficient to invalidate vardbapi caches, so the
	entries for this package's category/cp must be dropped explicitly.
	"""
	category = pkg_dblink.cat
	for cache, key in (
			(self.mtdircache, category),
			(self.matchcache, category),
			(self.cpcache, pkg_dblink.mysplit[0])):
		cache.pop(key, None)
	# The module-level listdir cache is keyed by the vdb category dir.
	dircache.pop(pkg_dblink.dbcatdir, None)
def match(self, origdep, use_cache=1):
	"caching match function"
	# NOTE(review): lines are missing from this view — the dep_expand()
	# call that produces mydep is truncated below, and the guard that
	# selects the use_cache=0 fast path plus the try: line around
	# os.stat are not visible. The fragment is not runnable as-is.
	origdep, mydb=self, use_cache=use_cache, settings=self.settings)
	mykey = dep_getkey(mydep)
	mycat = catsplit(mykey)[0]
	# use_cache=0 path (presumably): drop stale per-category caches and
	# match directly — TODO confirm against the full file.
	if mycat in self.matchcache:
		del self.mtdircache[mycat]
		del self.matchcache[mycat]
	return list(self._iter_match(mydep,
		self.cp_list(mydep.cp, use_cache=use_cache)))
	# Cached path: validate the per-category cache via the category
	# directory's mtime.
	curmtime = os.stat(os.path.join(self.root, VDB_PATH, mycat)).st_mtime
	except (IOError, OSError):
	if mycat not in self.matchcache or \
		self.mtdircache[mycat] != curmtime:
		self.mtdircache[mycat] = curmtime
		self.matchcache[mycat] = {}
	if mydep not in self.matchcache[mycat]:
		mymatch = list(self._iter_match(mydep,
			self.cp_list(mydep.cp, use_cache=use_cache)))
		self.matchcache[mycat][mydep] = mymatch
	# Return a copy so callers cannot mutate the cached list.
	return self.matchcache[mycat][mydep][:]
def findname(self, mycpv):
	"""Return the path of the saved .ebuild file for an installed cpv."""
	ebuild_name = catsplit(mycpv)[1] + ".ebuild"
	return self.getpath(str(mycpv), filename=ebuild_name)
def flush_cache(self):
	"""If the current user has permission and the internal aux_get cache has
	been updated, save it to disk and mark it unmodified. This is called
	by emerge after it has loaded the full vdb for use in dependency
	calculations. Currently, the cache is only written if the user has
	superuser privileges (since that's required to obtain a lock), but all
	users have read access and benefit from faster metadata lookups (as
	long as at least part of the cache is still valid)."""
	# NOTE(review): lines are missing from this view — the tail of the
	# if-condition, the try: around the pickle dump, f.close(), and the
	# except body are not visible. Fragment not runnable as-is.
	if self._aux_cache is not None and \
		len(self._aux_cache["modified"]) >= self._aux_cache_threshold and \
		self._owners.populate() # index any unindexed contents
		valid_nodes = set(self.cpv_all())
		# Prune cache entries for packages no longer installed.
		for cpv in list(self._aux_cache["packages"]):
			if cpv not in valid_nodes:
				del self._aux_cache["packages"][cpv]
		# "modified" is transient bookkeeping; don't persist it.
		del self._aux_cache["modified"]
		f = atomic_ofstream(self._aux_cache_filename, 'wb')
		pickle.dump(self._aux_cache, f, protocol=2)
		apply_secpass_permissions(
			self._aux_cache_filename, gid=portage_gid, mode=0o644)
		except (IOError, OSError) as e:
		self._aux_cache["modified"] = set()
def _aux_cache(self):
	# Lazily construct the aux-metadata cache object on first access.
	# NOTE(review): a decorator line (presumably @property, not visible
	# in this view) precedes this def — confirm against the full file.
	if self._aux_cache_obj is None:
		self._aux_cache_init()
	return self._aux_cache_obj
def _aux_cache_init(self):
	# Load the pickled aux-metadata cache from disk, validating its
	# structure and rebuilding it from scratch when invalid.
	# NOTE(review): lines are missing from this view — the open_kwargs
	# initialization, the try: lines around open()/load(), f.close(),
	# and the bodies of the owners-validation branches are not visible.
	# Fragment not runnable as-is.
	if sys.hexversion >= 0x3000000:
		# Buffered io triggers extreme performance issues in
		# Unpickler.load() (problem observed with python-3.0.1).
		# Unfortunately, performance is still poor relative to
		# python-2.x, but buffering makes it much worse.
		open_kwargs["buffering"] = 0
	f = open(_unicode_encode(self._aux_cache_filename,
		encoding=_encodings['fs'], errors='strict'),
		mode='rb', **open_kwargs)
	mypickle = pickle.Unpickler(f)
	# Disallow resolution of arbitrary globals during unpickling
	# (defense against a tampered cache file).
	mypickle.find_global = None
	except AttributeError:
		# TODO: If py3k, override Unpickler.find_class().
	aux_cache = mypickle.load()
	except (IOError, OSError, EOFError, ValueError, pickle.UnpicklingError) as e:
		if isinstance(e, pickle.UnpicklingError):
			writemsg(_("!!! Error loading '%s': %s\n") % \
				(self._aux_cache_filename, str(e)), noiselevel=-1)
	# A missing/stale/garbled cache is disposable: start fresh.
	if not aux_cache or \
		not isinstance(aux_cache, dict) or \
		aux_cache.get("version") != self._aux_cache_version or \
		not aux_cache.get("packages"):
		aux_cache = {"version": self._aux_cache_version}
		aux_cache["packages"] = {}
	# Validate the owners sub-cache; each failed check presumably
	# resets owners (bodies not visible here).
	owners = aux_cache.get("owners")
	if owners is not None:
		if not isinstance(owners, dict):
		elif "version" not in owners:
		elif owners["version"] != self._owners_cache_version:
		elif "base_names" not in owners:
		elif not isinstance(owners["base_names"], dict):
			"version" : self._owners_cache_version
		aux_cache["owners"] = owners
	aux_cache["modified"] = set()
	self._aux_cache_obj = aux_cache
def aux_get(self, mycpv, wants):
	"""This automatically caches selected keys that are frequently needed
	by emerge for dependency calculations. The cached metadata is
	considered valid if the mtime of the package directory has not changed
	since the data was cached. The cache is stored in a pickled dict
	object with the following format:

	{version:"1", "packages":{cpv1:(mtime,{k1,v1, k2,v2, ...}), cpv2...}}

	If an error occurs while loading the cache pickle or the version is
	unrecognized, the cache will simple be recreated from scratch (it is
	completely disposable).
	"""
	# NOTE(review): lines are missing from this view — the loop that
	# feeds x into the _aux_cache_keys_re check, the try: around
	# os.stat, bodies of the cache-validation branches, the assignment
	# filling mydata from the zip() loop, and the cache_data dict setup
	# are not visible. Fragment not runnable as-is.
	cache_these_wants = self._aux_cache_keys.intersection(wants)
	if self._aux_cache_keys_re.match(x) is not None:
		cache_these_wants.add(x)
	if not cache_these_wants:
		# Nothing cacheable requested: bypass the cache entirely.
		return self._aux_get(mycpv, wants)
	cache_these = set(self._aux_cache_keys)
	cache_these.update(cache_these_wants)
	mydir = self.getpath(mycpv)
	mydir_stat = os.stat(mydir)
	except OSError as e:
		if e.errno != errno.ENOENT:
		raise KeyError(mycpv)
	mydir_mtime = mydir_stat[stat.ST_MTIME]
	pkg_data = self._aux_cache["packages"].get(mycpv)
	pull_me = cache_these.union(wants)
	mydata = {"_mtime_" : mydir_mtime}
	cache_incomplete = False
	# Sanity-check the cached tuple shape before trusting it.
	if pkg_data is not None:
		if not isinstance(pkg_data, tuple) or len(pkg_data) != 2:
		cache_mtime, metadata = pkg_data
		if not isinstance(cache_mtime, (long, int)) or \
			not isinstance(metadata, dict):
		cache_mtime, metadata = pkg_data
		cache_valid = cache_mtime == mydir_mtime
		# Migrate old metadata to unicode.
		for k, v in metadata.items():
			metadata[k] = _unicode_decode(v,
				encoding=_encodings['repo.content'], errors='replace')
		mydata.update(metadata)
		pull_me.difference_update(mydata)
	# pull any needed data and cache it
	aux_keys = list(pull_me)
	for k, v in zip(aux_keys,
		self._aux_get(mycpv, aux_keys, st=mydir_stat)):
	if not cache_valid or cache_these.difference(metadata):
		if cache_valid and metadata:
			cache_data.update(metadata)
		for aux_key in cache_these:
			cache_data[aux_key] = mydata[aux_key]
		self._aux_cache["packages"][mycpv] = (mydir_mtime, cache_data)
		self._aux_cache["modified"].add(mycpv)
	if _slot_re.match(mydata['SLOT']) is None:
		# Empty or invalid slot triggers InvalidAtom exceptions when
		# generating slot atoms for packages, so translate it to '0' here.
		mydata['SLOT'] = _unicode_decode('0')
	return [mydata[x] for x in wants]
def _aux_get(self, mycpv, wants, st=None):
	# Uncached metadata read: fetch each requested key from the
	# package's vdb directory files.
	# NOTE(review): lines are missing from this view — the try: around
	# the stat, the results list setup, the loop over wants, the
	# codecs.open call head/tail and its read, and the final return
	# are not visible. Fragment not runnable as-is.
	mydir = self.getpath(mycpv)
	except OSError as e:
		if e.errno == errno.ENOENT:
			raise KeyError(mycpv)
		elif e.errno == PermissionDenied.errno:
			raise PermissionDenied(mydir)
	if not stat.S_ISDIR(st.st_mode):
		raise KeyError(mycpv)
	# "_mtime_" pseudo-key is served from the directory stat.
	results.append(st[stat.ST_MTIME])
	_unicode_encode(os.path.join(mydir, x),
		encoding=_encodings['fs'], errors='strict'),
		mode='r', encoding=_encodings['repo.content'],
	# Preserve \n for metadata that is known to
	# contain multiple lines.
	if self._aux_multi_line_re.match(x) is None:
		myd = " ".join(myd.split())
	myd = _unicode_decode('')
	# Missing/empty EAPI historically means EAPI 0.
	if x == "EAPI" and not myd:
		results.append(_unicode_decode('0'))
def aux_update(self, cpv, values):
	# Write metadata values into an installed package's vdb entry;
	# an empty/None value removes the corresponding file.
	# NOTE(review): lines are missing from this view — the body of the
	# "not exists" guard, the if/else selecting setfile vs. unlink, and
	# the except body are not visible. Fragment not runnable as-is.
	self._bump_mtime(cpv)
	cat, pkg = catsplit(cpv)
	mylink = dblink(cat, pkg, self.root, self.settings,
		treetype="vartree", vartree=self.vartree)
	if not mylink.exists():
	self._clear_pkg_cache(mylink)
	for k, v in values.items():
		mylink.setfile(k, v)
		os.unlink(os.path.join(self.getpath(cpv), k))
		except EnvironmentError:
	# Bump again so mtime-based cache validation sees the change.
	self._bump_mtime(cpv)
def counter_tick(self, myroot, mycpv=None):
	"""Increment the global package COUNTER and return the new value."""
	return self.counter_tick_core(myroot, incrementing=1, mycpv=mycpv)
def get_counter_tick_core(self, myroot, mycpv=None):
	# NOTE(review): the docstring's triple-quote delimiter lines and
	# several body lines (max_counter initialization, try: lines around
	# the per-package aux_get, cfile open/read/close, the except
	# bodies' counter fallback, and the new_vdb default) are missing
	# from this view. Fragment not runnable as-is.
	Use this method to retrieve the counter instead
	of having to trust the value of a global counter
	file that can lead to invalid COUNTER
	generation. When cache is valid, the package COUNTER
	files are not read and we rely on the timestamp of
	the package directory to validate cache. The stat
	calls should only take a short time, so performance
	is sufficient without having to rely on a potentially
	corrupt global counter file.

	The global counter file located at
	$CACHE_PATH/counter serves to record the
	counter of the last installed package and
	it also corresponds to the total number of
	installation actions that have occurred in
	the history of this package database.
	# Scan every installed package's COUNTER to find the maximum.
	cp_list = self.cp_list
	for cp in self.cp_all():
		for cpv in cp_list(cp):
			counter = int(self.aux_get(cpv, ["COUNTER"])[0])
			except (KeyError, OverflowError, ValueError):
			if counter > max_counter:
				max_counter = counter
	cfile = codecs.open(
		_unicode_encode(self._counter_path,
		encoding=_encodings['fs'], errors='strict'),
		mode='r', encoding=_encodings['repo.content'],
	except EnvironmentError as e:
		# Silently allow ENOENT since files under
		# /var/cache/ are allowed to disappear. -- TODO confirm;
		# only the fallback messages are visible here.
		new_vdb = not bool(self.cpv_all())
		writemsg(_("!!! Unable to read COUNTER file: '%s'\n") % \
			self._counter_path, noiselevel=-1)
		writemsg("!!! %s\n" % str(e), noiselevel=-1)
	counter = long(cfile.readline().strip())
	except (OverflowError, ValueError) as e:
		writemsg(_("!!! COUNTER file is corrupt: '%s'\n") % \
			self._counter_path, noiselevel=-1)
		writemsg("!!! %s\n" % str(e), noiselevel=-1)
	# We must ensure that we return a counter
	# value that is at least as large as the
	# highest one from the installed packages,
	# since having a corrupt value that is too low
	# can trigger incorrect AUTOCLEAN behavior due
	# to newly installed packages having lower
	# COUNTERs than the previous version in the
	if counter > max_counter:
		max_counter = counter
	if counter < 0 and not new_vdb:
		writemsg(_("!!! Initializing COUNTER to " \
			"value of %d\n") % max_counter, noiselevel=-1)
	return max_counter + 1
def counter_tick_core(self, myroot, incrementing=1, mycpv=None):
	"This method will grab the next COUNTER value and record it back to the global file. Returns new counter value."
	# NOTE(review): lines are missing from this view — the increment of
	# counter when incrementing is set and the final return are not
	# visible. Fragment not runnable as-is.
	counter = self.get_counter_tick_core(myroot, mycpv=mycpv) - 1
	# use same permissions as config._init_dirs()
	ensure_dirs(os.path.dirname(self._counter_path),
		gid=portage_gid, mode=0o2750, mask=0o2)
	# update new global counter file
	write_atomic(self._counter_path, str(counter))
def _dblink(self, cpv):
	"""Construct a dblink for an installed cpv within this vartree."""
	category, pf = catsplit(cpv)
	return dblink(category, pf, self.root, self.settings,
		vartree=self.vartree, treetype="vartree")
def removeFromContents(self, pkg, paths, relative_paths=True):
	# NOTE(review): the docstring delimiters and several body lines
	# (root assignment, the relative_paths branch selection, the guard
	# around the CONTENTS rewrite, and f.close()) are missing from this
	# view. Fragment not runnable as-is.
	@param pkg: cpv for an installed package
	@param paths: paths of files to remove from contents
	@type paths: iterable
	if not hasattr(pkg, "getcontents"):
		pkg = self._dblink(pkg)
	root_len = len(root) - 1
	new_contents = pkg.getcontents().copy()
	for filename in paths:
		filename = _unicode_decode(filename,
			encoding=_encodings['content'], errors='strict')
		filename = normalize_path(filename)
		relative_filename = filename
		relative_filename = filename[root_len:]
		contents_key = pkg._match_contents(relative_filename, root)
		del new_contents[contents_key]
	# Bump before and after the write so mtime-based cache checks
	# never see a half-written CONTENTS as fresh.
	self._bump_mtime(pkg.mycpv)
	f = atomic_ofstream(os.path.join(pkg.dbdir, "CONTENTS"))
	write_contents(new_contents, root, f)
	self._bump_mtime(pkg.mycpv)
	pkg._clear_contents_cache()
class _owners_cache(object):
	# NOTE(review): many lines of this class are missing from this view
	# (docstring delimiters, the try/except around the hashlib import,
	# _hash_bits, the def line of add(), __init__'s body, branch bodies
	# in _add_path, and hexdigest()/return lines in _hash_str).
	# Fragment not runnable as-is.
	This class maintains an hash table that serves to index package
	contents by mapping the basename of file to a list of possible
	packages that own it. This is used to optimize owner lookups
	by narrowing the search down to a smaller number of packages.
	from hashlib import md5 as _new_hash
	# Fallback for pythons without hashlib (presumably inside an
	# except ImportError — confirm against the full file).
	from md5 import new as _new_hash
	_hex_chars = int(_hash_bits / 4)

	def __init__(self, vardb):
	# The following lines belong to add(cpv) — its def line is not
	# visible here.
	root_len = len(self._vardb.root)
	contents = self._vardb._dblink(cpv).getcontents()
	pkg_hash = self._hash_pkg(cpv)
	# Empty path is a code used to represent empty contents.
	self._add_path("", pkg_hash)
	self._add_path(x[root_len:], pkg_hash)
	self._vardb._aux_cache["modified"].add(cpv)

	def _add_path(self, path, pkg_hash):
		Empty path is a code that represents empty contents.
		name = os.path.basename(path.rstrip(os.path.sep))
		name_hash = self._hash_str(name)
		base_names = self._vardb._aux_cache["owners"]["base_names"]
		pkgs = base_names.get(name_hash)
		base_names[name_hash] = pkgs
		pkgs[pkg_hash] = None

	def _hash_str(self, s):
		h = self._new_hash()
		# Always use a constant utf_8 encoding here, since
		# the "default" encoding can change.
		h.update(_unicode_encode(s,
			encoding=_encodings['repo.content'],
			errors='backslashreplace'))
		# Keep only the trailing hex chars of the digest.
		h = h[-self._hex_chars:]

	def _hash_pkg(self, cpv):
		counter, mtime = self._vardb.aux_get(
			cpv, ["COUNTER", "_mtime_"])
		counter = int(counter)
		# (cpv, counter, mtime) uniquely identifies a package install.
		return (cpv, counter, mtime)
class _owners_db(object):
	# Owner lookups: map installed files back to the packages that own
	# them, using the basename hash index built by _owners_cache.
	# NOTE(review): many lines are missing from this view (docstring
	# delimiters, __init__'s body, accumulator initializations such as
	# owners = {}, try:/else:/continue lines, and returns). Fragments
	# below are not runnable as-is.

	def __init__(self, vardb):

	def _populate(self):
		owners_cache = vardbapi._owners_cache(self._vardb)
		cached_hashes = set()
		base_names = self._vardb._aux_cache["owners"]["base_names"]
		# Take inventory of all cached package hashes.
		for name, hash_values in list(base_names.items()):
			if not isinstance(hash_values, dict):
				del base_names[name]
			cached_hashes.update(hash_values)
		# Create sets of valid package hashes and uncached packages.
		uncached_pkgs = set()
		hash_pkg = owners_cache._hash_pkg
		valid_pkg_hashes = set()
		for cpv in self._vardb.cpv_all():
			hash_value = hash_pkg(cpv)
			valid_pkg_hashes.add(hash_value)
			if hash_value not in cached_hashes:
				uncached_pkgs.add(cpv)
		# Cache any missing packages.
		for cpv in uncached_pkgs:
			owners_cache.add(cpv)
		# Delete any stale cache.
		stale_hashes = cached_hashes.difference(valid_pkg_hashes)
		for base_name_hash, bucket in list(base_names.items()):
			for hash_value in stale_hashes.intersection(bucket):
				del bucket[hash_value]
			del base_names[base_name_hash]

	def get_owners(self, path_iter):
		@return the owners as a dblink -> set(files) mapping.
		for owner, f in self.iter_owners(path_iter):
			owned_files = owners.get(owner)
			if owned_files is None:
				owners[owner] = owned_files

	def getFileOwnerMap(self, path_iter):
		# Invert the owner mapping: file -> set of owning dblinks.
		owners = self.get_owners(path_iter)
		for pkg_dblink, files in owners.items():
			owner_set = file_owners.get(f)
			if owner_set is None:
				file_owners[f] = owner_set
			owner_set.add(pkg_dblink)

	def iter_owners(self, path_iter):
		Iterate over tuples of (dblink, path). In order to avoid
		consuming too many resources for too much time, resources
		are only allocated for the duration of a given iter_owners()
		call. Therefore, to maximize reuse of resources when searching
		for multiple files, it's best to search for them all in a single
		if not isinstance(path_iter, list):
			path_iter = list(path_iter)
		# Large queries fall back to the low-memory linear scan.
		if len(path_iter) > 10:
			for x in self._iter_owners_low_mem(path_iter):
		owners_cache = self._populate()
		hash_pkg = owners_cache._hash_pkg
		hash_str = owners_cache._hash_str
		base_names = self._vardb._aux_cache["owners"]["base_names"]
		x = dblink_cache.get(cpv)
		if len(dblink_cache) > 20:
			# Ensure that we don't run out of memory.
			raise StopIteration()
		x = self._vardb._dblink(cpv)
		dblink_cache[cpv] = x
		path = path_iter.pop()
		is_basename = os.sep != path[:1]
		name = os.path.basename(path.rstrip(os.path.sep))
		name_hash = hash_str(name)
		pkgs = base_names.get(name_hash)
		if pkgs is not None:
			for hash_value in pkgs:
				# Skip malformed/stale index entries.
				if not isinstance(hash_value, tuple) or \
					len(hash_value) != 3:
				cpv, counter, mtime = hash_value
				if not isinstance(cpv, basestring):
				current_hash = hash_pkg(cpv)
				if current_hash != hash_value:
				for p in dblink(cpv).getcontents():
					if os.path.basename(p) == name:
						owners.append((cpv, p[len(root):]))
				if dblink(cpv).isowner(path, root):
					owners.append((cpv, path))
		except StopIteration:
			# Too many dblinks cached: restart with the low-memory scan.
			path_iter.append(path)
			dblink_cache.clear()
			for x in self._iter_owners_low_mem(path_iter):
		for cpv, p in owners:
			yield (dblink(cpv), p)

	def _iter_owners_low_mem(self, path_list):
		This implemention will make a short-lived dblink instance (and
		parse CONTENTS) for every single installed package. This is
		slower and but uses less memory than the method which uses the
		for path in path_list:
			is_basename = os.sep != path[:1]
			name = os.path.basename(path.rstrip(os.path.sep))
			path_info_list.append((path, name, is_basename))
		root = self._vardb.root
		for cpv in self._vardb.cpv_all():
			dblnk = self._vardb._dblink(cpv)
			for path, name, is_basename in path_info_list:
				for p in dblnk.getcontents():
					if os.path.basename(p) == name:
						yield dblnk, p[len(root):]
				if dblnk.isowner(path, root):
class vartree(object):
	"this tree will scan a var/db/pkg database located at root (passed to init)"
	# NOTE(review): many lines of this class are missing from this view
	# (parts of __init__'s signature and clone branch, bodies of
	# zap/inject, try: lines, myprovides initialization, and several
	# returns). Fragments below are not runnable as-is.
	def __init__(self, root="/", virtual=None, clone=None, categories=None,
		# Deprecated clone path: deep-copy state from another vartree.
		writemsg("vartree.__init__(): deprecated " + \
			"use of clone parameter\n", noiselevel=-1)
		self.root = clone.root[:]
		self.dbapi = copy.deepcopy(clone.dbapi)
		from portage import config
		self.settings = config(clone=clone.settings)
		if settings is None:
			from portage import settings
		self.settings = settings
		if categories is None:
			categories = settings.categories
		self.dbapi = vardbapi(self.root, categories=categories,
			settings=settings, vartree=self)

	def getpath(self, mykey, filename=None):
		return self.dbapi.getpath(mykey, filename=filename)

	def zap(self, mycpv):

	def inject(self, mycpv):

	def get_provide(self, mycpv):
		# Expand this package's PROVIDE under its recorded USE flags.
		mylines, myuse = self.dbapi.aux_get(mycpv, ["PROVIDE", "USE"])
		myuse = myuse.split()
		mylines = flatten(use_reduce(paren_reduce(mylines), uselist=myuse))
		for myprovide in mylines:
			mys = catpkgsplit(myprovide)
			mys = myprovide.split("/")
			myprovides += [mys[0] + "/" + mys[1]]
		except SystemExit as e:
		except Exception as e:
			mydir = os.path.join(self.root, VDB_PATH, mycpv)
			writemsg(_("\nParse Error reading PROVIDE and USE in '%s'\n") % mydir,
			writemsg(_("Possibly Invalid: '%s'\n") % str(mylines),
			writemsg(_("Exception: %s\n\n") % str(e), noiselevel=-1)

	def get_all_provides(self):
		# Aggregate provide -> [cpv, ...] over all installed packages.
		for node in self.getallcpv():
			for mykey in self.get_provide(node):
				if mykey in myprovides:
					myprovides[mykey] += [node]
					myprovides[mykey] = [node]

	def dep_bestmatch(self, mydep, use_cache=1):
		"compatibility method -- all matches, not just visible ones"
		#mymatch=best(match(dep_expand(mydep,self.dbapi),self.dbapi))
		mymatch = best(self.dbapi.match(
			dep_expand(mydep, mydb=self.dbapi, settings=self.settings),
			use_cache=use_cache))

	def dep_match(self, mydep, use_cache=1):
		"compatibility method -- we want to see all matches, not just visible ones"
		#mymatch = match(mydep,self.dbapi)
		mymatch = self.dbapi.match(mydep, use_cache=use_cache)

	def exists_specific(self, cpv):
		return self.dbapi.cpv_exists(cpv)

	def getallcpv(self):
		"""temporary function, probably to be renamed --- Gets a list of all
		category/package-versions installed on the system."""
		return self.dbapi.cpv_all()

	def getallnodes(self):
		"""new behavior: these are all *unmasked* nodes. There may or may not be available
		masked package for nodes in this nodes list."""
		return self.dbapi.cp_all()

	def getebuildpath(self, fullpackage):
		cat, package = catsplit(fullpackage)
		return self.getpath(fullpackage, filename=package+".ebuild")

	def getslot(self, mycatpkg):
		"Get a slot for a catpkg; assume it exists."
		return self.dbapi.aux_get(mycatpkg, ["SLOT"])[0]
class dblink(object):
	# NOTE(review): docstring delimiters and the closing of the
	# _contents_re pattern are missing from this view.
	This class provides an interface to the installed package database
	At present this is implemented as a text backend in /var/db/pkg.
	# Paths needing normalization: doubled slashes, relative paths,
	# trailing slash, or "." / ".." components.
	_normalize_needed = re.compile(r'//|^[^/]|./$|(^|/)\.\.?(/|$)')
	# CONTENTS line grammar: dev/dir/fif, obj with md5+mtime, or sym
	# with target+mtime.
	_contents_re = re.compile(r'^(' + \
		r'(?P<dir>(dev|dir|fif) (.+))|' + \
		r'(?P<obj>(obj) (.+) (\S+) (\d+))|' + \
		r'(?P<sym>(sym) (.+) -> (.+) (\d+))' + \
	# When looping over files for merge/unmerge, temporarily yield to the
	# scheduler each time this many files are processed.
	_file_merge_yield_interval = 20
def __init__(self, cat, pkg, myroot, mysettings, treetype=None,
	vartree=None, blockers=None, scheduler=None):
	"""
	Creates a DBlink object for a given CPV.
	The given CPV may not be present in the database already.

	@param cat: Category
	@param pkg: Package (PV)
	@param myroot: Typically ${ROOT}
	@type myroot: String (Path)
	@param mysettings: Typically portage.config
	@type mysettings: An instance of portage.config
	@param treetype: one of ['porttree','bintree','vartree']
	@type treetype: String
	@param vartree: an instance of vartree corresponding to myroot.
	@type vartree: vartree
	"""
	# NOTE(review): lines are missing from this view — assignments of
	# self.cat/self.pkg/self.myroot, the guard around the default
	# vartree lookup, and other interior lines. Fragment not runnable
	# as-is.
	self.mycpv = self.cat + "/" + self.pkg
	self.mysplit = list(catpkgsplit(self.mycpv)[1:])
	self.mysplit[0] = "%s/%s" % (self.cat, self.mysplit[0])
	self.treetype = treetype
	# Default vartree resolved from the global db (presumably only when
	# vartree is None — guard line not visible).
	from portage import db
	vartree = db[myroot]["vartree"]
	self.vartree = vartree
	self._blockers = blockers
	self._scheduler = scheduler
	# vdb paths for this package; -MERGING- marks an in-progress merge.
	self.dbroot = normalize_path(os.path.join(myroot, VDB_PATH))
	self.dbcatdir = self.dbroot+"/"+cat
	self.dbpkgdir = self.dbcatdir+"/"+pkg
	self.dbtmpdir = self.dbcatdir+"/-MERGING-"+pkg
	self.dbdir = self.dbpkgdir
	self._lock_vdb = None
	self.settings = mysettings
	self._verbose = self.settings.get("PORTAGE_VERBOSE") == "1"
	protect_obj = ConfigProtect(myroot,
		portage.util.shlex_split(mysettings.get("CONFIG_PROTECT", "")),
		portage.util.shlex_split(
			mysettings.get("CONFIG_PROTECT_MASK", "")))
	self.updateprotect = protect_obj.updateprotect
	self.isprotected = protect_obj.isprotected
	self._installed_instance = None
	self.contentscache = None
	self._contents_inodes = None
	self._contents_basenames = None
	self._linkmap_broken = False
	self._md5_merge_map = {}
	self._hash_key = (self.myroot, self.mycpv)
	# Body of __hash__ (its def line is not visible in this view):
	# identity is (myroot, cpv).
	return hash(self._hash_key)

def __eq__(self, other):
	# Two dblinks are equal iff they refer to the same cpv under the
	# same root.
	return isinstance(other, dblink) and \
		self._hash_key == other._hash_key
# NOTE(review): the def lines for lockdb, unlockdb, getpath, exists,
# and delete, plus their try:/docstring delimiters and several guards,
# are missing from this view. Fragments below are not runnable as-is.
# lockdb body: refuse re-entrant locking, then lock the vdb root dir.
raise AssertionError("Lock already held.")
# At least the parent needs to exist for the lock file.
ensure_dirs(self.dbroot)
self._lock_vdb = lockdir(self.dbroot)
# unlockdb body: release and forget the held lock.
unlockdir(self._lock_vdb)
self._lock_vdb = None
"return path to location of db information (for >>> informational display)"
"does the db entry exist? boolean."
return os.path.exists(self.dbdir)
# delete body:
Remove this entry from the database
if not os.path.exists(self.dbdir):
# Check validity of self.dbdir before attempting to remove it.
if not self.dbdir.startswith(self.dbroot):
	writemsg(_("portage.dblink.delete(): invalid dbdir: %s\n") % \
		self.dbdir, noiselevel=-1)
shutil.rmtree(self.dbdir)
# If empty, remove parent category directory.
os.rmdir(os.path.dirname(self.dbdir))
self.vartree.dbapi._remove(self)

def clearcontents(self):
	For a given db entry (self), erase the CONTENTS values.
	if os.path.exists(self.dbdir+"/CONTENTS"):
		os.unlink(self.dbdir+"/CONTENTS")

def _clear_contents_cache(self):
	# Drop all memoized CONTENTS-derived state.
	self.contentscache = None
	self._contents_inodes = None
	self._contents_basenames = None
def getcontents(self):
	# NOTE(review): docstring delimiters, pkgfiles/errors/null_byte
	# initialization, try: lines, myc.close(), base assignments per
	# entry type, several guards, and the final return are missing from
	# this view. Fragment not runnable as-is.
	Get the installed files of a given package (aka what that package installed)
	contents_file = os.path.join(self.dbdir, "CONTENTS")
	if self.contentscache is not None:
		return self.contentscache
	myc = codecs.open(_unicode_encode(contents_file,
		encoding=_encodings['fs'], errors='strict'),
		mode='r', encoding=_encodings['repo.content'],
	except EnvironmentError as e:
		if e.errno != errno.ENOENT:
		self.contentscache = pkgfiles
	mylines = myc.readlines()
	# Hoist lookups out of the per-line loop.
	normalize_needed = self._normalize_needed
	contents_re = self._contents_re
	obj_index = contents_re.groupindex['obj']
	dir_index = contents_re.groupindex['dir']
	sym_index = contents_re.groupindex['sym']
	myroot = self.myroot
	if myroot == os.path.sep:
	for pos, line in enumerate(mylines):
		if null_byte in line:
			# Null bytes are a common indication of corruption.
			errors.append((pos + 1, _("Null byte found in CONTENTS entry")))
		line = line.rstrip("\n")
		m = contents_re.match(line)
		errors.append((pos + 1, _("Unrecognized CONTENTS entry")))
		if m.group(obj_index) is not None:
			#format: type, mtime, md5sum
			data = (m.group(base+1), m.group(base+4), m.group(base+3))
		elif m.group(dir_index) is not None:
			data = (m.group(base+1),)
		elif m.group(sym_index) is not None:
			#format: type, mtime, dest
			data = (m.group(base+1), m.group(base+4), m.group(base+3))
			# This won't happen as long the regular expression
			# is written to only match valid entries.
			raise AssertionError(_("required group not found " + \
				"in CONTENTS entry: '%s'") % line)
		path = m.group(base+2)
		if normalize_needed.search(path) is not None:
			path = normalize_path(path)
			if not path.startswith(os.path.sep):
				path = os.path.sep + path
		if myroot is not None:
			path = os.path.join(myroot, path.lstrip(os.path.sep))
		pkgfiles[path] = data
	# Report all accumulated parse errors at once.
	writemsg(_("!!! Parse error in '%s'\n") % contents_file, noiselevel=-1)
	for pos, e in errors:
		writemsg(_("!!! line %d: %s\n") % (pos, e), noiselevel=-1)
	self.contentscache = pkgfiles
2093 def unmerge(self, pkgfiles=None, trimworld=1, cleanup=1,
2094 ldpath_mtimes=None, others_in_slot=None):
2097 Unmerges a given package (CPV)
2102 @param pkgfiles: files to unmerge (generally self.getcontents() )
2103 @type pkgfiles: Dictionary
2104 @param trimworld: Remove CPV from world file if True, not if False
2105 @type trimworld: Boolean
2106 @param cleanup: cleanup to pass to doebuild (see doebuild)
2107 @type cleanup: Boolean
2108 @param ldpath_mtimes: mtimes to pass to env_update (see env_update)
2109 @type ldpath_mtimes: Dictionary
2110 @param others_in_slot: all dblink instances in this slot, excluding self
2111 @type others_in_slot: list
2114 1. os.EX_OK if everything went well.
2115 2. return code of the failed phase (for prerm, postrm, cleanrm)
2118 The caller must ensure that lockdb() and unlockdb() are called
2119 before and after this method.
2121 self.vartree.dbapi._bump_mtime(self.mycpv)
2122 showMessage = self._display_merge
2123 if self.vartree.dbapi._categories is not None:
2124 self.vartree.dbapi._categories = None
2125 # When others_in_slot is supplied, the security check has already been
2126 # done for this slot, so it shouldn't be repeated until the next
2127 # replacement or unmerge operation.
2128 if others_in_slot is None:
2129 slot = self.vartree.dbapi.aux_get(self.mycpv, ["SLOT"])[0]
2130 slot_matches = self.vartree.dbapi.match(
2131 "%s:%s" % (portage.cpv_getkey(self.mycpv), slot))
2133 for cur_cpv in slot_matches:
2134 if cur_cpv == self.mycpv:
2136 others_in_slot.append(dblink(self.cat, catsplit(cur_cpv)[1],
2137 self.vartree.root, self.settings, vartree=self.vartree,
2138 treetype="vartree"))
2140 retval = self._security_check([self] + others_in_slot)
2144 contents = self.getcontents()
2145 # Now, don't assume that the name of the ebuild is the same as the
2146 # name of the dir; the package may have been moved.
2148 ebuild_phase = "prerm"
2150 mystuff = os.listdir(self.dbdir)
2152 if x.endswith(".ebuild"):
2153 myebuildpath = os.path.join(self.dbdir, self.pkg + ".ebuild")
2154 if x[:-7] != self.pkg:
2155 # Clean up after vardbapi.move_ent() breakage in
2156 # portage versions before 2.1.2
2157 os.rename(os.path.join(self.dbdir, x), myebuildpath)
2158 write_atomic(os.path.join(self.dbdir, "PF"), self.pkg+"\n")
2161 self.settings.setcpv(self.mycpv, mydb=self.vartree.dbapi)
2164 doebuild_environment(myebuildpath, "prerm", self.myroot,
2165 self.settings, 0, 0, self.vartree.dbapi)
2166 except UnsupportedAPIException as e:
2167 # Sometimes this happens due to corruption of the EAPI file.
2168 writemsg(_("!!! FAILED prerm: %s\n") % \
2169 os.path.join(self.dbdir, "EAPI"), noiselevel=-1)
2170 writemsg("%s\n" % str(e), noiselevel=-1)
2173 catdir = os.path.dirname(self.settings["PORTAGE_BUILDDIR"])
2174 ensure_dirs(os.path.dirname(catdir), uid=portage_uid,
2175 gid=portage_gid, mode=0o70, mask=0)
2177 builddir_lock = None
2179 scheduler = self._scheduler
2184 catdir_lock = lockdir(catdir)
2186 uid=portage_uid, gid=portage_gid,
2188 builddir_lock = lockdir(
2189 self.settings["PORTAGE_BUILDDIR"])
2191 unlockdir(catdir_lock)
2195 prepare_build_dirs(self.myroot, self.settings, 1)
2196 log_path = self.settings.get("PORTAGE_LOG_FILE")
2198 if scheduler is None:
2199 retval = doebuild(myebuildpath, ebuild_phase, self.myroot,
2200 self.settings, cleanup=cleanup, use_cache=0,
2201 mydbapi=self.vartree.dbapi, tree=self.treetype,
2202 vartree=self.vartree)
2204 retval = scheduler.dblinkEbuildPhase(
2205 self, self.vartree.dbapi, myebuildpath, ebuild_phase)
2207 # XXX: Decide how to handle failures here.
2208 if retval != os.EX_OK:
2210 writemsg(_("!!! FAILED prerm: %s\n") % retval, noiselevel=-1)
2212 self._unmerge_pkgfiles(pkgfiles, others_in_slot)
2213 self._clear_contents_cache()
2215 # Remove the registration of preserved libs for this pkg instance
2216 plib_registry = self.vartree.dbapi.plib_registry
2217 plib_registry.unregister(self.mycpv, self.settings["SLOT"],
2218 self.vartree.dbapi.cpv_counter(self.mycpv))
2221 ebuild_phase = "postrm"
2222 if scheduler is None:
2223 retval = doebuild(myebuildpath, ebuild_phase, self.myroot,
2224 self.settings, use_cache=0, tree=self.treetype,
2225 mydbapi=self.vartree.dbapi, vartree=self.vartree)
2227 retval = scheduler.dblinkEbuildPhase(
2228 self, self.vartree.dbapi, myebuildpath, ebuild_phase)
2230 # XXX: Decide how to handle failures here.
2231 if retval != os.EX_OK:
2233 writemsg(_("!!! FAILED postrm: %s\n") % retval, noiselevel=-1)
2235 # Skip this if another package in the same slot has just been
2236 # merged on top of this package, since the other package has
2237 # already called LinkageMap.rebuild() and passed it's NEEDED file
2238 # in as an argument.
2239 if not others_in_slot:
2240 self._linkmap_rebuild(exclude_pkgs=(self.mycpv,))
2242 # remove preserved libraries that don't have any consumers left
2243 cpv_lib_map = self._find_unused_preserved_libs()
2245 self._remove_preserved_libs(cpv_lib_map)
2246 for cpv, removed in cpv_lib_map.items():
2247 if not self.vartree.dbapi.cpv_exists(cpv):
2248 for dblnk in others_in_slot:
2249 if dblnk.mycpv == cpv:
2250 # This one just got merged so it doesn't
2251 # register with cpv_exists() yet.
2252 self.vartree.dbapi.removeFromContents(
2256 self.vartree.dbapi.removeFromContents(cpv, removed)
2258 # Prune any preserved libs that may have
2259 # been unmerged with this package.
2260 self.vartree.dbapi.plib_registry.pruneNonExisting()
2263 self.vartree.dbapi._bump_mtime(self.mycpv)
2267 if retval != os.EX_OK:
2269 msg = _("The '%(ebuild_phase)s' "
2270 "phase of the '%(cpv)s' package "
2271 "has failed with exit value %(retval)s.") % \
2272 {"ebuild_phase":ebuild_phase, "cpv":self.mycpv,
2274 from textwrap import wrap
2275 msg_lines.extend(wrap(msg, 72))
2276 msg_lines.append("")
2278 ebuild_name = os.path.basename(myebuildpath)
2279 ebuild_dir = os.path.dirname(myebuildpath)
2280 msg = _("The problem occurred while executing "
2281 "the ebuild file named '%(ebuild_name)s' "
2282 "located in the '%(ebuild_dir)s' directory. "
2283 "If necessary, manually remove "
2284 "the environment.bz2 file and/or the "
2285 "ebuild file located in that directory.") % \
2286 {"ebuild_name":ebuild_name, "ebuild_dir":ebuild_dir}
2287 msg_lines.extend(wrap(msg, 72))
2288 msg_lines.append("")
2291 "of the environment.bz2 file is "
2292 "preferred since it may allow the "
2293 "removal phases to execute successfully. "
2294 "The ebuild will be "
2295 "sourced and the eclasses "
2296 "from the current portage tree will be used "
2297 "when necessary. Removal of "
2298 "the ebuild file will cause the "
2299 "pkg_prerm() and pkg_postrm() removal "
2300 "phases to be skipped entirely.")
2301 msg_lines.extend(wrap(msg, 72))
2303 self._eerror(ebuild_phase, msg_lines)
2305 # process logs created during pre/postrm
2306 elog_process(self.mycpv, self.settings)
2307 if retval == os.EX_OK:
2308 if scheduler is None:
2309 doebuild(myebuildpath, "cleanrm", self.myroot,
2310 self.settings, tree="vartree",
2311 mydbapi=self.vartree.dbapi,
2312 vartree=self.vartree)
2314 scheduler.dblinkEbuildPhase(
2315 self, self.vartree.dbapi,
2316 myebuildpath, "cleanrm")
2318 unlockdir(builddir_lock)
2320 if myebuildpath and not catdir_lock:
2321 # Lock catdir for removal if empty.
2322 catdir_lock = lockdir(catdir)
2327 except OSError as e:
2328 if e.errno not in (errno.ENOENT,
2329 errno.ENOTEMPTY, errno.EEXIST):
2332 unlockdir(catdir_lock)
2334 if log_path is not None:
2336 if not failures and 'unmerge-logs' not in self.settings.features:
2343 st = os.stat(log_path)
2353 if log_path is not None and os.path.exists(log_path):
2354 # Restore this since it gets lost somewhere above and it
2355 # needs to be set for _display_merge() to be able to log.
2356 # Note that the log isn't necessarily supposed to exist
2357 # since if PORT_LOGDIR is unset then it's a temp file
2358 # so it gets cleaned above.
2359 self.settings["PORTAGE_LOG_FILE"] = log_path
2361 self.settings.pop("PORTAGE_LOG_FILE", None)
2363 env_update(target_root=self.myroot, prev_mtimes=ldpath_mtimes,
2364 contents=contents, env=self.settings.environ(),
2365 writemsg_level=self._display_merge)
# Route a merge/unmerge-time message either through the attached scheduler
# (so output interleaves correctly with other jobs) or directly via
# writemsg_level. Low-priority messages are suppressed when not verbose.
# NOTE(review): this numbered listing elides original lines (e.g. 2370,
# 2374) — presumably an early `return` and an `else:` — so the control
# flow shown here is incomplete; confirm against the full source.
2368 def _display_merge(self, msg, level=0, noiselevel=0):
2369 if not self._verbose and noiselevel >= 0 and level < logging.WARN:
2371 if self._scheduler is not None:
2372 self._scheduler.dblinkDisplayMerge(self, msg,
2373 level=level, noiselevel=noiselevel)
2375 writemsg_level(msg, level=level, noiselevel=noiselevel)
# Remove this package's files from the live filesystem, skipping files
# that are claimed by another dblink instance in the same slot, protected
# config files, and /lib/modules contents. Finishes by pruning stale
# config-memory entries and zapping self from the vartree database.
# NOTE(review): the listing elides many original lines (try:/else:/
# continue statements and whole spans) — comments below describe only
# what the visible lines establish.
2377 def _unmerge_pkgfiles(self, pkgfiles, others_in_slot):
2380 Unmerges the contents of a package from the liveFS
2381 Removes the VDB entry for self
2383 @param pkgfiles: typically self.getcontents()
2384 @type pkgfiles: Dictionary { filename: [ 'type', '?', 'md5sum' ] }
2385 @param others_in_slot: all dblink instances in this slot, excluding self
2386 @type others_in_slot: list
2391 perf_md5 = perform_md5
2392 showMessage = self._display_merge
2393 scheduler = self._scheduler
2396 showMessage(_("No package files given... Grabbing a set.\n"))
2397 pkgfiles = self.getcontents()
# When others_in_slot was not supplied, discover sibling installs in the
# same SLOT so their owned files can be skipped below.
2399 if others_in_slot is None:
2401 slot = self.vartree.dbapi.aux_get(self.mycpv, ["SLOT"])[0]
2402 slot_matches = self.vartree.dbapi.match(
2403 "%s:%s" % (portage.cpv_getkey(self.mycpv), slot))
2404 for cur_cpv in slot_matches:
2405 if cur_cpv == self.mycpv:
2407 others_in_slot.append(dblink(self.cat, catsplit(cur_cpv)[1],
2408 self.vartree.root, self.settings,
2409 vartree=self.vartree, treetype="vartree"))
2411 dest_root = normalize_path(self.vartree.root).rstrip(os.path.sep) + \
2413 dest_root_len = len(dest_root) - 1
2415 conf_mem_file = os.path.join(dest_root, CONFIG_MEMORY_FILE)
2416 cfgfiledict = grabdict(conf_mem_file)
2419 unmerge_orphans = "unmerge-orphans" in self.settings.features
2422 self.updateprotect()
2423 mykeys = list(pkgfiles)
2427 #process symlinks second-to-last, directories last.
# Errno sets treated as benign during unlink/rmdir — the path is already
# gone or is otherwise not removable in a way we tolerate.
2429 ignored_unlink_errnos = (
2430 errno.EBUSY, errno.ENOENT,
2431 errno.ENOTDIR, errno.EISDIR)
2432 ignored_rmdir_errnos = (
2433 errno.EEXIST, errno.ENOTEMPTY,
2434 errno.EBUSY, errno.ENOENT,
2435 errno.ENOTDIR, errno.EISDIR)
2436 modprotect = os.path.join(self.vartree.root, "lib/modules/")
# Local helper: unlink one path, handling BSD file flags (chflags) on the
# file and its parent, and stripping suid/sgid permissions first so any
# leftover hardlinks are harmless.
2438 def unlink(file_name, lstatobj):
2440 if lstatobj.st_flags != 0:
2441 bsd_chflags.lchflags(file_name, 0)
2442 parent_name = os.path.dirname(file_name)
2443 # Use normal stat/chflags for the parent since we want to
2444 # follow any symlinks to the real parent directory.
2445 pflags = os.stat(parent_name).st_flags
2447 bsd_chflags.chflags(parent_name, 0)
2449 if not stat.S_ISLNK(lstatobj.st_mode):
2450 # Remove permissions to ensure that any hardlinks to
2451 # suid/sgid files are rendered harmless.
2452 os.chmod(file_name, 0)
2453 os.unlink(file_name)
2454 except OSError as ose:
2455 # If the chmod or unlink fails, you are in trouble.
2456 # With Prefix this can be because the file is owned
2457 # by someone else (a screwup by root?), on a normal
2458 # system maybe filesystem corruption. In any case,
2459 # if we backtrace and die here, we leave the system
2460 # in a totally undefined state, hence we just bleed
2461 # like hell and continue to hopefully finish all our
2462 # administrative and pkg_postinst stuff.
2463 self._eerror("postrm",
2464 ["Could not chmod or unlink '%s': %s" % \
2467 if bsd_chflags and pflags != 0:
2468 # Restore the parent flags we saved before unlinking
2469 bsd_chflags.chflags(parent_name, pflags)
# Local helper: one formatted progress line per file, e.g.
# "<<< replaced obj /path".
2471 def show_unmerge(zing, desc, file_type, file_name):
2472 showMessage("%s %s %s %s\n" % \
2473 (zing, desc.ljust(8), file_type, file_name))
# Localized tags shown in the "desc" column of show_unmerge().
2476 unmerge_desc["cfgpro"] = _("cfgpro")
2477 unmerge_desc["replaced"] = _("replaced")
2478 unmerge_desc["!dir"] = _("!dir")
2479 unmerge_desc["!empty"] = _("!empty")
2480 unmerge_desc["!fif"] = _("!fif")
2481 unmerge_desc["!found"] = _("!found")
2482 unmerge_desc["!md5"] = _("!md5")
2483 unmerge_desc["!mtime"] = _("!mtime")
2484 unmerge_desc["!obj"] = _("!obj")
2485 unmerge_desc["!sym"] = _("!sym")
# Main per-file loop: decide for each contents entry whether to remove it
# or leave it, yielding to the scheduler periodically.
2487 for i, objkey in enumerate(mykeys):
2489 if scheduler is not None and \
2490 0 == i % self._file_merge_yield_interval:
2491 scheduler.scheduleYield()
2493 obj = normalize_path(objkey)
2496 _unicode_encode(obj,
2497 encoding=_encodings['merge'], errors='strict')
2498 except UnicodeEncodeError:
2499 # The package appears to have been merged with a
2500 # different value of sys.getfilesystemencoding(),
2501 # so fall back to utf_8 if appropriate.
2503 _unicode_encode(obj,
2504 encoding=_encodings['fs'], errors='strict')
2505 except UnicodeEncodeError:
2509 perf_md5 = portage.checksum.perform_md5
2511 file_data = pkgfiles[objkey]
2512 file_type = file_data[0]
2515 statobj = os.stat(obj)
2520 lstatobj = os.lstat(obj)
2521 except (OSError, AttributeError):
2523 islink = lstatobj is not None and stat.S_ISLNK(lstatobj.st_mode)
2524 if lstatobj is None:
2525 show_unmerge("---", unmerge_desc["!found"], file_type, obj)
2527 if obj.startswith(dest_root):
2528 relative_path = obj[dest_root_len:]
# Skip files now owned by a newer instance in the same slot.
2530 for dblnk in others_in_slot:
2531 if dblnk.isowner(relative_path, dest_root):
2535 # A new instance of this package claims the file, so
2537 show_unmerge("---", unmerge_desc["replaced"], file_type, obj)
2539 elif relative_path in cfgfiledict:
2540 stale_confmem.append(relative_path)
2541 # next line includes a tweak to protect modules from being unmerged,
2542 # but we don't protect modules from being overwritten if they are
2543 # upgraded. We effectively only want one half of the config protection
2544 # functionality for /lib/modules. For portage-ng both capabilities
2545 # should be able to be independently specified.
2546 # TODO: For rebuilds, re-parent previous modules to the new
2547 # installed instance (so they are not orphans). For normal
2548 # uninstall (not rebuild/reinstall), remove the modules along
2549 # with all other files (leave no orphans).
2550 if obj.startswith(modprotect):
2551 show_unmerge("---", unmerge_desc["cfgpro"], file_type, obj)
2554 # Don't unlink symlinks to directories here since that can
2555 # remove /lib and /usr/lib symlinks.
2556 if unmerge_orphans and \
2557 lstatobj and not stat.S_ISDIR(lstatobj.st_mode) and \
2558 not (islink and statobj and stat.S_ISDIR(statobj.st_mode)) and \
2559 not self.isprotected(obj):
2561 unlink(obj, lstatobj)
2562 except EnvironmentError as e:
2563 if e.errno not in ignored_unlink_errnos:
2566 show_unmerge("<<<", "", file_type, obj)
# mtime comparison guards against removing files modified since merge
# (not applied to dir/fif/dev entries, which carry no mtime).
2569 lmtime = str(lstatobj[stat.ST_MTIME])
2570 if (pkgfiles[objkey][0] not in ("dir", "fif", "dev")) and (lmtime != pkgfiles[objkey][1]):
2571 show_unmerge("---", unmerge_desc["!mtime"], file_type, obj)
# Per-type removal: dir / sym / obj / fif / dev.
2574 if pkgfiles[objkey][0] == "dir":
2575 if lstatobj is None or not stat.S_ISDIR(lstatobj.st_mode):
2576 show_unmerge("---", unmerge_desc["!dir"], file_type, obj)
2579 elif pkgfiles[objkey][0] == "sym":
2581 show_unmerge("---", unmerge_desc["!sym"], file_type, obj)
2583 # Go ahead and unlink symlinks to directories here when
2584 # they're actually recorded as symlinks in the contents.
2585 # Normally, symlinks such as /lib -> lib64 are not recorded
2586 # as symlinks in the contents of a package. If a package
2587 # installs something into ${D}/lib/, it is recorded in the
2588 # contents as a directory even if it happens to correspond
2589 # to a symlink when it's merged to the live filesystem.
2591 unlink(obj, lstatobj)
2592 show_unmerge("<<<", "", file_type, obj)
2593 except (OSError, IOError) as e:
2594 if e.errno not in ignored_unlink_errnos:
2597 show_unmerge("!!!", "", file_type, obj)
2598 elif pkgfiles[objkey][0] == "obj":
2599 if statobj is None or not stat.S_ISREG(statobj.st_mode):
2600 show_unmerge("---", unmerge_desc["!obj"], file_type, obj)
2604 mymd5 = perf_md5(obj, calc_prelink=1)
2605 except FileNotFound as e:
2606 # the file has disappeared between now and our stat call
2607 show_unmerge("---", unmerge_desc["!obj"], file_type, obj)
2610 # string.lower is needed because db entries used to be in upper-case. The
2611 # string.lower allows for backwards compatibility.
2612 if mymd5 != pkgfiles[objkey][2].lower():
2613 show_unmerge("---", unmerge_desc["!md5"], file_type, obj)
2616 unlink(obj, lstatobj)
2617 except (OSError, IOError) as e:
2618 if e.errno not in ignored_unlink_errnos:
2621 show_unmerge("<<<", "", file_type, obj)
2622 elif pkgfiles[objkey][0] == "fif":
2623 if not stat.S_ISFIFO(lstatobj[stat.ST_MODE]):
2624 show_unmerge("---", unmerge_desc["!fif"], file_type, obj)
2626 show_unmerge("---", "", file_type, obj)
2627 elif pkgfiles[objkey][0] == "dev":
2628 show_unmerge("---", "", file_type, obj)
# Second pass (elided header): remove now-empty directories, again
# clearing and restoring BSD chflags around the rmdir.
2636 lstatobj = os.lstat(obj)
2637 if lstatobj.st_flags != 0:
2638 bsd_chflags.lchflags(obj, 0)
2639 parent_name = os.path.dirname(obj)
2640 # Use normal stat/chflags for the parent since we want to
2641 # follow any symlinks to the real parent directory.
2642 pflags = os.stat(parent_name).st_flags
2644 bsd_chflags.chflags(parent_name, 0)
2648 if bsd_chflags and pflags != 0:
2649 # Restore the parent flags we saved before unlinking
2650 bsd_chflags.chflags(parent_name, pflags)
2651 show_unmerge("<<<", "", "dir", obj)
2652 except EnvironmentError as e:
2653 if e.errno not in ignored_rmdir_errnos:
2655 if e.errno != errno.ENOENT:
2656 show_unmerge("---", unmerge_desc["!empty"], "dir", obj)
2659 # Remove stale entries from config memory.
2661 for filename in stale_confmem:
2662 del cfgfiledict[filename]
2663 writedict(cfgfiledict, conf_mem_file)
2665 #remove self from vartree database so that our own virtual gets zapped if we're the last node
2666 self.vartree.zap(self.mycpv)
# Boolean wrapper around _match_contents(): True iff this package's
# CONTENTS claims the given file under destroot.
2668 def isowner(self, filename, destroot):
2670 Check if a file belongs to this package. This may
2671 result in a stat call for the parent directory of
2672 every installed file, since the inode numbers are
2673 used to work around the problem of ambiguous paths
2674 caused by symlinked directories. The results of
2675 stat calls are cached to optimize multiple calls
2684 1. True if this package owns the file.
2685 2. False if this package does not own the file.
2687 return bool(self._match_contents(filename, destroot))
# Look up `filename` (relative to destroot) in this package's CONTENTS.
# Handles encoding mismatches between the caller's path and the merged
# contents, and falls back to inode comparison of parent directories to
# resolve paths that go through symlinked directories.
# NOTE(review): the listing elides several lines (try:/return/else:
# statements), so the visible control flow is incomplete.
2689 def _match_contents(self, filename, destroot):
2691 The matching contents entry is returned, which is useful
2692 since the path may differ from the one given by the caller,
2696 @return: the contents entry corresponding to the given path, or False
2697 if the file is not owned by this package.
2700 filename = _unicode_decode(filename,
2701 encoding=_encodings['content'], errors='strict')
2703 destroot = _unicode_decode(destroot,
2704 encoding=_encodings['content'], errors='strict')
2706 # The given filename argument might have a different encoding than the
2707 # the filenames contained in the contents, so use separate wrapped os
2708 # modules for each. The basename is more likely to contain non-ascii
2709 # characters than the directory path, so use os_filename_arg for all
2710 # operations involving the basename of the filename arg.
2711 os_filename_arg = _os_merge
2715 _unicode_encode(filename,
2716 encoding=_encodings['merge'], errors='strict')
2717 except UnicodeEncodeError:
2718 # The package appears to have been merged with a
2719 # different value of sys.getfilesystemencoding(),
2720 # so fall back to utf_8 if appropriate.
2722 _unicode_encode(filename,
2723 encoding=_encodings['fs'], errors='strict')
2724 except UnicodeEncodeError:
2727 os_filename_arg = portage.os
2729 destfile = normalize_path(
2730 os_filename_arg.path.join(destroot,
2731 filename.lstrip(os_filename_arg.path.sep)))
# Fast path: exact path match in CONTENTS (elided return follows).
2733 pkgfiles = self.getcontents()
2734 if pkgfiles and destfile in pkgfiles:
2737 basename = os_filename_arg.path.basename(destfile)
# Lazily build the set of all basenames in CONTENTS; if the queried
# basename is absent, this package cannot own the file.
2738 if self._contents_basenames is None:
2743 encoding=_encodings['merge'],
2745 except UnicodeEncodeError:
2746 # The package appears to have been merged with a
2747 # different value of sys.getfilesystemencoding(),
2748 # so fall back to utf_8 if appropriate.
2752 encoding=_encodings['fs'],
2754 except UnicodeEncodeError:
2759 self._contents_basenames = set(
2760 os.path.basename(x) for x in pkgfiles)
2761 if basename not in self._contents_basenames:
2762 # This is a shortcut that, in most cases, allows us to
2763 # eliminate this package as an owner without the need
2764 # to examine inode numbers of parent directories.
2767 # Use stat rather than lstat since we want to follow
2768 # any symlinks to the real parent directory.
2769 parent_path = os_filename_arg.path.dirname(destfile)
2771 parent_stat = os_filename_arg.stat(parent_path)
2772 except EnvironmentError as e:
2773 if e.errno != errno.ENOENT:
# Lazily build (st_dev, st_ino) -> [parent paths] map over the parent
# directories of all CONTENTS entries, then compare against the stat of
# the queried file's parent.
2777 if self._contents_inodes is None:
2783 encoding=_encodings['merge'],
2785 except UnicodeEncodeError:
2786 # The package appears to have been merged with a
2787 # different value of sys.getfilesystemencoding(),
2788 # so fall back to utf_8 if appropriate.
2792 encoding=_encodings['fs'],
2794 except UnicodeEncodeError:
2799 self._contents_inodes = {}
2800 parent_paths = set()
2802 p_path = os.path.dirname(x)
2803 if p_path in parent_paths:
2805 parent_paths.add(p_path)
2811 inode_key = (s.st_dev, s.st_ino)
2812 # Use lists of paths in case multiple
2813 # paths reference the same inode.
2814 p_path_list = self._contents_inodes.get(inode_key)
2815 if p_path_list is None:
2817 self._contents_inodes[inode_key] = p_path_list
2818 if p_path not in p_path_list:
2819 p_path_list.append(p_path)
2821 p_path_list = self._contents_inodes.get(
2822 (parent_stat.st_dev, parent_stat.st_ino))
2824 for p_path in p_path_list:
2825 x = os_filename_arg.path.join(p_path, basename)
# Rebuild the dynamic-linker LinkageMap; if the scanning command is
# missing (CommandNotFound), permanently disable preserve-libs for this
# instance rather than failing the merge.
2831 def _linkmap_rebuild(self, **kwargs):
2832 if self._linkmap_broken:
2835 self.vartree.dbapi.linkmap.rebuild(**kwargs)
2836 except CommandNotFound as e:
2837 self._linkmap_broken = True
2838 self._display_merge(_("!!! Disabling preserve-libs " \
2839 "due to error: Command Not Found: %s\n") % (e,),
2840 level=logging.ERROR, noiselevel=-1)
# Compute the set of library paths from the installed instance that must
# be preserved because external consumers still link against them. Builds
# a provider/consumer digraph from the LinkageMap and walks it from
# non-provider consumers. Returns early (elided line) when preserve-libs
# is disabled or there is no installed instance.
2842 def _find_libs_to_preserve(self):
2844 Get set of relative paths for libraries to be preserved. The file
2845 paths are selected from self._installed_instance.getcontents().
2847 if self._linkmap_broken or not \
2848 (self._installed_instance is not None and \
2849 "preserve-libs" in self.settings.features):
2853 linkmap = self.vartree.dbapi.linkmap
2854 installed_instance = self._installed_instance
2855 old_contents = installed_instance.getcontents()
2857 root_len = len(root) - 1
2858 lib_graph = digraph()
# Canonicalize a path to a single graph node; alternate paths for the
# same underlying file (hardlinks/symlinks) share one node via alt_paths.
2861 def path_to_node(path):
2862 node = path_node_map.get(path)
2864 node = LinkageMap._LibGraphNode(path, root)
2865 alt_path_node = lib_graph.get(node)
2866 if alt_path_node is not None:
2867 node = alt_path_node
2868 node.alt_paths.add(path)
2869 path_node_map[path] = node
2873 provider_nodes = set()
2874 # Create provider nodes and add them to the graph.
2875 for f_abs in old_contents:
2879 _unicode_encode(f_abs,
2880 encoding=_encodings['merge'], errors='strict')
2881 except UnicodeEncodeError:
2882 # The package appears to have been merged with a
2883 # different value of sys.getfilesystemencoding(),
2884 # so fall back to utf_8 if appropriate.
2886 _unicode_encode(f_abs,
2887 encoding=_encodings['fs'], errors='strict')
2888 except UnicodeEncodeError:
2893 f = f_abs[root_len:]
2894 if self.isowner(f, root):
2897 consumers = linkmap.findConsumers(f)
2902 provider_node = path_to_node(f)
2903 lib_graph.add(provider_node, None)
2904 provider_nodes.add(provider_node)
2905 consumer_map[provider_node] = consumers
2907 # Create consumer nodes and add them to the graph.
2908 # Note that consumers can also be providers.
2909 for provider_node, consumers in consumer_map.items():
2911 if self.isowner(c, root):
2913 consumer_node = path_to_node(c)
2914 if installed_instance.isowner(c, root) and \
2915 consumer_node not in provider_nodes:
2916 # This is not a provider, so it will be uninstalled.
2918 lib_graph.add(provider_node, consumer_node)
2920 # Locate nodes which should be preserved. They consist of all
2921 # providers that are reachable from consumers that are not
2922 # providers themselves.
2923 preserve_nodes = set()
2924 for consumer_node in lib_graph.root_nodes():
2925 if consumer_node in provider_nodes:
2927 # Preserve all providers that are reachable from this consumer.
2928 node_stack = lib_graph.child_nodes(consumer_node)
2930 provider_node = node_stack.pop()
2931 if provider_node in preserve_nodes:
2933 preserve_nodes.add(provider_node)
2934 node_stack.extend(lib_graph.child_nodes(provider_node))
2936 preserve_paths = set()
2937 for preserve_node in preserve_nodes:
2938 # Make sure that at least one of the paths is not a symlink.
2939 # This prevents symlinks from being erroneously preserved by
2940 # themselves when the old instance installed symlinks that
2941 # the new instance does not install.
2943 for f in preserve_node.alt_paths:
2944 f_abs = os.path.join(root, f.lstrip(os.sep))
2946 if stat.S_ISREG(os.lstat(f_abs).st_mode):
2953 preserve_paths.update(preserve_node.alt_paths)
2955 return preserve_paths
# Copy the CONTENTS entries for preserved library paths from the old
# (installed) instance into this package's new CONTENTS, including any
# parent directories, then rewrite dbtmpdir/CONTENTS atomically. Paths
# missing from the old CONTENTS are dropped from preserve_paths (mutated
# in place) with an error message.
2957 def _add_preserve_libs_to_contents(self, preserve_paths):
2959 Preserve libs returned from _find_libs_to_preserve().
2962 if not preserve_paths:
2966 showMessage = self._display_merge
2969 # Copy contents entries from the old package to the new one.
2970 new_contents = self.getcontents().copy()
2971 old_contents = self._installed_instance.getcontents()
2972 for f in sorted(preserve_paths):
2973 f = _unicode_decode(f,
2974 encoding=_encodings['content'], errors='strict')
2975 f_abs = os.path.join(root, f.lstrip(os.sep))
2976 contents_entry = old_contents.get(f_abs)
2977 if contents_entry is None:
2978 # This will probably never happen, but it might if one of the
2979 # paths returned from findConsumers() refers to one of the libs
2980 # that should be preserved yet the path is not listed in the
2981 # contents. Such a path might belong to some other package, so
2982 # it shouldn't be preserved here.
2983 showMessage(_("!!! File '%s' will not be preserved "
2984 "due to missing contents entry\n") % (f_abs,),
2985 level=logging.ERROR, noiselevel=-1)
2986 preserve_paths.remove(f)
2988 new_contents[f_abs] = contents_entry
2989 obj_type = contents_entry[0]
2990 showMessage(_(">>> needed %s %s\n") % (obj_type, f_abs),
2992 # Add parent directories to contents if necessary.
2993 parent_dir = os.path.dirname(f_abs)
2994 while len(parent_dir) > len(root):
2995 new_contents[parent_dir] = ["dir"]
2997 parent_dir = os.path.dirname(parent_dir)
2998 if prev == parent_dir:
3000 outfile = atomic_ofstream(os.path.join(self.dbtmpdir, "CONTENTS"))
3001 write_contents(new_contents, root, outfile)
3003 self._clear_contents_cache()
# Identify preserved libraries whose consumers have all gone away.
# Builds a digraph of preserved libs and their remaining consumers from
# the plib registry and LinkageMap, drops edges where an alternative
# installed provider with the same soname exists, then repeatedly peels
# consumer-less preserved root nodes off the graph. Returns (per the
# elided tail) a {cpv: [removable paths]} map accumulated in cpv_lib_map.
3005 def _find_unused_preserved_libs(self):
3007 Find preserved libraries that don't have any consumers left.
3010 if self._linkmap_broken:
3013 # Since preserved libraries can be consumers of other preserved
3014 # libraries, use a graph to track consumer relationships.
3015 plib_dict = self.vartree.dbapi.plib_registry.getPreservedLibs()
3016 lib_graph = digraph()
3017 preserved_nodes = set()
3018 preserved_paths = set()
# Same node-canonicalization helper as in _find_libs_to_preserve().
3023 def path_to_node(path):
3024 node = path_node_map.get(path)
3026 node = LinkageMap._LibGraphNode(path, root)
3027 alt_path_node = lib_graph.get(node)
3028 if alt_path_node is not None:
3029 node = alt_path_node
3030 node.alt_paths.add(path)
3031 path_node_map[path] = node
3034 linkmap = self.vartree.dbapi.linkmap
3035 for cpv, plibs in plib_dict.items():
3037 path_cpv_map[f] = cpv
3038 preserved_node = path_to_node(f)
3039 if not preserved_node.file_exists():
3041 lib_graph.add(preserved_node, None)
3042 preserved_paths.add(f)
3043 preserved_nodes.add(preserved_node)
3044 for c in self.vartree.dbapi.linkmap.findConsumers(f):
3045 consumer_node = path_to_node(c)
3046 if not consumer_node.file_exists():
3048 # Note that consumers may also be providers.
3049 lib_graph.add(preserved_node, consumer_node)
3051 # Eliminate consumers having providers with the same soname as an
3052 # installed library that is not preserved. This eliminates
3053 # libraries that are erroneously preserved due to a move from one
3054 # directory to another.
3056 for preserved_node in preserved_nodes:
3057 soname = linkmap.getSoname(preserved_node)
3058 for consumer_node in lib_graph.parent_nodes(preserved_node):
3059 if consumer_node in preserved_nodes:
3061 providers = provider_cache.get(consumer_node)
3062 if providers is None:
3063 providers = linkmap.findProviders(consumer_node)
3064 provider_cache[consumer_node] = providers
3065 providers = providers.get(soname)
3066 if providers is None:
3068 for provider in providers:
3069 if provider in preserved_paths:
3071 provider_node = path_to_node(provider)
3072 if not provider_node.file_exists():
3074 if provider_node in preserved_nodes:
3076 # An alternative provider seems to be
3077 # installed, so drop this edge.
3078 lib_graph.remove_edge(preserved_node, consumer_node)
# Peel unconsumed preserved roots off the graph iteratively; each peeled
# node's alt_paths are scheduled for removal.
3082 while not lib_graph.empty():
3083 root_nodes = preserved_nodes.intersection(lib_graph.root_nodes())
3086 lib_graph.difference_update(root_nodes)
3088 for node in root_nodes:
3089 unlink_list.update(node.alt_paths)
3090 unlink_list = sorted(unlink_list)
3091 for obj in unlink_list:
3092 cpv = path_cpv_map.get(obj)
3094 # This means that a symlink is in the preserved libs
3095 # registry, but the actual lib it points to is not.
3096 self._display_merge(_("!!! symlink to lib is preserved, "
3097 "but not the lib itself:\n!!! '%s'\n") % (obj,),
3098 level=logging.ERROR, noiselevel=-1)
3100 removed = cpv_lib_map.get(cpv)
3103 cpv_lib_map[cpv] = removed
# Physically delete the preserved-library files named by cpv_lib_map
# (output of _find_unused_preserved_libs), tolerate already-missing
# files, try to prune the now-empty parent directories, and finally drop
# dead entries from the preserved-libs registry.
3108 def _remove_preserved_libs(self, cpv_lib_map):
3110 Remove files returned from _find_unused_preserved_libs().
3115 files_to_remove = set()
3116 for files in cpv_lib_map.values():
3117 files_to_remove.update(files)
3118 files_to_remove = sorted(files_to_remove)
3119 showMessage = self._display_merge
3123 for obj in files_to_remove:
3124 obj = os.path.join(root, obj.lstrip(os.sep))
3125 parent_dirs.add(os.path.dirname(obj))
3126 if os.path.islink(obj):
3132 except OSError as e:
3133 if e.errno != errno.ENOENT:
3137 showMessage(_("<<< !needed %s %s\n") % (obj_type, obj),
3140 # Remove empty parent directories if possible.
3142 x = parent_dirs.pop()
3149 x = os.path.dirname(x)
3153 self.vartree.dbapi.plib_registry.pruneNonExisting()
# Check each file this package wants to install against the live
# filesystem. Returns (collisions, plib_collisions): paths that clash
# with files owned by other packages (minus COLLISION_IGNORE entries and
# unprotected unowned files), and clashes with preserved libraries keyed
# by the owning cpv (those the current package will simply take over).
3155 def _collision_protect(self, srcroot, destroot, mypkglist, mycontents):
3159 collision_ignore = set([normalize_path(myignore) for myignore in \
3160 portage.util.shlex_split(
3161 self.settings.get("COLLISION_IGNORE", ""))])
3163 # For collisions with preserved libraries, the current package
3164 # will assume ownership and the libraries will be unregistered.
3165 plib_dict = self.vartree.dbapi.plib_registry.getPreservedLibs()
3168 for cpv, paths in plib_dict.items():
3169 plib_paths.update(paths)
3171 plib_cpv_map[f] = cpv
3172 plib_inodes = self._lstat_inode_map(plib_paths)
3173 plib_collisions = {}
3175 showMessage = self._display_merge
3176 scheduler = self._scheduler
3179 destroot = normalize_path(destroot).rstrip(os.path.sep) + \
3181 showMessage(_(" %s checking %d files for package collisions\n") % \
3182 (colorize("GOOD", "*"), len(mycontents)))
3183 for i, f in enumerate(mycontents):
3184 if i % 1000 == 0 and i != 0:
3185 showMessage(_("%d files checked ...\n") % i)
3187 if scheduler is not None and \
3188 0 == i % self._file_merge_yield_interval:
3189 scheduler.scheduleYield()
3191 dest_path = normalize_path(
3192 os.path.join(destroot, f.lstrip(os.path.sep)))
3194 dest_lstat = os.lstat(dest_path)
3195 except EnvironmentError as e:
3196 if e.errno == errno.ENOENT:
3199 elif e.errno == errno.ENOTDIR:
3201 # A non-directory is in a location where this package
3202 # expects to have a directory.
# Walk up until the offending non-directory ancestor is found, then
# treat THAT path as the colliding entry.
3204 parent_path = dest_path
3205 while len(parent_path) > len(destroot):
3206 parent_path = os.path.dirname(parent_path)
3208 dest_lstat = os.lstat(parent_path)
3210 except EnvironmentError as e:
3211 if e.errno != errno.ENOTDIR:
3215 raise AssertionError(
3216 "unable to find non-directory " + \
3217 "parent for '%s'" % dest_path)
3218 dest_path = parent_path
3219 f = os.path.sep + dest_path[len(destroot):]
# Collisions with preserved-library inodes are recorded separately and
# excluded from the normal collision list.
3227 plibs = plib_inodes.get((dest_lstat.st_dev, dest_lstat.st_ino))
3230 cpv = plib_cpv_map[path]
3231 paths = plib_collisions.get(cpv)
3234 plib_collisions[cpv] = paths
3236 # The current package will assume ownership and the
3237 # libraries will be unregistered, so exclude this
3238 # path from the normal collisions.
3242 full_path = os.path.join(destroot, f.lstrip(os.path.sep))
3243 for ver in mypkglist:
3244 if ver.isowner(f, destroot):
3247 if not isowned and self.isprotected(full_path):
3251 if collision_ignore:
3252 if f in collision_ignore:
3255 for myignore in collision_ignore:
3256 if f.startswith(myignore + os.path.sep):
3260 collisions.append(f)
3261 return collisions, plib_collisions
# Build an inode -> paths map via lstat so callers can detect when
# several paths (hardlinks) refer to the same file. Missing paths
# (ENOENT/ENOTDIR) are silently skipped per the visible errno filter.
3263 def _lstat_inode_map(self, path_iter):
3265 Use lstat to create a map of the form:
3266 {(st_dev, st_ino) : set([path1, path2, ...])}
3267 Multiple paths may reference the same inode due to hardlinks.
3268 All lstat() calls are relative to self.myroot.
3276 path = os.path.join(root, f.lstrip(os.sep))
3279 except OSError as e:
3280 if e.errno not in (errno.ENOENT, errno.ENOTDIR):
3284 key = (st.st_dev, st.st_ino)
3285 paths = inode_map.get(key)
3288 inode_map[key] = paths
# Scan the files of the given installed instances for suid/sgid regular
# files that have MORE hardlinks than the package itself accounts for —
# a possible privilege-escalation hazard — and report them via an
# eerror in the preinst phase.
3292 def _security_check(self, installed_instances):
3293 if not installed_instances:
3298 showMessage = self._display_merge
3299 scheduler = self._scheduler
3302 for dblnk in installed_instances:
3303 file_paths.update(dblnk.getcontents())
3306 for i, path in enumerate(file_paths):
3308 if scheduler is not None and \
3309 0 == i % self._file_merge_yield_interval:
3310 scheduler.scheduleYield()
3314 _unicode_encode(path,
3315 encoding=_encodings['merge'], errors='strict')
3316 except UnicodeEncodeError:
3317 # The package appears to have been merged with a
3318 # different value of sys.getfilesystemencoding(),
3319 # so fall back to utf_8 if appropriate.
3321 _unicode_encode(path,
3322 encoding=_encodings['fs'], errors='strict')
3323 except UnicodeEncodeError:
3330 except OSError as e:
3331 if e.errno not in (errno.ENOENT, errno.ENOTDIR):
3335 if not stat.S_ISREG(s.st_mode):
# Deduplicate by realpath so the same file reached through different
# symlinked paths is only counted once.
3337 path = os.path.realpath(path)
3338 if path in real_paths:
3340 real_paths.add(path)
3341 if s.st_nlink > 1 and \
3342 s.st_mode & (stat.S_ISUID | stat.S_ISGID):
3343 k = (s.st_dev, s.st_ino)
3344 inode_map.setdefault(k, []).append((path, s))
3345 suspicious_hardlinks = []
3346 for path_list in inode_map.values():
3347 path, s = path_list[0]
3348 if len(path_list) == s.st_nlink:
3349 # All hardlinks seem to be owned by this package.
3351 suspicious_hardlinks.append(path_list)
3352 if not suspicious_hardlinks:
3356 msg.append(_("suid/sgid file(s) "
3357 "with suspicious hardlink(s):"))
3359 for path_list in suspicious_hardlinks:
3360 for path, s in path_list:
3361 msg.append("\t%s" % path)
3363 msg.append(_("See the Gentoo Security Handbook "
3364 "guide for advice on how to proceed."))
3366 self._eerror("preinst", msg)
# Emit QA-warning elog lines for the given phase — directly when no
# scheduler is attached, otherwise via the scheduler's dblinkElog hook.
# NOTE(review): the listing elides the `for l in lines:` and `else:`
# lines implied by the visible indentation.
3370 def _eqawarn(self, phase, lines):
3371 from portage.elog.messages import eqawarn as _eqawarn
3372 if self._scheduler is None:
3374 _eqawarn(l, phase=phase, key=self.settings.mycpv)
3376 self._scheduler.dblinkElog(self,
3377 phase, _eqawarn, lines)
# Emit error elog lines for the given phase — directly when no scheduler
# is attached, otherwise via the scheduler's dblinkElog hook. Mirrors
# _eqawarn; the same loop/else lines are elided from this listing.
3379 def _eerror(self, phase, lines):
3380 from portage.elog.messages import eerror as _eerror
3381 if self._scheduler is None:
3383 _eerror(l, phase=phase, key=self.settings.mycpv)
3385 self._scheduler.dblinkElog(self,
3386 phase, _eerror, lines)
3388 def treewalk(self, srcroot, destroot, inforoot, myebuild, cleanup=0,
3389 mydbapi=None, prev_mtimes=None):
3392 This function does the following:
3394 calls self._preserve_libs if FEATURES=preserve-libs
3395 calls self._collision_protect if FEATURES=collision-protect
3396 calls doebuild(mydo=pkg_preinst)
3397 Merges the package to the livefs
3398 unmerges old version (if required)
3399 calls doebuild(mydo=pkg_postinst)
3403 @param srcroot: Typically this is ${D}
3404 @type srcroot: String (Path)
3405 @param destroot: Path to merge to (usually ${ROOT})
3406 @type destroot: String (Path)
3407 @param inforoot: root of the vardb entry ?
3408 @type inforoot: String (Path)
3409 @param myebuild: path to the ebuild that we are processing
3410 @type myebuild: String (Path)
3411 @param mydbapi: dbapi which is handed to doebuild.
3412 @type mydbapi: portdbapi instance
3413 @param prev_mtimes: { Filename:mtime } mapping for env_update
3414 @type prev_mtimes: Dictionary
3420 secondhand is a list of symlinks that have been skipped due to their target
3421 not existing; we will merge these symlinks at a later time.
3426 srcroot = _unicode_decode(srcroot,
3427 encoding=_encodings['content'], errors='strict')
3428 destroot = _unicode_decode(destroot,
3429 encoding=_encodings['content'], errors='strict')
3430 inforoot = _unicode_decode(inforoot,
3431 encoding=_encodings['content'], errors='strict')
3432 myebuild = _unicode_decode(myebuild,
3433 encoding=_encodings['content'], errors='strict')
3435 showMessage = self._display_merge
3436 scheduler = self._scheduler
3438 srcroot = normalize_path(srcroot).rstrip(os.path.sep) + os.path.sep
3439 destroot = normalize_path(destroot).rstrip(os.path.sep) + os.path.sep
3441 if not os.path.isdir(srcroot):
3442 showMessage(_("!!! Directory Not Found: D='%s'\n") % srcroot,
3443 level=logging.ERROR, noiselevel=-1)
3447 for var_name in ('CHOST', 'SLOT'):
3448 if var_name == 'CHOST' and self.cat == 'virtual':
3450 os.unlink(os.path.join(inforoot, var_name))
3456 val = codecs.open(_unicode_encode(
3457 os.path.join(inforoot, var_name),
3458 encoding=_encodings['fs'], errors='strict'),
3459 mode='r', encoding=_encodings['repo.content'],
3460 errors='replace').readline().strip()
3461 except EnvironmentError as e:
3462 if e.errno != errno.ENOENT:
3467 if var_name == 'SLOT':
3470 if not slot.strip():
3471 slot = self.settings.get(var_name, '')
3472 if not slot.strip():
3473 showMessage(_("!!! SLOT is undefined\n"),
3474 level=logging.ERROR, noiselevel=-1)
3476 write_atomic(os.path.join(inforoot, var_name), slot + '\n')
3478 if val != self.settings.get(var_name, ''):
3479 self._eqawarn('preinst',
3480 [_("QA Notice: Expected %(var_name)s='%(expected_value)s', got '%(actual_value)s'\n") % \
3481 {"var_name":var_name, "expected_value":self.settings.get(var_name, ''), "actual_value":val}])
3484 self._eerror("preinst", lines)
3486 if not os.path.exists(self.dbcatdir):
3487 os.makedirs(self.dbcatdir)
3490 for v in self.vartree.dbapi.cp_list(self.mysplit[0]):
3491 otherversions.append(v.split("/")[1])
3493 cp = self.mysplit[0]
3494 slot_atom = "%s:%s" % (cp, slot)
3496 # filter any old-style virtual matches
3497 slot_matches = [cpv for cpv in self.vartree.dbapi.match(slot_atom) \
3498 if cpv_getkey(cpv) == cp]
3500 if self.mycpv not in slot_matches and \
3501 self.vartree.dbapi.cpv_exists(self.mycpv):
3502 # handle multislot or unapplied slotmove
3503 slot_matches.append(self.mycpv)
3506 from portage import config
3507 for cur_cpv in slot_matches:
3508 # Clone the config in case one of these has to be unmerged since
3509 # we need it to have private ${T} etc... for things like elog.
3510 others_in_slot.append(dblink(self.cat, catsplit(cur_cpv)[1],
3511 self.vartree.root, config(clone=self.settings),
3512 vartree=self.vartree, treetype="vartree",
3513 scheduler=self._scheduler))
3515 retval = self._security_check(others_in_slot)
3519 self.settings["REPLACING_VERSIONS"] = " ".join(
3520 [portage.versions.cpv_getversion(other.mycpv) for other in others_in_slot] )
3521 self.settings.backup_changes("REPLACING_VERSIONS")
3524 # Used by self.isprotected().
3527 for dblnk in others_in_slot:
3528 cur_counter = self.vartree.dbapi.cpv_counter(dblnk.mycpv)
3529 if cur_counter > max_counter:
3530 max_counter = cur_counter
3532 self._installed_instance = max_dblnk
3534 # We check for unicode encoding issues after src_install. However,
3535 # the check must be repeated here for binary packages (it's
3536 # inexpensive since we call os.walk() here anyway).
3541 unicode_error = False
3545 paths_with_newlines = []
3546 srcroot_len = len(srcroot)
3549 for parent, dirs, files in os.walk(srcroot, onerror=onerror):
3551 parent = _unicode_decode(parent,
3552 encoding=_encodings['merge'], errors='strict')
3553 except UnicodeDecodeError:
3554 new_parent = _unicode_decode(parent,
3555 encoding=_encodings['merge'], errors='replace')
3556 new_parent = _unicode_encode(new_parent,
3557 encoding=_encodings['merge'], errors='backslashreplace')
3558 new_parent = _unicode_decode(new_parent,
3559 encoding=_encodings['merge'], errors='replace')
3560 os.rename(parent, new_parent)
3561 unicode_error = True
3562 unicode_errors.append(new_parent[srcroot_len:])
3567 fname = _unicode_decode(fname,
3568 encoding=_encodings['merge'], errors='strict')
3569 except UnicodeDecodeError:
3570 fpath = portage._os.path.join(
3571 parent.encode(_encodings['merge']), fname)
3572 new_fname = _unicode_decode(fname,
3573 encoding=_encodings['merge'], errors='replace')
3574 new_fname = _unicode_encode(new_fname,
3575 encoding=_encodings['merge'], errors='backslashreplace')
3576 new_fname = _unicode_decode(new_fname,
3577 encoding=_encodings['merge'], errors='replace')
3578 new_fpath = os.path.join(parent, new_fname)
3579 os.rename(fpath, new_fpath)
3580 unicode_error = True
3581 unicode_errors.append(new_fpath[srcroot_len:])
3585 fpath = os.path.join(parent, fname)
3587 relative_path = fpath[srcroot_len:]
3589 if "\n" in relative_path:
3590 paths_with_newlines.append(relative_path)
3592 file_mode = os.lstat(fpath).st_mode
3593 if stat.S_ISREG(file_mode):
3594 myfilelist.append(relative_path)
3595 elif stat.S_ISLNK(file_mode):
3596 # Note: os.walk puts symlinks to directories in the "dirs"
3597 # list and it does not traverse them since that could lead
3598 # to an infinite recursion loop.
3599 mylinklist.append(relative_path)
3604 if not unicode_error:
3608 eerror(portage._merge_unicode_error(unicode_errors))
3610 if paths_with_newlines:
3612 msg.append(_("This package installs one or more files containing a newline (\\n) character:"))
3614 paths_with_newlines.sort()
3615 for f in paths_with_newlines:
3616 msg.append("\t/%s" % (f.replace("\n", "\\n")))
3618 msg.append(_("package %s NOT merged") % self.mycpv)
3623 # If there are no files to merge, and an installed package in the same
3624 # slot has files, it probably means that something went wrong.
3625 if self.settings.get("PORTAGE_PACKAGE_EMPTY_ABORT") == "1" and \
3626 not myfilelist and not mylinklist and others_in_slot:
3627 installed_files = None
3628 for other_dblink in others_in_slot:
3629 installed_files = other_dblink.getcontents()
3630 if not installed_files:
3632 from textwrap import wrap
3636 "new_cpv":self.mycpv,
3637 "old_cpv":other_dblink.mycpv
3639 msg.extend(wrap(_("The '%(new_cpv)s' package will not install "
3640 "any files, but the currently installed '%(old_cpv)s'"
3641 " package has the following files: ") % d, wrap_width))
3643 msg.extend(sorted(installed_files))
3645 msg.append(_("package %s NOT merged") % self.mycpv)
3648 _("Manually run `emerge --unmerge =%s` if you "
3649 "really want to remove the above files. Set "
3650 "PORTAGE_PACKAGE_EMPTY_ABORT=\"0\" in "
3651 "/etc/make.conf if you do not want to "
3652 "abort in cases like this.") % other_dblink.mycpv,
3658 # check for package collisions
3660 if self._blockers is not None:
3661 # This is only supposed to be called when
3662 # the vdb is locked, like it is here.
3663 blockers = self._blockers()
3664 if blockers is None:
3666 collisions, plib_collisions = \
3667 self._collision_protect(srcroot, destroot,
3668 others_in_slot + blockers, myfilelist + mylinklist)
3670 # Make sure the ebuild environment is initialized and that ${T}/elog
3671 # exists for logging of collision-protect eerror messages.
3672 if myebuild is None:
3673 myebuild = os.path.join(inforoot, self.pkg + ".ebuild")
3674 doebuild_environment(myebuild, "preinst", destroot,
3675 self.settings, 0, 0, mydbapi)
3676 prepare_build_dirs(destroot, self.settings, cleanup)
3679 collision_protect = "collision-protect" in self.settings.features
3680 protect_owned = "protect-owned" in self.settings.features
3681 msg = _("This package will overwrite one or more files that"
3682 " may belong to other packages (see list below).")
3683 if not (collision_protect or protect_owned):
3684 msg += _(" Add either \"collision-protect\" or"
3685 " \"protect-owned\" to FEATURES in"
3686 " make.conf if you would like the merge to abort"
3687 " in cases like this. See the make.conf man page for"
3688 " more information about these features.")
3689 if self.settings.get("PORTAGE_QUIET") != "1":
3690 msg += _(" You can use a command such as"
3691 " `portageq owners / <filename>` to identify the"
3692 " installed package that owns a file. If portageq"
3693 " reports that only one package owns a file then do NOT"
3694 " file a bug report. A bug report is only useful if it"
3695 " identifies at least two or more packages that are known"
3696 " to install the same file(s)."
3697 " If a collision occurs and you"
3698 " can not explain where the file came from then you"
3699 " should simply ignore the collision since there is not"
3700 " enough information to determine if a real problem"
3701 " exists. Please do NOT file a bug report at"
3702 " http://bugs.gentoo.org unless you report exactly which"
3703 " two packages install the same file(s). Once again,"
3704 " please do NOT file a bug report unless you have"
3705 " completely understood the above message.")
3707 self.settings["EBUILD_PHASE"] = "preinst"
3708 from textwrap import wrap
3710 if collision_protect:
3712 msg.append(_("package %s NOT merged") % self.settings.mycpv)
3714 msg.append(_("Detected file collision(s):"))
3717 for f in collisions:
3718 msg.append("\t%s" % \
3719 os.path.join(destroot, f.lstrip(os.path.sep)))
3724 if collision_protect or protect_owned:
3727 msg.append(_("Searching all installed"
3728 " packages for file collisions..."))
3730 msg.append(_("Press Ctrl-C to Stop"))
3734 if len(collisions) > 20:
3735 # get_owners is slow for large numbers of files, so
3736 # don't look them all up.
3737 collisions = collisions[:20]
3738 owners = self.vartree.dbapi._owners.get_owners(collisions)
3739 self.vartree.dbapi.flush_cache()
3741 for pkg, owned_files in owners.items():
3744 msg.append("%s" % cpv)
3745 for f in sorted(owned_files):
3746 msg.append("\t%s" % os.path.join(destroot,
3747 f.lstrip(os.path.sep)))
3752 eerror([_("None of the installed"
3753 " packages claim the file(s)."), ""])
3755 # The explanation about the collision and how to solve
3756 # it may not be visible via a scrollback buffer, especially
3757 # if the number of file collisions is large. Therefore,
3758 # show a summary at the end.
3759 if collision_protect:
3760 msg = _("Package '%s' NOT merged due to file collisions.") % \
3762 elif protect_owned and owners:
3763 msg = _("Package '%s' NOT merged due to file collisions.") % \
3766 msg = _("Package '%s' merged despite file collisions.") % \
3768 msg += _(" If necessary, refer to your elog "
3769 "messages for the whole content of the above message.")
3770 eerror(wrap(msg, 70))
3772 if collision_protect or (protect_owned and owners):
3775 # The merge process may move files out of the image directory,
3776 # which causes invalidation of the .installed flag.
3778 os.unlink(os.path.join(
3779 os.path.dirname(normalize_path(srcroot)), ".installed"))
3780 except OSError as e:
3781 if e.errno != errno.ENOENT:
3785 self.dbdir = self.dbtmpdir
3787 ensure_dirs(self.dbtmpdir)
3789 # run preinst script
3790 if scheduler is None:
3791 showMessage(_(">>> Merging %(cpv)s to %(destroot)s\n") % {"cpv":self.mycpv, "destroot":destroot})
3792 a = doebuild(myebuild, "preinst", destroot, self.settings,
3793 use_cache=0, tree=self.treetype, mydbapi=mydbapi,
3794 vartree=self.vartree)
3796 a = scheduler.dblinkEbuildPhase(
3797 self, mydbapi, myebuild, "preinst")
3799 # XXX: Decide how to handle failures here.
3801 showMessage(_("!!! FAILED preinst: ")+str(a)+"\n",
3802 level=logging.ERROR, noiselevel=-1)
3805 # copy "info" files (like SLOT, CFLAGS, etc.) into the database
3806 for x in os.listdir(inforoot):
3807 self.copyfile(inforoot+"/"+x)
3809 # write local package counter for recording
3810 counter = self.vartree.dbapi.counter_tick(self.myroot, mycpv=self.mycpv)
3811 codecs.open(_unicode_encode(os.path.join(self.dbtmpdir, 'COUNTER'),
3812 encoding=_encodings['fs'], errors='strict'),
3813 'w', encoding=_encodings['repo.content'], errors='backslashreplace'
3814 ).write(str(counter))
3816 # open CONTENTS file (possibly overwriting old one) for recording
3817 outfile = codecs.open(_unicode_encode(
3818 os.path.join(self.dbtmpdir, 'CONTENTS'),
3819 encoding=_encodings['fs'], errors='strict'),
3820 mode='w', encoding=_encodings['repo.content'],
3821 errors='backslashreplace')
3823 self.updateprotect()
3825 #if we have a file containing previously-merged config file md5sums, grab it.
3826 conf_mem_file = os.path.join(destroot, CONFIG_MEMORY_FILE)
3827 cfgfiledict = grabdict(conf_mem_file)
3828 cfgfiledict_orig = cfgfiledict.copy()
3829 if "NOCONFMEM" in self.settings:
3830 cfgfiledict["IGNORE"]=1
3832 cfgfiledict["IGNORE"]=0
3834 # Always behave like --noconfmem is enabled for downgrades
3835 # so that people who don't know about this option are less
3836 # likely to get confused when doing upgrade/downgrade cycles.
3837 pv_split = catpkgsplit(self.mycpv)[1:]
3838 for other in others_in_slot:
3839 if pkgcmp(pv_split, catpkgsplit(other.mycpv)[1:]) < 0:
3840 cfgfiledict["IGNORE"] = 1
3843 # Don't bump mtimes on merge since some application require
3844 # preservation of timestamps. This means that the unmerge phase must
3845 # check to see if file belongs to an installed instance in the same
3849 # set umask to 0 for merging; back up umask, save old one in prevmask (since this is a global change)
3850 prevmask = os.umask(0)
3853 # we do a first merge; this will recurse through all files in our srcroot but also build up a
3854 # "second hand" of symlinks to merge later
3855 if self.mergeme(srcroot, destroot, outfile, secondhand, "", cfgfiledict, mymtime):
3858 # now, it's time for dealing our second hand; we'll loop until we can't merge anymore. The rest are
3859 # broken symlinks. We'll merge them too.
3861 while len(secondhand) and len(secondhand)!=lastlen:
3862 # clear the thirdhand. Anything from our second hand that
3863 # couldn't get merged will be added to thirdhand.
3866 if self.mergeme(srcroot, destroot, outfile, thirdhand,
3867 secondhand, cfgfiledict, mymtime):
3871 lastlen = len(secondhand)
3873 # our thirdhand now becomes our secondhand. It's ok to throw
3874 # away secondhand since thirdhand contains all the stuff that
3875 # couldn't be merged.
3876 secondhand = thirdhand
3879 # force merge of remaining symlinks (broken or circular; oh well)
3880 if self.mergeme(srcroot, destroot, outfile, None,
3881 secondhand, cfgfiledict, mymtime):
3883 self._md5_merge_map.clear()
3888 #if we opened it, close it
3892 # write out our collection of md5sums
3893 cfgfiledict.pop("IGNORE", None)
3894 if cfgfiledict != cfgfiledict_orig:
3895 ensure_dirs(os.path.dirname(conf_mem_file),
3896 gid=portage_gid, mode=0o2750, mask=0o2)
3897 writedict(cfgfiledict, conf_mem_file)
3899 # These caches are populated during collision-protect and the data
3900 # they contain is now invalid. It's very important to invalidate
3901 # the contents_inodes cache so that FEATURES=unmerge-orphans
3902 # doesn't unmerge anything that belongs to this package that has
3904 for dblnk in others_in_slot:
3905 dblnk._clear_contents_cache()
3906 self._clear_contents_cache()
3908 linkmap = self.vartree.dbapi.linkmap
3909 self._linkmap_rebuild(include_file=os.path.join(inforoot,
3910 linkmap._needed_aux_key))
3912 # Preserve old libs if they are still in use
3913 preserve_paths = self._find_libs_to_preserve()
3915 self._add_preserve_libs_to_contents(preserve_paths)
3917 # If portage is reinstalling itself, remove the old
3918 # version now since we want to use the temporary
3919 # PORTAGE_BIN_PATH that will be removed when we return.
3920 reinstall_self = False
3921 if self.myroot == "/" and \
3922 match_from_list(PORTAGE_PACKAGE_ATOM, [self.mycpv]):
3923 reinstall_self = True
3925 if scheduler is None:
3926 def emerge_log(msg):
3929 emerge_log = scheduler.dblinkEmergeLog
3931 autoclean = self.settings.get("AUTOCLEAN", "yes") == "yes"
3934 emerge_log(_(" >>> AUTOCLEAN: %s") % (slot_atom,))
3936 others_in_slot.append(self) # self has just been merged
3937 for dblnk in list(others_in_slot):
3940 if not (autoclean or dblnk.mycpv == self.mycpv or reinstall_self):
3942 showMessage(_(">>> Safely unmerging already-installed instance...\n"))
3943 emerge_log(_(" === Unmerging... (%s)") % (dblnk.mycpv,))
3944 others_in_slot.remove(dblnk) # dblnk will unmerge itself now
3945 dblnk._linkmap_broken = self._linkmap_broken
3946 dblnk.settings["REPLACED_BY_VERSION"] = portage.versions.cpv_getversion(self.mycpv)
3947 dblnk.settings.backup_changes("REPLACED_BY_VERSION")
3948 unmerge_rval = dblnk.unmerge(trimworld=0,
3949 ldpath_mtimes=prev_mtimes, others_in_slot=others_in_slot)
3951 if unmerge_rval == os.EX_OK:
3952 emerge_log(_(" >>> unmerge success: %s") % (dblnk.mycpv,))
3954 emerge_log(_(" !!! unmerge FAILURE: %s") % (dblnk.mycpv,))
3956 # TODO: Check status and abort if necessary.
3958 showMessage(_(">>> Original instance of package unmerged safely.\n"))
3960 if len(others_in_slot) > 1:
3961 showMessage(colorize("WARN", _("WARNING:"))
3962 + _(" AUTOCLEAN is disabled. This can cause serious"
3963 " problems due to overlapping packages.\n"),
3964 level=logging.WARN, noiselevel=-1)
3966 # We hold both directory locks.
3967 self.dbdir = self.dbpkgdir
3969 _movefile(self.dbtmpdir, self.dbpkgdir, mysettings=self.settings)
3971 # keep track of the libs we preserved
3973 self.vartree.dbapi.plib_registry.register(self.mycpv,
3974 slot, counter, sorted(preserve_paths))
3976 # Check for file collisions with blocking packages
3977 # and remove any colliding files from their CONTENTS
3978 # since they now belong to this package.
3979 self._clear_contents_cache()
3980 contents = self.getcontents()
3981 destroot_len = len(destroot) - 1
3982 for blocker in blockers:
3983 self.vartree.dbapi.removeFromContents(blocker, iter(contents),
3984 relative_paths=False)
3986 # Unregister any preserved libs that this package has overwritten
3987 # and update the contents of the packages that owned them.
3988 plib_registry = self.vartree.dbapi.plib_registry
3989 plib_dict = plib_registry.getPreservedLibs()
3990 for cpv, paths in plib_collisions.items():
3991 if cpv not in plib_dict:
3993 if cpv == self.mycpv:
3996 slot, counter = self.vartree.dbapi.aux_get(
3997 cpv, ["SLOT", "COUNTER"])
4000 remaining = [f for f in plib_dict[cpv] if f not in paths]
4001 plib_registry.register(cpv, slot, counter, remaining)
4002 self.vartree.dbapi.removeFromContents(cpv, paths)
4004 self.vartree.dbapi._add(self)
4005 contents = self.getcontents()
4008 self.settings["PORTAGE_UPDATE_ENV"] = \
4009 os.path.join(self.dbpkgdir, "environment.bz2")
4010 self.settings.backup_changes("PORTAGE_UPDATE_ENV")
4012 if scheduler is None:
4013 a = doebuild(myebuild, "postinst", destroot, self.settings,
4014 use_cache=0, tree=self.treetype, mydbapi=mydbapi,
4015 vartree=self.vartree)
4017 showMessage(_(">>> %s merged.\n") % self.mycpv)
4019 a = scheduler.dblinkEbuildPhase(
4020 self, mydbapi, myebuild, "postinst")
4022 self.settings.pop("PORTAGE_UPDATE_ENV", None)
4025 # It's stupid to bail out here, so keep going regardless of
4026 # phase return code.
4027 showMessage(_("!!! FAILED postinst: ")+str(a)+"\n",
4028 level=logging.ERROR, noiselevel=-1)
4031 for v in otherversions:
4032 if pkgcmp(catpkgsplit(self.pkg)[1:], catpkgsplit(v)[1:]) < 0:
4035 #update environment settings, library paths. DO NOT change symlinks.
4036 env_update(makelinks=(not downgrade),
4037 target_root=self.settings["ROOT"], prev_mtimes=prev_mtimes,
4038 contents=contents, env=self.settings.environ(),
4039 writemsg_level=self._display_merge)
4041 # For gcc upgrades, preserved libs have to be removed after the
4042 # the library path has been updated.
4043 self._linkmap_rebuild()
4044 cpv_lib_map = self._find_unused_preserved_libs()
4046 self._remove_preserved_libs(cpv_lib_map)
4047 for cpv, removed in cpv_lib_map.items():
4048 if not self.vartree.dbapi.cpv_exists(cpv):
4050 self.vartree.dbapi.removeFromContents(cpv, removed)
# Return a backup pathname of the form "<p>.backup.NNNN" that does not
# already exist yet, for any path type (regular file, symlink, directory).
# NOTE(review): this sampled view elides the docstring quotes and the
# loop that advances the counter x — confirm against the full source.
4054 def _new_backup_path(self, p):
4056 This works for any type of path, such as a regular file, symlink,
4057 or directory. The parent directory is assumed to exist.
4058 The returned filename is of the form p + '.backup.' + x, where
4059 x guarantees that the returned path does not exist yet.
# Candidate name: zero-padded 4-digit suffix built from the counter x.
4066 backup_p = p + '.backup.' + str(x).rjust(4, '0')
# NOTE(review): this view of mergeme elides many original lines (the
# docstring quotes, several try/except and else branches, and parts of
# the regular-file branch); added comments below are hedged accordingly.
4074 def mergeme(self, srcroot, destroot, outfile, secondhand, stufftomerge, cfgfiledict, thismtime):
4077 This function handles actual merging of the package contents to the livefs.
4078 It also handles config protection.
4080 @param srcroot: Where are we copying files from (usually ${D})
4081 @type srcroot: String (Path)
4082 @param destroot: Typically ${ROOT}
4083 @type destroot: String (Path)
4084 @param outfile: File to log operations to
4085 @type outfile: File Object
4086 @param secondhand: A set of items to merge in pass two (usually
4087 or symlinks that point to non-existing files that may get merged later)
4088 @type secondhand: List
4089 @param stufftomerge: Either a directory to merge, or a list of items.
4090 @type stufftomerge: String or List
4091 @param cfgfiledict: { File:mtime } mapping for config_protected files
4092 @type cfgfiledict: Dictionary
4093 @param thismtime: The current time (typically long(time.time())
4094 @type thismtime: Long
4095 @rtype: None or Boolean
# Output helpers: both writemsg and showMessage route through the
# merge-time display helper so output respects the scheduler/log setup.
4102 showMessage = self._display_merge
4103 writemsg = self._display_merge
4104 scheduler = self._scheduler
4109 srcroot = normalize_path(srcroot).rstrip(sep) + sep
4110 destroot = normalize_path(destroot).rstrip(sep) + sep
4112 # this is supposed to merge a list of files. There will be 2 forms of argument passing.
4113 if isinstance(stufftomerge, basestring):
4114 #A directory is specified. Figure out protection paths, listdir() it and process it.
4115 mergelist = os.listdir(join(srcroot, stufftomerge))
4116 offset = stufftomerge
4118 mergelist = stufftomerge
# Walk every entry in the merge list, periodically yielding control to
# the scheduler (when one is present) so other tasks can run.
4121 for i, x in enumerate(mergelist):
4123 if scheduler is not None and \
4124 0 == i % self._file_merge_yield_interval:
4125 scheduler.scheduleYield()
4127 mysrc = join(srcroot, offset, x)
4128 mydest = join(destroot, offset, x)
4129 # myrealdest is mydest without the $ROOT prefix (makes a difference if ROOT!="/")
4130 myrealdest = join(sep, offset, x)
4131 # stat file once, test using S_* macros many times (faster that way)
4132 mystat = os.lstat(mysrc)
4133 mymode = mystat[stat.ST_MODE]
4134 # handy variables; mydest is the target object on the live filesystems;
4135 # mysrc is the source object in the temporary install dir
4137 mydstat = os.lstat(mydest)
4138 mydmode = mydstat.st_mode
4139 except OSError as e:
4140 if e.errno != errno.ENOENT:
4143 #dest file doesn't exist
# Dispatch on the lstat() type of the source entry:
# symlink, directory, regular file, or fifo/device node.
4147 if stat.S_ISLNK(mymode):
4148 # we are merging a symbolic link
4149 myabsto = abssymlink(mysrc)
4150 if myabsto.startswith(srcroot):
4151 myabsto = myabsto[len(srcroot):]
4152 myabsto = myabsto.lstrip(sep)
4153 myto = os.readlink(mysrc)
4154 if self.settings and self.settings["D"]:
4155 if myto.startswith(self.settings["D"]):
4156 myto = myto[len(self.settings["D"]):]
4157 # myrealto contains the path of the real file to which this symlink points.
4158 # we can simply test for existence of this file to see if the target has been merged yet
4159 myrealto = normalize_path(os.path.join(destroot, myabsto))
4162 if not stat.S_ISLNK(mydmode):
4163 if stat.S_ISDIR(mydmode):
4164 # directory in the way: we can't merge a symlink over a directory
4165 # we won't merge this, continue with next file...
4168 if os.path.exists(mysrc) and stat.S_ISDIR(os.stat(mysrc)[stat.ST_MODE]):
4169 # Kill file blocking installation of symlink to dir #71787
4171 elif self.isprotected(mydest):
4172 # Use md5 of the target in ${D} if it exists...
4174 newmd5 = perform_md5(join(srcroot, myabsto))
4175 except FileNotFound:
4176 # Maybe the target is merged already.
4178 newmd5 = perform_md5(myrealto)
4179 except FileNotFound:
4181 mydest = new_protect_filename(mydest, newmd5=newmd5)
4183 # if secondhand is None it means we're operating in "force" mode and should not create a second hand.
4184 if (secondhand != None) and (not os.path.exists(myrealto)):
4185 # either the target directory doesn't exist yet or the target file doesn't exist -- or
4186 # the target is a broken symlink. We will add this file to our "second hand" and merge
4188 secondhand.append(mysrc[len(srcroot):])
4190 # unlinking no longer necessary; "movefile" will overwrite symlinks atomically and correctly
4191 mymtime = movefile(mysrc, mydest, newmtime=thismtime,
4192 sstat=mystat, mysettings=self.settings,
4193 encoding=_encodings['merge'])
4195 showMessage(">>> %s -> %s\n" % (mydest, myto))
4196 outfile.write("sym "+myrealdest+" -> "+myto+" "+str(mymtime)+"\n")
4198 showMessage(_("!!! Failed to move file.\n"),
4199 level=logging.ERROR, noiselevel=-1)
4200 showMessage("!!! %s -> %s\n" % (mydest, myto),
4201 level=logging.ERROR, noiselevel=-1)
4203 elif stat.S_ISDIR(mymode):
4204 # we are merging a directory
4206 # destination exists
4209 # Save then clear flags on dest.
4210 dflags = mydstat.st_flags
4212 bsd_chflags.lchflags(mydest, 0)
4214 if not os.access(mydest, os.W_OK):
4215 pkgstuff = pkgsplit(self.pkg)
4216 writemsg(_("\n!!! Cannot write to '%s'.\n") % mydest, noiselevel=-1)
4217 writemsg(_("!!! Please check permissions and directories for broken symlinks.\n"))
4218 writemsg(_("!!! You may start the merge process again by using ebuild:\n"))
4219 writemsg("!!! ebuild "+self.settings["PORTDIR"]+"/"+self.cat+"/"+pkgstuff[0]+"/"+self.pkg+".ebuild merge\n")
4220 writemsg(_("!!! And finish by running this: env-update\n\n"))
4223 if stat.S_ISDIR(mydmode) or \
4224 (stat.S_ISLNK(mydmode) and os.path.isdir(mydest)):
4225 # a symlink to an existing directory will work for us; keep it:
4226 showMessage("--- %s/\n" % mydest)
4228 bsd_chflags.lchflags(mydest, dflags)
4230 # a non-directory and non-symlink-to-directory. Won't work for us. Move out of the way.
4231 backup_dest = self._new_backup_path(mydest)
4234 msg.append("Installation of a directory is blocked by a file:")
4235 msg.append(" '%s'" % mydest)
4236 msg.append("This file will be renamed to a different name:")
4237 msg.append(" '%s'" % backup_dest)
4239 self._eerror("preinst", msg)
4240 if movefile(mydest, backup_dest,
4241 mysettings=self.settings,
4242 encoding=_encodings['merge']) is None:
4244 showMessage(_("bak %s %s.backup\n") % (mydest, mydest),
4245 level=logging.ERROR, noiselevel=-1)
4246 #now create our directory
4247 if self.settings.selinux_enabled():
4248 _selinux_merge.mkdir(mydest, mysrc)
4252 bsd_chflags.lchflags(mydest, dflags)
4253 os.chmod(mydest, mystat[0])
4254 os.chown(mydest, mystat[4], mystat[5])
4255 showMessage(">>> %s/\n" % mydest)
4257 #destination doesn't exist
4258 if self.settings.selinux_enabled():
4259 _selinux_merge.mkdir(mydest, mysrc)
4262 os.chmod(mydest, mystat[0])
4263 os.chown(mydest, mystat[4], mystat[5])
4264 showMessage(">>> %s/\n" % mydest)
4265 outfile.write("dir "+myrealdest+"\n")
4266 # recurse and merge this directory
4267 if self.mergeme(srcroot, destroot, outfile, secondhand,
4268 join(offset, x), cfgfiledict, thismtime):
4270 elif stat.S_ISREG(mymode):
4271 # we are merging a regular file
4272 mymd5 = perform_md5(mysrc, calc_prelink=1)
4273 # calculate config file protection stuff
4274 mydestdir = os.path.dirname(mydest)
4278 protected = self.isprotected(mydest)
4280 # destination file exists
4282 if stat.S_ISDIR(mydmode):
4283 # install of destination is blocked by an existing directory with the same name
4284 newdest = self._new_backup_path(mydest)
4287 msg.append("Installation of a regular file is blocked by a directory:")
4288 msg.append(" '%s'" % mydest)
4289 msg.append("This file will be merged with a different name:")
4290 msg.append(" '%s'" % newdest)
4292 self._eerror("preinst", msg)
4295 elif stat.S_ISREG(mydmode) or (stat.S_ISLNK(mydmode) and os.path.exists(mydest) and stat.S_ISREG(os.stat(mydest)[stat.ST_MODE])):
4296 # install of destination is blocked by an existing regular file,
4297 # or by a symlink to an existing regular file;
4298 # now, config file management may come into play.
4299 # we only need to tweak mydest if cfg file management is in play.
4301 # we have a protection path; enable config file management.
4303 destmd5 = perform_md5(mydest, calc_prelink=1)
4304 if mymd5 == destmd5:
4305 #file already in place; simply update mtimes of destination
4308 if mymd5 == cfgfiledict.get(myrealdest, [None])[0]:
4309 """ An identical update has previously been
4310 merged. Skip it unless the user has chosen
4312 moveme = cfgfiledict["IGNORE"]
4313 cfgprot = cfgfiledict["IGNORE"]
4316 mymtime = mystat[stat.ST_MTIME]
4321 # Merging a new file, so update confmem.
4322 cfgfiledict[myrealdest] = [mymd5]
4323 elif destmd5 == cfgfiledict.get(myrealdest, [None])[0]:
4324 """A previously remembered update has been
4325 accepted, so it is removed from confmem."""
4326 del cfgfiledict[myrealdest]
4329 mydest = new_protect_filename(mydest, newmd5=mymd5)
4331 # whether config protection or not, we merge the new file the
4332 # same way. Unless moveme=0 (blocking directory)
4334 # Do not hardlink files unless they are in the same
4335 # directory, since otherwise tar may not be able to
4336 # extract a tarball of the resulting hardlinks due to
4337 # 'Invalid cross-device link' errors (depends on layout of
4338 # mount points). Also, don't hardlink zero-byte files since
4339 # it doesn't save any space, and don't hardlink
4340 # CONFIG_PROTECTed files since config files shouldn't be
4341 # hardlinked to eachother (for example, shadow installs
4342 # several identical config files inside /etc/pam.d/).
4343 parent_dir = os.path.dirname(myrealdest)
4344 hardlink_key = (parent_dir, mymd5, mystat.st_size,
4345 mystat.st_mode, mystat.st_uid, mystat.st_gid)
4347 hardlink_candidates = None
4348 if not protected and mystat.st_size != 0:
4349 hardlink_candidates = self._md5_merge_map.get(hardlink_key)
4350 if hardlink_candidates is None:
4351 hardlink_candidates = []
4352 self._md5_merge_map[hardlink_key] = hardlink_candidates
4354 mymtime = movefile(mysrc, mydest, newmtime=thismtime,
4355 sstat=mystat, mysettings=self.settings,
4356 hardlink_candidates=hardlink_candidates,
4357 encoding=_encodings['merge'])
4360 if hardlink_candidates is not None:
4361 hardlink_candidates.append(mydest)
4365 outfile.write("obj "+myrealdest+" "+mymd5+" "+str(mymtime)+"\n")
# NOTE(review): 'zing' is assigned in lines elided from this view
# (presumably a merge-status marker string) — confirm in full source.
4366 showMessage("%s %s\n" % (zing,mydest))
4368 # we are merging a fifo or device node
4371 # destination doesn't exist
4372 if movefile(mysrc, mydest, newmtime=thismtime,
4373 sstat=mystat, mysettings=self.settings,
4374 encoding=_encodings['merge']) is not None:
# Record the entry in CONTENTS: "fif" for fifos, "dev" for device nodes.
4378 if stat.S_ISFIFO(mymode):
4379 outfile.write("fif %s\n" % myrealdest)
4381 outfile.write("dev %s\n" % myrealdest)
4382 showMessage(zing + " " + mydest + "\n")
# Public merge entry point: sets up a temporary copy of portage's own
# bin/pym trees when portage is reinstalling itself, then delegates to
# self._merge().  NOTE(review): the surrounding try/finally lines are
# elided from this sampled view — confirm cleanup semantics in the
# full source.
4384 def merge(self, mergeroot, inforoot, myroot, myebuild=None, cleanup=0,
4385 mydbapi=None, prev_mtimes=None):
4387 If portage is reinstalling itself, create temporary
4388 copies of PORTAGE_BIN_PATH and PORTAGE_PYM_PATH in order
4389 to avoid relying on the new versions which may be
4390 incompatible. Register an atexit hook to clean up the
4391 temporary directories. Pre-load elog modules here since
4392 we won't be able to later if they get unmerged (happens
4393 when namespace changes).
# Invalidate the cached category list so it is rebuilt after merge.
4395 if self.vartree.dbapi._categories is not None:
4396 self.vartree.dbapi._categories = None
4397 if self.myroot == "/" and \
4398 match_from_list(PORTAGE_PACKAGE_ATOM, [self.mycpv]) and \
4399 not self.vartree.dbapi.cpv_exists(self.mycpv):
4400 # Load lazily referenced portage submodules into memory,
4401 # so imports won't fail during portage upgrade/downgrade.
4402 portage.proxy.lazyimport._preload_portage_submodules()
4403 settings = self.settings
4404 base_path_orig = os.path.dirname(settings["PORTAGE_BIN_PATH"])
4405 from tempfile import mkdtemp
4407 # Make the temp directory inside PORTAGE_TMPDIR since, unlike
4408 # /tmp, it can't be mounted with the "noexec" option.
4409 base_path_tmp = mkdtemp("", "._portage_reinstall_.",
4410 settings["PORTAGE_TMPDIR"])
4411 from portage.process import atexit_register
4412 atexit_register(shutil.rmtree, base_path_tmp)
# Point PORTAGE_BIN_PATH / PORTAGE_PYM_PATH at the temporary copies
# for the remainder of this process.
4414 for subdir in "bin", "pym":
4415 var_name = "PORTAGE_%s_PATH" % subdir.upper()
4416 var_orig = settings[var_name]
4417 var_new = os.path.join(base_path_tmp, subdir)
4418 settings[var_name] = var_new
4419 settings.backup_changes(var_name)
4420 shutil.copytree(var_orig, var_new, symlinks=True)
4421 os.chmod(var_new, dir_perms)
4422 os.chmod(base_path_tmp, dir_perms)
4423 # This serves to pre-load the elog modules.
4424 elog_process(self.mycpv, self.settings)
4426 return self._merge(mergeroot, inforoot,
4427 myroot, myebuild=myebuild, cleanup=cleanup,
4428 mydbapi=mydbapi, prev_mtimes=prev_mtimes)
# Internal merge driver: runs treewalk(), fires success/die hooks,
# processes elog, and runs the "clean" phase.  NOTE(review): the
# try/finally structure and several lines (e.g. the die_hooks branch)
# are elided from this sampled view — hedge accordingly.
4430 def _merge(self, mergeroot, inforoot, myroot, myebuild=None, cleanup=0,
4431 mydbapi=None, prev_mtimes=None):
# Touch the vdb mtime before and after (see the final line) so
# external consumers notice the change.
4434 self.vartree.dbapi._bump_mtime(self.mycpv)
4436 self.vartree.dbapi.plib_registry.load()
4437 self.vartree.dbapi.plib_registry.pruneNonExisting()
4438 retval = self.treewalk(mergeroot, myroot, inforoot, myebuild,
4439 cleanup=cleanup, mydbapi=mydbapi, prev_mtimes=prev_mtimes)
4441 # If PORTAGE_BUILDDIR doesn't exist, then it probably means
4442 # fail-clean is enabled, and the success/die hooks have
4443 # already been called by _emerge.EbuildPhase (via
4444 # self._scheduler.dblinkEbuildPhase) prior to cleaning.
4445 if os.path.isdir(self.settings['PORTAGE_BUILDDIR']):
4447 if retval == os.EX_OK:
4448 phase = 'success_hooks'
4452 if self._scheduler is None:
4453 _spawn_misc_sh(self.settings, [phase],
4456 self._scheduler.dblinkEbuildPhase(
4457 self, mydbapi, myebuild, phase)
4459 elog_process(self.mycpv, self.settings)
# Run the "clean" phase unless FEATURES=noclean is set (always clean
# on failure when FEATURES=fail-clean is set).
4461 if 'noclean' not in self.settings.features and \
4462 (retval == os.EX_OK or \
4463 'fail-clean' in self.settings.features):
4464 if myebuild is None:
4465 myebuild = os.path.join(inforoot, self.pkg + ".ebuild")
4467 if self._scheduler is None:
4468 doebuild(myebuild, "clean", myroot,
4469 self.settings, tree=self.treetype,
4470 mydbapi=mydbapi, vartree=self.vartree)
4472 self._scheduler.dblinkEbuildPhase(
4473 self, mydbapi, myebuild, "clean")
# Drop stale linkmap data and bump the vdb mtime again on the way out.
4476 self.vartree.dbapi.linkmap._clear_cache()
4478 self.vartree.dbapi._bump_mtime(self.mycpv)
def getstring(self,name):
	"""Return the contents of the dbdir file ``name`` with all
	whitespace collapsed to single spaces.  Returns "" when the
	file does not exist."""
	if not os.path.exists(self.dbdir+"/"+name):
		return ""
	f = codecs.open(
		_unicode_encode(os.path.join(self.dbdir, name),
		encoding=_encodings['fs'], errors='strict'),
		mode='r', encoding=_encodings['repo.content'], errors='replace'
		)
	try:
		# split() drops all runs of whitespace (including newlines);
		# rejoining with single spaces normalizes the file's contents.
		mydata = f.read().split()
	finally:
		# Close explicitly instead of leaking the handle until GC.
		f.close()
	return " ".join(mydata)
def copyfile(self,fname):
	"""Copy the file at ``fname`` into this package's db directory,
	keeping its basename."""
	destination = self.dbdir + "/" + os.path.basename(fname)
	shutil.copyfile(fname, destination)
def getfile(self,fname):
	"""Return the full text of the dbdir file ``fname``, decoded with
	the repo.content encoding.  Returns "" when the file does not
	exist."""
	if not os.path.exists(self.dbdir+"/"+fname):
		return ""
	f = codecs.open(_unicode_encode(os.path.join(self.dbdir, fname),
		encoding=_encodings['fs'], errors='strict'),
		mode='r', encoding=_encodings['repo.content'], errors='replace'
		)
	try:
		return f.read()
	finally:
		# Close explicitly instead of leaking the handle until GC.
		f.close()
def setfile(self,fname,data):
	"""Atomically write ``data`` to the dbdir file ``fname``.

	environment.bz2 and any bytes payload are written in binary mode;
	everything else is written as text using the repo.content encoding.
	"""
	kwargs = {}
	if fname == 'environment.bz2' or not isinstance(data, basestring):
		# Binary payloads must not pass through a text encoder.
		kwargs['mode'] = 'wb'
	else:
		kwargs['mode'] = 'w'
		kwargs['encoding'] = _encodings['repo.content']
	write_atomic(os.path.join(self.dbdir, fname), data, **kwargs)
def getelements(self,ename):
	"""Return the whitespace-separated tokens stored in the dbdir file
	``ename`` as a flat list.  Returns [] when the file does not
	exist."""
	if not os.path.exists(self.dbdir+"/"+ename):
		return []
	f = codecs.open(_unicode_encode(
		os.path.join(self.dbdir, ename),
		encoding=_encodings['fs'], errors='strict'),
		mode='r', encoding=_encodings['repo.content'], errors='replace'
		)
	try:
		mylines = f.readlines()
	finally:
		# Close explicitly instead of leaking the handle until GC.
		f.close()
	myreturn = []
	for x in mylines:
		# x[:-1] strips the trailing newline before tokenizing.
		for y in x[:-1].split():
			myreturn.append(y)
	return myreturn
def setelements(self,mylist,ename):
	"""Write the strings in ``mylist`` to the dbdir file ``ename``,
	one element per line, using the repo.content encoding."""
	myelement = codecs.open(_unicode_encode(
		os.path.join(self.dbdir, ename),
		encoding=_encodings['fs'], errors='strict'),
		mode='w', encoding=_encodings['repo.content'],
		errors='backslashreplace')
	try:
		for x in mylist:
			myelement.write(x+"\n")
	finally:
		# Ensure the handle is closed even if a write fails.
		myelement.close()
def isregular(self):
	"""Is this a regular package (does it have a CATEGORY file? A dblink can be virtual *and* regular)"""
	category_file = os.path.join(self.dbdir, "CATEGORY")
	return os.path.exists(category_file)
def merge(mycat, mypkg, pkgloc, infloc, myroot, mysettings, myebuild=None,
	mytree=None, mydbapi=None, vartree=None, prev_mtimes=None, blockers=None,
	scheduler=None):
	"""Module-level convenience wrapper: build a dblink for
	mycat/mypkg and merge the image at ``pkgloc`` into ``myroot``.

	Returns the dblink.merge() exit status, or errno.EACCES when
	``myroot`` is not writable by the current user.
	"""
	if not os.access(myroot, os.W_OK):
		# Refuse early with a clear message rather than failing
		# somewhere deep inside treewalk().
		writemsg(_("Permission denied: access('%s', W_OK)\n") % myroot,
			noiselevel=-1)
		return errno.EACCES
	mylink = dblink(mycat, mypkg, myroot, mysettings, treetype=mytree,
		vartree=vartree, blockers=blockers, scheduler=scheduler)
	return mylink.merge(pkgloc, infloc, myroot, myebuild,
		mydbapi=mydbapi, prev_mtimes=prev_mtimes)
def unmerge(cat, pkg, myroot, mysettings, mytrimworld=1, vartree=None,
	ldpath_mtimes=None, scheduler=None):
	# Module-level convenience wrapper: build a dblink for cat/pkg and
	# unmerge it from myroot, refreshing the preserved-libs registry
	# first and clearing the linkage-map cache afterwards.
	# NOTE(review): the locking and try/finally scaffolding (and the
	# dblink deletion on success) appear to be missing from this copy
	# of the file -- compare against upstream portage before use.
	mylink = dblink(cat, pkg, myroot, mysettings, treetype="vartree",
		vartree=vartree, scheduler=scheduler)
	# Use the vartree the dblink actually resolved.
	vartree = mylink.vartree
	vartree.dbapi.plib_registry.load()
	vartree.dbapi.plib_registry.pruneNonExisting()
	retval = mylink.unmerge(trimworld=mytrimworld, cleanup=1,
		ldpath_mtimes=ldpath_mtimes)
	if retval == os.EX_OK:
	# NOTE(review): the success branch body (presumably mylink.delete())
	# is missing in this copy.
	vartree.dbapi.linkmap._clear_cache()
def write_contents(contents, root, f):
	"""
	Write contents to any file like object. The file will be left open.

	@param contents: mapping of absolute file path -> entry tuple, where
		the first element is the entry type ("obj", "sym", "dir", ...)
	@param root: root prefix stripped from each path (its trailing slash
		is kept on the written path)
	@param f: writable file-like object
	"""
	# Keep the leading separator: strip len(root)-1 characters so the
	# written paths stay absolute relative to root.
	root_len = len(root) - 1
	for filename in sorted(contents):
		entry_data = contents[filename]
		entry_type = entry_data[0]
		relative_filename = filename[root_len:]
		if entry_type == "obj":
			entry_type, mtime, md5sum = entry_data
			line = "%s %s %s %s\n" % \
				(entry_type, relative_filename, md5sum, mtime)
		elif entry_type == "sym":
			entry_type, mtime, link = entry_data
			line = "%s %s -> %s %s\n" % \
				(entry_type, relative_filename, link, mtime)
		else: # dir, dev, fif
			line = "%s %s\n" % (entry_type, relative_filename)
		f.write(line)
def tar_contents(contents, root, tar, protect=None, onProgress=None):
	# Add every path recorded in ``contents`` to the tarfile ``tar``,
	# reading from the live filesystem under ``root``.  ``protect`` is
	# an optional predicate selecting config-protected paths whose data
	# is replaced with a placeholder; ``onProgress(maxval, curval)`` is
	# an optional progress callback.
	# NOTE(review): many lines are missing from this copy of the file
	# (the local encode helper, the main ``for path in paths:`` loop
	# header, try/except bodies, live_path assignment) -- compare
	# against upstream portage before relying on this block.
		encoding=_encodings['merge'],
	except UnicodeEncodeError:
		# The package appears to have been merged with a
		# different value of sys.getfilesystemencoding(),
		# so fall back to utf_8 if appropriate.
			encoding=_encodings['fs'],
		except UnicodeEncodeError:
	from portage.util import normalize_path
	# Normalize root so the startswith()/slicing below is reliable.
	root = normalize_path(root).rstrip(os.path.sep) + os.path.sep
	maxval = len(contents)
		onProgress(maxval, 0)
	paths = list(contents)
			lst = os.lstat(path)
		except OSError as e:
			if e.errno != errno.ENOENT:
				# Path vanished since merge: report progress and skip it.
				onProgress(maxval, curval)
		contents_type = contents[path][0]
		if path.startswith(root):
			# Store paths relative to root inside the archive.
			arcname = path[len(root):]
			raise ValueError("invalid root argument: '%s'" % root)
		if 'dir' == contents_type and \
			not stat.S_ISDIR(lst.st_mode) and \
			os.path.isdir(live_path):
			# Even though this was a directory in the original ${D}, it exists
			# as a symlink to a directory in the live filesystem. It must be
			# recorded as a real directory in the tar file to ensure that tar
			# can properly extract it's children.
			live_path = os.path.realpath(live_path)
		tarinfo = tar.gettarinfo(live_path, arcname)
		if stat.S_ISREG(lst.st_mode):
			# break hardlinks due to bug #185305
			tarinfo.type = tarfile.REGTYPE
			if protect and protect(path):
				# Create an empty file as a place holder in order to avoid
				# potential collision-protect issues.
				f = tempfile.TemporaryFile()
				f.write(_unicode_encode(
					"# empty file because --include-config=n " + \
					"when `quickpkg` was used\n"))
				tarinfo.size = os.fstat(f.fileno()).st_size
				tar.addfile(tarinfo, f)
				# Regular file: archive its current on-disk contents.
				f = open(_unicode_encode(path,
					encoding=object.__getattribute__(os, '_encoding'),
					errors='strict'), 'rb')
					tar.addfile(tarinfo, f)
			# Non-regular entries (dir/dev/fifo/symlink) carry no data.
			tar.addfile(tarinfo)
			onProgress(maxval, curval)