1 # Copyright 1998-2010 Gentoo Foundation
2 # Distributed under the terms of the GNU General Public License v2
5 "vardbapi", "vartree", "dblink"] + \
6 ["write_contents", "tar_contents"]
9 portage.proxy.lazyimport.lazyimport(globals(),
10 'portage.checksum:_perform_md5_merge@perform_md5',
11 'portage.data:portage_gid,portage_uid,secpass',
12 'portage.dbapi.dep_expand:dep_expand',
13 'portage.dep:dep_getkey,isjustname,match_from_list,' + \
14 'use_reduce,_slot_re',
15 'portage.elog:elog_process',
16 'portage.locks:lockdir,unlockdir',
17 'portage.output:bold,colorize',
18 'portage.package.ebuild.doebuild:doebuild_environment,' + \
20 'portage.package.ebuild.prepare_build_dirs:prepare_build_dirs',
21 'portage.update:fixdbentries',
22 'portage.util:apply_secpass_permissions,ConfigProtect,ensure_dirs,' + \
23 'writemsg,writemsg_level,write_atomic,atomic_ofstream,writedict,' + \
24 'grabdict,normalize_path,new_protect_filename',
25 'portage.util.digraph:digraph',
26 'portage.util.env_update:env_update',
27 'portage.util.listdir:dircache,listdir',
28 'portage.util._dyn_libs.PreservedLibsRegistry:PreservedLibsRegistry',
29 'portage.util._dyn_libs.LinkageMapELF:LinkageMapELF@LinkageMap',
30 'portage.versions:best,catpkgsplit,catsplit,cpv_getkey,pkgcmp,' + \
34 from portage.const import CACHE_PATH, CONFIG_MEMORY_FILE, \
35 PORTAGE_PACKAGE_ATOM, PRIVATE_PATH, VDB_PATH
36 from portage.const import _ENABLE_DYN_LINK_MAP, _ENABLE_PRESERVE_LIBS
37 from portage.dbapi import dbapi
38 from portage.exception import CommandNotFound, \
39 InvalidData, InvalidPackageName, \
40 FileNotFound, PermissionDenied, UnsupportedAPIException
41 from portage.localization import _
42 from portage.util.movefile import movefile
44 from portage import abssymlink, _movefile, bsd_chflags
46 # This is a special version of the os module, wrapped for unicode support.
47 from portage import os
48 from portage import _encodings
49 from portage import _os_merge
50 from portage import _selinux_merge
51 from portage import _unicode_decode
52 from portage import _unicode_encode
54 from _emerge.AsynchronousLock import AsynchronousLock
55 from _emerge.EbuildBuildDir import EbuildBuildDir
56 from _emerge.PollScheduler import PollScheduler
57 from _emerge.MiscFunctionsProcess import MiscFunctionsProcess
61 import re, shutil, stat, errno, subprocess
71 import cPickle as pickle
75 if sys.hexversion >= 0x3000000:
class vardbapi(dbapi):

    # Directory names that can never be valid category or package
    # directories inside the vdb (plus dotfiles and -MERGING- leftovers,
    # added by the compiled pattern below).
    _excluded_dirs = ["CVS", "lost+found"]
    _excluded_dirs = [re.escape(x) for x in _excluded_dirs]
    _excluded_dirs = re.compile(r'^(\..*|-MERGING-.*|' + \
        "|".join(_excluded_dirs) + r')$')

    # Bump these whenever the pickled cache layout changes so stale
    # on-disk caches are discarded and rebuilt.
    _aux_cache_version = "1"
    _owners_cache_version = "1"

    # Number of uncached packages to trigger cache update, since
    # it's wasteful to update it for every vdb change.
    _aux_cache_threshold = 5

    # Keys matching this pattern are cacheable in addition to
    # _aux_cache_keys; keys matching _aux_multi_line_re keep their
    # embedded newlines when read from disk.
    _aux_cache_keys_re = re.compile(r'^NEEDED\..*$')
    _aux_multi_line_re = re.compile(r'^(CONTENTS|NEEDED\..*)$')
    def __init__(self, _unused_param=None, categories=None, settings=None, vartree=None):
        """
        The categories parameter is unused since the dbapi class
        now has a categories property that is generated from the
        """
        # Used by emerge to check whether any packages
        # have been added or removed.
        self._pkgs_changed = False

        #cache for category directory mtimes
        #cache for dependency checks
        #cache for cp_list results
        settings = portage.settings
        self.settings = settings
        self.root = settings['ROOT']

        if _unused_param is not None and _unused_param != self.root:
            warnings.warn("The first parameter of the " + \
                "portage.dbapi.vartree.vardbapi" + \
                " constructor is now unused. Use " + \
                "settings['ROOT'] instead.",
                DeprecationWarning, stacklevel=2)

        self._eroot = settings['EROOT']
        vartree = portage.db[self.root]["vartree"]
        self.vartree = vartree
        # Metadata keys that are always kept in the pickled aux cache.
        self._aux_cache_keys = set(
            ["BUILD_TIME", "CHOST", "COUNTER", "DEPEND", "DESCRIPTION",
            "EAPI", "HOMEPAGE", "IUSE", "KEYWORDS",
            "LICENSE", "PDEPEND", "PROPERTIES", "PROVIDE", "RDEPEND",
            "repository", "RESTRICT" , "SLOT", "USE", "DEFINED_PHASES",
        self._aux_cache_obj = None
        self._aux_cache_filename = os.path.join(self._eroot,
            CACHE_PATH, "vdb_metadata.pickle")
        self._counter_path = os.path.join(self._eroot,
            CACHE_PATH, "counter")

        # Preserved-libraries registry; stays None when the feature is
        # disabled or this user cannot access PRIVATE_PATH.
        self._plib_registry = None
        if _ENABLE_PRESERVE_LIBS:
                self._plib_registry = PreservedLibsRegistry(self._eroot,
                    os.path.join(self._eroot, PRIVATE_PATH,
                    "preserved_libs_registry"))
            except PermissionDenied:
                # apparently this user isn't allowed to access PRIVATE_PATH
        if _ENABLE_DYN_LINK_MAP:
            self._linkmap = LinkageMap(self)
        self._owners = self._owners_db(self)
    def getpath(self, mykey, filename=None):
        # Build the on-disk vdb path for a cpv (optionally a file
        # inside the package directory).
        # This is an optimized hotspot, so don't use unicode-wrapped
        # os module and don't use os.path.join().
        rValue = self._eroot + VDB_PATH + _os.sep + mykey
        if filename is not None:
            # If filename is always relative, we can do just
            # rValue += _os.sep + filename
            rValue = _os.path.join(rValue, filename)

    def _bump_mtime(self, cpv):
        """
        This is called before and after any modifications, so that consumers
        can use directory mtimes to validate caches. See bug #290428.
        """
        base = self._eroot + VDB_PATH
        cat = catsplit(cpv)[0]
        catdir = base + _os.sep + cat
        # Touch both the category directory and the vdb root.
        for x in (catdir, base):
186 def cpv_exists(self, mykey):
187 "Tells us whether an actual ebuild exists on disk (no masking)"
188 return os.path.exists(self.getpath(mykey))
    def cpv_counter(self, mycpv):
        "This method will grab the COUNTER. Returns a counter value."
        # COUNTER is stored as a decimal string in the package's vdb
        # directory; a missing or malformed value is reported and reset.
            return long(self.aux_get(mycpv, ["COUNTER"])[0])
        except (KeyError, ValueError):
            writemsg_level(_("portage: COUNTER for %s was corrupted; " \
                "resetting to value of 0\n") % (mycpv,),
                level=logging.ERROR, noiselevel=-1)
201 def cpv_inject(self, mycpv):
202 "injects a real package into our on-disk database; assumes mycpv is valid and doesn't already exist"
203 ensure_dirs(self.getpath(mycpv))
204 counter = self.counter_tick(mycpv=mycpv)
205 # write local package counter so that emerge clean does the right thing
206 write_atomic(self.getpath(mycpv, filename="COUNTER"), str(counter))
    def isInjected(self, mycpv):
        # An "injected" entry has a vdb directory but either an explicit
        # INJECTED marker or no CONTENTS file.
        if self.cpv_exists(mycpv):
            if os.path.exists(self.getpath(mycpv, filename="INJECTED")):
            if not os.path.exists(self.getpath(mycpv, filename="CONTENTS")):

    def move_ent(self, mylist, repo_match=None):
        # Move installed entries from one category/package name to another,
        # optionally restricted to packages whose repository passes
        # repo_match.
        for atom in (origcp, newcp):
            if not isjustname(atom):
                raise InvalidPackageName(str(atom))
        origmatches = self.match(origcp, use_cache=0)
        for mycpv in origmatches:
            mycpv_cp = cpv_getkey(mycpv)
            if mycpv_cp != origcp:
                # Ignore PROVIDE virtual match.
            if repo_match is not None \
                and not repo_match(self.aux_get(mycpv, ['repository'])[0]):
            mynewcpv = mycpv.replace(mycpv_cp, str(newcp), 1)
            mynewcat = catsplit(newcp)[0]
            origpath = self.getpath(mycpv)
            if not os.path.exists(origpath):
            if not os.path.exists(self.getpath(mynewcat)):
                #create the directory
                ensure_dirs(self.getpath(mynewcat))
            newpath = self.getpath(mynewcpv)
            if os.path.exists(newpath):
                #dest already exists; keep this puppy where it is.
            _movefile(origpath, newpath, mysettings=self.settings)
            # Both the old and new entries must be dropped from caches.
            self._clear_pkg_cache(self._dblink(mycpv))
            self._clear_pkg_cache(self._dblink(mynewcpv))

            # We need to rename the ebuild now.
            old_pf = catsplit(mycpv)[1]
            new_pf = catsplit(mynewcpv)[1]
                os.rename(os.path.join(newpath, old_pf + ".ebuild"),
                    os.path.join(newpath, new_pf + ".ebuild"))
            except EnvironmentError as e:
                if e.errno != errno.ENOENT:
            write_atomic(os.path.join(newpath, "PF"), new_pf+"\n")
            write_atomic(os.path.join(newpath, "CATEGORY"), mynewcat+"\n")
            fixdbentries([mylist], newpath)
    def cp_list(self, mycp, use_cache=1):
        # List installed cpvs for a single cat/pkg, served from a
        # per-cp cache validated against the category dir's mtime.
        mysplit=catsplit(mycp)
        if mysplit[0] == '*':
            mysplit[0] = mysplit[0][1:]
        mystat = os.stat(self.getpath(mysplit[0])).st_mtime
        if use_cache and mycp in self.cpcache:
            cpc = self.cpcache[mycp]
        cat_dir = self.getpath(mysplit[0])
            dir_list = os.listdir(cat_dir)
        except EnvironmentError as e:
            if e.errno == PermissionDenied.errno:
                raise PermissionDenied(cat_dir)
            if self._excluded_dirs.match(x) is not None:
            self.invalidentry(os.path.join(self.getpath(mysplit[0]), x))
            if ps[0] == mysplit[1]:
                returnme.append(mysplit[0]+"/"+x)
        self._cpv_sort_ascending(returnme)
            # Cache a copy keyed by the category dir mtime.
            self.cpcache[mycp] = [mystat, returnme[:]]
        elif mycp in self.cpcache:
            del self.cpcache[mycp]

    def cpv_all(self, use_cache=1):
        """
        Set use_cache=0 to bypass the portage.cachedir() cache in cases
        when the accuracy of mtime staleness checks should not be trusted
        (generally this is only necessary in critical sections that
        involve merge or unmerge of packages).
        """
        basepath = os.path.join(self._eroot, VDB_PATH) + os.path.sep
            from portage import listdir
            # Fallback listdir that only returns subdirectories.
            def listdir(p, **kwargs):
                    return [x for x in os.listdir(p) \
                        if os.path.isdir(os.path.join(p, x))]
                except EnvironmentError as e:
                    if e.errno == PermissionDenied.errno:
                        raise PermissionDenied(p)
        for x in listdir(basepath, EmptyOnError=1, ignorecvs=1, dirsonly=1):
            if self._excluded_dirs.match(x) is not None:
            if not self._category_re.match(x):
            for y in listdir(basepath + x, EmptyOnError=1, dirsonly=1):
                if self._excluded_dirs.match(y) is not None:
                subpath = x + "/" + y
                # -MERGING- should never be a cpv, nor should files.
                if catpkgsplit(subpath) is None:
                    self.invalidentry(self.getpath(subpath))
                    self.invalidentry(self.getpath(subpath))
                returnme.append(subpath)

    def cp_all(self, use_cache=1):
        # Unique cat/pkg names derived from cpv_all().
        mylist = self.cpv_all(use_cache=use_cache)
            mysplit = catpkgsplit(y)
                self.invalidentry(self.getpath(y))
                self.invalidentry(self.getpath(y))
            d[mysplit[0]+"/"+mysplit[1]] = None

    def checkblockers(self, origdep):

    def _clear_cache(self):
        # Drop all in-memory caches; the pickled aux cache object is
        # lazily reloaded on next access.
        self.mtdircache.clear()
        self.matchcache.clear()
        self._aux_cache_obj = None

    def _add(self, pkg_dblink):
        # A package was merged: flag the change and invalidate caches.
        self._pkgs_changed = True
        self._clear_pkg_cache(pkg_dblink)

    def _remove(self, pkg_dblink):
        # A package was unmerged: flag the change and invalidate caches.
        self._pkgs_changed = True
        self._clear_pkg_cache(pkg_dblink)
386 def _clear_pkg_cache(self, pkg_dblink):
387 # Due to 1 second mtime granularity in <python-2.5, mtime checks
388 # are not always sufficient to invalidate vardbapi caches. Therefore,
389 # the caches need to be actively invalidated here.
390 self.mtdircache.pop(pkg_dblink.cat, None)
391 self.matchcache.pop(pkg_dblink.cat, None)
392 self.cpcache.pop(pkg_dblink.mysplit[0], None)
393 dircache.pop(pkg_dblink.dbcatdir, None)
    def match(self, origdep, use_cache=1):
        "caching match function"
        # Expand the dep, then serve matches from a per-category cache
        # that is validated by the category directory's mtime.
            origdep, mydb=self, use_cache=use_cache, settings=self.settings)
        mykey = dep_getkey(mydep)
        mycat = catsplit(mykey)[0]
        if mycat in self.matchcache:
            del self.mtdircache[mycat]
            del self.matchcache[mycat]
        return list(self._iter_match(mydep,
            self.cp_list(mydep.cp, use_cache=use_cache)))
            curmtime = os.stat(os.path.join(self._eroot, VDB_PATH, mycat)).st_mtime
        except (IOError, OSError):
        if mycat not in self.matchcache or \
            self.mtdircache[mycat] != curmtime:
            # Category dir changed (or never seen): reset its cache.
            self.mtdircache[mycat] = curmtime
            self.matchcache[mycat] = {}
        if mydep not in self.matchcache[mycat]:
            mymatch = list(self._iter_match(mydep,
                self.cp_list(mydep.cp, use_cache=use_cache)))
            self.matchcache[mycat][mydep] = mymatch
        return self.matchcache[mycat][mydep][:]
423 def findname(self, mycpv):
424 return self.getpath(str(mycpv), filename=catsplit(mycpv)[1]+".ebuild")
    def flush_cache(self):
        """If the current user has permission and the internal aux_get cache has
        been updated, save it to disk and mark it unmodified. This is called
        by emerge after it has loaded the full vdb for use in dependency
        calculations. Currently, the cache is only written if the user has
        superuser privileges (since that's required to obtain a lock), but all
        users have read access and benefit from faster metadata lookups (as
        long as at least part of the cache is still valid)."""
        if self._aux_cache is not None and \
            len(self._aux_cache["modified"]) >= self._aux_cache_threshold and \
            self._owners.populate() # index any unindexed contents
            # Drop cache entries for packages that are no longer installed.
            valid_nodes = set(self.cpv_all())
            for cpv in list(self._aux_cache["packages"]):
                if cpv not in valid_nodes:
                    del self._aux_cache["packages"][cpv]
            del self._aux_cache["modified"]
                f = atomic_ofstream(self._aux_cache_filename, 'wb')
                pickle.dump(self._aux_cache, f, protocol=2)
                # World-readable so non-root users get the lookup speedup.
                apply_secpass_permissions(
                    self._aux_cache_filename, gid=portage_gid, mode=0o644)
            except (IOError, OSError) as e:
            self._aux_cache["modified"] = set()
454 def _aux_cache(self):
455 if self._aux_cache_obj is None:
456 self._aux_cache_init()
457 return self._aux_cache_obj
    def _aux_cache_init(self):
        # Load the pickled aux cache from disk, falling back to a fresh
        # empty structure when it is missing, stale, or corrupt.
        if sys.hexversion >= 0x3000000:
            # Buffered io triggers extreme performance issues in
            # Unpickler.load() (problem observed with python-3.0.1).
            # Unfortunately, performance is still poor relative to
            # python-2.x, but buffering makes it much worse.
            open_kwargs["buffering"] = 0
            f = open(_unicode_encode(self._aux_cache_filename,
                encoding=_encodings['fs'], errors='strict'),
                mode='rb', **open_kwargs)
            mypickle = pickle.Unpickler(f)
                # Disable global lookups as a safety measure against
                # malicious pickles (py2 Unpickler attribute).
                mypickle.find_global = None
            except AttributeError:
                # TODO: If py3k, override Unpickler.find_class().
            aux_cache = mypickle.load()
        except (IOError, OSError, EOFError, ValueError, pickle.UnpicklingError) as e:
            if isinstance(e, pickle.UnpicklingError):
                writemsg(_("!!! Error loading '%s': %s\n") % \
                    (self._aux_cache_filename, str(e)), noiselevel=-1)

        # Discard the whole cache on any structural/version mismatch.
        if not aux_cache or \
            not isinstance(aux_cache, dict) or \
            aux_cache.get("version") != self._aux_cache_version or \
            not aux_cache.get("packages"):
            aux_cache = {"version": self._aux_cache_version}
            aux_cache["packages"] = {}

        # Validate the nested owners cache the same way.
        owners = aux_cache.get("owners")
        if owners is not None:
            if not isinstance(owners, dict):
            elif "version" not in owners:
            elif owners["version"] != self._owners_cache_version:
            elif "base_names" not in owners:
            elif not isinstance(owners["base_names"], dict):
                "version" : self._owners_cache_version
            aux_cache["owners"] = owners

        aux_cache["modified"] = set()
        self._aux_cache_obj = aux_cache
    def aux_get(self, mycpv, wants):
        """This automatically caches selected keys that are frequently needed
        by emerge for dependency calculations. The cached metadata is
        considered valid if the mtime of the package directory has not changed
        since the data was cached. The cache is stored in a pickled dict
        object with the following format:

        {version:"1", "packages":{cpv1:(mtime,{k1,v1, k2,v2, ...}), cpv2...}}

        If an error occurs while loading the cache pickle or the version is
        unrecognized, the cache will simply be recreated from scratch (it is
        completely disposable).
        """
        cache_these_wants = self._aux_cache_keys.intersection(wants)
        # Regex-matched keys (NEEDED.*) are cacheable as well.
            if self._aux_cache_keys_re.match(x) is not None:
                cache_these_wants.add(x)

        if not cache_these_wants:
            # Nothing cacheable requested: read straight from disk.
            return self._aux_get(mycpv, wants)

        cache_these = set(self._aux_cache_keys)
        cache_these.update(cache_these_wants)

        mydir = self.getpath(mycpv)
            mydir_stat = os.stat(mydir)
            if e.errno != errno.ENOENT:
            raise KeyError(mycpv)
        mydir_mtime = mydir_stat[stat.ST_MTIME]
        pkg_data = self._aux_cache["packages"].get(mycpv)
        pull_me = cache_these.union(wants)
        mydata = {"_mtime_" : mydir_mtime}
        cache_incomplete = False
        if pkg_data is not None:
            # Sanity-check the cached tuple before trusting it.
            if not isinstance(pkg_data, tuple) or len(pkg_data) != 2:
                cache_mtime, metadata = pkg_data
                if not isinstance(cache_mtime, (long, int)) or \
                    not isinstance(metadata, dict):
            cache_mtime, metadata = pkg_data
            # Cache entry is valid only if the dir mtime is unchanged.
            cache_valid = cache_mtime == mydir_mtime
                # Migrate old metadata to unicode.
                for k, v in metadata.items():
                    metadata[k] = _unicode_decode(v,
                        encoding=_encodings['repo.content'], errors='replace')
            mydata.update(metadata)
            pull_me.difference_update(mydata)
            # pull any needed data and cache it
            aux_keys = list(pull_me)
            for k, v in zip(aux_keys,
                self._aux_get(mycpv, aux_keys, st=mydir_stat)):
            if not cache_valid or cache_these.difference(metadata):
                if cache_valid and metadata:
                    cache_data.update(metadata)
                for aux_key in cache_these:
                    cache_data[aux_key] = mydata[aux_key]
                self._aux_cache["packages"][mycpv] = (mydir_mtime, cache_data)
                self._aux_cache["modified"].add(mycpv)
        if _slot_re.match(mydata['SLOT']) is None:
            # Empty or invalid slot triggers InvalidAtom exceptions when
            # generating slot atoms for packages, so translate it to '0' here.
            mydata['SLOT'] = _unicode_decode('0')
        return [mydata[x] for x in wants]
    def _aux_get(self, mycpv, wants, st=None):
        # Read the requested metadata values directly from the files in
        # the package's vdb directory (no caching at this layer).
        mydir = self.getpath(mycpv)
            if e.errno == errno.ENOENT:
                raise KeyError(mycpv)
            elif e.errno == PermissionDenied.errno:
                raise PermissionDenied(mydir)
        if not stat.S_ISDIR(st.st_mode):
            raise KeyError(mycpv)
                results.append(st[stat.ST_MTIME])
                    _unicode_encode(os.path.join(mydir, x),
                    encoding=_encodings['fs'], errors='strict'),
                    mode='r', encoding=_encodings['repo.content'],
                # Preserve \n for metadata that is known to
                # contain multiple lines.
                if self._aux_multi_line_re.match(x) is None:
                    myd = " ".join(myd.split())
                myd = _unicode_decode('')
            # An unset EAPI is equivalent to EAPI 0.
            if x == "EAPI" and not myd:
                results.append(_unicode_decode('0'))

    def aux_update(self, cpv, values):
        # Write updated metadata values into the package's vdb directory,
        # bumping the dir mtime before and after so caches invalidate.
        mylink = self._dblink(cpv)
        if not mylink.exists():
        self._bump_mtime(cpv)
        self._clear_pkg_cache(mylink)
        for k, v in values.items():
                    # An empty value removes the metadata file entirely.
                    os.unlink(os.path.join(self.getpath(cpv), k))
                except EnvironmentError:
        self._bump_mtime(cpv)
    def counter_tick(self, myroot=None, mycpv=None):
        """
        @param myroot: ignored, self._eroot is used instead
        """
        return self.counter_tick_core(incrementing=1, mycpv=mycpv)

    def get_counter_tick_core(self, myroot=None, mycpv=None):
        """
        Use this method to retrieve the counter instead
        of having to trust the value of a global counter
        file that can lead to invalid COUNTER
        generation. When cache is valid, the package COUNTER
        files are not read and we rely on the timestamp of
        the package directory to validate cache. The stat
        calls should only take a short time, so performance
        is sufficient without having to rely on a potentially
        corrupt global counter file.

        The global counter file located at
        $CACHE_PATH/counter serves to record the
        counter of the last installed package and
        it also corresponds to the total number of
        installation actions that have occurred in
        the history of this package database.

        @param myroot: ignored, self._eroot is used instead
        """
        cp_list = self.cp_list
        # Scan every installed package's COUNTER to find the maximum.
        for cp in self.cp_all():
            for cpv in cp_list(cp):
                    counter = int(self.aux_get(cpv, ["COUNTER"])[0])
                except (KeyError, OverflowError, ValueError):
                if counter > max_counter:
                    max_counter = counter
            _unicode_encode(self._counter_path,
            encoding=_encodings['fs'], errors='strict'),
            mode='r', encoding=_encodings['repo.content'],
        except EnvironmentError as e:
            # Silently start from scratch when the vdb is empty/new.
            new_vdb = not bool(self.cpv_all())
                writemsg(_("!!! Unable to read COUNTER file: '%s'\n") % \
                    self._counter_path, noiselevel=-1)
                writemsg("!!! %s\n" % str(e), noiselevel=-1)
                counter = long(cfile.readline().strip())
            except (OverflowError, ValueError) as e:
                writemsg(_("!!! COUNTER file is corrupt: '%s'\n") % \
                    self._counter_path, noiselevel=-1)
                writemsg("!!! %s\n" % str(e), noiselevel=-1)

        # We must ensure that we return a counter
        # value that is at least as large as the
        # highest one from the installed packages,
        # since having a corrupt value that is too low
        # can trigger incorrect AUTOCLEAN behavior due
        # to newly installed packages having lower
        # COUNTERs than the previous version in the
        if counter > max_counter:
            max_counter = counter

        if counter < 0 and not new_vdb:
            writemsg(_("!!! Initializing COUNTER to " \
                "value of %d\n") % max_counter, noiselevel=-1)

        return max_counter + 1

    def counter_tick_core(self, myroot=None, incrementing=1, mycpv=None):
        """
        This method will grab the next COUNTER value and record it back
        to the global file. Returns new counter value.

        @param myroot: ignored, self._eroot is used instead
        """
        counter = self.get_counter_tick_core(mycpv=mycpv) - 1
            # use same permissions as config._init_dirs()
            ensure_dirs(os.path.dirname(self._counter_path),
                gid=portage_gid, mode=0o2750, mask=0o2)
            # update new global counter file
            write_atomic(self._counter_path, str(counter))
759 def _dblink(self, cpv):
760 category, pf = catsplit(cpv)
761 return dblink(category, pf, settings=self.settings,
762 vartree=self.vartree, treetype="vartree")
    def removeFromContents(self, pkg, paths, relative_paths=True):
        """
        @param pkg: cpv for an installed package
        @param paths: paths of files to remove from contents
        @type paths: iterable
        """
        if not hasattr(pkg, "getcontents"):
            # A plain cpv was passed; wrap it in a dblink.
            pkg = self._dblink(pkg)
        root_len = len(root) - 1
        new_contents = pkg.getcontents().copy()
        for filename in paths:
            filename = _unicode_decode(filename,
                encoding=_encodings['content'], errors='strict')
            filename = normalize_path(filename)
                relative_filename = filename
                relative_filename = filename[root_len:]
            contents_key = pkg._match_contents(relative_filename)
                del new_contents[contents_key]
        # Rewrite CONTENTS atomically, bumping the dir mtime before and
        # after so that vardbapi caches are invalidated.
            self._bump_mtime(pkg.mycpv)
            f = atomic_ofstream(os.path.join(pkg.dbdir, "CONTENTS"))
            write_contents(new_contents, root, f)
            self._bump_mtime(pkg.mycpv)
        pkg._clear_contents_cache()
    class _owners_cache(object):
        """
        This class maintains a hash table that serves to index package
        contents by mapping the basename of file to a list of possible
        packages that own it. This is used to optimize owner lookups
        by narrowing the search down to a smaller number of packages.
        """
            from hashlib import md5 as _new_hash
            from md5 import new as _new_hash

        # Hex digits kept from each digest (4 bits per hex char).
        _hex_chars = int(_hash_bits / 4)

        def __init__(self, vardb):

            root_len = len(self._vardb._eroot)
            contents = self._vardb._dblink(cpv).getcontents()
            pkg_hash = self._hash_pkg(cpv)
                # Empty path is a code used to represent empty contents.
                self._add_path("", pkg_hash)
                self._add_path(x[root_len:], pkg_hash)
            self._vardb._aux_cache["modified"].add(cpv)

        def _add_path(self, path, pkg_hash):
            """
            Empty path is a code that represents empty contents.
            """
                name = os.path.basename(path.rstrip(os.path.sep))
            name_hash = self._hash_str(name)
            base_names = self._vardb._aux_cache["owners"]["base_names"]
            pkgs = base_names.get(name_hash)
                base_names[name_hash] = pkgs
            # Value is unused; the dict is used as an ordered set of hashes.
            pkgs[pkg_hash] = None

        def _hash_str(self, s):
            # Always use a constant utf_8 encoding here, since
            # the "default" encoding can change.
            h.update(_unicode_encode(s,
                encoding=_encodings['repo.content'],
                errors='backslashreplace'))
            h = h[-self._hex_chars:]

        def _hash_pkg(self, cpv):
            # A package's cache identity: (cpv, COUNTER, dir mtime).
            counter, mtime = self._vardb.aux_get(
                cpv, ["COUNTER", "_mtime_"])
                counter = int(counter)
            return (cpv, counter, mtime)
    class _owners_db(object):

        def __init__(self, vardb):

        def _populate(self, scheduler=None):
            # Bring the basename-indexed owners cache in sync with the
            # set of currently installed packages.
            owners_cache = vardbapi._owners_cache(self._vardb)
            cached_hashes = set()
            base_names = self._vardb._aux_cache["owners"]["base_names"]

            # Take inventory of all cached package hashes.
            for name, hash_values in list(base_names.items()):
                if not isinstance(hash_values, dict):
                cached_hashes.update(hash_values)

            # Create sets of valid package hashes and uncached packages.
            uncached_pkgs = set()
            hash_pkg = owners_cache._hash_pkg
            valid_pkg_hashes = set()
            for cpv in self._vardb.cpv_all():
                hash_value = hash_pkg(cpv)
                valid_pkg_hashes.add(hash_value)
                if hash_value not in cached_hashes:
                    uncached_pkgs.add(cpv)

            # Cache any missing packages.
            for cpv in uncached_pkgs:
                if scheduler is not None:
                    scheduler.scheduleYield()
                owners_cache.add(cpv)

            # Delete any stale cache.
            stale_hashes = cached_hashes.difference(valid_pkg_hashes)
                for base_name_hash, bucket in list(base_names.items()):
                    for hash_value in stale_hashes.intersection(bucket):
                        del bucket[hash_value]
                        del base_names[base_name_hash]

        def get_owners(self, path_iter, scheduler=None):
            """
            @return the owners as a dblink -> set(files) mapping.
            """
            for owner, f in self.iter_owners(path_iter, scheduler=scheduler):
                owned_files = owners.get(owner)
                if owned_files is None:
                    owners[owner] = owned_files

        def getFileOwnerMap(self, path_iter):
            # Invert get_owners(): map each file to the set of owning
            # dblinks.
            owners = self.get_owners(path_iter)
            for pkg_dblink, files in owners.items():
                    owner_set = file_owners.get(f)
                    if owner_set is None:
                        file_owners[f] = owner_set
                    owner_set.add(pkg_dblink)

        def iter_owners(self, path_iter, scheduler=None):
            """
            Iterate over tuples of (dblink, path). In order to avoid
            consuming too many resources for too much time, resources
            are only allocated for the duration of a given iter_owners()
            call. Therefore, to maximize reuse of resources when searching
            for multiple files, it's best to search for them all in a single
            """
            if not isinstance(path_iter, list):
                path_iter = list(path_iter)

            # Many paths: the low-memory path avoids building dblinks.
            if len(path_iter) > 10:
                for x in self._iter_owners_low_mem(path_iter,
                    scheduler=scheduler):

            owners_cache = self._populate(scheduler=scheduler)

            hash_pkg = owners_cache._hash_pkg
            hash_str = owners_cache._hash_str
            base_names = self._vardb._aux_cache["owners"]["base_names"]

                x = dblink_cache.get(cpv)
                    if len(dblink_cache) > 20:
                        # Ensure that we don't run out of memory.
                        raise StopIteration()
                    x = self._vardb._dblink(cpv)
                    dblink_cache[cpv] = x

                    path = path_iter.pop()
                    # A path without a leading os.sep is a bare basename.
                    is_basename = os.sep != path[:1]
                        name = os.path.basename(path.rstrip(os.path.sep))
                    name_hash = hash_str(name)
                    pkgs = base_names.get(name_hash)
                        for hash_value in pkgs:
                            # Validate the cached tuple shape before use.
                            if not isinstance(hash_value, tuple) or \
                                len(hash_value) != 3:
                            cpv, counter, mtime = hash_value
                            if not isinstance(cpv, basestring):
                            current_hash = hash_pkg(cpv)
                            if current_hash != hash_value:
                                for p in dblink(cpv).getcontents():
                                    if os.path.basename(p) == name:
                                        owners.append((cpv, p[len(root):]))
                                if dblink(cpv).isowner(path):
                                    owners.append((cpv, path))
                    if scheduler is not None:
                        scheduler.scheduleYield()
            except StopIteration:
                # Too many dblinks cached: restart with the low-memory
                # implementation for the remaining paths.
                path_iter.append(path)
                dblink_cache.clear()
                for x in self._iter_owners_low_mem(path_iter,
                    scheduler=scheduler):
                for cpv, p in owners:
                    yield (dblink(cpv), p)

        def _iter_owners_low_mem(self, path_list, scheduler=None):
            """
            This implementation will make a short-lived dblink instance (and
            parse CONTENTS) for every single installed package. This is
            slower but uses less memory than the method which uses the
            """
            for path in path_list:
                is_basename = os.sep != path[:1]
                    name = os.path.basename(path.rstrip(os.path.sep))
                path_info_list.append((path, name, is_basename))

            root = self._vardb._eroot
            for cpv in self._vardb.cpv_all():
                if scheduler is not None:
                    scheduler.scheduleYield()
                dblnk = self._vardb._dblink(cpv)
                for path, name, is_basename in path_info_list:
                        for p in dblnk.getcontents():
                            if os.path.basename(p) == name:
                                yield dblnk, p[len(root):]
                        if dblnk.isowner(path):
class vartree(object):
    "this tree will scan a var/db/pkg database located at root (passed to init)"
    def __init__(self, root=None, virtual=None, categories=None,
        if settings is None:
            settings = portage.settings
        self.root = settings['ROOT']

        if root is not None and root != self.root:
            warnings.warn("The 'root' parameter of the " + \
                "portage.dbapi.vartree.vartree" + \
                " constructor is now unused. Use " + \
                "settings['ROOT'] instead.",
                DeprecationWarning, stacklevel=2)

        self.settings = settings
        self.dbapi = vardbapi(settings=settings, vartree=self)

    def getpath(self, mykey, filename=None):
        # Delegate path construction to the underlying vardbapi.
        return self.dbapi.getpath(mykey, filename=filename)

    def zap(self, mycpv):

    def inject(self, mycpv):

    def get_provide(self, mycpv):
        # Compute the expanded PROVIDE list for an installed package by
        # reducing conditionals against its recorded USE flags.
            mylines, myuse = self.dbapi.aux_get(mycpv, ["PROVIDE", "USE"])
                myuse = myuse.split()
                mylines = use_reduce(mylines, uselist=myuse, flat=True)
                for myprovide in mylines:
                    mys = catpkgsplit(myprovide)
                        mys = myprovide.split("/")
                    myprovides += [mys[0] + "/" + mys[1]]
        except SystemExit as e:
        except Exception as e:
            # Report parse failures instead of propagating them.
            mydir = self.dbapi.getpath(mycpv)
            writemsg(_("\nParse Error reading PROVIDE and USE in '%s'\n") % mydir,
            writemsg(_("Possibly Invalid: '%s'\n") % str(mylines),
            writemsg(_("Exception: %s\n\n") % str(e), noiselevel=-1)

    def get_all_provides(self):
        # Map each provided virtual to the installed packages providing it.
        for node in self.getallcpv():
            for mykey in self.get_provide(node):
                if mykey in myprovides:
                    myprovides[mykey] += [node]
                    myprovides[mykey] = [node]

    def dep_bestmatch(self, mydep, use_cache=1):
        "compatibility method -- all matches, not just visible ones"
        #mymatch=best(match(dep_expand(mydep,self.dbapi),self.dbapi))
        mymatch = best(self.dbapi.match(
            dep_expand(mydep, mydb=self.dbapi, settings=self.settings),
            use_cache=use_cache))

    def dep_match(self, mydep, use_cache=1):
        "compatibility method -- we want to see all matches, not just visible ones"
        #mymatch = match(mydep,self.dbapi)
        mymatch = self.dbapi.match(mydep, use_cache=use_cache)

    def exists_specific(self, cpv):
        # Thin wrapper over vardbapi.cpv_exists.
        return self.dbapi.cpv_exists(cpv)

    def getallcpv(self):
        """temporary function, probably to be renamed --- Gets a list of all
        category/package-versions installed on the system."""
        return self.dbapi.cpv_all()

    def getallnodes(self):
        """new behavior: these are all *unmasked* nodes. There may or may not be available
        masked package for nodes in this nodes list."""
        return self.dbapi.cp_all()

    def getebuildpath(self, fullpackage):
        # Path of the installed copy of the package's ebuild.
        cat, package = catsplit(fullpackage)
        return self.getpath(fullpackage, filename=package+".ebuild")

    def getslot(self, mycatpkg):
        "Get a slot for a catpkg; assume it exists."
            return self.dbapi.aux_get(mycatpkg, ["SLOT"])[0]
class dblink(object):
    """
    This class provides an interface to the installed package database
    At present this is implemented as a text backend in /var/db/pkg.
    """

    # NOTE(review): presumably flags paths that require normalize_path()
    # before CONTENTS comparison — confirm against full source.
    _normalize_needed = re.compile(r'//|^[^/]|./$|(^|/)\.\.?(/|$)')

    # Parses a single CONTENTS line into dir/obj/sym named groups.
    _contents_re = re.compile(r'^(' + \
        r'(?P<dir>(dev|dir|fif) (.+))|' + \
        r'(?P<obj>(obj) (.+) (\S+) (\d+))|' + \
        r'(?P<sym>(sym) (.+) -> (.+) (\d+))' + \

    # When looping over files for merge/unmerge, temporarily yield to the
    # scheduler each time this many files are processed.
    _file_merge_yield_interval = 20
    def __init__(self, cat, pkg, myroot=None, settings=None, treetype=None,
        vartree=None, blockers=None, scheduler=None):
        """
        Creates a DBlink object for a given CPV.
        The given CPV may not be present in the database already.

        @param cat: Category
        @param pkg: Package (PV)
        @param myroot: ignored, settings['ROOT'] is used instead
        @type myroot: String (Path)
        @param settings: Typically portage.settings
        @type settings: portage.config
        @param treetype: one of ['porttree','bintree','vartree']
        @type treetype: String
        @param vartree: an instance of vartree corresponding to myroot.
        @type vartree: vartree
        """
        if settings is None:
            raise TypeError("settings argument is required")

        mysettings = settings
        myroot = settings['ROOT']
        self.mycpv = self.cat + "/" + self.pkg
        self.mysplit = list(catpkgsplit(self.mycpv)[1:])
        self.mysplit[0] = "%s/%s" % (self.cat, self.mysplit[0])
        self.treetype = treetype
        vartree = portage.db[myroot]["vartree"]
        self.vartree = vartree
        self._blockers = blockers
        self._scheduler = scheduler

        # WARNING: EROOT support is experimental and may be incomplete
        # for cases in which EPREFIX is non-empty.
        self._eroot = mysettings['EROOT']
        self.dbroot = normalize_path(os.path.join(self._eroot, VDB_PATH))
        # Canonical, temporary-merge, and active db directories.
        self.dbcatdir = self.dbroot+"/"+cat
        self.dbpkgdir = self.dbcatdir+"/"+pkg
        self.dbtmpdir = self.dbcatdir+"/-MERGING-"+pkg
        self.dbdir = self.dbpkgdir
        self._lock_vdb = None
        self.settings = mysettings
        self._verbose = self.settings.get("PORTAGE_VERBOSE") == "1"
        self._installed_instance = None
        # Lazily-populated CONTENTS caches.
        self.contentscache = None
        self._contents_inodes = None
        self._contents_basenames = None
        self._linkmap_broken = False
        self._md5_merge_map = {}
        # Identity used for hashing and equality: (root, cpv).
        self._hash_key = (self.myroot, self.mycpv)
        self._protect_obj = None
		# Tail of __hash__ (its def line is outside the visible chunk): the
		# identity of a dblink is its (root, cpv) pair, mirroring __eq__.
		return hash(self._hash_key)
1269 def __eq__(self, other):
1270 return isinstance(other, dblink) and \
1271 self._hash_key == other._hash_key
1273 def _get_protect_obj(self):
1275 if self._protect_obj is None:
1276 self._protect_obj = ConfigProtect(self._eroot,
1277 portage.util.shlex_split(
1278 self.settings.get("CONFIG_PROTECT", "")),
1279 portage.util.shlex_split(
1280 self.settings.get("CONFIG_PROTECT_MASK", "")))
1282 return self._protect_obj
1284 def isprotected(self, obj):
1285 return self._get_protect_obj().isprotected(obj)
1287 def updateprotect(self):
1288 self._get_protect_obj().updateprotect()
		# Fragment of lockdb() — its def line and the "already locked" guard
		# condition are outside the visible chunk.
		raise AssertionError("Lock already held.")
		# At least the parent needs to exist for the lock file.
		ensure_dirs(self.dbroot)
		if self._scheduler is None:
			# Synchronous path: take a plain directory lock on the vdb.
			self._lock_vdb = lockdir(self.dbroot)
			# NOTE(review): the "else:" header and the start/wait calls on
			# async_lock are not visible in this chunk.
			async_lock = AsynchronousLock(path=self.dbroot,
				scheduler=self._scheduler)
			self._lock_vdb = async_lock.lock_obj
		# Fragment of unlockdb() (def line not visible): release and clear
		# the vdb lock taken by lockdb().
		unlockdir(self._lock_vdb)
		self._lock_vdb = None
	# Fragment of getpath() — the def line and return statement are outside
	# the visible chunk; only the docstring remains here.
	"return path to location of db information (for >>> informational display)"

	# Fragment of exists() — def line outside the visible chunk.
	"does the db entry exist? boolean."
	return os.path.exists(self.dbdir)
		# Fragment of delete() — the def line, docstring delimiters, the
		# early-return for a missing dbdir, and the try/except wrapping the
		# rmdir call are all outside the visible chunk.
		Remove this entry from the database
		if not os.path.exists(self.dbdir):
		# Check validity of self.dbdir before attempting to remove it.
		if not self.dbdir.startswith(self.dbroot):
			writemsg(_("portage.dblink.delete(): invalid dbdir: %s\n") % \
				self.dbdir, noiselevel=-1)
		shutil.rmtree(self.dbdir)
		# If empty, remove parent category directory.
		os.rmdir(os.path.dirname(self.dbdir))
		# Drop this instance from the in-memory vardbapi caches as well.
		self.vartree.dbapi._remove(self)
	def clearcontents(self):
		"""
		For a given db entry (self), erase the CONTENTS values.
		"""
		# NOTE(review): the full source locks the db around this unlink;
		# the lock/unlock lines are not visible in this chunk.
		if os.path.exists(self.dbdir+"/CONTENTS"):
			os.unlink(self.dbdir+"/CONTENTS")
1345 def _clear_contents_cache(self):
1346 self.contentscache = None
1347 self._contents_inodes = None
1348 self._contents_basenames = None
	def getcontents(self):
		"""
		Get the installed files of a given package (aka what that package installed)
		"""
		contents_file = os.path.join(self.dbdir, "CONTENTS")
		if self.contentscache is not None:
			return self.contentscache
		# NOTE(review): the "pkgfiles = {}" initialization and the "try:"
		# opening this read are outside the visible chunk.
		myc = codecs.open(_unicode_encode(contents_file,
			encoding=_encodings['fs'], errors='strict'),
			mode='r', encoding=_encodings['repo.content'],
		except EnvironmentError as e:
			if e.errno != errno.ENOENT:
			# A missing CONTENTS file is treated as an empty contents map.
			self.contentscache = pkgfiles
		mylines = myc.readlines()
		# Hoist regex and group-index lookups out of the per-line loop.
		normalize_needed = self._normalize_needed
		contents_re = self._contents_re
		obj_index = contents_re.groupindex['obj']
		dir_index = contents_re.groupindex['dir']
		sym_index = contents_re.groupindex['sym']
		# CONTENTS files already contain EPREFIX
		myroot = self.settings['ROOT']
		if myroot == os.path.sep:
		for pos, line in enumerate(mylines):
			if null_byte in line:
				# Null bytes are a common indication of corruption.
				errors.append((pos + 1, _("Null byte found in CONTENTS entry")))
			line = line.rstrip("\n")
			m = contents_re.match(line)
				errors.append((pos + 1, _("Unrecognized CONTENTS entry")))
			if m.group(obj_index) is not None:
				#format: type, mtime, md5sum
				data = (m.group(base+1), m.group(base+4), m.group(base+3))
			elif m.group(dir_index) is not None:
				data = (m.group(base+1),)
			elif m.group(sym_index) is not None:
				#format: type, mtime, dest
				data = (m.group(base+1), m.group(base+4), m.group(base+3))
				# This won't happen as long the regular expression
				# is written to only match valid entries.
				raise AssertionError(_("required group not found " + \
					"in CONTENTS entry: '%s'") % line)
			path = m.group(base+2)
			if normalize_needed.search(path) is not None:
				path = normalize_path(path)
				if not path.startswith(os.path.sep):
					path = os.path.sep + path
			# Re-anchor the recorded path under the configured ROOT.
			if myroot is not None:
				path = os.path.join(myroot, path.lstrip(os.path.sep))
			pkgfiles[path] = data
		# Report (but do not fail on) malformed lines collected above.
		writemsg(_("!!! Parse error in '%s'\n") % contents_file, noiselevel=-1)
		for pos, e in errors:
			writemsg(_("!!! line %d: %s\n") % (pos, e), noiselevel=-1)
		self.contentscache = pkgfiles
	def unmerge(self, pkgfiles=None, trimworld=None, cleanup=True,
		ldpath_mtimes=None, others_in_slot=None):
		"""
		Unmerges a given package (CPV)

		@param pkgfiles: files to unmerge (generally self.getcontents() )
		@type pkgfiles: Dictionary
		@param trimworld: Unused
		@type trimworld: Boolean
		@param cleanup: cleanup to pass to doebuild (see doebuild)
		@type cleanup: Boolean
		@param ldpath_mtimes: mtimes to pass to env_update (see env_update)
		@type ldpath_mtimes: Dictionary
		@param others_in_slot: all dblink instances in this slot, excluding self
		@type others_in_slot: list

		1. os.EX_OK if everything went well.
		2. return code of the failed phase (for prerm, postrm, cleanrm)

		The caller must ensure that lockdb() and unlockdb() are called
		before and after this method.
		"""
		if trimworld is not None:
			warnings.warn("The trimworld parameter of the " + \
				"portage.dbapi.vartree.dblink.unmerge()" + \
				" method is now unused.",
				DeprecationWarning, stacklevel=2)

		self.vartree.dbapi._bump_mtime(self.mycpv)
		showMessage = self._display_merge
		# Invalidate the cached category list; this unmerge may empty one.
		if self.vartree.dbapi._categories is not None:
			self.vartree.dbapi._categories = None
		# When others_in_slot is supplied, the security check has already been
		# done for this slot, so it shouldn't be repeated until the next
		# replacement or unmerge operation.
		if others_in_slot is None:
			slot = self.vartree.dbapi.aux_get(self.mycpv, ["SLOT"])[0]
			slot_matches = self.vartree.dbapi.match(
				"%s:%s" % (portage.cpv_getkey(self.mycpv), slot))
			for cur_cpv in slot_matches:
				if cur_cpv == self.mycpv:
				others_in_slot.append(dblink(self.cat, catsplit(cur_cpv)[1],
					settings=self.settings, vartree=self.vartree,
					treetype="vartree"))
		retval = self._security_check([self] + others_in_slot)

		contents = self.getcontents()
		# Now, don't assume that the name of the ebuild is the same as the
		# name of the dir; the package may have been moved.
		ebuild_phase = "prerm"
		mystuff = os.listdir(self.dbdir)
			if x.endswith(".ebuild"):
				myebuildpath = os.path.join(self.dbdir, self.pkg + ".ebuild")
				if x[:-7] != self.pkg:
					# Clean up after vardbapi.move_ent() breakage in
					# portage versions before 2.1.2
					os.rename(os.path.join(self.dbdir, x), myebuildpath)
					write_atomic(os.path.join(self.dbdir, "PF"), self.pkg+"\n")

		self.settings.setcpv(self.mycpv, mydb=self.vartree.dbapi)
			doebuild_environment(myebuildpath, "prerm",
				settings=self.settings, db=self.vartree.dbapi)
		except UnsupportedAPIException as e:
			# Sometimes this happens due to corruption of the EAPI file.
			writemsg(_("!!! FAILED prerm: %s\n") % \
				os.path.join(self.dbdir, "EAPI"), noiselevel=-1)
			writemsg("%s\n" % str(e), noiselevel=-1)

		builddir_lock = None
		scheduler = self._scheduler
			builddir_lock = EbuildBuildDir(
				scheduler=(scheduler or PollScheduler().sched_iface),
				settings=self.settings)
			builddir_lock.lock()
			prepare_build_dirs(settings=self.settings, cleanup=True)
			log_path = self.settings.get("PORTAGE_LOG_FILE")
				if scheduler is None:
					retval = _spawn_phase('prerm', self.settings)
					retval = scheduler.dblinkEbuildPhase(
						self, self.vartree.dbapi, myebuildpath, ebuild_phase)
				# XXX: Decide how to handle failures here.
				if retval != os.EX_OK:
					writemsg(_("!!! FAILED prerm: %s\n") % retval, noiselevel=-1)

			self._unmerge_pkgfiles(pkgfiles, others_in_slot)
			self._clear_contents_cache()

			# Remove the registration of preserved libs for this pkg instance
			plib_registry = self.vartree.dbapi._plib_registry
			if plib_registry is None:
				# preserve-libs is entirely disabled
				plib_registry.unregister(self.mycpv, self.settings["SLOT"],
					self.vartree.dbapi.cpv_counter(self.mycpv))

				ebuild_phase = "postrm"
				if scheduler is None:
					retval = _spawn_phase(ebuild_phase, self.settings)
					retval = scheduler.dblinkEbuildPhase(
						self, self.vartree.dbapi, myebuildpath, ebuild_phase)

				# XXX: Decide how to handle failures here.
				if retval != os.EX_OK:
					writemsg(_("!!! FAILED postrm: %s\n") % retval, noiselevel=-1)

			# Skip this if another package in the same slot has just been
			# merged on top of this package, since the other package has
			# already called LinkageMap.rebuild() and passed it's NEEDED file
			# in as an argument.
			if not others_in_slot:
				self._linkmap_rebuild(exclude_pkgs=(self.mycpv,))

			# remove preserved libraries that don't have any consumers left
			cpv_lib_map = self._find_unused_preserved_libs()
				self._remove_preserved_libs(cpv_lib_map)
				for cpv, removed in cpv_lib_map.items():
					if not self.vartree.dbapi.cpv_exists(cpv):
						for dblnk in others_in_slot:
							if dblnk.mycpv == cpv:
								# This one just got merged so it doesn't
								# register with cpv_exists() yet.
								self.vartree.dbapi.removeFromContents(
						self.vartree.dbapi.removeFromContents(cpv, removed)

			# Prune any preserved libs that may have
			# been unmerged with this package.
			if plib_registry is None:
				# preserve-libs is entirely disabled
				plib_registry.pruneNonExisting()

				self.vartree.dbapi._bump_mtime(self.mycpv)
				# Build a detailed operator-facing report for a failed phase.
				if retval != os.EX_OK:
					msg = _("The '%(ebuild_phase)s' "
						"phase of the '%(cpv)s' package "
						"has failed with exit value %(retval)s.") % \
						{"ebuild_phase":ebuild_phase, "cpv":self.mycpv,
					from textwrap import wrap
					msg_lines.extend(wrap(msg, 72))
					msg_lines.append("")

					ebuild_name = os.path.basename(myebuildpath)
					ebuild_dir = os.path.dirname(myebuildpath)
					msg = _("The problem occurred while executing "
						"the ebuild file named '%(ebuild_name)s' "
						"located in the '%(ebuild_dir)s' directory. "
						"If necessary, manually remove "
						"the environment.bz2 file and/or the "
						"ebuild file located in that directory.") % \
						{"ebuild_name":ebuild_name, "ebuild_dir":ebuild_dir}
					msg_lines.extend(wrap(msg, 72))
					msg_lines.append("")

						"of the environment.bz2 file is "
						"preferred since it may allow the "
						"removal phases to execute successfully. "
						"The ebuild will be "
						"sourced and the eclasses "
						"from the current portage tree will be used "
						"when necessary. Removal of "
						"the ebuild file will cause the "
						"pkg_prerm() and pkg_postrm() removal "
						"phases to be skipped entirely.")
					msg_lines.extend(wrap(msg, 72))

					self._eerror(ebuild_phase, msg_lines)

			# process logs created during pre/postrm
			elog_process(self.mycpv, self.settings)

			if retval == os.EX_OK:
				# myebuildpath might be None, so ensure
				# it has a sane value for the clean phase,
				# even though it won't really be sourced.
				myebuildpath = os.path.join(self.dbdir,
					self.pkg + ".ebuild")
				doebuild_environment(myebuildpath, "cleanrm",
					settings=self.settings, db=self.vartree.dbapi)
				if scheduler is None:
					_spawn_phase("cleanrm", self.settings)
					scheduler.dblinkEbuildPhase(
						self, self.vartree.dbapi,
						myebuildpath, "cleanrm")

			if builddir_lock is not None:
				builddir_lock.unlock()

		if log_path is not None:
			# Discard empty unmerge logs unless the user keeps them.
			if not failures and 'unmerge-logs' not in self.settings.features:
				st = os.stat(log_path)

		if log_path is not None and os.path.exists(log_path):
			# Restore this since it gets lost somewhere above and it
			# needs to be set for _display_merge() to be able to log.
			# Note that the log isn't necessarily supposed to exist
			# since if PORT_LOGDIR is unset then it's a temp file
			# so it gets cleaned above.
			self.settings["PORTAGE_LOG_FILE"] = log_path
			self.settings.pop("PORTAGE_LOG_FILE", None)

		env_update(target_root=self.settings['ROOT'],
			prev_mtimes=ldpath_mtimes,
			contents=contents, env=self.settings.environ(),
			writemsg_level=self._display_merge)
	def _display_merge(self, msg, level=0, noiselevel=0):
		# Suppress sub-warning output unless PORTAGE_VERBOSE is enabled or
		# the caller forces it with a negative noiselevel.
		# NOTE(review): the "return" under this guard and the "else:" before
		# the final writemsg_level call are outside the visible chunk.
		if not self._verbose and noiselevel >= 0 and level < logging.WARN:
		if self._scheduler is not None:
			# Route output through the scheduler so it can log/interleave it.
			self._scheduler.dblinkDisplayMerge(self, msg,
				level=level, noiselevel=noiselevel)
			writemsg_level(msg, level=level, noiselevel=noiselevel)
	def _unmerge_pkgfiles(self, pkgfiles, others_in_slot):
		"""
		Unmerges the contents of a package from the liveFS
		Removes the VDB entry for self

		@param pkgfiles: typically self.getcontents()
		@type pkgfiles: Dictionary { filename: [ 'type', '?', 'md5sum' ] }
		@param others_in_slot: all dblink instances in this slot, excluding self
		@type others_in_slot: list
		"""
		# Local aliases cut attribute lookups inside the big loop below.
		perf_md5 = perform_md5
		showMessage = self._display_merge
		scheduler = self._scheduler

			showMessage(_("No package files given... Grabbing a set.\n"))
			pkgfiles = self.getcontents()

		if others_in_slot is None:
			slot = self.vartree.dbapi.aux_get(self.mycpv, ["SLOT"])[0]
			slot_matches = self.vartree.dbapi.match(
				"%s:%s" % (portage.cpv_getkey(self.mycpv), slot))
			for cur_cpv in slot_matches:
				if cur_cpv == self.mycpv:
				others_in_slot.append(dblink(self.cat, catsplit(cur_cpv)[1],
					settings=self.settings,
					vartree=self.vartree, treetype="vartree"))

		dest_root = self._eroot
		dest_root_len = len(dest_root) - 1

		# Persistent record of protected config files already handled.
		conf_mem_file = os.path.join(dest_root, CONFIG_MEMORY_FILE)
		cfgfiledict = grabdict(conf_mem_file)

		unmerge_orphans = "unmerge-orphans" in self.settings.features
		calc_prelink = "prelink-checksums" in self.settings.features

			self.updateprotect()
			mykeys = list(pkgfiles)
			#process symlinks second-to-last, directories last.

			# Errors from these sets are expected/benign during unlink/rmdir
			# and are silently ignored below.
			ignored_unlink_errnos = (
				errno.EBUSY, errno.ENOENT,
				errno.ENOTDIR, errno.EISDIR)
			ignored_rmdir_errnos = (
				errno.EEXIST, errno.ENOTEMPTY,
				errno.EBUSY, errno.ENOENT,
				errno.ENOTDIR, errno.EISDIR)
			modprotect = os.path.join(self._eroot, "lib/modules/")

			def unlink(file_name, lstatobj):
				# Remove a single filesystem entry, coping with BSD file
				# flags and neutering suid/sgid hardlinks first.
					if lstatobj.st_flags != 0:
						bsd_chflags.lchflags(file_name, 0)
					parent_name = os.path.dirname(file_name)
					# Use normal stat/chflags for the parent since we want to
					# follow any symlinks to the real parent directory.
					pflags = os.stat(parent_name).st_flags
						bsd_chflags.chflags(parent_name, 0)
					if not stat.S_ISLNK(lstatobj.st_mode):
						# Remove permissions to ensure that any hardlinks to
						# suid/sgid files are rendered harmless.
						os.chmod(file_name, 0)
					os.unlink(file_name)
				except OSError as ose:
					# If the chmod or unlink fails, you are in trouble.
					# With Prefix this can be because the file is owned
					# by someone else (a screwup by root?), on a normal
					# system maybe filesystem corruption. In any case,
					# if we backtrace and die here, we leave the system
					# in a totally undefined state, hence we just bleed
					# like hell and continue to hopefully finish all our
					# administrative and pkg_postinst stuff.
					self._eerror("postrm",
						["Could not chmod or unlink '%s': %s" % \
					if bsd_chflags and pflags != 0:
						# Restore the parent flags we saved before unlinking
						bsd_chflags.chflags(parent_name, pflags)

			def show_unmerge(zing, desc, file_type, file_name):
				# One line of "<<< / --- / !!!" unmerge progress output.
				showMessage("%s %s %s %s\n" % \
					(zing, desc.ljust(8), file_type, file_name))

			# Localized status labels used by show_unmerge().
			unmerge_desc["cfgpro"] = _("cfgpro")
			unmerge_desc["replaced"] = _("replaced")
			unmerge_desc["!dir"] = _("!dir")
			unmerge_desc["!empty"] = _("!empty")
			unmerge_desc["!fif"] = _("!fif")
			unmerge_desc["!found"] = _("!found")
			unmerge_desc["!md5"] = _("!md5")
			unmerge_desc["!mtime"] = _("!mtime")
			unmerge_desc["!obj"] = _("!obj")
			unmerge_desc["!sym"] = _("!sym")

			real_root = self.settings['ROOT']
			real_root_len = len(real_root) - 1

			for i, objkey in enumerate(mykeys):
				# Periodically yield so a scheduler can interleave work.
				if scheduler is not None and \
					0 == i % self._file_merge_yield_interval:
					scheduler.scheduleYield()

				obj = normalize_path(objkey)
					_unicode_encode(obj,
						encoding=_encodings['merge'], errors='strict')
				except UnicodeEncodeError:
					# The package appears to have been merged with a
					# different value of sys.getfilesystemencoding(),
					# so fall back to utf_8 if appropriate.
						_unicode_encode(obj,
							encoding=_encodings['fs'], errors='strict')
					except UnicodeEncodeError:
						perf_md5 = portage.checksum.perform_md5

				file_data = pkgfiles[objkey]
				file_type = file_data[0]
					statobj = os.stat(obj)
					lstatobj = os.lstat(obj)
				except (OSError, AttributeError):
				islink = lstatobj is not None and stat.S_ISLNK(lstatobj.st_mode)
				if lstatobj is None:
					show_unmerge("---", unmerge_desc["!found"], file_type, obj)
				# don't use EROOT, CONTENTS entries already contain EPREFIX
				if obj.startswith(real_root):
					relative_path = obj[real_root_len:]
					for dblnk in others_in_slot:
						if dblnk.isowner(relative_path):
					# A new instance of this package claims the file, so
						show_unmerge("---", unmerge_desc["replaced"], file_type, obj)
					elif relative_path in cfgfiledict:
						stale_confmem.append(relative_path)
				# next line includes a tweak to protect modules from being unmerged,
				# but we don't protect modules from being overwritten if they are
				# upgraded. We effectively only want one half of the config protection
				# functionality for /lib/modules. For portage-ng both capabilities
				# should be able to be independently specified.
				# TODO: For rebuilds, re-parent previous modules to the new
				# installed instance (so they are not orphans). For normal
				# uninstall (not rebuild/reinstall), remove the modules along
				# with all other files (leave no orphans).
				if obj.startswith(modprotect):
					show_unmerge("---", unmerge_desc["cfgpro"], file_type, obj)

				# Don't unlink symlinks to directories here since that can
				# remove /lib and /usr/lib symlinks.
				if unmerge_orphans and \
					lstatobj and not stat.S_ISDIR(lstatobj.st_mode) and \
					not (islink and statobj and stat.S_ISDIR(statobj.st_mode)) and \
					not self.isprotected(obj):
						unlink(obj, lstatobj)
					except EnvironmentError as e:
						if e.errno not in ignored_unlink_errnos:
					show_unmerge("<<<", "", file_type, obj)

				lmtime = str(lstatobj[stat.ST_MTIME])
				# mtime mismatch means the file changed after merge: keep it.
				if (pkgfiles[objkey][0] not in ("dir", "fif", "dev")) and (lmtime != pkgfiles[objkey][1]):
					show_unmerge("---", unmerge_desc["!mtime"], file_type, obj)

				if pkgfiles[objkey][0] == "dir":
					if lstatobj is None or not stat.S_ISDIR(lstatobj.st_mode):
						show_unmerge("---", unmerge_desc["!dir"], file_type, obj)
				elif pkgfiles[objkey][0] == "sym":
						show_unmerge("---", unmerge_desc["!sym"], file_type, obj)
					# Go ahead and unlink symlinks to directories here when
					# they're actually recorded as symlinks in the contents.
					# Normally, symlinks such as /lib -> lib64 are not recorded
					# as symlinks in the contents of a package. If a package
					# installs something into ${D}/lib/, it is recorded in the
					# contents as a directory even if it happens to correspond
					# to a symlink when it's merged to the live filesystem.
						unlink(obj, lstatobj)
						show_unmerge("<<<", "", file_type, obj)
					except (OSError, IOError) as e:
						if e.errno not in ignored_unlink_errnos:
						show_unmerge("!!!", "", file_type, obj)
				elif pkgfiles[objkey][0] == "obj":
					if statobj is None or not stat.S_ISREG(statobj.st_mode):
						show_unmerge("---", unmerge_desc["!obj"], file_type, obj)
						mymd5 = perf_md5(obj, calc_prelink=calc_prelink)
					except FileNotFound as e:
						# the file has disappeared between now and our stat call
						show_unmerge("---", unmerge_desc["!obj"], file_type, obj)

					# string.lower is needed because db entries used to be in upper-case. The
					# string.lower allows for backwards compatibility.
					if mymd5 != pkgfiles[objkey][2].lower():
						show_unmerge("---", unmerge_desc["!md5"], file_type, obj)
						unlink(obj, lstatobj)
					except (OSError, IOError) as e:
						if e.errno not in ignored_unlink_errnos:
					show_unmerge("<<<", "", file_type, obj)
				elif pkgfiles[objkey][0] == "fif":
					if not stat.S_ISFIFO(lstatobj[stat.ST_MODE]):
						show_unmerge("---", unmerge_desc["!fif"], file_type, obj)
					show_unmerge("---", "", file_type, obj)
				elif pkgfiles[objkey][0] == "dev":
					show_unmerge("---", "", file_type, obj)

					# Directory pass: clear BSD flags, then rmdir (the rmdir
					# call itself is outside the visible chunk).
					lstatobj = os.lstat(obj)
					if lstatobj.st_flags != 0:
						bsd_chflags.lchflags(obj, 0)
					parent_name = os.path.dirname(obj)
					# Use normal stat/chflags for the parent since we want to
					# follow any symlinks to the real parent directory.
					pflags = os.stat(parent_name).st_flags
						bsd_chflags.chflags(parent_name, 0)
					if bsd_chflags and pflags != 0:
						# Restore the parent flags we saved before unlinking
						bsd_chflags.chflags(parent_name, pflags)
					show_unmerge("<<<", "", "dir", obj)
				except EnvironmentError as e:
					if e.errno not in ignored_rmdir_errnos:
					if e.errno != errno.ENOENT:
						show_unmerge("---", unmerge_desc["!empty"], "dir", obj)

		# Remove stale entries from config memory.
			for filename in stale_confmem:
				del cfgfiledict[filename]
			writedict(cfgfiledict, conf_mem_file)

		#remove self from vartree database so that our own virtual gets zapped if we're the last node
		self.vartree.zap(self.mycpv)
2001 def isowner(self, filename, destroot=None):
2003 Check if a file belongs to this package. This may
2004 result in a stat call for the parent directory of
2005 every installed file, since the inode numbers are
2006 used to work around the problem of ambiguous paths
2007 caused by symlinked directories. The results of
2008 stat calls are cached to optimize multiple calls
2017 1. True if this package owns the file.
2018 2. False if this package does not own the file.
2021 if destroot is not None and destroot != self._eroot:
2022 warnings.warn("The second parameter of the " + \
2023 "portage.dbapi.vartree.dblink.isowner()" + \
2024 " is now unused. Instead " + \
2025 "self.settings['EROOT'] will be used.",
2026 DeprecationWarning, stacklevel=2)
2028 return bool(self._match_contents(filename))
	def _match_contents(self, filename, destroot=None):
		"""
		The matching contents entry is returned, which is useful
		since the path may differ from the one given by the caller,
		@return: the contents entry corresponding to the given path, or False
			if the file is not owned by this package.
		"""
		filename = _unicode_decode(filename,
			encoding=_encodings['content'], errors='strict')

		if destroot is not None and destroot != self._eroot:
			warnings.warn("The second parameter of the " + \
				"portage.dbapi.vartree.dblink._match_contents()" + \
				" is now unused. Instead " + \
				"self.settings['EROOT'] will be used.",
				DeprecationWarning, stacklevel=2)

		# don't use EROOT here, image already contains EPREFIX
		destroot = self.settings['ROOT']

		# The given filename argument might have a different encoding than the
		# the filenames contained in the contents, so use separate wrapped os
		# modules for each. The basename is more likely to contain non-ascii
		# characters than the directory path, so use os_filename_arg for all
		# operations involving the basename of the filename arg.
		os_filename_arg = _os_merge
			_unicode_encode(filename,
				encoding=_encodings['merge'], errors='strict')
		except UnicodeEncodeError:
			# The package appears to have been merged with a
			# different value of sys.getfilesystemencoding(),
			# so fall back to utf_8 if appropriate.
				_unicode_encode(filename,
					encoding=_encodings['fs'], errors='strict')
			except UnicodeEncodeError:
				os_filename_arg = portage.os

		# Anchor the argument under ROOT to compare against CONTENTS paths.
		destfile = normalize_path(
			os_filename_arg.path.join(destroot,
				filename.lstrip(os_filename_arg.path.sep)))

		# Fast path: exact CONTENTS key match (return line not visible here).
		pkgfiles = self.getcontents()
		if pkgfiles and destfile in pkgfiles:

		basename = os_filename_arg.path.basename(destfile)
		if self._contents_basenames is None:
						encoding=_encodings['merge'],
				except UnicodeEncodeError:
					# The package appears to have been merged with a
					# different value of sys.getfilesystemencoding(),
					# so fall back to utf_8 if appropriate.
							encoding=_encodings['fs'],
					except UnicodeEncodeError:
			# Lazily build the basename set used for the shortcut below.
			self._contents_basenames = set(
				os.path.basename(x) for x in pkgfiles)
		if basename not in self._contents_basenames:
			# This is a shortcut that, in most cases, allows us to
			# eliminate this package as an owner without the need
			# to examine inode numbers of parent directories.

		# Use stat rather than lstat since we want to follow
		# any symlinks to the real parent directory.
		parent_path = os_filename_arg.path.dirname(destfile)
			parent_stat = os_filename_arg.stat(parent_path)
		except EnvironmentError as e:
			if e.errno != errno.ENOENT:
		if self._contents_inodes is None:
						encoding=_encodings['merge'],
				except UnicodeEncodeError:
					# The package appears to have been merged with a
					# different value of sys.getfilesystemencoding(),
					# so fall back to utf_8 if appropriate.
							encoding=_encodings['fs'],
					except UnicodeEncodeError:
			# Lazily build the (st_dev, st_ino) -> [parent paths] map that
			# disambiguates symlinked directories.
			self._contents_inodes = {}
			parent_paths = set()
				p_path = os.path.dirname(x)
				if p_path in parent_paths:
				parent_paths.add(p_path)
					inode_key = (s.st_dev, s.st_ino)
					# Use lists of paths in case multiple
					# paths reference the same inode.
					p_path_list = self._contents_inodes.get(inode_key)
					if p_path_list is None:
						self._contents_inodes[inode_key] = p_path_list
					if p_path not in p_path_list:
						p_path_list.append(p_path)
		p_path_list = self._contents_inodes.get(
			(parent_stat.st_dev, parent_stat.st_ino))
			for p_path in p_path_list:
				x = os_filename_arg.path.join(p_path, basename)
	def _linkmap_rebuild(self, **kwargs):
		"""
		Rebuild the self._linkmap if it's not broken due to missing
		scanelf binary. Also, return early if preserve-libs is disabled
		and the preserve-libs registry is empty.
		"""
		if self._linkmap_broken or \
			self.vartree.dbapi._linkmap is None or \
			self.vartree.dbapi._plib_registry is None or \
			("preserve-libs" not in self.settings.features and \
			not self.vartree.dbapi._plib_registry.hasEntries()):
			# NOTE(review): the "return" under this guard and the "try:"
			# preceding the rebuild call are outside the visible chunk.
			self.vartree.dbapi._linkmap.rebuild(**kwargs)
		except CommandNotFound as e:
			# scanelf is unavailable: permanently disable preserve-libs for
			# this dblink rather than failing the merge.
			self._linkmap_broken = True
			self._display_merge(_("!!! Disabling preserve-libs " \
				"due to error: Command Not Found: %s\n") % (e,),
				level=logging.ERROR, noiselevel=-1)
	def _find_libs_to_preserve(self):
		"""
		Get set of relative paths for libraries to be preserved. The file
		paths are selected from self._installed_instance.getcontents().
		"""
		# Bail out (return line not visible) when preserve-libs cannot or
		# should not operate.
		if self._linkmap_broken or \
			self.vartree.dbapi._linkmap is None or \
			self.vartree.dbapi._plib_registry is None or \
			self._installed_instance is None or \
			"preserve-libs" not in self.settings.features:

		linkmap = self.vartree.dbapi._linkmap
		installed_instance = self._installed_instance
		old_contents = installed_instance.getcontents()
		root_len = len(root) - 1
		lib_graph = digraph()

		def path_to_node(path):
			# Map a path to its graph node, merging alternate paths that
			# resolve to the same node into node.alt_paths.
			node = path_node_map.get(path)
				node = LinkageMap._LibGraphNode(path, root)
				alt_path_node = lib_graph.get(node)
				if alt_path_node is not None:
					node = alt_path_node
				node.alt_paths.add(path)
				path_node_map[path] = node

		provider_nodes = set()
		# Create provider nodes and add them to the graph.
		for f_abs in old_contents:
				_unicode_encode(f_abs,
					encoding=_encodings['merge'], errors='strict')
			except UnicodeEncodeError:
				# The package appears to have been merged with a
				# different value of sys.getfilesystemencoding(),
				# so fall back to utf_8 if appropriate.
					_unicode_encode(f_abs,
						encoding=_encodings['fs'], errors='strict')
				except UnicodeEncodeError:

			f = f_abs[root_len:]
			consumers = linkmap.findConsumers(f)
			provider_node = path_to_node(f)
			lib_graph.add(provider_node, None)
			provider_nodes.add(provider_node)
			consumer_map[provider_node] = consumers

		# Create consumer nodes and add them to the graph.
		# Note that consumers can also be providers.
		for provider_node, consumers in consumer_map.items():
				consumer_node = path_to_node(c)
				if installed_instance.isowner(c) and \
					consumer_node not in provider_nodes:
					# This is not a provider, so it will be uninstalled.
				lib_graph.add(provider_node, consumer_node)

		# Locate nodes which should be preserved. They consist of all
		# providers that are reachable from consumers that are not
		# providers themselves.
		preserve_nodes = set()
		for consumer_node in lib_graph.root_nodes():
			if consumer_node in provider_nodes:
			# Preserve all providers that are reachable from this consumer.
			node_stack = lib_graph.child_nodes(consumer_node)
				provider_node = node_stack.pop()
				if provider_node in preserve_nodes:
				preserve_nodes.add(provider_node)
				node_stack.extend(lib_graph.child_nodes(provider_node))

		preserve_paths = set()
		for preserve_node in preserve_nodes:
			# Make sure that at least one of the paths is not a symlink.
			# This prevents symlinks from being erroneously preserved by
			# themselves when the old instance installed symlinks that
			# the new instance does not install.
			for f in preserve_node.alt_paths:
				f_abs = os.path.join(root, f.lstrip(os.sep))
					if stat.S_ISREG(os.lstat(f_abs).st_mode):
			preserve_paths.update(preserve_node.alt_paths)

		return preserve_paths
	def _add_preserve_libs_to_contents(self, preserve_paths):
		"""
		Preserve libs returned from _find_libs_to_preserve().
		"""
		# Nothing to do (return line not visible in this chunk).
		if not preserve_paths:

		showMessage = self._display_merge

		# Copy contents entries from the old package to the new one.
		new_contents = self.getcontents().copy()
		old_contents = self._installed_instance.getcontents()
		for f in sorted(preserve_paths):
			f = _unicode_decode(f,
				encoding=_encodings['content'], errors='strict')
			f_abs = os.path.join(root, f.lstrip(os.sep))
			contents_entry = old_contents.get(f_abs)
			if contents_entry is None:
				# This will probably never happen, but it might if one of the
				# paths returned from findConsumers() refers to one of the libs
				# that should be preserved yet the path is not listed in the
				# contents. Such a path might belong to some other package, so
				# it shouldn't be preserved here.
				showMessage(_("!!! File '%s' will not be preserved "
					"due to missing contents entry\n") % (f_abs,),
					level=logging.ERROR, noiselevel=-1)
				preserve_paths.remove(f)
			new_contents[f_abs] = contents_entry
			obj_type = contents_entry[0]
			showMessage(_(">>> needed %s %s\n") % (obj_type, f_abs),
			# Add parent directories to contents if necessary.
			parent_dir = os.path.dirname(f_abs)
			while len(parent_dir) > len(root):
				new_contents[parent_dir] = ["dir"]
				parent_dir = os.path.dirname(parent_dir)
				if prev == parent_dir:
		# Write the merged CONTENTS into the temporary merge dir, then drop
		# any cached views of the old contents.
		outfile = atomic_ofstream(os.path.join(self.dbtmpdir, "CONTENTS"))
		write_contents(new_contents, root, outfile)
		self._clear_contents_cache()
2364 def _find_unused_preserved_libs(self):
2366 Find preserved libraries that don't have any consumers left.
2369 if self._linkmap_broken or \
2370 self.vartree.dbapi._linkmap is None or \
2371 self.vartree.dbapi._plib_registry is None or \
2372 not self.vartree.dbapi._plib_registry.hasEntries():
2375 # Since preserved libraries can be consumers of other preserved
2376 # libraries, use a graph to track consumer relationships.
2377 plib_dict = self.vartree.dbapi._plib_registry.getPreservedLibs()
2378 lib_graph = digraph()
2379 preserved_nodes = set()
2380 preserved_paths = set()
2385 def path_to_node(path):
2386 node = path_node_map.get(path)
2388 node = LinkageMap._LibGraphNode(path, root)
2389 alt_path_node = lib_graph.get(node)
2390 if alt_path_node is not None:
2391 node = alt_path_node
2392 node.alt_paths.add(path)
2393 path_node_map[path] = node
2396 linkmap = self.vartree.dbapi._linkmap
2397 for cpv, plibs in plib_dict.items():
2399 path_cpv_map[f] = cpv
2400 preserved_node = path_to_node(f)
2401 if not preserved_node.file_exists():
2403 lib_graph.add(preserved_node, None)
2404 preserved_paths.add(f)
2405 preserved_nodes.add(preserved_node)
2406 for c in self.vartree.dbapi._linkmap.findConsumers(f):
2407 consumer_node = path_to_node(c)
2408 if not consumer_node.file_exists():
2410 # Note that consumers may also be providers.
2411 lib_graph.add(preserved_node, consumer_node)
2413 # Eliminate consumers having providers with the same soname as an
2414 # installed library that is not preserved. This eliminates
2415 # libraries that are erroneously preserved due to a move from one
2416 # directory to another.
2418 for preserved_node in preserved_nodes:
2419 soname = linkmap.getSoname(preserved_node)
2420 for consumer_node in lib_graph.parent_nodes(preserved_node):
2421 if consumer_node in preserved_nodes:
2423 providers = provider_cache.get(consumer_node)
2424 if providers is None:
2425 providers = linkmap.findProviders(consumer_node)
2426 provider_cache[consumer_node] = providers
2427 providers = providers.get(soname)
2428 if providers is None:
2430 for provider in providers:
2431 if provider in preserved_paths:
2433 provider_node = path_to_node(provider)
2434 if not provider_node.file_exists():
2436 if provider_node in preserved_nodes:
2438 # An alternative provider seems to be
2439 # installed, so drop this edge.
2440 lib_graph.remove_edge(preserved_node, consumer_node)
2444 while not lib_graph.empty():
2445 root_nodes = preserved_nodes.intersection(lib_graph.root_nodes())
2448 lib_graph.difference_update(root_nodes)
2450 for node in root_nodes:
2451 unlink_list.update(node.alt_paths)
2452 unlink_list = sorted(unlink_list)
2453 for obj in unlink_list:
2454 cpv = path_cpv_map.get(obj)
2456 # This means that a symlink is in the preserved libs
2457 # registry, but the actual lib it points to is not.
2458 self._display_merge(_("!!! symlink to lib is preserved, "
2459 "but not the lib itself:\n!!! '%s'\n") % (obj,),
2460 level=logging.ERROR, noiselevel=-1)
2462 removed = cpv_lib_map.get(cpv)
2465 cpv_lib_map[cpv] = removed
2470 def _remove_preserved_libs(self, cpv_lib_map):
2472 Remove files returned from _find_unused_preserved_libs().
2477 files_to_remove = set()
2478 for files in cpv_lib_map.values():
2479 files_to_remove.update(files)
2480 files_to_remove = sorted(files_to_remove)
2481 showMessage = self._display_merge
2485 for obj in files_to_remove:
2486 obj = os.path.join(root, obj.lstrip(os.sep))
2487 parent_dirs.add(os.path.dirname(obj))
2488 if os.path.islink(obj):
2494 except OSError as e:
2495 if e.errno != errno.ENOENT:
2499 showMessage(_("<<< !needed %s %s\n") % (obj_type, obj),
2502 # Remove empty parent directories if possible.
2504 x = parent_dirs.pop()
2511 x = os.path.dirname(x)
2515 self.vartree.dbapi._plib_registry.pruneNonExisting()
2517 def _collision_protect(self, srcroot, destroot, mypkglist, mycontents):
2521 collision_ignore = set([normalize_path(myignore) for myignore in \
2522 portage.util.shlex_split(
2523 self.settings.get("COLLISION_IGNORE", ""))])
2525 # For collisions with preserved libraries, the current package
2526 # will assume ownership and the libraries will be unregistered.
2527 if self.vartree.dbapi._plib_registry is None:
2528 # preserve-libs is entirely disabled
2533 plib_dict = self.vartree.dbapi._plib_registry.getPreservedLibs()
2536 for cpv, paths in plib_dict.items():
2537 plib_paths.update(paths)
2539 plib_cpv_map[f] = cpv
2540 plib_inodes = self._lstat_inode_map(plib_paths)
2542 plib_collisions = {}
2544 showMessage = self._display_merge
2545 scheduler = self._scheduler
2548 destroot = self.settings['ROOT']
2549 showMessage(_(" %s checking %d files for package collisions\n") % \
2550 (colorize("GOOD", "*"), len(mycontents)))
2551 for i, f in enumerate(mycontents):
2552 if i % 1000 == 0 and i != 0:
2553 showMessage(_("%d files checked ...\n") % i)
2555 if scheduler is not None and \
2556 0 == i % self._file_merge_yield_interval:
2557 scheduler.scheduleYield()
2559 dest_path = normalize_path(
2560 os.path.join(destroot, f.lstrip(os.path.sep)))
2562 dest_lstat = os.lstat(dest_path)
2563 except EnvironmentError as e:
2564 if e.errno == errno.ENOENT:
2567 elif e.errno == errno.ENOTDIR:
2569 # A non-directory is in a location where this package
2570 # expects to have a directory.
2572 parent_path = dest_path
2573 while len(parent_path) > len(destroot):
2574 parent_path = os.path.dirname(parent_path)
2576 dest_lstat = os.lstat(parent_path)
2578 except EnvironmentError as e:
2579 if e.errno != errno.ENOTDIR:
2583 raise AssertionError(
2584 "unable to find non-directory " + \
2585 "parent for '%s'" % dest_path)
2586 dest_path = parent_path
2587 f = os.path.sep + dest_path[len(destroot):]
2595 plibs = plib_inodes.get((dest_lstat.st_dev, dest_lstat.st_ino))
2598 cpv = plib_cpv_map[path]
2599 paths = plib_collisions.get(cpv)
2602 plib_collisions[cpv] = paths
2604 # The current package will assume ownership and the
2605 # libraries will be unregistered, so exclude this
2606 # path from the normal collisions.
2610 full_path = os.path.join(destroot, f.lstrip(os.path.sep))
2611 for ver in mypkglist:
2615 if not isowned and self.isprotected(full_path):
2619 if collision_ignore:
2620 if f in collision_ignore:
2623 for myignore in collision_ignore:
2624 if f.startswith(myignore + os.path.sep):
2628 collisions.append(f)
2629 return collisions, plib_collisions
2631 def _lstat_inode_map(self, path_iter):
2633 Use lstat to create a map of the form:
2634 {(st_dev, st_ino) : set([path1, path2, ...])}
2635 Multiple paths may reference the same inode due to hardlinks.
2636 All lstat() calls are relative to self.myroot.
2644 path = os.path.join(root, f.lstrip(os.sep))
2647 except OSError as e:
2648 if e.errno not in (errno.ENOENT, errno.ENOTDIR):
2652 key = (st.st_dev, st.st_ino)
2653 paths = inode_map.get(key)
2656 inode_map[key] = paths
2660 def _security_check(self, installed_instances):
2661 if not installed_instances:
2666 showMessage = self._display_merge
2667 scheduler = self._scheduler
2670 for dblnk in installed_instances:
2671 file_paths.update(dblnk.getcontents())
2674 for i, path in enumerate(file_paths):
2676 if scheduler is not None and \
2677 0 == i % self._file_merge_yield_interval:
2678 scheduler.scheduleYield()
2682 _unicode_encode(path,
2683 encoding=_encodings['merge'], errors='strict')
2684 except UnicodeEncodeError:
2685 # The package appears to have been merged with a
2686 # different value of sys.getfilesystemencoding(),
2687 # so fall back to utf_8 if appropriate.
2689 _unicode_encode(path,
2690 encoding=_encodings['fs'], errors='strict')
2691 except UnicodeEncodeError:
2698 except OSError as e:
2699 if e.errno not in (errno.ENOENT, errno.ENOTDIR):
2703 if not stat.S_ISREG(s.st_mode):
2705 path = os.path.realpath(path)
2706 if path in real_paths:
2708 real_paths.add(path)
2709 if s.st_nlink > 1 and \
2710 s.st_mode & (stat.S_ISUID | stat.S_ISGID):
2711 k = (s.st_dev, s.st_ino)
2712 inode_map.setdefault(k, []).append((path, s))
2713 suspicious_hardlinks = []
2714 for path_list in inode_map.values():
2715 path, s = path_list[0]
2716 if len(path_list) == s.st_nlink:
2717 # All hardlinks seem to be owned by this package.
2719 suspicious_hardlinks.append(path_list)
2720 if not suspicious_hardlinks:
2724 msg.append(_("suid/sgid file(s) "
2725 "with suspicious hardlink(s):"))
2727 for path_list in suspicious_hardlinks:
2728 for path, s in path_list:
2729 msg.append("\t%s" % path)
2731 msg.append(_("See the Gentoo Security Handbook "
2732 "guide for advice on how to proceed."))
2734 self._eerror("preinst", msg)
2738 def _eqawarn(self, phase, lines):
2739 from portage.elog.messages import eqawarn as _eqawarn
2740 if self._scheduler is None:
2742 _eqawarn(l, phase=phase, key=self.settings.mycpv)
2744 self._scheduler.dblinkElog(self,
2745 phase, _eqawarn, lines)
2747 def _eerror(self, phase, lines):
2748 from portage.elog.messages import eerror as _eerror
2749 if self._scheduler is None:
2751 _eerror(l, phase=phase, key=self.settings.mycpv)
2753 self._scheduler.dblinkElog(self,
2754 phase, _eerror, lines)
2756 def treewalk(self, srcroot, destroot, inforoot, myebuild, cleanup=0,
2757 mydbapi=None, prev_mtimes=None):
2760 This function does the following:
2762 calls self._preserve_libs if FEATURES=preserve-libs
2763 calls self._collision_protect if FEATURES=collision-protect
2764 calls doebuild(mydo=pkg_preinst)
2765 Merges the package to the livefs
2766 unmerges old version (if required)
2767 calls doebuild(mydo=pkg_postinst)
2771 @param srcroot: Typically this is ${D}
2772 @type srcroot: String (Path)
2773 @param destroot: ignored, self.settings['ROOT'] is used instead
2774 @type destroot: String (Path)
2775 @param inforoot: root of the vardb entry ?
2776 @type inforoot: String (Path)
2777 @param myebuild: path to the ebuild that we are processing
2778 @type myebuild: String (Path)
2779 @param mydbapi: dbapi which is handed to doebuild.
2780 @type mydbapi: portdbapi instance
2781 @param prev_mtimes: { Filename:mtime } mapping for env_update
2782 @type prev_mtimes: Dictionary
2788 secondhand is a list of symlinks that have been skipped due to their target
2789 not existing; we will merge these symlinks at a later time.
2794 srcroot = _unicode_decode(srcroot,
2795 encoding=_encodings['content'], errors='strict')
2796 destroot = self.settings['ROOT']
2797 inforoot = _unicode_decode(inforoot,
2798 encoding=_encodings['content'], errors='strict')
2799 myebuild = _unicode_decode(myebuild,
2800 encoding=_encodings['content'], errors='strict')
2802 showMessage = self._display_merge
2803 scheduler = self._scheduler
2805 srcroot = normalize_path(srcroot).rstrip(os.path.sep) + os.path.sep
2807 if not os.path.isdir(srcroot):
2808 showMessage(_("!!! Directory Not Found: D='%s'\n") % srcroot,
2809 level=logging.ERROR, noiselevel=-1)
2813 for var_name in ('CHOST', 'SLOT'):
2814 if var_name == 'CHOST' and self.cat == 'virtual':
2816 os.unlink(os.path.join(inforoot, var_name))
2822 val = codecs.open(_unicode_encode(
2823 os.path.join(inforoot, var_name),
2824 encoding=_encodings['fs'], errors='strict'),
2825 mode='r', encoding=_encodings['repo.content'],
2826 errors='replace').readline().strip()
2827 except EnvironmentError as e:
2828 if e.errno != errno.ENOENT:
2833 if var_name == 'SLOT':
2836 if not slot.strip():
2837 slot = self.settings.get(var_name, '')
2838 if not slot.strip():
2839 showMessage(_("!!! SLOT is undefined\n"),
2840 level=logging.ERROR, noiselevel=-1)
2842 write_atomic(os.path.join(inforoot, var_name), slot + '\n')
2844 if val != self.settings.get(var_name, ''):
2845 self._eqawarn('preinst',
2846 [_("QA Notice: Expected %(var_name)s='%(expected_value)s', got '%(actual_value)s'\n") % \
2847 {"var_name":var_name, "expected_value":self.settings.get(var_name, ''), "actual_value":val}])
2850 self._eerror("preinst", lines)
2852 if not os.path.exists(self.dbcatdir):
2853 ensure_dirs(self.dbcatdir)
2856 for v in self.vartree.dbapi.cp_list(self.mysplit[0]):
2857 otherversions.append(v.split("/")[1])
2859 cp = self.mysplit[0]
2860 slot_atom = "%s:%s" % (cp, slot)
2862 # filter any old-style virtual matches
2863 slot_matches = [cpv for cpv in self.vartree.dbapi.match(slot_atom) \
2864 if cpv_getkey(cpv) == cp]
2866 if self.mycpv not in slot_matches and \
2867 self.vartree.dbapi.cpv_exists(self.mycpv):
2868 # handle multislot or unapplied slotmove
2869 slot_matches.append(self.mycpv)
2872 from portage import config
2873 for cur_cpv in slot_matches:
2874 # Clone the config in case one of these has to be unmerged since
2875 # we need it to have private ${T} etc... for things like elog.
2876 others_in_slot.append(dblink(self.cat, catsplit(cur_cpv)[1],
2877 settings=config(clone=self.settings),
2878 vartree=self.vartree, treetype="vartree",
2879 scheduler=self._scheduler))
2881 retval = self._security_check(others_in_slot)
2885 self.settings["REPLACING_VERSIONS"] = " ".join(
2886 [portage.versions.cpv_getversion(other.mycpv) for other in others_in_slot] )
2887 self.settings.backup_changes("REPLACING_VERSIONS")
2890 # Used by self.isprotected().
2893 for dblnk in others_in_slot:
2894 cur_counter = self.vartree.dbapi.cpv_counter(dblnk.mycpv)
2895 if cur_counter > max_counter:
2896 max_counter = cur_counter
2898 self._installed_instance = max_dblnk
2900 # We check for unicode encoding issues after src_install. However,
2901 # the check must be repeated here for binary packages (it's
2902 # inexpensive since we call os.walk() here anyway).
2907 unicode_error = False
2911 paths_with_newlines = []
2912 srcroot_len = len(srcroot)
2915 for parent, dirs, files in os.walk(srcroot, onerror=onerror):
2917 parent = _unicode_decode(parent,
2918 encoding=_encodings['merge'], errors='strict')
2919 except UnicodeDecodeError:
2920 new_parent = _unicode_decode(parent,
2921 encoding=_encodings['merge'], errors='replace')
2922 new_parent = _unicode_encode(new_parent,
2923 encoding=_encodings['merge'], errors='backslashreplace')
2924 new_parent = _unicode_decode(new_parent,
2925 encoding=_encodings['merge'], errors='replace')
2926 os.rename(parent, new_parent)
2927 unicode_error = True
2928 unicode_errors.append(new_parent[srcroot_len:])
2933 fname = _unicode_decode(fname,
2934 encoding=_encodings['merge'], errors='strict')
2935 except UnicodeDecodeError:
2936 fpath = portage._os.path.join(
2937 parent.encode(_encodings['merge']), fname)
2938 new_fname = _unicode_decode(fname,
2939 encoding=_encodings['merge'], errors='replace')
2940 new_fname = _unicode_encode(new_fname,
2941 encoding=_encodings['merge'], errors='backslashreplace')
2942 new_fname = _unicode_decode(new_fname,
2943 encoding=_encodings['merge'], errors='replace')
2944 new_fpath = os.path.join(parent, new_fname)
2945 os.rename(fpath, new_fpath)
2946 unicode_error = True
2947 unicode_errors.append(new_fpath[srcroot_len:])
2951 fpath = os.path.join(parent, fname)
2953 relative_path = fpath[srcroot_len:]
2955 if "\n" in relative_path:
2956 paths_with_newlines.append(relative_path)
2958 file_mode = os.lstat(fpath).st_mode
2959 if stat.S_ISREG(file_mode):
2960 myfilelist.append(relative_path)
2961 elif stat.S_ISLNK(file_mode):
2962 # Note: os.walk puts symlinks to directories in the "dirs"
2963 # list and it does not traverse them since that could lead
2964 # to an infinite recursion loop.
2965 mylinklist.append(relative_path)
2970 if not unicode_error:
2974 eerror(portage._merge_unicode_error(unicode_errors))
2976 if paths_with_newlines:
2978 msg.append(_("This package installs one or more files containing a newline (\\n) character:"))
2980 paths_with_newlines.sort()
2981 for f in paths_with_newlines:
2982 msg.append("\t/%s" % (f.replace("\n", "\\n")))
2984 msg.append(_("package %s NOT merged") % self.mycpv)
2989 # If there are no files to merge, and an installed package in the same
2990 # slot has files, it probably means that something went wrong.
2991 if self.settings.get("PORTAGE_PACKAGE_EMPTY_ABORT") == "1" and \
2992 not myfilelist and not mylinklist and others_in_slot:
2993 installed_files = None
2994 for other_dblink in others_in_slot:
2995 installed_files = other_dblink.getcontents()
2996 if not installed_files:
2998 from textwrap import wrap
3002 "new_cpv":self.mycpv,
3003 "old_cpv":other_dblink.mycpv
3005 msg.extend(wrap(_("The '%(new_cpv)s' package will not install "
3006 "any files, but the currently installed '%(old_cpv)s'"
3007 " package has the following files: ") % d, wrap_width))
3009 msg.extend(sorted(installed_files))
3011 msg.append(_("package %s NOT merged") % self.mycpv)
3014 _("Manually run `emerge --unmerge =%s` if you "
3015 "really want to remove the above files. Set "
3016 "PORTAGE_PACKAGE_EMPTY_ABORT=\"0\" in "
3017 "/etc/make.conf if you do not want to "
3018 "abort in cases like this.") % other_dblink.mycpv,
3024 # check for package collisions
3026 if self._blockers is not None:
3027 # This is only supposed to be called when
3028 # the vdb is locked, like it is here.
3029 blockers = self._blockers()
3030 if blockers is None:
3032 collisions, plib_collisions = \
3033 self._collision_protect(srcroot, destroot,
3034 others_in_slot + blockers, myfilelist + mylinklist)
3036 # Make sure the ebuild environment is initialized and that ${T}/elog
3037 # exists for logging of collision-protect eerror messages.
3038 if myebuild is None:
3039 myebuild = os.path.join(inforoot, self.pkg + ".ebuild")
3040 doebuild_environment(myebuild, "preinst",
3041 settings=self.settings, db=mydbapi)
3042 prepare_build_dirs(settings=self.settings, cleanup=cleanup)
3045 collision_protect = "collision-protect" in self.settings.features
3046 protect_owned = "protect-owned" in self.settings.features
3047 msg = _("This package will overwrite one or more files that"
3048 " may belong to other packages (see list below).")
3049 if not (collision_protect or protect_owned):
3050 msg += _(" Add either \"collision-protect\" or"
3051 " \"protect-owned\" to FEATURES in"
3052 " make.conf if you would like the merge to abort"
3053 " in cases like this. See the make.conf man page for"
3054 " more information about these features.")
3055 if self.settings.get("PORTAGE_QUIET") != "1":
3056 msg += _(" You can use a command such as"
3057 " `portageq owners / <filename>` to identify the"
3058 " installed package that owns a file. If portageq"
3059 " reports that only one package owns a file then do NOT"
3060 " file a bug report. A bug report is only useful if it"
3061 " identifies at least two or more packages that are known"
3062 " to install the same file(s)."
3063 " If a collision occurs and you"
3064 " can not explain where the file came from then you"
3065 " should simply ignore the collision since there is not"
3066 " enough information to determine if a real problem"
3067 " exists. Please do NOT file a bug report at"
3068 " http://bugs.gentoo.org unless you report exactly which"
3069 " two packages install the same file(s). Once again,"
3070 " please do NOT file a bug report unless you have"
3071 " completely understood the above message.")
3073 self.settings["EBUILD_PHASE"] = "preinst"
3074 from textwrap import wrap
3076 if collision_protect:
3078 msg.append(_("package %s NOT merged") % self.settings.mycpv)
3080 msg.append(_("Detected file collision(s):"))
3083 for f in collisions:
3084 msg.append("\t%s" % \
3085 os.path.join(destroot, f.lstrip(os.path.sep)))
3090 if collision_protect or protect_owned:
3093 msg.append(_("Searching all installed"
3094 " packages for file collisions..."))
3096 msg.append(_("Press Ctrl-C to Stop"))
3100 if len(collisions) > 20:
3101 # get_owners is slow for large numbers of files, so
3102 # don't look them all up.
3103 collisions = collisions[:20]
3104 owners = self.vartree.dbapi._owners.get_owners(collisions,
3105 scheduler=self._scheduler)
3106 self.vartree.dbapi.flush_cache()
3108 for pkg, owned_files in owners.items():
3111 msg.append("%s" % cpv)
3112 for f in sorted(owned_files):
3113 msg.append("\t%s" % os.path.join(destroot,
3114 f.lstrip(os.path.sep)))
3119 eerror([_("None of the installed"
3120 " packages claim the file(s)."), ""])
3122 # The explanation about the collision and how to solve
3123 # it may not be visible via a scrollback buffer, especially
3124 # if the number of file collisions is large. Therefore,
3125 # show a summary at the end.
3126 if collision_protect:
3127 msg = _("Package '%s' NOT merged due to file collisions.") % \
3129 elif protect_owned and owners:
3130 msg = _("Package '%s' NOT merged due to file collisions.") % \
3133 msg = _("Package '%s' merged despite file collisions.") % \
3135 msg += _(" If necessary, refer to your elog "
3136 "messages for the whole content of the above message.")
3137 eerror(wrap(msg, 70))
3139 if collision_protect or (protect_owned and owners):
3142 # The merge process may move files out of the image directory,
3143 # which causes invalidation of the .installed flag.
3145 os.unlink(os.path.join(
3146 os.path.dirname(normalize_path(srcroot)), ".installed"))
3147 except OSError as e:
3148 if e.errno != errno.ENOENT:
3152 self.dbdir = self.dbtmpdir
3154 ensure_dirs(self.dbtmpdir)
3156 # run preinst script
3157 if scheduler is None:
3158 showMessage(_(">>> Merging %(cpv)s to %(destroot)s\n") % {"cpv":self.mycpv, "destroot":destroot})
3159 a = _spawn_phase("preinst", self.settings)
3161 a = scheduler.dblinkEbuildPhase(
3162 self, mydbapi, myebuild, "preinst")
3164 # XXX: Decide how to handle failures here.
3166 showMessage(_("!!! FAILED preinst: ")+str(a)+"\n",
3167 level=logging.ERROR, noiselevel=-1)
3170 # copy "info" files (like SLOT, CFLAGS, etc.) into the database
3171 for x in os.listdir(inforoot):
3172 self.copyfile(inforoot+"/"+x)
3174 # write local package counter for recording
3175 counter = self.vartree.dbapi.counter_tick(mycpv=self.mycpv)
3176 codecs.open(_unicode_encode(os.path.join(self.dbtmpdir, 'COUNTER'),
3177 encoding=_encodings['fs'], errors='strict'),
3178 'w', encoding=_encodings['repo.content'], errors='backslashreplace'
3179 ).write(str(counter))
3181 # open CONTENTS file (possibly overwriting old one) for recording
3182 outfile = codecs.open(_unicode_encode(
3183 os.path.join(self.dbtmpdir, 'CONTENTS'),
3184 encoding=_encodings['fs'], errors='strict'),
3185 mode='w', encoding=_encodings['repo.content'],
3186 errors='backslashreplace')
3188 self.updateprotect()
3190 #if we have a file containing previously-merged config file md5sums, grab it.
3191 conf_mem_file = os.path.join(self._eroot, CONFIG_MEMORY_FILE)
3192 cfgfiledict = grabdict(conf_mem_file)
3193 cfgfiledict_orig = cfgfiledict.copy()
3194 if "NOCONFMEM" in self.settings:
3195 cfgfiledict["IGNORE"]=1
3197 cfgfiledict["IGNORE"]=0
3199 # Always behave like --noconfmem is enabled for downgrades
3200 # so that people who don't know about this option are less
3201 # likely to get confused when doing upgrade/downgrade cycles.
3202 pv_split = catpkgsplit(self.mycpv)[1:]
3203 for other in others_in_slot:
3204 if pkgcmp(pv_split, catpkgsplit(other.mycpv)[1:]) < 0:
3205 cfgfiledict["IGNORE"] = 1
3208 # Don't bump mtimes on merge since some application require
3209 # preservation of timestamps. This means that the unmerge phase must
3210 # check to see if file belongs to an installed instance in the same
3214 # set umask to 0 for merging; back up umask, save old one in prevmask (since this is a global change)
3215 prevmask = os.umask(0)
3218 # we do a first merge; this will recurse through all files in our srcroot but also build up a
3219 # "second hand" of symlinks to merge later
3220 if self.mergeme(srcroot, destroot, outfile, secondhand, "", cfgfiledict, mymtime):
3223 # now, it's time for dealing our second hand; we'll loop until we can't merge anymore. The rest are
3224 # broken symlinks. We'll merge them too.
3226 while len(secondhand) and len(secondhand)!=lastlen:
3227 # clear the thirdhand. Anything from our second hand that
3228 # couldn't get merged will be added to thirdhand.
3231 if self.mergeme(srcroot, destroot, outfile, thirdhand,
3232 secondhand, cfgfiledict, mymtime):
3236 lastlen = len(secondhand)
3238 # our thirdhand now becomes our secondhand. It's ok to throw
3239 # away secondhand since thirdhand contains all the stuff that
3240 # couldn't be merged.
3241 secondhand = thirdhand
3244 # force merge of remaining symlinks (broken or circular; oh well)
3245 if self.mergeme(srcroot, destroot, outfile, None,
3246 secondhand, cfgfiledict, mymtime):
3248 self._md5_merge_map.clear()
3253 #if we opened it, close it
3257 # write out our collection of md5sums
3258 cfgfiledict.pop("IGNORE", None)
3259 if cfgfiledict != cfgfiledict_orig:
3260 ensure_dirs(os.path.dirname(conf_mem_file),
3261 gid=portage_gid, mode=0o2750, mask=0o2)
3262 writedict(cfgfiledict, conf_mem_file)
3264 # These caches are populated during collision-protect and the data
3265 # they contain is now invalid. It's very important to invalidate
3266 # the contents_inodes cache so that FEATURES=unmerge-orphans
3267 # doesn't unmerge anything that belongs to this package that has
3269 for dblnk in others_in_slot:
3270 dblnk._clear_contents_cache()
3271 self._clear_contents_cache()
3273 linkmap = self.vartree.dbapi._linkmap
3275 # preserve-libs is entirely disabled
3276 preserve_paths = None
3278 self._linkmap_rebuild(include_file=os.path.join(inforoot,
3279 linkmap._needed_aux_key))
3281 # Preserve old libs if they are still in use
3282 preserve_paths = self._find_libs_to_preserve()
3284 self._add_preserve_libs_to_contents(preserve_paths)
3286 # If portage is reinstalling itself, remove the old
3287 # version now since we want to use the temporary
3288 # PORTAGE_BIN_PATH that will be removed when we return.
3289 reinstall_self = False
3290 if self.myroot == "/" and \
3291 match_from_list(PORTAGE_PACKAGE_ATOM, [self.mycpv]):
3292 reinstall_self = True
3294 if scheduler is None:
3295 def emerge_log(msg):
3298 emerge_log = scheduler.dblinkEmergeLog
3300 autoclean = self.settings.get("AUTOCLEAN", "yes") == "yes"
3303 emerge_log(_(" >>> AUTOCLEAN: %s") % (slot_atom,))
3305 others_in_slot.append(self) # self has just been merged
3306 for dblnk in list(others_in_slot):
3309 if not (autoclean or dblnk.mycpv == self.mycpv or reinstall_self):
3311 showMessage(_(">>> Safely unmerging already-installed instance...\n"))
3312 emerge_log(_(" === Unmerging... (%s)") % (dblnk.mycpv,))
3313 others_in_slot.remove(dblnk) # dblnk will unmerge itself now
3314 dblnk._linkmap_broken = self._linkmap_broken
3315 dblnk.settings["REPLACED_BY_VERSION"] = portage.versions.cpv_getversion(self.mycpv)
3316 dblnk.settings.backup_changes("REPLACED_BY_VERSION")
3317 unmerge_rval = dblnk.unmerge(ldpath_mtimes=prev_mtimes,
3318 others_in_slot=others_in_slot)
3319 dblnk.settings.pop("REPLACED_BY_VERSION", None)
3321 if unmerge_rval == os.EX_OK:
3322 emerge_log(_(" >>> unmerge success: %s") % (dblnk.mycpv,))
3324 emerge_log(_(" !!! unmerge FAILURE: %s") % (dblnk.mycpv,))
3326 # TODO: Check status and abort if necessary.
3328 showMessage(_(">>> Original instance of package unmerged safely.\n"))
3330 if len(others_in_slot) > 1:
3331 showMessage(colorize("WARN", _("WARNING:"))
3332 + _(" AUTOCLEAN is disabled. This can cause serious"
3333 " problems due to overlapping packages.\n"),
3334 level=logging.WARN, noiselevel=-1)
3336 # We hold both directory locks.
3337 self.dbdir = self.dbpkgdir
3339 _movefile(self.dbtmpdir, self.dbpkgdir, mysettings=self.settings)
3341 # keep track of the libs we preserved
3342 if self.vartree.dbapi._plib_registry is not None and \
3344 self.vartree.dbapi._plib_registry.register(self.mycpv,
3345 slot, counter, sorted(preserve_paths))
3347 # Check for file collisions with blocking packages
3348 # and remove any colliding files from their CONTENTS
3349 # since they now belong to this package.
3350 self._clear_contents_cache()
3351 contents = self.getcontents()
3352 destroot_len = len(destroot) - 1
3353 for blocker in blockers:
3354 self.vartree.dbapi.removeFromContents(blocker, iter(contents),
3355 relative_paths=False)
3357 # Unregister any preserved libs that this package has overwritten
3358 # and update the contents of the packages that owned them.
3359 plib_registry = self.vartree.dbapi._plib_registry
3360 if plib_registry is None:
3361 # preserve-libs is entirely disabled
3364 plib_dict = plib_registry.getPreservedLibs()
3365 for cpv, paths in plib_collisions.items():
3366 if cpv not in plib_dict:
3368 if cpv == self.mycpv:
3371 slot, counter = self.vartree.dbapi.aux_get(
3372 cpv, ["SLOT", "COUNTER"])
3375 remaining = [f for f in plib_dict[cpv] if f not in paths]
3376 plib_registry.register(cpv, slot, counter, remaining)
3377 self.vartree.dbapi.removeFromContents(cpv, paths)
3379 self.vartree.dbapi._add(self)
3380 contents = self.getcontents()
3383 self.settings["PORTAGE_UPDATE_ENV"] = \
3384 os.path.join(self.dbpkgdir, "environment.bz2")
3385 self.settings.backup_changes("PORTAGE_UPDATE_ENV")
3387 if scheduler is None:
3388 a = _spawn_phase("postinst", self.settings)
3390 showMessage(_(">>> %s merged.\n") % self.mycpv)
3392 a = scheduler.dblinkEbuildPhase(
3393 self, mydbapi, myebuild, "postinst")
3395 self.settings.pop("PORTAGE_UPDATE_ENV", None)
3398 # It's stupid to bail out here, so keep going regardless of
3399 # phase return code.
3400 showMessage(_("!!! FAILED postinst: ")+str(a)+"\n",
3401 level=logging.ERROR, noiselevel=-1)
3404 for v in otherversions:
3405 if pkgcmp(catpkgsplit(self.pkg)[1:], catpkgsplit(v)[1:]) < 0:
3408 #update environment settings, library paths. DO NOT change symlinks.
3409 env_update(makelinks=(not downgrade),
3410 target_root=self.settings['ROOT'], prev_mtimes=prev_mtimes,
3411 contents=contents, env=self.settings.environ(),
3412 writemsg_level=self._display_merge)
3414 # For gcc upgrades, preserved libs have to be removed after the
3415 # the library path has been updated.
3416 self._linkmap_rebuild()
3417 cpv_lib_map = self._find_unused_preserved_libs()
3419 self._remove_preserved_libs(cpv_lib_map)
3420 for cpv, removed in cpv_lib_map.items():
3421 if not self.vartree.dbapi.cpv_exists(cpv):
3423 self.vartree.dbapi.removeFromContents(cpv, removed)
3427 def _new_backup_path(self, p):
3429 The works for any type path, such as a regular file, symlink,
3430 or directory. The parent directory is assumed to exist.
3431 The returned filename is of the form p + '.backup.' + x, where
3432 x guarantees that the returned path does not exist yet.
3439 backup_p = p + '.backup.' + str(x).rjust(4, '0')
	def mergeme(self, srcroot, destroot, outfile, secondhand, stufftomerge, cfgfiledict, thismtime):
		"""
		This function handles actual merging of the package contents to the livefs.
		It also handles config protection.

		@param srcroot: Where are we copying files from (usually ${D})
		@type srcroot: String (Path)
		@param destroot: Typically ${ROOT}
		@type destroot: String (Path)
		@param outfile: File to log operations to
		@type outfile: File Object
		@param secondhand: A set of items to merge in pass two (usually
		or symlinks that point to non-existing files that may get merged later)
		@type secondhand: List
		@param stufftomerge: Either a directory to merge, or a list of items.
		@type stufftomerge: String or List
		@param cfgfiledict: { File:mtime } mapping for config_protected files
		@type cfgfiledict: Dictionary
		@param thismtime: The current time (typically long(time.time())
		@type thismtime: Long
		@rtype: None or Boolean
		"""
		# NOTE(review): this extract is missing a number of original lines
		# (try:/else: headers, continue/return statements, several small
		# assignments). The visible code is kept verbatim below, indented
		# at its original depth, with the gaps flagged in comments.
		# Confirm against the upstream file before relying on this copy.
		showMessage = self._display_merge
		writemsg = self._display_merge
		scheduler = self._scheduler

		srcroot = normalize_path(srcroot).rstrip(sep) + sep
		destroot = normalize_path(destroot).rstrip(sep) + sep
		calc_prelink = "prelink-checksums" in self.settings.features

		# this is supposed to merge a list of files.  There will be 2 forms of argument passing.
		if isinstance(stufftomerge, basestring):
			#A directory is specified.  Figure out protection paths, listdir() it and process it.
			mergelist = os.listdir(join(srcroot, stufftomerge))
			offset = stufftomerge
			# NOTE(review): the else: branch header and the offset = ""
			# assignment for the list form are missing from this extract.
			mergelist = stufftomerge

		for i, x in enumerate(mergelist):

			# Periodically yield control so a cooperating scheduler can
			# run other tasks during a long merge.
			if scheduler is not None and \
				0 == i % self._file_merge_yield_interval:
				scheduler.scheduleYield()

			mysrc = join(srcroot, offset, x)
			mydest = join(destroot, offset, x)
			# myrealdest is mydest without the $ROOT prefix (makes a difference if ROOT!="/")
			myrealdest = join(sep, offset, x)
			# stat file once, test using S_* macros many times (faster that way)
			mystat = os.lstat(mysrc)
			mymode = mystat[stat.ST_MODE]
			# handy variables; mydest is the target object on the live filesystems;
			# mysrc is the source object in the temporary install dir
			# NOTE(review): the try: header for the lstat of mydest is
			# missing from this extract.
			mydstat = os.lstat(mydest)
			mydmode = mydstat.st_mode
			except OSError as e:
				if e.errno != errno.ENOENT:
				# NOTE(review): the raise/del and the mydstat/mydmode
				# None fallbacks are missing from this extract.
				#dest file doesn't exist

			if stat.S_ISLNK(mymode):
				# we are merging a symbolic link
				myabsto = abssymlink(mysrc)
				if myabsto.startswith(srcroot):
					myabsto = myabsto[len(srcroot):]
				myabsto = myabsto.lstrip(sep)
				myto = os.readlink(mysrc)
				if self.settings and self.settings["D"]:
					if myto.startswith(self.settings["D"]):
						myto = myto[len(self.settings["D"]):]
				# myrealto contains the path of the real file to which this symlink points.
				# we can simply test for existence of this file to see if the target has been merged yet
				myrealto = normalize_path(os.path.join(destroot, myabsto))
				# NOTE(review): the "if mydmode != None:" guard is missing
				# from this extract.
					if not stat.S_ISLNK(mydmode):
						if stat.S_ISDIR(mydmode):
							# directory in the way: we can't merge a symlink over a directory
							# we won't merge this, continue with next file...
						if os.path.exists(mysrc) and stat.S_ISDIR(os.stat(mysrc)[stat.ST_MODE]):
							# Kill file blocking installation of symlink to dir #71787
						elif self.isprotected(mydest):
							# Use md5 of the target in ${D} if it exists...
								newmd5 = perform_md5(join(srcroot, myabsto))
							except FileNotFound:
								# Maybe the target is merged already.
									newmd5 = perform_md5(myrealto)
								except FileNotFound:
							mydest = new_protect_filename(mydest, newmd5=newmd5)

				# if secondhand is None it means we're operating in "force" mode and should not create a second hand.
				if (secondhand != None) and (not os.path.exists(myrealto)):
					# either the target directory doesn't exist yet or the target file doesn't exist -- or
					# the target is a broken symlink.  We will add this file to our "second hand" and merge
					secondhand.append(mysrc[len(srcroot):])
				# unlinking no longer necessary; "movefile" will overwrite symlinks atomically and correctly
				mymtime = movefile(mysrc, mydest, newmtime=thismtime,
					sstat=mystat, mysettings=self.settings,
					encoding=_encodings['merge'])
				# NOTE(review): the "if mymtime != None:" success guard and
				# the matching else: are missing from this extract.
					showMessage(">>> %s -> %s\n" % (mydest, myto))
					outfile.write("sym "+myrealdest+" -> "+myto+" "+str(mymtime)+"\n")
					showMessage(_("!!! Failed to move file.\n"),
						level=logging.ERROR, noiselevel=-1)
					showMessage("!!! %s -> %s\n" % (mydest, myto),
						level=logging.ERROR, noiselevel=-1)
			elif stat.S_ISDIR(mymode):
				# we are merging a directory
				# NOTE(review): the "if mydmode != None:" guard is missing.
					# destination exists
					# NOTE(review): the "if bsd_chflags:" guard is missing.
						# Save then clear flags on dest.
						dflags = mydstat.st_flags
							bsd_chflags.lchflags(mydest, 0)

					if not os.access(mydest, os.W_OK):
						pkgstuff = pkgsplit(self.pkg)
						writemsg(_("\n!!! Cannot write to '%s'.\n") % mydest, noiselevel=-1)
						writemsg(_("!!! Please check permissions and directories for broken symlinks.\n"))
						writemsg(_("!!! You may start the merge process again by using ebuild:\n"))
						writemsg("!!! ebuild "+self.settings["PORTDIR"]+"/"+self.cat+"/"+pkgstuff[0]+"/"+self.pkg+".ebuild merge\n")
						writemsg(_("!!! And finish by running this: env-update\n\n"))

					if stat.S_ISDIR(mydmode) or \
						(stat.S_ISLNK(mydmode) and os.path.isdir(mydest)):
						# a symlink to an existing directory will work for us; keep it:
						showMessage("--- %s/\n" % mydest)
							bsd_chflags.lchflags(mydest, dflags)
						# a non-directory and non-symlink-to-directory.  Won't work for us.  Move out of the way.
						backup_dest = self._new_backup_path(mydest)
						msg.append(_("Installation of a directory is blocked by a file:"))
						msg.append(" '%s'" % mydest)
						msg.append(_("This file will be renamed to a different name:"))
						msg.append(" '%s'" % backup_dest)
						self._eerror("preinst", msg)
						if movefile(mydest, backup_dest,
							mysettings=self.settings,
							encoding=_encodings['merge']) is None:
						showMessage(_("bak %s %s.backup\n") % (mydest, mydest),
							level=logging.ERROR, noiselevel=-1)
						#now create our directory
							if self.settings.selinux_enabled():
								_selinux_merge.mkdir(mydest, mysrc)
						except OSError as e:
							# Error handling should be equivalent to
							# portage.util.ensure_dirs() for cases
							if e.errno in (errno.EEXIST,):
							elif os.path.isdir(mydest):
							bsd_chflags.lchflags(mydest, dflags)
						os.chmod(mydest, mystat[0])
						os.chown(mydest, mystat[4], mystat[5])
						showMessage(">>> %s/\n" % mydest)
					#destination doesn't exist
						if self.settings.selinux_enabled():
							_selinux_merge.mkdir(mydest, mysrc)
					except OSError as e:
						# Error handling should be equivalent to
						# portage.util.ensure_dirs() for cases
						if e.errno in (errno.EEXIST,):
						elif os.path.isdir(mydest):
					os.chmod(mydest, mystat[0])
					os.chown(mydest, mystat[4], mystat[5])
					showMessage(">>> %s/\n" % mydest)
				outfile.write("dir "+myrealdest+"\n")
				# recurse and merge this directory
				if self.mergeme(srcroot, destroot, outfile, secondhand,
					join(offset, x), cfgfiledict, thismtime):
			elif stat.S_ISREG(mymode):
				# we are merging a regular file
				mymd5 = perform_md5(mysrc, calc_prelink=calc_prelink)
				# calculate config file protection stuff
				mydestdir = os.path.dirname(mydest)
				protected = self.isprotected(mydest)
				# NOTE(review): the moveme/zing/mymtime initializations and
				# the "if mydmode != None:" guard are missing.
					# destination file exists
					if stat.S_ISDIR(mydmode):
						# install of destination is blocked by an existing directory with the same name
						newdest = self._new_backup_path(mydest)
						msg.append(_("Installation of a regular file is blocked by a directory:"))
						msg.append(" '%s'" % mydest)
						msg.append(_("This file will be merged with a different name:"))
						msg.append(" '%s'" % newdest)
						self._eerror("preinst", msg)
					elif stat.S_ISREG(mydmode) or (stat.S_ISLNK(mydmode) and os.path.exists(mydest) and stat.S_ISREG(os.stat(mydest)[stat.ST_MODE])):
						# install of destination is blocked by an existing regular file,
						# or by a symlink to an existing regular file;
						# now, config file management may come into play.
						# we only need to tweak mydest if cfg file management is in play.
							# we have a protection path; enable config file management.
							destmd5 = perform_md5(mydest, calc_prelink=calc_prelink)
							if mymd5 == destmd5:
								#file already in place; simply update mtimes of destination
								if mymd5 == cfgfiledict.get(myrealdest, [None])[0]:
									""" An identical update has previously been
									merged.  Skip it unless the user has chosen
									moveme = cfgfiledict["IGNORE"]
									cfgprot = cfgfiledict["IGNORE"]
										mymtime = mystat[stat.ST_MTIME]
								# Merging a new file, so update confmem.
								cfgfiledict[myrealdest] = [mymd5]
							elif destmd5 == cfgfiledict.get(myrealdest, [None])[0]:
								"""A previously remembered update has been
								accepted, so it is removed from confmem."""
								del cfgfiledict[myrealdest]
								mydest = new_protect_filename(mydest, newmd5=mymd5)

				# whether config protection or not, we merge the new file the
				# same way.  Unless moveme=0 (blocking directory)
					# Do not hardlink files unless they are in the same
					# directory, since otherwise tar may not be able to
					# extract a tarball of the resulting hardlinks due to
					# 'Invalid cross-device link' errors (depends on layout of
					# mount points). Also, don't hardlink zero-byte files since
					# it doesn't save any space, and don't hardlink
					# CONFIG_PROTECTed files since config files shouldn't be
					# hardlinked to eachother (for example, shadow installs
					# several identical config files inside /etc/pam.d/).
					parent_dir = os.path.dirname(myrealdest)
					hardlink_key = (parent_dir, mymd5, mystat.st_size,
						mystat.st_mode, mystat.st_uid, mystat.st_gid)

					hardlink_candidates = None
					if not protected and mystat.st_size != 0:
						hardlink_candidates = self._md5_merge_map.get(hardlink_key)
						if hardlink_candidates is None:
							hardlink_candidates = []
							self._md5_merge_map[hardlink_key] = hardlink_candidates

					mymtime = movefile(mysrc, mydest, newmtime=thismtime,
						sstat=mystat, mysettings=self.settings,
						hardlink_candidates=hardlink_candidates,
						encoding=_encodings['merge'])
					if hardlink_candidates is not None:
						hardlink_candidates.append(mydest)
					outfile.write("obj "+myrealdest+" "+mymd5+" "+str(mymtime)+"\n")
				showMessage("%s %s\n" % (zing,mydest))
				# we are merging a fifo or device node
				# NOTE(review): the else: header, zing initialization and
				# "if mydmode is None:" guard are missing.
					# destination doesn't exist
					if movefile(mysrc, mydest, newmtime=thismtime,
						sstat=mystat, mysettings=self.settings,
						encoding=_encodings['merge']) is not None:
				if stat.S_ISFIFO(mymode):
					outfile.write("fif %s\n" % myrealdest)
					outfile.write("dev %s\n" % myrealdest)
				showMessage(zing + " " + mydest + "\n")
	def merge(self, mergeroot, inforoot, myroot=None, myebuild=None, cleanup=0,
		mydbapi=None, prev_mtimes=None):
		"""
		If portage is reinstalling itself, create temporary
		copies of PORTAGE_BIN_PATH and PORTAGE_PYM_PATH in order
		to avoid relying on the new versions which may be
		incompatible. Register an atexit hook to clean up the
		temporary directories. Pre-load elog modules here since
		we won't be able to later if they get unmerged (happens
		when namespace changes).

		@param myroot: ignored, self._eroot is used instead
		"""
		# NOTE(review): a few original lines appear to be missing from this
		# extract (e.g. the assignment that defines dir_perms before it is
		# used below). Confirm against the upstream file.
		# Drop the category cache so it is rebuilt after the merge.
		if self.vartree.dbapi._categories is not None:
			self.vartree.dbapi._categories = None
		# Special-case: portage is merging itself (live/9999/git versions or
		# a cpv not yet installed), so snapshot its own bin/pym trees first.
		if self.myroot == "/" and \
			match_from_list(PORTAGE_PACKAGE_ATOM, [self.mycpv]) and \
			(not self.vartree.dbapi.cpv_exists(self.mycpv) or \
			'9999' in self.mycpv or \
			'git' in self.settings.get('INHERITED', '').split()):
			# Load lazily referenced portage submodules into memory,
			# so imports won't fail during portage upgrade/downgrade.
			portage.proxy.lazyimport._preload_portage_submodules()
			settings = self.settings
			base_path_orig = os.path.dirname(settings["PORTAGE_BIN_PATH"])
			from tempfile import mkdtemp

			# Make the temp directory inside PORTAGE_TMPDIR since, unlike
			# /tmp, it can't be mounted with the "noexec" option.
			base_path_tmp = mkdtemp("", "._portage_reinstall_.",
				settings["PORTAGE_TMPDIR"])
			from portage.process import atexit_register
			atexit_register(shutil.rmtree, base_path_tmp)
			for subdir in "bin", "pym":
				var_name = "PORTAGE_%s_PATH" % subdir.upper()
				var_orig = settings[var_name]
				var_new = os.path.join(base_path_tmp, subdir)
				settings[var_name] = var_new
				settings.backup_changes(var_name)
				shutil.copytree(var_orig, var_new, symlinks=True)
				os.chmod(var_new, dir_perms)
			os.chmod(base_path_tmp, dir_perms)
			# This serves to pre-load the modules.
			elog_process(self.mycpv, self.settings)

		return self._merge(mergeroot, inforoot,
			myebuild=myebuild, cleanup=cleanup,
			mydbapi=mydbapi, prev_mtimes=prev_mtimes)
	def _merge(self, mergeroot, inforoot, myroot=None, myebuild=None, cleanup=0,
		mydbapi=None, prev_mtimes=None):
		"""
		Perform the actual merge via treewalk(), run the success/die hooks
		and the clean phase, and keep the vdb mtime bumped around the
		operation.

		@param myroot: ignored, self._eroot is used instead
		"""
		# NOTE(review): this extract is missing the locking and try/finally
		# scaffolding (and the else: branches) that surround the visible
		# statements below. Confirm against the upstream file.
		self.vartree.dbapi._bump_mtime(self.mycpv)
			plib_registry = self.vartree.dbapi._plib_registry
			if plib_registry is None:
				# preserve-libs is entirely disabled
				plib_registry.load()
				plib_registry.pruneNonExisting()

			retval = self.treewalk(mergeroot, myroot, inforoot, myebuild,
				cleanup=cleanup, mydbapi=mydbapi, prev_mtimes=prev_mtimes)

			# If PORTAGE_BUILDDIR doesn't exist, then it probably means
			# fail-clean is enabled, and the success/die hooks have
			# already been called by _emerge.EbuildPhase (via
			# self._scheduler.dblinkEbuildPhase) prior to cleaning.
			if os.path.isdir(self.settings['PORTAGE_BUILDDIR']):
				if retval == os.EX_OK:
					phase = 'success_hooks'
				if self._scheduler is None:
					ebuild_phase = MiscFunctionsProcess(
						scheduler=PollScheduler().sched_iface,
						settings=self.settings)
					ebuild_phase.start()
					self._scheduler.dblinkEbuildPhase(
						self, mydbapi, myebuild, phase)

				elog_process(self.mycpv, self.settings)

				if 'noclean' not in self.settings.features and \
					(retval == os.EX_OK or \
					'fail-clean' in self.settings.features):
					if myebuild is None:
						myebuild = os.path.join(inforoot, self.pkg + ".ebuild")

					doebuild_environment(myebuild, "clean",
						settings=self.settings, db=mydbapi)
					if self._scheduler is None:
						_spawn_phase("clean", self.settings)
						self._scheduler.dblinkEbuildPhase(
							self, mydbapi, myebuild, "clean")

			self.settings.pop('REPLACING_VERSIONS', None)
			if self.vartree.dbapi._linkmap is None:
				# preserve-libs is entirely disabled
				self.vartree.dbapi._linkmap._clear_cache()
			self.vartree.dbapi._bump_mtime(self.mycpv)
3905 def getstring(self,name):
3906 "returns contents of a file with whitespace converted to spaces"
3907 if not os.path.exists(self.dbdir+"/"+name):
3909 mydata = codecs.open(
3910 _unicode_encode(os.path.join(self.dbdir, name),
3911 encoding=_encodings['fs'], errors='strict'),
3912 mode='r', encoding=_encodings['repo.content'], errors='replace'
3914 return " ".join(mydata)
3916 def copyfile(self,fname):
3917 shutil.copyfile(fname,self.dbdir+"/"+os.path.basename(fname))
3919 def getfile(self,fname):
3920 if not os.path.exists(self.dbdir+"/"+fname):
3922 return codecs.open(_unicode_encode(os.path.join(self.dbdir, fname),
3923 encoding=_encodings['fs'], errors='strict'),
3924 mode='r', encoding=_encodings['repo.content'], errors='replace'
3927 def setfile(self,fname,data):
3929 if fname == 'environment.bz2' or not isinstance(data, basestring):
3930 kwargs['mode'] = 'wb'
3932 kwargs['mode'] = 'w'
3933 kwargs['encoding'] = _encodings['repo.content']
3934 write_atomic(os.path.join(self.dbdir, fname), data, **kwargs)
3936 def getelements(self,ename):
3937 if not os.path.exists(self.dbdir+"/"+ename):
3939 mylines = codecs.open(_unicode_encode(
3940 os.path.join(self.dbdir, ename),
3941 encoding=_encodings['fs'], errors='strict'),
3942 mode='r', encoding=_encodings['repo.content'], errors='replace'
3946 for y in x[:-1].split():
3950 def setelements(self,mylist,ename):
3951 myelement = codecs.open(_unicode_encode(
3952 os.path.join(self.dbdir, ename),
3953 encoding=_encodings['fs'], errors='strict'),
3954 mode='w', encoding=_encodings['repo.content'],
3955 errors='backslashreplace')
3957 myelement.write(x+"\n")
3960 def isregular(self):
3961 "Is this a regular package (does it have a CATEGORY file? A dblink can be virtual *and* regular)"
3962 return os.path.exists(os.path.join(self.dbdir, "CATEGORY"))
def merge(mycat, mypkg, pkgloc, infloc,
	myroot=None, settings=None, myebuild=None,
	mytree=None, mydbapi=None, vartree=None, prev_mtimes=None, blockers=None,
	scheduler=None):
	"""
	Module-level convenience wrapper: build a dblink for mycat/mypkg and
	merge pkgloc/infloc through it.

	@param myroot: ignored, settings['EROOT'] is used instead
	"""
	# Restored: the scheduler parameter, the noiselevel argument and the
	# EACCES return were absent from the extract.
	if settings is None:
		raise TypeError("settings argument is required")
	# Refuse early if EROOT is not writable, rather than failing mid-merge.
	if not os.access(settings['EROOT'], os.W_OK):
		writemsg(_("Permission denied: access('%s', W_OK)\n") % settings['EROOT'],
			noiselevel=-1)
		return errno.EACCES
	mylink = dblink(mycat, mypkg, settings=settings, treetype=mytree,
		vartree=vartree, blockers=blockers, scheduler=scheduler)
	return mylink.merge(pkgloc, infloc, myebuild=myebuild,
		mydbapi=mydbapi, prev_mtimes=prev_mtimes)
def unmerge(cat, pkg, myroot=None, settings=None,
	mytrimworld=None, vartree=None,
	ldpath_mtimes=None, scheduler=None):
	"""
	Module-level convenience wrapper: build a dblink for cat/pkg and
	unmerge it, pruning the preserved-libs registry when it is enabled.

	@param myroot: ignored, settings['EROOT'] is used instead
	@param mytrimworld: ignored
	"""
	# NOTE(review): this extract is missing the locking and try/finally
	# scaffolding (and the else: branches) around the visible statements
	# below, as well as the final return. Confirm against the upstream
	# file.
	if settings is None:
		raise TypeError("settings argument is required")
	mylink = dblink(cat, pkg, settings=settings, treetype="vartree",
		vartree=vartree, scheduler=scheduler)
	vartree = mylink.vartree
			plib_registry = vartree.dbapi._plib_registry
			if plib_registry is None:
				# preserve-libs is entirely disabled
				plib_registry.load()
				plib_registry.pruneNonExisting()
			retval = mylink.unmerge(ldpath_mtimes=ldpath_mtimes)
			if retval == os.EX_OK:
		if vartree.dbapi._linkmap is None:
			# preserve-libs is entirely disabled
			vartree.dbapi._linkmap._clear_cache()
def write_contents(contents, root, f):
	"""
	Write contents to any file like object. The file will be left open.

	@param contents: { absolute_path : entry_tuple } mapping, where each
		entry tuple starts with the entry type ("obj", "sym", "dir", ...)
	@param root: root prefix to strip from each path (its trailing slash
		is kept on the emitted relative path)
	@param f: writable file-like object
	"""
	# Keep the leading slash of the relative path: strip len(root)-1 chars.
	root_len = len(root) - 1
	for filename in sorted(contents):
		entry_data = contents[filename]
		entry_type = entry_data[0]
		relative_filename = filename[root_len:]
		if entry_type == "obj":
			entry_type, mtime, md5sum = entry_data
			line = "%s %s %s %s\n" % \
				(entry_type, relative_filename, md5sum, mtime)
		elif entry_type == "sym":
			entry_type, mtime, link = entry_data
			line = "%s %s -> %s %s\n" % \
				(entry_type, relative_filename, link, mtime)
		else: # dir, dev, fif
			line = "%s %s\n" % (entry_type, relative_filename)
		# Restored: the extract computed each line but never wrote it.
		f.write(line)
def tar_contents(contents, root, tar, protect=None, onProgress=None):
	"""
	Add every path listed in contents to the given tar archive.

	@param contents: { absolute_path : entry_tuple } mapping (CONTENTS data)
	@param root: filesystem root to strip when computing archive names
	@param tar: a tarfile.TarFile open for writing
	@param protect: optional callable; when it returns true for a path, an
		empty placeholder is archived instead of the real file contents
	@param onProgress: optional callback invoked as onProgress(maxval, curval)
	"""
	# NOTE(review): this extract is missing the try:/for loop headers of
	# the encoding probe, the main "for path in paths:" loop header and
	# several else: branches and assignments. The visible lines are kept
	# verbatim at their original depth. Confirm against the upstream file.
	# Probe whether every path encodes with the merge encoding; on failure
	# fall back to the filesystem encoding when that one works.
				encoding=_encodings['merge'],
	except UnicodeEncodeError:
		# The package appears to have been merged with a
		# different value of sys.getfilesystemencoding(),
		# so fall back to utf_8 if appropriate.
					encoding=_encodings['fs'],
		except UnicodeEncodeError:

	from portage.util import normalize_path
	root = normalize_path(root).rstrip(os.path.sep) + os.path.sep
	maxval = len(contents)
		onProgress(maxval, 0)
	paths = list(contents)
				lst = os.lstat(path)
			except OSError as e:
				if e.errno != errno.ENOENT:
					# Vanished files are skipped, but still reported as
					# progress.
					onProgress(maxval, curval)
			contents_type = contents[path][0]
			if path.startswith(root):
				arcname = path[len(root):]
				raise ValueError("invalid root argument: '%s'" % root)
			if 'dir' == contents_type and \
				not stat.S_ISDIR(lst.st_mode) and \
				os.path.isdir(live_path):
				# Even though this was a directory in the original ${D}, it exists
				# as a symlink to a directory in the live filesystem.  It must be
				# recorded as a real directory in the tar file to ensure that tar
				# can properly extract it's children.
				live_path = os.path.realpath(live_path)
			tarinfo = tar.gettarinfo(live_path, arcname)

			if stat.S_ISREG(lst.st_mode):
				# break hardlinks due to bug #185305
				tarinfo.type = tarfile.REGTYPE
				if protect and protect(path):
					# Create an empty file as a place holder in order to avoid
					# potential collision-protect issues.
					f = tempfile.TemporaryFile()
					f.write(_unicode_encode(
						"# empty file because --include-config=n " + \
						"when `quickpkg` was used\n"))
					tarinfo.size = os.fstat(f.fileno()).st_size
					tar.addfile(tarinfo, f)
					f = open(_unicode_encode(path,
						encoding=object.__getattribute__(os, '_encoding'),
						errors='strict'), 'rb')
						tar.addfile(tarinfo, f)
				tar.addfile(tarinfo)
				onProgress(maxval, curval)