1 # Copyright 1998-2011 Gentoo Foundation
2 # Distributed under the terms of the GNU General Public License v2
5 "vardbapi", "vartree", "dblink"] + \
6 ["write_contents", "tar_contents"]
9 portage.proxy.lazyimport.lazyimport(globals(),
10 'portage.checksum:_perform_md5_merge@perform_md5',
11 'portage.data:portage_gid,portage_uid,secpass',
12 'portage.dbapi.dep_expand:dep_expand',
13 'portage.dbapi._MergeProcess:MergeProcess',
14 'portage.dep:dep_getkey,isjustname,match_from_list,' + \
15 'use_reduce,_slot_re',
16 'portage.elog:collect_ebuild_messages,collect_messages,' + \
17 'elog_process,_merge_logentries',
18 'portage.locks:lockdir,unlockdir,lockfile,unlockfile',
19 'portage.output:bold,colorize',
20 'portage.package.ebuild.doebuild:doebuild_environment,' + \
21 '_merge_unicode_error', '_spawn_phase',
22 'portage.package.ebuild.prepare_build_dirs:prepare_build_dirs',
23 'portage.update:fixdbentries',
24 'portage.util:apply_secpass_permissions,ConfigProtect,ensure_dirs,' + \
25 'writemsg,writemsg_level,write_atomic,atomic_ofstream,writedict,' + \
26 'grabdict,normalize_path,new_protect_filename',
27 'portage.util.digraph:digraph',
28 'portage.util.env_update:env_update',
29 'portage.util.listdir:dircache,listdir',
30 'portage.util._dyn_libs.PreservedLibsRegistry:PreservedLibsRegistry',
31 'portage.util._dyn_libs.LinkageMapELF:LinkageMapELF@LinkageMap',
32 'portage.versions:best,catpkgsplit,catsplit,cpv_getkey,pkgcmp,' + \
36 from portage.const import CACHE_PATH, CONFIG_MEMORY_FILE, \
37 PORTAGE_PACKAGE_ATOM, PRIVATE_PATH, VDB_PATH
38 from portage.const import _ENABLE_DYN_LINK_MAP, _ENABLE_PRESERVE_LIBS
39 from portage.dbapi import dbapi
40 from portage.exception import CommandNotFound, \
41 InvalidData, InvalidLocation, InvalidPackageName, \
42 FileNotFound, PermissionDenied, UnsupportedAPIException
43 from portage.localization import _
44 from portage.util.movefile import movefile
46 from portage import abssymlink, _movefile, bsd_chflags
48 # This is a special version of the os module, wrapped for unicode support.
49 from portage import os
50 from portage import _encodings
51 from portage import _os_merge
52 from portage import _selinux_merge
53 from portage import _unicode_decode
54 from portage import _unicode_encode
56 from _emerge.EbuildBuildDir import EbuildBuildDir
57 from _emerge.EbuildPhase import EbuildPhase
58 from _emerge.emergelog import emergelog
59 from _emerge.PollScheduler import PollScheduler
60 from _emerge.MiscFunctionsProcess import MiscFunctionsProcess
65 from itertools import chain
78 import cPickle as pickle
82 if sys.hexversion >= 0x3000000:
86 class vardbapi(dbapi):
# Directory names that can never be valid package/category entries;
# compiled below into a regex that also excludes hidden entries and
# in-progress -MERGING- directories.
88 _excluded_dirs = ["CVS", "lost+found"]
89 _excluded_dirs = [re.escape(x) for x in _excluded_dirs]
90 _excluded_dirs = re.compile(r'^(\..*|-MERGING-.*|' + \
91 "|".join(_excluded_dirs) + r')$')
# Bump these to invalidate previously pickled on-disk caches when
# the cache layout changes incompatibly.
93 _aux_cache_version = "1"
94 _owners_cache_version = "1"
96 # Number of uncached packages to trigger cache update, since
97 # it's wasteful to update it for every vdb change.
98 _aux_cache_threshold = 5
# Metadata keys matching this pattern are cached in addition to the
# fixed _aux_cache_keys set (see aux_get).
100 _aux_cache_keys_re = re.compile(r'^NEEDED\..*$')
# Keys whose values legitimately span multiple lines; their newlines
# are preserved instead of being collapsed (see _aux_get).
101 _aux_multi_line_re = re.compile(r'^(CONTENTS|NEEDED\..*)$')
103 def __init__(self, _unused_param=None, categories=None, settings=None, vartree=None):
105 The categories parameter is unused since the dbapi class
106 now has a categories property that is generated from the
110 # Used by emerge to check whether any packages
111 # have been added or removed.
112 self._pkgs_changed = False
114 # The _aux_cache_threshold doesn't work as designed
115 # if the cache is flushed from a subprocess, so we
116 # use this to avoid waste vdb cache updates.
117 self._flush_cache_enabled = True
119 #cache for category directory mtimes
122 #cache for dependency checks
125 #cache for cp_list results
130 settings = portage.settings
131 self.settings = settings
133 if _unused_param is not None and _unused_param != settings['ROOT']:
134 warnings.warn("The first parameter of the "
135 "portage.dbapi.vartree.vardbapi"
136 " constructor is now unused. Use "
137 "settings['ROOT'] instead.",
138 DeprecationWarning, stacklevel=2)
140 self._eroot = settings['EROOT']
141 self._dbroot = self._eroot + VDB_PATH
145 self._conf_mem_file = self._eroot + CONFIG_MEMORY_FILE
146 self._fs_lock_obj = None
147 self._fs_lock_count = 0
150 vartree = portage.db[settings['EROOT']]['vartree']
151 self.vartree = vartree
152 self._aux_cache_keys = set(
153 ["BUILD_TIME", "CHOST", "COUNTER", "DEPEND", "DESCRIPTION",
154 "EAPI", "HOMEPAGE", "IUSE", "KEYWORDS",
155 "LICENSE", "PDEPEND", "PROPERTIES", "PROVIDE", "RDEPEND",
156 "repository", "RESTRICT" , "SLOT", "USE", "DEFINED_PHASES",
158 self._aux_cache_obj = None
159 self._aux_cache_filename = os.path.join(self._eroot,
160 CACHE_PATH, "vdb_metadata.pickle")
161 self._counter_path = os.path.join(self._eroot,
162 CACHE_PATH, "counter")
164 self._plib_registry = None
165 if _ENABLE_PRESERVE_LIBS:
166 self._plib_registry = PreservedLibsRegistry(settings["ROOT"],
167 os.path.join(self._eroot, PRIVATE_PATH,
168 "preserved_libs_registry"))
171 if _ENABLE_DYN_LINK_MAP:
172 self._linkmap = LinkageMap(self)
173 self._owners = self._owners_db(self)
175 self._cached_counter = None
179 warnings.warn("The root attribute of "
180 "portage.dbapi.vartree.vardbapi"
181 " is deprecated. Use "
182 "settings['ROOT'] instead.",
183 DeprecationWarning, stacklevel=2)
184 return self.settings['ROOT']
186 def getpath(self, mykey, filename=None):
187 # This is an optimized hotspot, so don't use unicode-wrapped
188 # os module and don't use os.path.join().
189 rValue = self._eroot + VDB_PATH + _os.sep + mykey
190 if filename is not None:
191 # If filename is always relative, we can do just
192 # rValue += _os.sep + filename
193 rValue = _os.path.join(rValue, filename)
198 Acquire a reentrant lock, blocking, for cooperation with concurrent
199 processes. State is inherited by subprocesses, allowing subprocesses
200 to reenter a lock that was acquired by a parent process. However,
201 a lock can be released only by the same process that acquired it.
204 self._lock_count += 1
206 if self._lock is not None:
207 raise AssertionError("already locked")
208 # At least the parent needs to exist for the lock file.
209 ensure_dirs(self._dbroot)
210 self._lock = lockdir(self._dbroot)
211 self._lock_count += 1
215 Release a lock, decrementing the recursion level. Each unlock() call
216 must be matched with a prior lock() call, or else an AssertionError
217 will be raised if unlock() is called while not locked.
219 if self._lock_count > 1:
220 self._lock_count -= 1
222 if self._lock is None:
223 raise AssertionError("not locked")
225 unlockdir(self._lock)
230 Acquire a reentrant lock, blocking, for cooperation with concurrent
233 if self._fs_lock_count < 1:
234 if self._fs_lock_obj is not None:
235 raise AssertionError("already locked")
237 self._fs_lock_obj = lockfile(self._conf_mem_file)
238 except InvalidLocation:
239 self.settings._init_dirs()
240 self._fs_lock_obj = lockfile(self._conf_mem_file)
241 self._fs_lock_count += 1
243 def _fs_unlock(self):
245 Release a lock, decrementing the recursion level.
247 if self._fs_lock_count <= 1:
248 if self._fs_lock_obj is None:
249 raise AssertionError("not locked")
250 unlockfile(self._fs_lock_obj)
251 self._fs_lock_obj = None
252 self._fs_lock_count -= 1
254 def _bump_mtime(self, cpv):
256 This is called before and after any modifications, so that consumers
257 can use directory mtimes to validate caches. See bug #290428.
259 base = self._eroot + VDB_PATH
260 cat = catsplit(cpv)[0]
261 catdir = base + _os.sep + cat
# NOTE(review): loop body is not visible here — presumably it updates
# the mtime of both the category dir and the vdb root; confirm upstream.
265 for x in (catdir, base):
def cpv_exists(self, mykey, myrepo=None):
	"""Return True if the package is actually installed on disk (no
	masking applies). The myrepo parameter is accepted for interface
	compatibility and is unused here."""
	pkg_dir = self.getpath(mykey)
	return os.path.exists(pkg_dir)
274 def cpv_counter(self, mycpv):
275 "This method will grab the COUNTER. Returns a counter value."
277 return long(self.aux_get(mycpv, ["COUNTER"])[0])
278 except (KeyError, ValueError):
280 writemsg_level(_("portage: COUNTER for %s was corrupted; " \
281 "resetting to value of 0\n") % (mycpv,),
282 level=logging.ERROR, noiselevel=-1)
def cpv_inject(self, mycpv):
	"""Inject a real package into the on-disk database. Assumes mycpv
	is valid and does not already exist."""
	ensure_dirs(self.getpath(mycpv))
	counter = self.counter_tick(mycpv=mycpv)
	# Record a per-package COUNTER so that emerge clean does the
	# right thing.
	counter_file = self.getpath(mycpv, filename="COUNTER")
	write_atomic(counter_file, str(counter))
292 def isInjected(self, mycpv):
293 if self.cpv_exists(mycpv):
294 if os.path.exists(self.getpath(mycpv, filename="INJECTED")):
296 if not os.path.exists(self.getpath(mycpv, filename="CONTENTS")):
300 def move_ent(self, mylist, repo_match=None):
305 for atom in (origcp, newcp):
306 if not isjustname(atom):
307 raise InvalidPackageName(str(atom))
308 origmatches = self.match(origcp, use_cache=0)
312 for mycpv in origmatches:
313 mycpv_cp = cpv_getkey(mycpv)
314 if mycpv_cp != origcp:
315 # Ignore PROVIDE virtual match.
317 if repo_match is not None \
318 and not repo_match(self.aux_get(mycpv, ['repository'])[0]):
320 mynewcpv = mycpv.replace(mycpv_cp, str(newcp), 1)
321 mynewcat = catsplit(newcp)[0]
322 origpath = self.getpath(mycpv)
323 if not os.path.exists(origpath):
326 if not os.path.exists(self.getpath(mynewcat)):
327 #create the directory
328 ensure_dirs(self.getpath(mynewcat))
329 newpath = self.getpath(mynewcpv)
330 if os.path.exists(newpath):
331 #dest already exists; keep this puppy where it is.
333 _movefile(origpath, newpath, mysettings=self.settings)
334 self._clear_pkg_cache(self._dblink(mycpv))
335 self._clear_pkg_cache(self._dblink(mynewcpv))
337 # We need to rename the ebuild now.
338 old_pf = catsplit(mycpv)[1]
339 new_pf = catsplit(mynewcpv)[1]
342 os.rename(os.path.join(newpath, old_pf + ".ebuild"),
343 os.path.join(newpath, new_pf + ".ebuild"))
344 except EnvironmentError as e:
345 if e.errno != errno.ENOENT:
348 write_atomic(os.path.join(newpath, "PF"), new_pf+"\n")
349 write_atomic(os.path.join(newpath, "CATEGORY"), mynewcat+"\n")
350 fixdbentries([mylist], newpath)
353 def cp_list(self, mycp, use_cache=1):
354 mysplit=catsplit(mycp)
355 if mysplit[0] == '*':
356 mysplit[0] = mysplit[0][1:]
358 mystat = os.stat(self.getpath(mysplit[0])).st_mtime
361 if use_cache and mycp in self.cpcache:
362 cpc = self.cpcache[mycp]
365 cat_dir = self.getpath(mysplit[0])
367 dir_list = os.listdir(cat_dir)
368 except EnvironmentError as e:
369 if e.errno == PermissionDenied.errno:
370 raise PermissionDenied(cat_dir)
376 if self._excluded_dirs.match(x) is not None:
380 self.invalidentry(os.path.join(self.getpath(mysplit[0]), x))
383 if ps[0] == mysplit[1]:
384 returnme.append(mysplit[0]+"/"+x)
385 self._cpv_sort_ascending(returnme)
387 self.cpcache[mycp] = [mystat, returnme[:]]
388 elif mycp in self.cpcache:
389 del self.cpcache[mycp]
392 def cpv_all(self, use_cache=1):
394 Set use_cache=0 to bypass the portage.cachedir() cache in cases
395 when the accuracy of mtime staleness checks should not be trusted
396 (generally this is only necessary in critical sections that
397 involve merge or unmerge of packages).
400 basepath = os.path.join(self._eroot, VDB_PATH) + os.path.sep
403 from portage import listdir
405 def listdir(p, **kwargs):
407 return [x for x in os.listdir(p) \
408 if os.path.isdir(os.path.join(p, x))]
409 except EnvironmentError as e:
410 if e.errno == PermissionDenied.errno:
411 raise PermissionDenied(p)
415 for x in listdir(basepath, EmptyOnError=1, ignorecvs=1, dirsonly=1):
416 if self._excluded_dirs.match(x) is not None:
418 if not self._category_re.match(x):
420 for y in listdir(basepath + x, EmptyOnError=1, dirsonly=1):
421 if self._excluded_dirs.match(y) is not None:
423 subpath = x + "/" + y
424 # -MERGING- should never be a cpv, nor should files.
426 if catpkgsplit(subpath) is None:
427 self.invalidentry(self.getpath(subpath))
430 self.invalidentry(self.getpath(subpath))
432 returnme.append(subpath)
436 def cp_all(self, use_cache=1):
437 mylist = self.cpv_all(use_cache=use_cache)
443 mysplit = catpkgsplit(y)
445 self.invalidentry(self.getpath(y))
448 self.invalidentry(self.getpath(y))
450 d[mysplit[0]+"/"+mysplit[1]] = None
453 def checkblockers(self, origdep):
456 def _clear_cache(self):
457 self.mtdircache.clear()
458 self.matchcache.clear()
460 self._aux_cache_obj = None
def _add(self, pkg_dblink):
	"""Record that a package was added and drop its stale cache entries."""
	self._clear_pkg_cache(pkg_dblink)
	self._pkgs_changed = True
def _remove(self, pkg_dblink):
	"""Record that a package was removed and drop its stale cache entries."""
	self._clear_pkg_cache(pkg_dblink)
	self._pkgs_changed = True
def _clear_pkg_cache(self, pkg_dblink):
	"""Actively invalidate every cache entry for this package.

	Mtime checks alone are not always sufficient to invalidate the
	vardbapi caches, because <python-2.5 only has 1 second mtime
	granularity, so the affected entries are dropped explicitly.
	"""
	cat = pkg_dblink.cat
	self.mtdircache.pop(cat, None)
	self.matchcache.pop(cat, None)
	self.cpcache.pop(pkg_dblink.mysplit[0], None)
	dircache.pop(pkg_dblink.dbcatdir, None)
479 def match(self, origdep, use_cache=1):
480 "caching match function"
482 origdep, mydb=self, use_cache=use_cache, settings=self.settings)
483 mykey = dep_getkey(mydep)
484 mycat = catsplit(mykey)[0]
486 if mycat in self.matchcache:
487 del self.mtdircache[mycat]
488 del self.matchcache[mycat]
489 return list(self._iter_match(mydep,
490 self.cp_list(mydep.cp, use_cache=use_cache)))
492 curmtime = os.stat(os.path.join(self._eroot, VDB_PATH, mycat)).st_mtime
493 except (IOError, OSError):
496 if mycat not in self.matchcache or \
497 self.mtdircache[mycat] != curmtime:
499 self.mtdircache[mycat] = curmtime
500 self.matchcache[mycat] = {}
501 if mydep not in self.matchcache[mycat]:
502 mymatch = list(self._iter_match(mydep,
503 self.cp_list(mydep.cp, use_cache=use_cache)))
504 self.matchcache[mycat][mydep] = mymatch
505 return self.matchcache[mycat][mydep][:]
def findname(self, mycpv, myrepo=None):
	"""Return the path of the installed copy of the ebuild for mycpv.
	The myrepo parameter is accepted for interface compatibility and
	is unused here."""
	pf = catsplit(mycpv)[1]
	return self.getpath(str(mycpv), filename=pf + ".ebuild")
510 def flush_cache(self):
511 """If the current user has permission and the internal aux_get cache has
512 been updated, save it to disk and mark it unmodified. This is called
513 by emerge after it has loaded the full vdb for use in dependency
514 calculations. Currently, the cache is only written if the user has
515 superuser privileges (since that's required to obtain a lock), but all
516 users have read access and benefit from faster metadata lookups (as
517 long as at least part of the cache is still valid)."""
518 if self._flush_cache_enabled and \
519 self._aux_cache is not None and \
520 len(self._aux_cache["modified"]) >= self._aux_cache_threshold and \
522 self._owners.populate() # index any unindexed contents
523 valid_nodes = set(self.cpv_all())
524 for cpv in list(self._aux_cache["packages"]):
525 if cpv not in valid_nodes:
526 del self._aux_cache["packages"][cpv]
527 del self._aux_cache["modified"]
529 f = atomic_ofstream(self._aux_cache_filename, 'wb')
530 pickle.dump(self._aux_cache, f, protocol=2)
532 apply_secpass_permissions(
533 self._aux_cache_filename, gid=portage_gid, mode=0o644)
534 except (IOError, OSError) as e:
536 self._aux_cache["modified"] = set()
def _aux_cache(self):
	"""Return the aux_get metadata cache, initializing it lazily on
	first access (see _aux_cache_init)."""
	cache = self._aux_cache_obj
	if cache is None:
		self._aux_cache_init()
		cache = self._aux_cache_obj
	return cache
544 def _aux_cache_init(self):
547 if sys.hexversion >= 0x3000000:
548 # Buffered io triggers extreme performance issues in
549 # Unpickler.load() (problem observed with python-3.0.1).
550 # Unfortunately, performance is still poor relative to
551 # python-2.x, but buffering makes it much worse.
552 open_kwargs["buffering"] = 0
554 f = open(_unicode_encode(self._aux_cache_filename,
555 encoding=_encodings['fs'], errors='strict'),
556 mode='rb', **open_kwargs)
557 mypickle = pickle.Unpickler(f)
559 mypickle.find_global = None
560 except AttributeError:
561 # TODO: If py3k, override Unpickler.find_class().
563 aux_cache = mypickle.load()
566 except (AttributeError, EOFError, EnvironmentError, ValueError, pickle.UnpicklingError) as e:
567 if isinstance(e, EnvironmentError) and \
568 getattr(e, 'errno', None) in (errno.ENOENT, errno.EACCES):
571 writemsg(_unicode_decode(_("!!! Error loading '%s': %s\n")) % \
572 (self._aux_cache_filename, e), noiselevel=-1)
575 if not aux_cache or \
576 not isinstance(aux_cache, dict) or \
577 aux_cache.get("version") != self._aux_cache_version or \
578 not aux_cache.get("packages"):
579 aux_cache = {"version": self._aux_cache_version}
580 aux_cache["packages"] = {}
582 owners = aux_cache.get("owners")
583 if owners is not None:
584 if not isinstance(owners, dict):
586 elif "version" not in owners:
588 elif owners["version"] != self._owners_cache_version:
590 elif "base_names" not in owners:
592 elif not isinstance(owners["base_names"], dict):
598 "version" : self._owners_cache_version
600 aux_cache["owners"] = owners
602 aux_cache["modified"] = set()
603 self._aux_cache_obj = aux_cache
605 def aux_get(self, mycpv, wants, myrepo = None):
606 """This automatically caches selected keys that are frequently needed
607 by emerge for dependency calculations. The cached metadata is
608 considered valid if the mtime of the package directory has not changed
609 since the data was cached. The cache is stored in a pickled dict
610 object with the following format:
612 {version:"1", "packages":{cpv1:(mtime,{k1,v1, k2,v2, ...}), cpv2...}}
614 If an error occurs while loading the cache pickle or the version is
615 unrecognized, the cache will simple be recreated from scratch (it is
616 completely disposable).
618 cache_these_wants = self._aux_cache_keys.intersection(wants)
620 if self._aux_cache_keys_re.match(x) is not None:
621 cache_these_wants.add(x)
623 if not cache_these_wants:
624 return self._aux_get(mycpv, wants)
626 cache_these = set(self._aux_cache_keys)
627 cache_these.update(cache_these_wants)
629 mydir = self.getpath(mycpv)
632 mydir_stat = os.stat(mydir)
634 if e.errno != errno.ENOENT:
636 raise KeyError(mycpv)
637 mydir_mtime = mydir_stat[stat.ST_MTIME]
638 pkg_data = self._aux_cache["packages"].get(mycpv)
639 pull_me = cache_these.union(wants)
640 mydata = {"_mtime_" : mydir_mtime}
642 cache_incomplete = False
645 if pkg_data is not None:
646 if not isinstance(pkg_data, tuple) or len(pkg_data) != 2:
649 cache_mtime, metadata = pkg_data
650 if not isinstance(cache_mtime, (long, int)) or \
651 not isinstance(metadata, dict):
655 cache_mtime, metadata = pkg_data
656 cache_valid = cache_mtime == mydir_mtime
658 # Migrate old metadata to unicode.
659 for k, v in metadata.items():
660 metadata[k] = _unicode_decode(v,
661 encoding=_encodings['repo.content'], errors='replace')
663 mydata.update(metadata)
664 pull_me.difference_update(mydata)
667 # pull any needed data and cache it
668 aux_keys = list(pull_me)
669 for k, v in zip(aux_keys,
670 self._aux_get(mycpv, aux_keys, st=mydir_stat)):
672 if not cache_valid or cache_these.difference(metadata):
674 if cache_valid and metadata:
675 cache_data.update(metadata)
676 for aux_key in cache_these:
677 cache_data[aux_key] = mydata[aux_key]
678 self._aux_cache["packages"][mycpv] = (mydir_mtime, cache_data)
679 self._aux_cache["modified"].add(mycpv)
681 if _slot_re.match(mydata['SLOT']) is None:
682 # Empty or invalid slot triggers InvalidAtom exceptions when
683 # generating slot atoms for packages, so translate it to '0' here.
684 mydata['SLOT'] = _unicode_decode('0')
686 return [mydata[x] for x in wants]
688 def _aux_get(self, mycpv, wants, st=None):
689 mydir = self.getpath(mycpv)
694 if e.errno == errno.ENOENT:
695 raise KeyError(mycpv)
696 elif e.errno == PermissionDenied.errno:
697 raise PermissionDenied(mydir)
700 if not stat.S_ISDIR(st.st_mode):
701 raise KeyError(mycpv)
705 results.append(st[stat.ST_MTIME])
709 _unicode_encode(os.path.join(mydir, x),
710 encoding=_encodings['fs'], errors='strict'),
711 mode='r', encoding=_encodings['repo.content'],
717 # Preserve \n for metadata that is known to
718 # contain multiple lines.
719 if self._aux_multi_line_re.match(x) is None:
720 myd = " ".join(myd.split())
722 myd = _unicode_decode('')
723 if x == "EAPI" and not myd:
724 results.append(_unicode_decode('0'))
729 def aux_update(self, cpv, values):
730 mylink = self._dblink(cpv)
731 if not mylink.exists():
733 self._bump_mtime(cpv)
734 self._clear_pkg_cache(mylink)
735 for k, v in values.items():
740 os.unlink(os.path.join(self.getpath(cpv), k))
741 except EnvironmentError:
743 self._bump_mtime(cpv)
745 def counter_tick(self, myroot=None, mycpv=None):
747 @param myroot: ignored, self._eroot is used instead
749 return self.counter_tick_core(incrementing=1, mycpv=mycpv)
751 def get_counter_tick_core(self, myroot=None, mycpv=None):
753 Use this method to retrieve the counter instead
754 of having to trust the value of a global counter
755 file that can lead to invalid COUNTER
756 generation. When cache is valid, the package COUNTER
757 files are not read and we rely on the timestamp of
758 the package directory to validate cache. The stat
759 calls should only take a short time, so performance
760 is sufficient without having to rely on a potentially
761 corrupt global counter file.
763 The global counter file located at
764 $CACHE_PATH/counter serves to record the
765 counter of the last installed package and
766 it also corresponds to the total number of
767 installation actions that have occurred in
768 the history of this package database.
770 @param myroot: ignored, self._eroot is used instead
777 _unicode_encode(self._counter_path,
778 encoding=_encodings['fs'], errors='strict'),
779 mode='r', encoding=_encodings['repo.content'],
781 except EnvironmentError as e:
782 new_vdb = not bool(self.cpv_all())
784 writemsg(_("!!! Unable to read COUNTER file: '%s'\n") % \
785 self._counter_path, noiselevel=-1)
786 writemsg("!!! %s\n" % str(e), noiselevel=-1)
791 counter = long(cfile.readline().strip())
794 except (OverflowError, ValueError) as e:
795 writemsg(_("!!! COUNTER file is corrupt: '%s'\n") % \
796 self._counter_path, noiselevel=-1)
797 writemsg("!!! %s\n" % str(e), noiselevel=-1)
800 if self._cached_counter == counter:
801 max_counter = counter
803 # We must ensure that we return a counter
804 # value that is at least as large as the
805 # highest one from the installed packages,
806 # since having a corrupt value that is too low
807 # can trigger incorrect AUTOCLEAN behavior due
808 # to newly installed packages having lower
809 # COUNTERs than the previous version in the
811 max_counter = counter
812 for cpv in self.cpv_all():
814 pkg_counter = int(self.aux_get(cpv, ["COUNTER"])[0])
815 except (KeyError, OverflowError, ValueError):
817 if pkg_counter > max_counter:
818 max_counter = pkg_counter
820 if counter < 0 and not new_vdb:
821 writemsg(_("!!! Initializing COUNTER to " \
822 "value of %d\n") % max_counter, noiselevel=-1)
824 return max_counter + 1
826 def counter_tick_core(self, myroot=None, incrementing=1, mycpv=None):
828 This method will grab the next COUNTER value and record it back
829 to the global file. Note that every package install must have
830 a unique counter, since a slotmove update can move two packages
831 into the same SLOT and in that case it's important that both
832 packages have different COUNTER metadata.
834 @param myroot: ignored, self._eroot is used instead
835 @param mycpv: ignored
837 @returns: new counter value
843 counter = self.get_counter_tick_core() - 1
847 # update new global counter file
849 write_atomic(self._counter_path, str(counter))
850 except InvalidLocation:
851 self.settings._init_dirs()
852 write_atomic(self._counter_path, str(counter))
853 self._cached_counter = counter
855 # Since we hold a lock, this is a good opportunity
856 # to flush the cache. Note that this will only
857 # flush the cache periodically in the main process
858 # when _aux_cache_threshold is exceeded.
def _dblink(self, cpv):
	"""Construct a dblink for cpv, bound to this database's settings
	and vartree."""
	cat, pf = catsplit(cpv)
	return dblink(cat, pf, settings=self.settings,
		vartree=self.vartree, treetype="vartree")
870 def removeFromContents(self, pkg, paths, relative_paths=True):
872 @param pkg: cpv for an installed package
874 @param paths: paths of files to remove from contents
875 @type paths: iterable
877 if not hasattr(pkg, "getcontents"):
878 pkg = self._dblink(pkg)
879 root = self.settings['ROOT']
880 root_len = len(root) - 1
881 new_contents = pkg.getcontents().copy()
884 for filename in paths:
885 filename = _unicode_decode(filename,
886 encoding=_encodings['content'], errors='strict')
887 filename = normalize_path(filename)
889 relative_filename = filename
891 relative_filename = filename[root_len:]
892 contents_key = pkg._match_contents(relative_filename)
894 del new_contents[contents_key]
898 self._bump_mtime(pkg.mycpv)
899 f = atomic_ofstream(os.path.join(pkg.dbdir, "CONTENTS"))
900 write_contents(new_contents, root, f)
902 self._bump_mtime(pkg.mycpv)
903 pkg._clear_contents_cache()
905 class _owners_cache(object):
907 This class maintains an hash table that serves to index package
908 contents by mapping the basename of file to a list of possible
909 packages that own it. This is used to optimize owner lookups
910 by narrowing the search down to a smaller number of packages.
913 from hashlib import md5 as _new_hash
915 from md5 import new as _new_hash
918 _hex_chars = int(_hash_bits / 4)
920 def __init__(self, vardb):
924 eroot_len = len(self._vardb._eroot)
925 contents = self._vardb._dblink(cpv).getcontents()
926 pkg_hash = self._hash_pkg(cpv)
928 # Empty path is a code used to represent empty contents.
929 self._add_path("", pkg_hash)
932 self._add_path(x[eroot_len:], pkg_hash)
934 self._vardb._aux_cache["modified"].add(cpv)
936 def _add_path(self, path, pkg_hash):
938 Empty path is a code that represents empty contents.
941 name = os.path.basename(path.rstrip(os.path.sep))
946 name_hash = self._hash_str(name)
947 base_names = self._vardb._aux_cache["owners"]["base_names"]
948 pkgs = base_names.get(name_hash)
951 base_names[name_hash] = pkgs
952 pkgs[pkg_hash] = None
954 def _hash_str(self, s):
956 # Always use a constant utf_8 encoding here, since
957 # the "default" encoding can change.
958 h.update(_unicode_encode(s,
959 encoding=_encodings['repo.content'],
960 errors='backslashreplace'))
962 h = h[-self._hex_chars:]
966 def _hash_pkg(self, cpv):
967 counter, mtime = self._vardb.aux_get(
968 cpv, ["COUNTER", "_mtime_"])
970 counter = int(counter)
973 return (cpv, counter, mtime)
975 class _owners_db(object):
977 def __init__(self, vardb):
984 owners_cache = vardbapi._owners_cache(self._vardb)
985 cached_hashes = set()
986 base_names = self._vardb._aux_cache["owners"]["base_names"]
988 # Take inventory of all cached package hashes.
989 for name, hash_values in list(base_names.items()):
990 if not isinstance(hash_values, dict):
993 cached_hashes.update(hash_values)
995 # Create sets of valid package hashes and uncached packages.
996 uncached_pkgs = set()
997 hash_pkg = owners_cache._hash_pkg
998 valid_pkg_hashes = set()
999 for cpv in self._vardb.cpv_all():
1000 hash_value = hash_pkg(cpv)
1001 valid_pkg_hashes.add(hash_value)
1002 if hash_value not in cached_hashes:
1003 uncached_pkgs.add(cpv)
1005 # Cache any missing packages.
1006 for cpv in uncached_pkgs:
1007 owners_cache.add(cpv)
1009 # Delete any stale cache.
1010 stale_hashes = cached_hashes.difference(valid_pkg_hashes)
1012 for base_name_hash, bucket in list(base_names.items()):
1013 for hash_value in stale_hashes.intersection(bucket):
1014 del bucket[hash_value]
1016 del base_names[base_name_hash]
1020 def get_owners(self, path_iter):
1022 @return the owners as a dblink -> set(files) mapping.
1025 for owner, f in self.iter_owners(path_iter):
1026 owned_files = owners.get(owner)
1027 if owned_files is None:
1029 owners[owner] = owned_files
1033 def getFileOwnerMap(self, path_iter):
1034 owners = self.get_owners(path_iter)
1036 for pkg_dblink, files in owners.items():
1038 owner_set = file_owners.get(f)
1039 if owner_set is None:
1041 file_owners[f] = owner_set
1042 owner_set.add(pkg_dblink)
1045 def iter_owners(self, path_iter):
1047 Iterate over tuples of (dblink, path). In order to avoid
1048 consuming too many resources for too much time, resources
1049 are only allocated for the duration of a given iter_owners()
1050 call. Therefore, to maximize reuse of resources when searching
1051 for multiple files, it's best to search for them all in a single
1055 if not isinstance(path_iter, list):
1056 path_iter = list(path_iter)
1057 owners_cache = self._populate()
1060 hash_pkg = owners_cache._hash_pkg
1061 hash_str = owners_cache._hash_str
1062 base_names = self._vardb._aux_cache["owners"]["base_names"]
1067 x = dblink_cache.get(cpv)
1069 if len(dblink_cache) > 20:
1070 # Ensure that we don't run out of memory.
1071 raise StopIteration()
1072 x = self._vardb._dblink(cpv)
1073 dblink_cache[cpv] = x
1078 path = path_iter.pop()
1079 is_basename = os.sep != path[:1]
1083 name = os.path.basename(path.rstrip(os.path.sep))
1088 name_hash = hash_str(name)
1089 pkgs = base_names.get(name_hash)
1091 if pkgs is not None:
1093 for hash_value in pkgs:
1094 if not isinstance(hash_value, tuple) or \
1095 len(hash_value) != 3:
1097 cpv, counter, mtime = hash_value
1098 if not isinstance(cpv, basestring):
1101 current_hash = hash_pkg(cpv)
1105 if current_hash != hash_value:
1109 for p in dblink(cpv).getcontents():
1110 if os.path.basename(p) == name:
1111 owners.append((cpv, p[len(root):]))
1113 if dblink(cpv).isowner(path):
1114 owners.append((cpv, path))
1116 except StopIteration:
1117 path_iter.append(path)
1119 dblink_cache.clear()
1121 for x in self._iter_owners_low_mem(path_iter):
1125 for cpv, p in owners:
1126 yield (dblink(cpv), p)
1128 def _iter_owners_low_mem(self, path_list):
1130 This implemention will make a short-lived dblink instance (and
1131 parse CONTENTS) for every single installed package. This is
1132 slower and but uses less memory than the method which uses the
1140 for path in path_list:
1141 is_basename = os.sep != path[:1]
1145 name = os.path.basename(path.rstrip(os.path.sep))
1146 path_info_list.append((path, name, is_basename))
1148 root = self._vardb._eroot
1149 for cpv in self._vardb.cpv_all():
1150 dblnk = self._vardb._dblink(cpv)
1152 for path, name, is_basename in path_info_list:
1154 for p in dblnk.getcontents():
1155 if os.path.basename(p) == name:
1156 yield dblnk, p[len(root):]
1158 if dblnk.isowner(path):
1161 class vartree(object):
1162 "this tree will scan a var/db/pkg database located at root (passed to init)"
1163 def __init__(self, root=None, virtual=None, categories=None,
1166 if settings is None:
1167 settings = portage.settings
1169 if root is not None and root != settings['ROOT']:
1170 warnings.warn("The 'root' parameter of the "
1171 "portage.dbapi.vartree.vartree"
1172 " constructor is now unused. Use "
1173 "settings['ROOT'] instead.",
1174 DeprecationWarning, stacklevel=2)
1176 self.settings = settings
1177 self.dbapi = vardbapi(settings=settings, vartree=self)
1182 warnings.warn("The root attribute of "
1183 "portage.dbapi.vartree.vartree"
1184 " is deprecated. Use "
1185 "settings['ROOT'] instead.",
1186 DeprecationWarning, stacklevel=2)
1187 return self.settings['ROOT']
def getpath(self, mykey, filename=None):
	"""Delegate path construction to the underlying vardbapi."""
	return self.dbapi.getpath(mykey, filename=filename)
1192 def zap(self, mycpv):
1195 def inject(self, mycpv):
1198 def get_provide(self, mycpv):
1202 mylines, myuse = self.dbapi.aux_get(mycpv, ["PROVIDE", "USE"])
1204 myuse = myuse.split()
1205 mylines = use_reduce(mylines, uselist=myuse, flat=True)
1206 for myprovide in mylines:
1207 mys = catpkgsplit(myprovide)
1209 mys = myprovide.split("/")
1210 myprovides += [mys[0] + "/" + mys[1]]
1212 except SystemExit as e:
1214 except Exception as e:
1215 mydir = self.dbapi.getpath(mycpv)
1216 writemsg(_("\nParse Error reading PROVIDE and USE in '%s'\n") % mydir,
1219 writemsg(_("Possibly Invalid: '%s'\n") % str(mylines),
1221 writemsg(_("Exception: %s\n\n") % str(e), noiselevel=-1)
1224 def get_all_provides(self):
1226 for node in self.getallcpv():
1227 for mykey in self.get_provide(node):
1228 if mykey in myprovides:
1229 myprovides[mykey] += [node]
1231 myprovides[mykey] = [node]
1234 def dep_bestmatch(self, mydep, use_cache=1):
1235 "compatibility method -- all matches, not just visible ones"
1236 #mymatch=best(match(dep_expand(mydep,self.dbapi),self.dbapi))
1237 mymatch = best(self.dbapi.match(
1238 dep_expand(mydep, mydb=self.dbapi, settings=self.settings),
1239 use_cache=use_cache))
1245 def dep_match(self, mydep, use_cache=1):
1246 "compatibility method -- we want to see all matches, not just visible ones"
1247 #mymatch = match(mydep,self.dbapi)
1248 mymatch = self.dbapi.match(mydep, use_cache=use_cache)
1254 def exists_specific(self, cpv):
1255 return self.dbapi.cpv_exists(cpv)
1257 def getallcpv(self):
1258 """temporary function, probably to be renamed --- Gets a list of all
1259 category/package-versions installed on the system."""
1260 return self.dbapi.cpv_all()
1262 def getallnodes(self):
1263 """new behavior: these are all *unmasked* nodes. There may or may not be available
1264 masked package for nodes in this nodes list."""
1265 return self.dbapi.cp_all()
1267 def getebuildpath(self, fullpackage):
1268 cat, package = catsplit(fullpackage)
1269 return self.getpath(fullpackage, filename=package+".ebuild")
1271 def getslot(self, mycatpkg):
1272 "Get a slot for a catpkg; assume it exists."
1274 return self.dbapi.aux_get(mycatpkg, ["SLOT"])[0]
1281 class dblink(object):
1283 This class provides an interface to the installed package database
1284 At present this is implemented as a text backend in /var/db/pkg.
# Matches CONTENTS paths that need normalize_path(): double slashes, a
# missing leading slash, a trailing "./", or "." / ".." components.
1288 _normalize_needed = re.compile(r'//|^[^/]|./$|(^|/)\.\.?(/|$)')
# Parses one CONTENTS line into named groups:
#   dir -> "dev"/"dir"/"fif" entries:  <type> <path>
#   obj -> "obj" entries:              obj <path> <md5> <mtime>
#   sym -> "sym" entries:              sym <src> -> <dest> <mtime>,
#          where <oldsym> matches the legacy stat-tuple mtime format
#          (see the bug #351814 note in getcontents()).
1290 _contents_re = re.compile(r'^(' + \
1291 r'(?P<dir>(dev|dir|fif) (.+))|' + \
1292 r'(?P<obj>(obj) (.+) (\S+) (\d+))|' + \
1293 r'(?P<sym>(sym) (.+) -> (.+) ((\d+)|(?P<oldsym>(' + \
1294 r'\(\d+, \d+L, \d+L, \d+, \d+, \d+, \d+L, \d+, (\d+), \d+\)))))' + \
# NOTE(review): interior lines are elided in this listing (gaps in the
# embedded numbering); code text is preserved byte-for-byte.
1298 def __init__(self, cat, pkg, myroot=None, settings=None, treetype=None,
1299 vartree=None, blockers=None, scheduler=None, pipe=None):
1301 Creates a DBlink object for a given CPV.
1302 The given CPV may not be present in the database already.
1304 @param cat: Category
1306 @param pkg: Package (PV)
1308 @param myroot: ignored, settings['ROOT'] is used instead
1309 @type myroot: String (Path)
1310 @param settings: Typically portage.settings
1311 @type settings: portage.config
1312 @param treetype: one of ['porttree','bintree','vartree']
1313 @type treetype: String
1314 @param vartree: an instance of vartree corresponding to myroot.
1315 @type vartree: vartree
# settings is mandatory; there is no global fallback here.
1318 if settings is None:
1319 raise TypeError("settings argument is required")
1321 mysettings = settings
1322 self._eroot = mysettings['EROOT']
# mysplit becomes [cat/pn, version, revision] (cpv split with the
# category re-joined onto the package name).
1325 self.mycpv = self.cat + "/" + self.pkg
1326 self.mysplit = list(catpkgsplit(self.mycpv)[1:])
1327 self.mysplit[0] = "%s/%s" % (self.cat, self.mysplit[0])
1328 self.treetype = treetype
# Fall back to the global vartree for this EROOT when none was passed
# (the guarding condition is elided in this listing).
1330 vartree = portage.db[self._eroot]["vartree"]
1331 self.vartree = vartree
1332 self._blockers = blockers
1333 self._scheduler = scheduler
# VDB layout: <EROOT>/var/db/pkg/<cat>/<pkg>; -MERGING- is the
# temporary entry used while a merge is in progress.
1334 self.dbroot = normalize_path(os.path.join(self._eroot, VDB_PATH))
1335 self.dbcatdir = self.dbroot+"/"+cat
1336 self.dbpkgdir = self.dbcatdir+"/"+pkg
1337 self.dbtmpdir = self.dbcatdir+"/-MERGING-"+pkg
1338 self.dbdir = self.dbpkgdir
1339 self.settings = mysettings
1340 self._verbose = self.settings.get("PORTAGE_VERBOSE") == "1"
1342 self.myroot = self.settings['ROOT']
1343 self._installed_instance = None
# CONTENTS caches; invalidated via _clear_contents_cache().
1344 self.contentscache = None
1345 self._contents_inodes = None
1346 self._contents_basenames = None
1347 self._linkmap_broken = False
1348 self._md5_merge_map = {}
# Identity for __hash__/__eq__: (EROOT, cpv).
1349 self._hash_key = (self._eroot, self.mycpv)
1350 self._protect_obj = None
1354 return hash(self._hash_key)
def __eq__(self, other):
	"""Two dblink objects compare equal when the other object is also a
	dblink and both carry the same (EROOT, cpv) hash key."""
	if not isinstance(other, dblink):
		return False
	return self._hash_key == other._hash_key
def _get_protect_obj(self):
	"""Return the lazily-built ConfigProtect helper, constructing it on
	first use from the CONFIG_PROTECT / CONFIG_PROTECT_MASK settings."""
	if self._protect_obj is None:
		protect = portage.util.shlex_split(
			self.settings.get("CONFIG_PROTECT", ""))
		mask = portage.util.shlex_split(
			self.settings.get("CONFIG_PROTECT_MASK", ""))
		self._protect_obj = ConfigProtect(self._eroot, protect, mask)

	return self._protect_obj
def isprotected(self, obj):
	"""Return whether *obj* is covered by CONFIG_PROTECT, delegating to
	the cached ConfigProtect helper."""
	protect_obj = self._get_protect_obj()
	return protect_obj.isprotected(obj)
def updateprotect(self):
	"""Refresh the cached ConfigProtect helper's state (delegates to its
	updateprotect() method)."""
	protect_obj = self._get_protect_obj()
	protect_obj.updateprotect()
1378 self.vartree.dbapi.lock()
1381 self.vartree.dbapi.unlock()
1384 "return path to location of db information (for >>> informational display)"
1388 "does the db entry exist? boolean."
1389 return os.path.exists(self.dbdir)
1393 Remove this entry from the database
1395 if not os.path.exists(self.dbdir):
1398 # Check validity of self.dbdir before attempting to remove it.
1399 if not self.dbdir.startswith(self.dbroot):
1400 writemsg(_("portage.dblink.delete(): invalid dbdir: %s\n") % \
1401 self.dbdir, noiselevel=-1)
1404 shutil.rmtree(self.dbdir)
1405 # If empty, remove parent category directory.
1407 os.rmdir(os.path.dirname(self.dbdir))
1410 self.vartree.dbapi._remove(self)
# NOTE(review): trailing lines of this method (1420-1422) are elided in this
# listing; presumably cache invalidation follows — confirm upstream.
1412 def clearcontents(self):
1414 For a given db entry (self), erase the CONTENTS values.
1418 if os.path.exists(self.dbdir+"/CONTENTS"):
1419 os.unlink(self.dbdir+"/CONTENTS")
def _clear_contents_cache(self):
	"""Drop every cached view of CONTENTS so that the next
	getcontents() call re-reads the file from disk."""
	self._contents_basenames = None
	self._contents_inodes = None
	self.contentscache = None
# NOTE(review): interior lines are elided in this listing (gaps in the
# embedded numbering — e.g. the try/else/continue scaffolding); code text
# is preserved byte-for-byte and only comments are added.
1428 def getcontents(self):
1430 Get the installed files of a given package (aka what that package installed)
# Returns the cached dict when a previous call already parsed CONTENTS.
1432 contents_file = os.path.join(self.dbdir, "CONTENTS")
1433 if self.contentscache is not None:
1434 return self.contentscache
1437 myc = io.open(_unicode_encode(contents_file,
1438 encoding=_encodings['fs'], errors='strict'),
1439 mode='r', encoding=_encodings['repo.content'],
# A missing CONTENTS file is tolerated (ENOENT); other I/O errors
# propagate (the re-raise line is elided here).
1441 except EnvironmentError as e:
1442 if e.errno != errno.ENOENT:
1445 self.contentscache = pkgfiles
1447 mylines = myc.readlines()
# Hoist regex/group lookups out of the per-line loop.
1450 normalize_needed = self._normalize_needed
1451 contents_re = self._contents_re
1452 obj_index = contents_re.groupindex['obj']
1453 dir_index = contents_re.groupindex['dir']
1454 sym_index = contents_re.groupindex['sym']
1455 # The old symlink format may exist on systems that have packages
1456 # which were installed many years ago (see bug #351814).
1457 oldsym_index = contents_re.groupindex['oldsym']
1458 # CONTENTS files already contain EPREFIX
1459 myroot = self.settings['ROOT']
1460 if myroot == os.path.sep:
1462 # used to generate parent dir entries
1463 dir_entry = (_unicode_decode("dir"),)
1464 eroot_split_len = len(self.settings["EROOT"].split(os.sep)) - 1
1467 for pos, line in enumerate(mylines):
1468 if null_byte in line:
1469 # Null bytes are a common indication of corruption.
1470 errors.append((pos + 1, _("Null byte found in CONTENTS entry")))
1472 line = line.rstrip("\n")
1473 m = contents_re.match(line)
1475 errors.append((pos + 1, _("Unrecognized CONTENTS entry")))
# Decode the matched entry into a per-type data tuple.
1478 if m.group(obj_index) is not None:
1480 #format: type, mtime, md5sum
1481 data = (m.group(base+1), m.group(base+4), m.group(base+3))
1482 elif m.group(dir_index) is not None:
1485 data = (m.group(base+1),)
1486 elif m.group(sym_index) is not None:
# Legacy entries encode mtime inside a stat tuple; pick the right
# capture group accordingly.
1488 if m.group(oldsym_index) is None:
1489 mtime = m.group(base+5)
1491 mtime = m.group(base+8)
1492 #format: type, mtime, dest
1493 data = (m.group(base+1), mtime, m.group(base+3))
1495 # This won't happen as long the regular expression
1496 # is written to only match valid entries.
1497 raise AssertionError(_("required group not found " + \
1498 "in CONTENTS entry: '%s'") % line)
1500 path = m.group(base+2)
1501 if normalize_needed.search(path) is not None:
1502 path = normalize_path(path)
1503 if not path.startswith(os.path.sep):
1504 path = os.path.sep + path
1506 if myroot is not None:
1507 path = os.path.join(myroot, path.lstrip(os.path.sep))
1509 # Implicitly add parent directories, since we can't necessarily
1510 # assume that they are explicitly listed in CONTENTS, and it's
1511 # useful for callers if they can rely on parent directory entries
1512 # being generated here (crucial for things like dblink.isowner()).
1513 path_split = path.split(os.sep)
1515 while len(path_split) > eroot_split_len:
1516 parent = os.sep.join(path_split)
1517 if parent in pkgfiles:
1519 pkgfiles[parent] = dir_entry
1522 pkgfiles[path] = data
# Report all accumulated parse errors at once, then cache the result.
1525 writemsg(_("!!! Parse error in '%s'\n") % contents_file, noiselevel=-1)
1526 for pos, e in errors:
1527 writemsg(_("!!! line %d: %s\n") % (pos, e), noiselevel=-1)
1528 self.contentscache = pkgfiles
# NOTE(review): the try/finally scaffolding of this method is elided in this
# listing (gaps in the embedded numbering); code text is preserved
# byte-for-byte and only comments are added.
1531 def _prune_plib_registry(self, unmerge=False,
1532 needed=None, preserve_paths=None):
1533 # remove preserved libraries that don't have any consumers left
# Only runs when the linkmap is usable and preserve-libs support is
# enabled (both registry objects non-None, linkmap not broken).
1534 if not (self._linkmap_broken or
1535 self.vartree.dbapi._linkmap is None or
1536 self.vartree.dbapi._plib_registry is None):
1537 self.vartree.dbapi._fs_lock()
1538 plib_registry = self.vartree.dbapi._plib_registry
1539 plib_registry.lock()
1541 plib_registry.load()
1543 unmerge_with_replacement = \
1544 unmerge and preserve_paths is not None
1545 if unmerge_with_replacement:
1546 # If self.mycpv is about to be unmerged and we
1547 # have a replacement package, we want to exclude
1548 # the irrelevant NEEDED data that belongs to
1549 # files which are being unmerged now.
1550 exclude_pkgs = (self.mycpv,)
1554 self._linkmap_rebuild(exclude_pkgs=exclude_pkgs,
1555 include_file=needed, preserve_paths=preserve_paths)
# With no replacement instance, compute which of our libs must
# survive the unmerge, re-register them, and drop them from our
# CONTENTS so _unmerge_pkgfiles() leaves them alone.
1558 unmerge_preserve = None
1559 if not unmerge_with_replacement:
1560 unmerge_preserve = \
1561 self._find_libs_to_preserve(unmerge=True)
1562 counter = self.vartree.dbapi.cpv_counter(self.mycpv)
1563 plib_registry.unregister(self.mycpv,
1564 self.settings["SLOT"], counter)
1565 if unmerge_preserve:
1566 for path in sorted(unmerge_preserve):
1567 contents_key = self._match_contents(path)
1568 if not contents_key:
1570 obj_type = self.getcontents()[contents_key][0]
1571 self._display_merge(_(">>> needed %s %s\n") % \
1572 (obj_type, contents_key), noiselevel=-1)
1573 plib_registry.register(self.mycpv,
1574 self.settings["SLOT"], counter, unmerge_preserve)
1575 # Remove the preserved files from our contents
1576 # so that they won't be unmerged.
1577 self.vartree.dbapi.removeFromContents(self,
# Garbage-collect previously preserved libs that no longer have
# consumers, and prune their owners' CONTENTS under the db lock.
1580 unmerge_no_replacement = \
1581 unmerge and not unmerge_with_replacement
1582 cpv_lib_map = self._find_unused_preserved_libs(
1583 unmerge_no_replacement)
1585 self._remove_preserved_libs(cpv_lib_map)
1586 self.vartree.dbapi.lock()
1588 for cpv, removed in cpv_lib_map.items():
1589 if not self.vartree.dbapi.cpv_exists(cpv):
1591 self.vartree.dbapi.removeFromContents(cpv, removed)
1593 self.vartree.dbapi.unlock()
1595 plib_registry.store()
1597 plib_registry.unlock()
1598 self.vartree.dbapi._fs_unlock()
# NOTE(review): this method is heavily elided in this listing (gaps in the
# embedded numbering — try/finally scaffolding, several branches); code text
# is preserved byte-for-byte and only comments are added.
1600 def unmerge(self, pkgfiles=None, trimworld=None, cleanup=True,
1601 ldpath_mtimes=None, others_in_slot=None, needed=None,
1602 preserve_paths=None):
1605 Unmerges a given package (CPV)
1610 @param pkgfiles: files to unmerge (generally self.getcontents() )
1611 @type pkgfiles: Dictionary
1612 @param trimworld: Unused
1613 @type trimworld: Boolean
1614 @param cleanup: cleanup to pass to doebuild (see doebuild)
1615 @type cleanup: Boolean
1616 @param ldpath_mtimes: mtimes to pass to env_update (see env_update)
1617 @type ldpath_mtimes: Dictionary
1618 @param others_in_slot: all dblink instances in this slot, excluding self
1619 @type others_in_slot: list
1620 @param needed: Filename containing libraries needed after unmerge.
1621 @type needed: String
1622 @param preserve_paths: Libraries preserved by a package instance that
1623 is currently being merged. They need to be explicitly passed to the
1624 LinkageMap, since they are not registered in the
1625 PreservedLibsRegistry yet.
1626 @type preserve_paths: set
1629 1. os.EX_OK if everything went well.
1630 2. return code of the failed phase (for prerm, postrm, cleanrm)
# trimworld is kept for API compatibility only.
1633 if trimworld is not None:
1634 warnings.warn("The trimworld parameter of the " + \
1635 "portage.dbapi.vartree.dblink.unmerge()" + \
1636 " method is now unused.",
1637 DeprecationWarning, stacklevel=2)
1640 log_path = self.settings.get("PORTAGE_LOG_FILE")
1641 if self._scheduler is None:
1642 # We create a scheduler instance and use it to
1643 # log unmerge output separately from merge output.
1644 self._scheduler = PollScheduler().sched_iface
# Resolve the effective PORTAGE_BACKGROUND for the unmerge phases,
# honoring the PORTAGE_BACKGROUND_UNMERGE override.
1645 if self.settings.get("PORTAGE_BACKGROUND") == "subprocess":
1646 if self.settings.get("PORTAGE_BACKGROUND_UNMERGE") == "1":
1647 self.settings["PORTAGE_BACKGROUND"] = "1"
1648 self.settings.backup_changes("PORTAGE_BACKGROUND")
1650 elif self.settings.get("PORTAGE_BACKGROUND_UNMERGE") == "0":
1651 self.settings["PORTAGE_BACKGROUND"] = "0"
1652 self.settings.backup_changes("PORTAGE_BACKGROUND")
1653 elif self.settings.get("PORTAGE_BACKGROUND") == "1":
1656 self.vartree.dbapi._bump_mtime(self.mycpv)
1657 showMessage = self._display_merge
# Invalidate the dbapi's category cache before we mutate the VDB.
1658 if self.vartree.dbapi._categories is not None:
1659 self.vartree.dbapi._categories = None
1660 # When others_in_slot is supplied, the security check has already been
1661 # done for this slot, so it shouldn't be repeated until the next
1662 # replacement or unmerge operation.
1663 if others_in_slot is None:
1664 slot = self.vartree.dbapi.aux_get(self.mycpv, ["SLOT"])[0]
1665 slot_matches = self.vartree.dbapi.match(
1666 "%s:%s" % (portage.cpv_getkey(self.mycpv), slot))
1668 for cur_cpv in slot_matches:
1669 if cur_cpv == self.mycpv:
1671 others_in_slot.append(dblink(self.cat, catsplit(cur_cpv)[1],
1672 settings=self.settings, vartree=self.vartree,
1673 treetype="vartree", pipe=self._pipe))
1675 retval = self._security_check([self] + others_in_slot)
1679 contents = self.getcontents()
1680 # Now, don't assume that the name of the ebuild is the same as the
1681 # name of the dir; the package may have been moved.
1682 myebuildpath = os.path.join(self.dbdir, self.pkg + ".ebuild")
1684 ebuild_phase = "prerm"
1685 mystuff = os.listdir(self.dbdir)
1687 if x.endswith(".ebuild"):
1688 if x[:-7] != self.pkg:
1689 # Clean up after vardbapi.move_ent() breakage in
1690 # portage versions before 2.1.2
1691 os.rename(os.path.join(self.dbdir, x), myebuildpath)
1692 write_atomic(os.path.join(self.dbdir, "PF"), self.pkg+"\n")
1695 if self.mycpv != self.settings.mycpv or \
1696 "EAPI" not in self.settings.configdict["pkg"]:
1697 # We avoid a redundant setcpv call here when
1698 # the caller has already taken care of it.
1699 self.settings.setcpv(self.mycpv, mydb=self.vartree.dbapi)
# An unsupported EAPI disables the ebuild phases below, but the
# unmerge of files still proceeds.
1701 eapi_unsupported = False
1703 doebuild_environment(myebuildpath, "prerm",
1704 settings=self.settings, db=self.vartree.dbapi)
1705 except UnsupportedAPIException as e:
1706 eapi_unsupported = e
1708 self._prune_plib_registry(unmerge=True, needed=needed,
1709 preserve_paths=preserve_paths)
1711 builddir_lock = None
1712 scheduler = self._scheduler
1715 # Only create builddir_lock if the caller
1716 # has not already acquired the lock.
1717 if "PORTAGE_BUILDIR_LOCKED" not in self.settings:
1718 builddir_lock = EbuildBuildDir(
1719 scheduler=scheduler,
1720 settings=self.settings)
1721 builddir_lock.lock()
1722 prepare_build_dirs(settings=self.settings, cleanup=True)
1723 log_path = self.settings.get("PORTAGE_LOG_FILE")
1725 # Log the error after PORTAGE_LOG_FILE is initialized
1726 # by prepare_build_dirs above.
1727 if eapi_unsupported:
1728 # Sometimes this happens due to corruption of the EAPI file.
1730 showMessage(_("!!! FAILED prerm: %s\n") % \
1731 os.path.join(self.dbdir, "EAPI"),
1732 level=logging.ERROR, noiselevel=-1)
1733 showMessage(_unicode_decode("%s\n") % (eapi_unsupported,),
1734 level=logging.ERROR, noiselevel=-1)
1735 elif os.path.isfile(myebuildpath):
# Run pkg_prerm().
1736 phase = EbuildPhase(background=background,
1737 phase=ebuild_phase, scheduler=scheduler,
1738 settings=self.settings)
1740 retval = phase.wait()
1742 # XXX: Decide how to handle failures here.
1743 if retval != os.EX_OK:
1745 showMessage(_("!!! FAILED prerm: %s\n") % retval,
1746 level=logging.ERROR, noiselevel=-1)
# Remove the package's files from the live filesystem under the
# vardb filesystem lock, then drop the CONTENTS caches.
1748 self.vartree.dbapi._fs_lock()
1750 self._unmerge_pkgfiles(pkgfiles, others_in_slot)
1752 self.vartree.dbapi._fs_unlock()
1753 self._clear_contents_cache()
1755 if not eapi_unsupported and os.path.isfile(myebuildpath):
# Run pkg_postrm().
1756 ebuild_phase = "postrm"
1757 phase = EbuildPhase(background=background,
1758 phase=ebuild_phase, scheduler=scheduler,
1759 settings=self.settings)
1761 retval = phase.wait()
1763 # XXX: Decide how to handle failures here.
1764 if retval != os.EX_OK:
1766 showMessage(_("!!! FAILED postrm: %s\n") % retval,
1767 level=logging.ERROR, noiselevel=-1)
1770 self.vartree.dbapi._bump_mtime(self.mycpv)
1772 if not eapi_unsupported and os.path.isfile(myebuildpath):
1773 if retval != os.EX_OK:
# Build a detailed eerror message explaining which phase failed
# and how the admin can recover.
1775 msg = _("The '%(ebuild_phase)s' "
1776 "phase of the '%(cpv)s' package "
1777 "has failed with exit value %(retval)s.") % \
1778 {"ebuild_phase":ebuild_phase, "cpv":self.mycpv,
1780 from textwrap import wrap
1781 msg_lines.extend(wrap(msg, 72))
1782 msg_lines.append("")
1784 ebuild_name = os.path.basename(myebuildpath)
1785 ebuild_dir = os.path.dirname(myebuildpath)
1786 msg = _("The problem occurred while executing "
1787 "the ebuild file named '%(ebuild_name)s' "
1788 "located in the '%(ebuild_dir)s' directory. "
1789 "If necessary, manually remove "
1790 "the environment.bz2 file and/or the "
1791 "ebuild file located in that directory.") % \
1792 {"ebuild_name":ebuild_name, "ebuild_dir":ebuild_dir}
1793 msg_lines.extend(wrap(msg, 72))
1794 msg_lines.append("")
1797 "of the environment.bz2 file is "
1798 "preferred since it may allow the "
1799 "removal phases to execute successfully. "
1800 "The ebuild will be "
1801 "sourced and the eclasses "
1802 "from the current portage tree will be used "
1803 "when necessary. Removal of "
1804 "the ebuild file will cause the "
1805 "pkg_prerm() and pkg_postrm() removal "
1806 "phases to be skipped entirely.")
1807 msg_lines.extend(wrap(msg, 72))
1809 self._eerror(ebuild_phase, msg_lines)
1811 self._elog_process(phasefilter=("prerm", "postrm"))
# On success, run the "cleanrm" phase to tear down the build dir.
1813 if retval == os.EX_OK:
1815 doebuild_environment(myebuildpath, "cleanrm",
1816 settings=self.settings, db=self.vartree.dbapi)
1817 except UnsupportedAPIException:
1819 phase = EbuildPhase(background=background,
1820 phase="cleanrm", scheduler=scheduler,
1821 settings=self.settings)
1823 retval = phase.wait()
1825 if builddir_lock is not None:
1826 builddir_lock.unlock()
# Log-file housekeeping (compression/cleanup branches are elided in
# this listing).
1828 if log_path is not None:
1830 if not failures and 'unmerge-logs' not in self.settings.features:
1837 st = os.stat(log_path)
1847 if log_path is not None and os.path.exists(log_path):
1848 # Restore this since it gets lost somewhere above and it
1849 # needs to be set for _display_merge() to be able to log.
1850 # Note that the log isn't necessarily supposed to exist
1851 # since if PORT_LOGDIR is unset then it's a temp file
1852 # so it gets cleaned above.
1853 self.settings["PORTAGE_LOG_FILE"] = log_path
1855 self.settings.pop("PORTAGE_LOG_FILE", None)
# Update ld.so cache / environment after files were removed.
1857 env_update(target_root=self.settings['ROOT'],
1858 prev_mtimes=ldpath_mtimes,
1859 contents=contents, env=self.settings,
1860 writemsg_level=self._display_merge, vardbapi=self.vartree.dbapi)
# NOTE(review): several lines are elided in this listing (presumably the
# early 'return' statements of these guard branches); code text is
# preserved byte-for-byte.
1864 def _display_merge(self, msg, level=0, noiselevel=0):
# Suppress informational output unless PORTAGE_VERBOSE is enabled.
1865 if not self._verbose and noiselevel >= 0 and level < logging.WARN:
# Without a scheduler, fall back to direct writemsg_level output.
1867 if self._scheduler is None:
1868 writemsg_level(msg, level=level, noiselevel=noiselevel)
1871 if self.settings.get("PORTAGE_BACKGROUND") != "subprocess":
1872 log_path = self.settings.get("PORTAGE_LOG_FILE")
1873 background = self.settings.get("PORTAGE_BACKGROUND") == "1"
# Backgrounded with no log file: only warnings and above are shown.
1875 if background and log_path is None:
1876 if level >= logging.WARN:
1877 writemsg_level(msg, level=level, noiselevel=noiselevel)
# Otherwise route the message through the scheduler's output handler
# so it lands in the merge log.
1879 self._scheduler.output(msg,
1880 log_path=log_path, background=background,
1881 level=level, noiselevel=noiselevel)
# NOTE(review): this method is heavily elided in this listing (gaps in the
# embedded numbering — try/except/else scaffolding, continues, several
# variable initializations); code text is preserved byte-for-byte and only
# comments are added.
1883 def _unmerge_pkgfiles(self, pkgfiles, others_in_slot):
1886 Unmerges the contents of a package from the liveFS
1887 Removes the VDB entry for self
1889 @param pkgfiles: typically self.getcontents()
1890 @type pkgfiles: Dictionary { filename: [ 'type', '?', 'md5sum' ] }
1891 @param others_in_slot: all dblink instances in this slot, excluding self
1892 @type others_in_slot: list
1897 perf_md5 = perform_md5
1898 showMessage = self._display_merge
# Default to our own CONTENTS when the caller passes nothing.
1901 showMessage(_("No package files given... Grabbing a set.\n"))
1902 pkgfiles = self.getcontents()
# Build the list of other installed instances in this slot so that
# files they still own are not removed.
1904 if others_in_slot is None:
1906 slot = self.vartree.dbapi.aux_get(self.mycpv, ["SLOT"])[0]
1907 slot_matches = self.vartree.dbapi.match(
1908 "%s:%s" % (portage.cpv_getkey(self.mycpv), slot))
1909 for cur_cpv in slot_matches:
1910 if cur_cpv == self.mycpv:
1912 others_in_slot.append(dblink(self.cat, catsplit(cur_cpv)[1],
1913 settings=self.settings,
1914 vartree=self.vartree, treetype="vartree", pipe=self._pipe))
1916 dest_root = self._eroot
1917 dest_root_len = len(dest_root) - 1
# Config-protect "memory" of previously merged config files; stale
# entries are collected and written back at the end.
1919 cfgfiledict = grabdict(self.vartree.dbapi._conf_mem_file)
# Maps (st_dev, st_ino) of a symlinked dir -> relative paths of
# symlinks that must be preserved (see bug #326685 note below).
1921 protected_symlinks = {}
1923 unmerge_orphans = "unmerge-orphans" in self.settings.features
1924 calc_prelink = "prelink-checksums" in self.settings.features
1927 self.updateprotect()
1928 mykeys = list(pkgfiles)
1932 #process symlinks second-to-last, directories last.
# Errors in these sets are expected/benign during unlink/rmdir and
# are silently tolerated.
1934 ignored_unlink_errnos = (
1935 errno.EBUSY, errno.ENOENT,
1936 errno.ENOTDIR, errno.EISDIR)
1937 ignored_rmdir_errnos = (
1938 errno.EEXIST, errno.ENOTEMPTY,
1939 errno.EBUSY, errno.ENOENT,
1940 errno.ENOTDIR, errno.EISDIR,
1942 modprotect = os.path.join(self._eroot, "lib/modules/")
# Local helper: remove one filesystem object, handling BSD file
# flags and neutralizing suid/sgid hardlinks via chmod(0) first.
1944 def unlink(file_name, lstatobj):
1946 if lstatobj.st_flags != 0:
1947 bsd_chflags.lchflags(file_name, 0)
1948 parent_name = os.path.dirname(file_name)
1949 # Use normal stat/chflags for the parent since we want to
1950 # follow any symlinks to the real parent directory.
1951 pflags = os.stat(parent_name).st_flags
1953 bsd_chflags.chflags(parent_name, 0)
1955 if not stat.S_ISLNK(lstatobj.st_mode):
1956 # Remove permissions to ensure that any hardlinks to
1957 # suid/sgid files are rendered harmless.
1958 os.chmod(file_name, 0)
1959 os.unlink(file_name)
1960 except OSError as ose:
1961 # If the chmod or unlink fails, you are in trouble.
1962 # With Prefix this can be because the file is owned
1963 # by someone else (a screwup by root?), on a normal
1964 # system maybe filesystem corruption. In any case,
1965 # if we backtrace and die here, we leave the system
1966 # in a totally undefined state, hence we just bleed
1967 # like hell and continue to hopefully finish all our
1968 # administrative and pkg_postinst stuff.
1969 self._eerror("postrm",
1970 ["Could not chmod or unlink '%s': %s" % \
1973 if bsd_chflags and pflags != 0:
1974 # Restore the parent flags we saved before unlinking
1975 bsd_chflags.chflags(parent_name, pflags)
# Local helper: one line of "<<< / --- / !!!  <desc>  <type> <path>"
# progress output.
1977 def show_unmerge(zing, desc, file_type, file_name):
1978 showMessage("%s %s %s %s\n" % \
1979 (zing, desc.ljust(8), file_type, file_name))
# Localized descriptions for each skip/keep reason.
1982 unmerge_desc["cfgpro"] = _("cfgpro")
1983 unmerge_desc["replaced"] = _("replaced")
1984 unmerge_desc["!dir"] = _("!dir")
1985 unmerge_desc["!empty"] = _("!empty")
1986 unmerge_desc["!fif"] = _("!fif")
1987 unmerge_desc["!found"] = _("!found")
1988 unmerge_desc["!md5"] = _("!md5")
1989 unmerge_desc["!mtime"] = _("!mtime")
1990 unmerge_desc["!obj"] = _("!obj")
1991 unmerge_desc["!sym"] = _("!sym")
1992 unmerge_desc["!prefix"] = _("!prefix")
1994 real_root = self.settings['ROOT']
1995 real_root_len = len(real_root) - 1
1996 eroot = self.settings["EROOT"]
1998 # These files are generated by emerge, so we need to remove
1999 # them when they are the only thing left in a directory.
2000 infodir_cleanup = frozenset(["dir", "dir.old"])
2001 infodirs = frozenset(infodir for infodir in chain(
2002 self.settings.get("INFOPATH", "").split(":"),
2003 self.settings.get("INFODIR", "").split(":")) if infodir)
2004 infodirs_inodes = set()
2005 for infodir in infodirs:
2006 infodir = os.path.join(real_root, infodir.lstrip(os.sep))
2008 statobj = os.stat(infodir)
2012 infodirs_inodes.add((statobj.st_dev, statobj.st_ino))
# Main pass over every CONTENTS entry.
2014 for i, objkey in enumerate(mykeys):
2016 obj = normalize_path(objkey)
2019 _unicode_encode(obj,
2020 encoding=_encodings['merge'], errors='strict')
2021 except UnicodeEncodeError:
2022 # The package appears to have been merged with a
2023 # different value of sys.getfilesystemencoding(),
2024 # so fall back to utf_8 if appropriate.
2026 _unicode_encode(obj,
2027 encoding=_encodings['fs'], errors='strict')
2028 except UnicodeEncodeError:
2032 perf_md5 = portage.checksum.perform_md5
2034 file_data = pkgfiles[objkey]
2035 file_type = file_data[0]
2037 # don't try to unmerge the prefix offset itself
2038 if len(obj) <= len(eroot) or not obj.startswith(eroot):
2039 show_unmerge("---", unmerge_desc["!prefix"], file_type, obj)
# stat follows symlinks, lstat does not; either may fail if the
# object vanished (handled by the elided except branches).
2044 statobj = os.stat(obj)
2049 lstatobj = os.lstat(obj)
2050 except (OSError, AttributeError):
2052 islink = lstatobj is not None and stat.S_ISLNK(lstatobj.st_mode)
2053 if lstatobj is None:
2054 show_unmerge("---", unmerge_desc["!found"], file_type, obj)
2056 # don't use EROOT, CONTENTS entries already contain EPREFIX
2057 if obj.startswith(real_root):
2058 relative_path = obj[real_root_len:]
# Skip files that another instance in this slot still owns.
2060 for dblnk in others_in_slot:
2061 if dblnk.isowner(relative_path):
2065 if file_type == "sym" and is_owned and \
2066 (islink and statobj and stat.S_ISDIR(statobj.st_mode)):
2067 # A new instance of this package claims the file, so
2068 # don't unmerge it. If the file is symlink to a
2069 # directory and the unmerging package installed it as
2070 # a symlink, but the new owner has it listed as a
2071 # directory, then we'll produce a warning since the
2072 # symlink is a sort of orphan in this case (see
2074 symlink_orphan = False
2075 for dblnk in others_in_slot:
2076 parent_contents_key = \
2077 dblnk._match_contents(relative_path)
2078 if not parent_contents_key:
2080 if not parent_contents_key.startswith(
2083 if dblnk.getcontents()[
2084 parent_contents_key][0] == "dir":
2085 symlink_orphan = True
2089 protected_symlinks.setdefault(
2090 (statobj.st_dev, statobj.st_ino),
2091 []).append(relative_path)
2094 show_unmerge("---", unmerge_desc["replaced"], file_type, obj)
2096 elif relative_path in cfgfiledict:
2097 stale_confmem.append(relative_path)
2098 # next line includes a tweak to protect modules from being unmerged,
2099 # but we don't protect modules from being overwritten if they are
2100 # upgraded. We effectively only want one half of the config protection
2101 # functionality for /lib/modules. For portage-ng both capabilities
2102 # should be able to be independently specified.
2103 # TODO: For rebuilds, re-parent previous modules to the new
2104 # installed instance (so they are not orphans). For normal
2105 # uninstall (not rebuild/reinstall), remove the modules along
2106 # with all other files (leave no orphans).
2107 if obj.startswith(modprotect):
2108 show_unmerge("---", unmerge_desc["cfgpro"], file_type, obj)
2111 # Don't unlink symlinks to directories here since that can
2112 # remove /lib and /usr/lib symlinks.
# FEATURES=unmerge-orphans: remove unprotected non-directories
# regardless of md5/mtime checks.
2113 if unmerge_orphans and \
2114 lstatobj and not stat.S_ISDIR(lstatobj.st_mode) and \
2115 not (islink and statobj and stat.S_ISDIR(statobj.st_mode)) and \
2116 not self.isprotected(obj):
2118 unlink(obj, lstatobj)
2119 except EnvironmentError as e:
2120 if e.errno not in ignored_unlink_errnos:
2123 show_unmerge("<<<", "", file_type, obj)
# mtime check: skip objects modified since they were merged.
2126 lmtime = str(lstatobj[stat.ST_MTIME])
2127 if (pkgfiles[objkey][0] not in ("dir", "fif", "dev")) and (lmtime != pkgfiles[objkey][1]):
2128 show_unmerge("---", unmerge_desc["!mtime"], file_type, obj)
# Per-type handling: dirs are deferred into mydirs, syms/objs/
# fifos/devs handled in place.
2131 if pkgfiles[objkey][0] == "dir":
2132 if lstatobj is None or not stat.S_ISDIR(lstatobj.st_mode):
2133 show_unmerge("---", unmerge_desc["!dir"], file_type, obj)
2135 mydirs.add((obj, (lstatobj.st_dev, lstatobj.st_ino)))
2136 elif pkgfiles[objkey][0] == "sym":
2138 show_unmerge("---", unmerge_desc["!sym"], file_type, obj)
2141 # If this symlink points to a directory then we don't want
2142 # to unmerge it if there are any other packages that
2143 # installed files into the directory via this symlink
2144 # (see bug #326685).
2145 # TODO: Resolving a symlink to a directory will require
2146 # simulation if $ROOT != / and the link is not relative.
2147 if islink and statobj and stat.S_ISDIR(statobj.st_mode) \
2148 and obj.startswith(real_root):
2150 relative_path = obj[real_root_len:]
2152 target_dir_contents = os.listdir(obj)
2156 if target_dir_contents:
2157 # If all the children are regular files owned
2158 # by this package, then the symlink should be
2161 for child in target_dir_contents:
2162 child = os.path.join(relative_path, child)
2163 if not self.isowner(child):
2167 child_lstat = os.lstat(os.path.join(
2168 real_root, child.lstrip(os.sep)))
2172 if not stat.S_ISREG(child_lstat.st_mode):
2173 # Nested symlinks or directories make
2174 # the issue very complex, so just
2175 # preserve the symlink in order to be
2181 protected_symlinks.setdefault(
2182 (statobj.st_dev, statobj.st_ino),
2183 []).append(relative_path)
2184 show_unmerge("---", unmerge_desc["!empty"],
2188 # Go ahead and unlink symlinks to directories here when
2189 # they're actually recorded as symlinks in the contents.
2190 # Normally, symlinks such as /lib -> lib64 are not recorded
2191 # as symlinks in the contents of a package. If a package
2192 # installs something into ${D}/lib/, it is recorded in the
2193 # contents as a directory even if it happens to correspond
2194 # to a symlink when it's merged to the live filesystem.
2196 unlink(obj, lstatobj)
2197 show_unmerge("<<<", "", file_type, obj)
2198 except (OSError, IOError) as e:
2199 if e.errno not in ignored_unlink_errnos:
2202 show_unmerge("!!!", "", file_type, obj)
2203 elif pkgfiles[objkey][0] == "obj":
2204 if statobj is None or not stat.S_ISREG(statobj.st_mode):
2205 show_unmerge("---", unmerge_desc["!obj"], file_type, obj)
2209 mymd5 = perf_md5(obj, calc_prelink=calc_prelink)
2210 except FileNotFound as e:
2211 # the file has disappeared between now and our stat call
2212 show_unmerge("---", unmerge_desc["!obj"], file_type, obj)
2215 # string.lower is needed because db entries used to be in upper-case. The
2216 # string.lower allows for backwards compatibility.
2217 if mymd5 != pkgfiles[objkey][2].lower():
2218 show_unmerge("---", unmerge_desc["!md5"], file_type, obj)
2221 unlink(obj, lstatobj)
2222 except (OSError, IOError) as e:
2223 if e.errno not in ignored_unlink_errnos:
2226 show_unmerge("<<<", "", file_type, obj)
2227 elif pkgfiles[objkey][0] == "fif":
2228 if not stat.S_ISFIFO(lstatobj[stat.ST_MODE]):
2229 show_unmerge("---", unmerge_desc["!fif"], file_type, obj)
2231 show_unmerge("---", "", file_type, obj)
2232 elif pkgfiles[objkey][0] == "dev":
2233 show_unmerge("---", "", file_type, obj)
# Second pass: remove directories (deepest-first ordering is
# presumably established by the elided sort/reverse — confirm).
2235 mydirs = sorted(mydirs)
2238 for obj, inode_key in mydirs:
2239 # Treat any directory named "info" as a candidate here,
2240 # since it might have been in INFOPATH previously even
2241 # though it may not be there now.
2242 if inode_key in infodirs_inodes or \
2243 os.path.basename(obj) == "info":
2245 remaining = os.listdir(obj)
2249 cleanup_info_dir = ()
2251 len(remaining) <= len(infodir_cleanup):
2252 if not set(remaining).difference(infodir_cleanup):
2253 cleanup_info_dir = remaining
# Remove leftover emerge-generated "dir"/"dir.old" files so the
# info directory can be rmdir'ed.
2255 for child in cleanup_info_dir:
2256 child = os.path.join(obj, child)
2258 lstatobj = os.lstat(child)
2259 if stat.S_ISREG(lstatobj.st_mode):
2260 unlink(child, lstatobj)
2261 show_unmerge("<<<", "", "obj", child)
2262 except EnvironmentError as e:
2263 if e.errno not in ignored_unlink_errnos:
2266 show_unmerge("!!!", "", "obj", child)
# rmdir with BSD-flags handling, mirroring unlink() above.
2269 lstatobj = os.lstat(obj)
2270 if lstatobj.st_flags != 0:
2271 bsd_chflags.lchflags(obj, 0)
2272 parent_name = os.path.dirname(obj)
2273 # Use normal stat/chflags for the parent since we want to
2274 # follow any symlinks to the real parent directory.
2275 pflags = os.stat(parent_name).st_flags
2277 bsd_chflags.chflags(parent_name, 0)
2281 if bsd_chflags and pflags != 0:
2282 # Restore the parent flags we saved before unlinking
2283 bsd_chflags.chflags(parent_name, pflags)
2284 show_unmerge("<<<", "", "dir", obj)
2285 except EnvironmentError as e:
2286 if e.errno not in ignored_rmdir_errnos:
2288 if e.errno != errno.ENOENT:
2289 show_unmerge("---", unmerge_desc["!empty"], "dir", obj)
2292 # When a directory is successfully removed, there's
2293 # no need to protect symlinks that point to it.
2294 unmerge_syms = protected_symlinks.pop(inode_key, None)
2295 if unmerge_syms is not None:
2296 for relative_path in unmerge_syms:
2297 obj = os.path.join(real_root,
2298 relative_path.lstrip(os.sep))
2300 unlink(obj, os.lstat(obj))
2301 show_unmerge("<<<", "", "sym", obj)
2302 except (OSError, IOError) as e:
2303 if e.errno not in ignored_unlink_errnos:
2306 show_unmerge("!!!", "", "sym", obj)
# Tell the admin which symlinks were preserved and why.
2308 if protected_symlinks:
2309 msg = "One or more symlinks to directories have been " + \
2310 "preserved in order to ensure that files installed " + \
2311 "via these symlinks remain accessible:"
2312 lines = textwrap.wrap(msg, 72)
2315 flat_list.update(*protected_symlinks.values())
2316 flat_list = sorted(flat_list)
2318 lines.append("\t%s" % (os.path.join(real_root,
2321 self._elog("eerror", "postrm", lines)
2323 # Remove stale entries from config memory.
2325 for filename in stale_confmem:
2326 del cfgfiledict[filename]
2327 writedict(cfgfiledict, self.vartree.dbapi._conf_mem_file)
2329 #remove self from vartree database so that our own virtual gets zapped if we're the last node
2330 self.vartree.zap(self.mycpv)
# Public ownership test: True iff this installed package's CONTENTS
# claims the given path. NOTE(review): this chunk is a line-sampled
# extract; some original lines (e.g. the docstring quote delimiters and
# part of the docstring) are missing between the numbered lines below.
2332 def isowner(self, filename, destroot=None):
2334 Check if a file belongs to this package. This may
2335 result in a stat call for the parent directory of
2336 every installed file, since the inode numbers are
2337 used to work around the problem of ambiguous paths
2338 caused by symlinked directories. The results of
2339 stat calls are cached to optimize multiple calls
2348 1. True if this package owns the file.
2349 2. False if this package does not own the file.
# destroot is deprecated and ignored; warn callers that still pass a
# value different from self._eroot.
2352 if destroot is not None and destroot != self._eroot:
2353 warnings.warn("The second parameter of the " + \
2354 "portage.dbapi.vartree.dblink.isowner()" + \
2355 " is now unused. Instead " + \
2356 "self.settings['EROOT'] will be used.",
2357 DeprecationWarning, stacklevel=2)
# _match_contents() returns the matching contents entry or False;
# collapse that to a plain bool for callers.
2359 return bool(self._match_contents(filename))
# Look up the CONTENTS entry for a path, tolerating encoding mismatches
# and symlinked parent directories. Returns the contents entry, or False
# when this package does not own the path. NOTE(review): sampled extract;
# several original lines (try: statements, loop headers, return
# statements) are missing between the numbered lines below.
2361 def _match_contents(self, filename, destroot=None):
2363 The matching contents entry is returned, which is useful
2364 since the path may differ from the one given by the caller,
2368 @return: the contents entry corresponding to the given path, or False
2369 if the file is not owned by this package.
# Normalize the caller's argument to unicode up front.
2372 filename = _unicode_decode(filename,
2373 encoding=_encodings['content'], errors='strict')
# destroot is deprecated and ignored; self.settings['ROOT'] is used.
2375 if destroot is not None and destroot != self._eroot:
2376 warnings.warn("The second parameter of the " + \
2377 "portage.dbapi.vartree.dblink._match_contents()" + \
2378 " is now unused. Instead " + \
2379 "self.settings['ROOT'] will be used.",
2380 DeprecationWarning, stacklevel=2)
2382 # don't use EROOT here, image already contains EPREFIX
2383 destroot = self.settings['ROOT']
2385 # The given filename argument might have a different encoding than the
2386 # the filenames contained in the contents, so use separate wrapped os
2387 # modules for each. The basename is more likely to contain non-ascii
2388 # characters than the directory path, so use os_filename_arg for all
2389 # operations involving the basename of the filename arg.
2390 os_filename_arg = _os_merge
# Probe whether the argument round-trips through the 'merge' encoding;
# on failure fall back to the plain (utf_8-based) os wrapper.
2394 _unicode_encode(filename,
2395 encoding=_encodings['merge'], errors='strict')
2396 except UnicodeEncodeError:
2397 # The package appears to have been merged with a
2398 # different value of sys.getfilesystemencoding(),
2399 # so fall back to utf_8 if appropriate.
2401 _unicode_encode(filename,
2402 encoding=_encodings['fs'], errors='strict')
2403 except UnicodeEncodeError:
2406 os_filename_arg = portage.os
# Absolute, normalized candidate path inside ROOT.
2408 destfile = normalize_path(
2409 os_filename_arg.path.join(destroot,
2410 filename.lstrip(os_filename_arg.path.sep)))
# Fast path: exact string match against the CONTENTS keys.
2412 pkgfiles = self.getcontents()
2413 if pkgfiles and destfile in pkgfiles:
# Lazily build the set of basenames from CONTENTS; used below as a
# cheap negative filter before any stat() work.
2416 basename = os_filename_arg.path.basename(destfile)
2417 if self._contents_basenames is None:
2422 encoding=_encodings['merge'],
2424 except UnicodeEncodeError:
2425 # The package appears to have been merged with a
2426 # different value of sys.getfilesystemencoding(),
2427 # so fall back to utf_8 if appropriate.
2431 encoding=_encodings['fs'],
2433 except UnicodeEncodeError:
2438 self._contents_basenames = set(
2439 os.path.basename(x) for x in pkgfiles)
2440 if basename not in self._contents_basenames:
2441 # This is a shortcut that, in most cases, allows us to
2442 # eliminate this package as an owner without the need
2443 # to examine inode numbers of parent directories.
2446 # Use stat rather than lstat since we want to follow
2447 # any symlinks to the real parent directory.
2448 parent_path = os_filename_arg.path.dirname(destfile)
2450 parent_stat = os_filename_arg.stat(parent_path)
2451 except EnvironmentError as e:
2452 if e.errno != errno.ENOENT:
# Lazily build a (st_dev, st_ino) -> [parent paths] map over the
# parents of all CONTENTS entries, so symlinked directory aliases
# can be matched by inode rather than by string.
2456 if self._contents_inodes is None:
2462 encoding=_encodings['merge'],
2464 except UnicodeEncodeError:
2465 # The package appears to have been merged with a
2466 # different value of sys.getfilesystemencoding(),
2467 # so fall back to utf_8 if appropriate.
2471 encoding=_encodings['fs'],
2473 except UnicodeEncodeError:
2478 self._contents_inodes = {}
2479 parent_paths = set()
# Each distinct parent directory is stat()ed only once.
2481 p_path = os.path.dirname(x)
2482 if p_path in parent_paths:
2484 parent_paths.add(p_path)
2490 inode_key = (s.st_dev, s.st_ino)
2491 # Use lists of paths in case multiple
2492 # paths reference the same inode.
2493 p_path_list = self._contents_inodes.get(inode_key)
2494 if p_path_list is None:
2496 self._contents_inodes[inode_key] = p_path_list
2497 if p_path not in p_path_list:
2498 p_path_list.append(p_path)
# Match the argument's parent inode against the map, then recombine
# each aliased parent with the basename to find the contents entry.
2500 p_path_list = self._contents_inodes.get(
2501 (parent_stat.st_dev, parent_stat.st_ino))
2503 for p_path in p_path_list:
2504 x = os_filename_arg.path.join(p_path, basename)
# Refresh the dynamic-linker dependency map (LinkageMap) unless it is
# unavailable or unnecessary. NOTE(review): sampled extract; the early
# `return` and the `try:` line are among the missing original lines.
2510 def _linkmap_rebuild(self, **kwargs):
2512 Rebuild the self._linkmap if it's not broken due to missing
2513 scanelf binary. Also, return early if preserve-libs is disabled
2514 and the preserve-libs registry is empty.
# Guard: nothing to do when the map was previously marked broken, when
# the linkmap/registry objects are absent, or when preserve-libs is off
# and no preserved entries remain.
2516 if self._linkmap_broken or \
2517 self.vartree.dbapi._linkmap is None or \
2518 self.vartree.dbapi._plib_registry is None or \
2519 ("preserve-libs" not in self.settings.features and \
2520 not self.vartree.dbapi._plib_registry.hasEntries()):
2523 self.vartree.dbapi._linkmap.rebuild(**kwargs)
# A missing scanelf binary permanently disables preserve-libs for this
# dblink (the flag is checked by the other preserve-libs helpers).
2524 except CommandNotFound as e:
2525 self._linkmap_broken = True
2526 self._display_merge(_("!!! Disabling preserve-libs " \
2527 "due to error: Command Not Found: %s\n") % (e,),
2528 level=logging.ERROR, noiselevel=-1)
# Compute the set of library paths (relative to ROOT) that must survive
# this merge/unmerge because installed consumers still link against
# them. NOTE(review): sampled extract; `return`, `try:`, `if/else` and
# `continue` lines are among the missing original lines below.
2530 def _find_libs_to_preserve(self, unmerge=False):
2532 Get set of relative paths for libraries to be preserved. When
2533 unmerge is False, file paths to preserve are selected from
2534 self._installed_instance. Otherwise, paths are selected from
# Guard: preserve-libs must be enabled and all supporting objects
# (linkmap, registry, installed instance) must be available.
2537 if self._linkmap_broken or \
2538 self.vartree.dbapi._linkmap is None or \
2539 self.vartree.dbapi._plib_registry is None or \
2540 (not unmerge and self._installed_instance is None) or \
2541 "preserve-libs" not in self.settings.features:
2545 linkmap = self.vartree.dbapi._linkmap
# Pick whose CONTENTS we scan: self when unmerging, otherwise the
# previously installed instance being replaced.
2547 installed_instance = self
2549 installed_instance = self._installed_instance
2550 old_contents = installed_instance.getcontents()
2551 root = self.settings['ROOT']
2552 root_len = len(root) - 1
2553 lib_graph = digraph()
# Canonicalize paths to graph nodes keyed by soname/object identity;
# multiple paths for the same object share one node via alt_paths.
2556 def path_to_node(path):
2557 node = path_node_map.get(path)
2559 node = LinkageMap._LibGraphNode(linkmap._obj_key(path))
2560 alt_path_node = lib_graph.get(node)
2561 if alt_path_node is not None:
2562 node = alt_path_node
2563 node.alt_paths.add(path)
2564 path_node_map[path] = node
2568 provider_nodes = set()
2569 # Create provider nodes and add them to the graph.
2570 for f_abs in old_contents:
# Encoding probe, mirroring _match_contents(): fall back when the
# contents were recorded under a different filesystem encoding.
2574 _unicode_encode(f_abs,
2575 encoding=_encodings['merge'], errors='strict')
2576 except UnicodeEncodeError:
2577 # The package appears to have been merged with a
2578 # different value of sys.getfilesystemencoding(),
2579 # so fall back to utf_8 if appropriate.
2581 _unicode_encode(f_abs,
2582 encoding=_encodings['fs'], errors='strict')
2583 except UnicodeEncodeError:
# Strip ROOT to get the linkmap-relative path.
2588 f = f_abs[root_len:]
2589 if not unmerge and self.isowner(f):
2590 # We have an indentically named replacement file,
2591 # so we don't try to preserve the old copy.
# Only files with outside consumers (excluding the package's own
# files as providers) become provider nodes.
2594 consumers = linkmap.findConsumers(f,
2595 exclude_providers=(installed_instance.isowner,))
2600 provider_node = path_to_node(f)
2601 lib_graph.add(provider_node, None)
2602 provider_nodes.add(provider_node)
2603 consumer_map[provider_node] = consumers
2605 # Create consumer nodes and add them to the graph.
2606 # Note that consumers can also be providers.
2607 for provider_node, consumers in consumer_map.items():
2609 consumer_node = path_to_node(c)
2610 if installed_instance.isowner(c) and \
2611 consumer_node not in provider_nodes:
2612 # This is not a provider, so it will be uninstalled.
2614 lib_graph.add(provider_node, consumer_node)
2616 # Locate nodes which should be preserved. They consist of all
2617 # providers that are reachable from consumers that are not
2618 # providers themselves.
2619 preserve_nodes = set()
2620 for consumer_node in lib_graph.root_nodes():
2621 if consumer_node in provider_nodes:
2623 # Preserve all providers that are reachable from this consumer.
# Iterative DFS over the provider graph.
2624 node_stack = lib_graph.child_nodes(consumer_node)
2626 provider_node = node_stack.pop()
2627 if provider_node in preserve_nodes:
2629 preserve_nodes.add(provider_node)
2630 node_stack.extend(lib_graph.child_nodes(provider_node))
2632 preserve_paths = set()
2633 for preserve_node in preserve_nodes:
2634 # Preserve the library itself, and also preserve the
2635 # soname symlink which is the only symlink that is
2636 # strictly required.
2638 soname_symlinks = set()
2639 soname = linkmap.getSoname(next(iter(preserve_node.alt_paths)))
2640 for f in preserve_node.alt_paths:
2641 f_abs = os.path.join(root, f.lstrip(os.sep))
# Regular files are hardlinks of the lib; symlinks whose basename
# equals the soname are the required soname links.
2643 if stat.S_ISREG(os.lstat(f_abs).st_mode):
2645 elif os.path.basename(f) == soname:
2646 soname_symlinks.add(f)
2651 preserve_paths.update(hardlinks)
2652 preserve_paths.update(soname_symlinks)
2654 return preserve_paths
# Record preserved library paths in the new package's CONTENTS so the
# files remain tracked after the merge. NOTE(review): sampled extract;
# some original lines (early return, `prev = parent_dir`, noiselevel
# arguments) are missing between the numbered lines below.
2656 def _add_preserve_libs_to_contents(self, preserve_paths):
2658 Preserve libs returned from _find_libs_to_preserve().
2661 if not preserve_paths:
2665 showMessage = self._display_merge
2666 root = self.settings['ROOT']
2668 # Copy contents entries from the old package to the new one.
2669 new_contents = self.getcontents().copy()
2670 old_contents = self._installed_instance.getcontents()
# sorted() gives deterministic processing/log order.
2671 for f in sorted(preserve_paths):
2672 f = _unicode_decode(f,
2673 encoding=_encodings['content'], errors='strict')
2674 f_abs = os.path.join(root, f.lstrip(os.sep))
2675 contents_entry = old_contents.get(f_abs)
2676 if contents_entry is None:
2677 # This will probably never happen, but it might if one of the
2678 # paths returned from findConsumers() refers to one of the libs
2679 # that should be preserved yet the path is not listed in the
2680 # contents. Such a path might belong to some other package, so
2681 # it shouldn't be preserved here.
2682 showMessage(_("!!! File '%s' will not be preserved "
2683 "due to missing contents entry\n") % (f_abs,),
2684 level=logging.ERROR, noiselevel=-1)
# Also drop it from the caller's set so later phases agree.
2685 preserve_paths.remove(f)
2687 new_contents[f_abs] = contents_entry
2688 obj_type = contents_entry[0]
2689 showMessage(_(">>> needed %s %s\n") % (obj_type, f_abs),
2691 # Add parent directories to contents if necessary.
2692 parent_dir = os.path.dirname(f_abs)
2693 while len(parent_dir) > len(root):
2694 new_contents[parent_dir] = ["dir"]
# Stop climbing once dirname() no longer changes the path.
2696 parent_dir = os.path.dirname(parent_dir)
2697 if prev == parent_dir:
# Rewrite CONTENTS atomically in the temporary vdb dir.
2699 outfile = atomic_ofstream(os.path.join(self.dbtmpdir, "CONTENTS"))
2700 write_contents(new_contents, root, outfile)
# Invalidate cached contents so the new entries are visible.
2702 self._clear_contents_cache()
# Identify preserved libraries that no longer have any live consumers,
# returned as a {cpv: [paths]} map suitable for _remove_preserved_libs().
# NOTE(review): sampled extract; `return`, `continue`, `break` and some
# assignment lines are missing between the numbered lines below.
2704 def _find_unused_preserved_libs(self, unmerge_no_replacement):
2706 Find preserved libraries that don't have any consumers left.
# Guard: requires a working linkmap and a non-empty preserve registry.
2709 if self._linkmap_broken or \
2710 self.vartree.dbapi._linkmap is None or \
2711 self.vartree.dbapi._plib_registry is None or \
2712 not self.vartree.dbapi._plib_registry.hasEntries():
2715 # Since preserved libraries can be consumers of other preserved
2716 # libraries, use a graph to track consumer relationships.
2717 plib_dict = self.vartree.dbapi._plib_registry.getPreservedLibs()
2718 linkmap = self.vartree.dbapi._linkmap
2719 lib_graph = digraph()
2720 preserved_nodes = set()
2721 preserved_paths = set()
2724 root = self.settings['ROOT']
# Same node-canonicalization helper as in _find_libs_to_preserve().
2726 def path_to_node(path):
2727 node = path_node_map.get(path)
2729 node = LinkageMap._LibGraphNode(linkmap._obj_key(path))
2730 alt_path_node = lib_graph.get(node)
2731 if alt_path_node is not None:
2732 node = alt_path_node
2733 node.alt_paths.add(path)
2734 path_node_map[path] = node
# Seed the graph with every registered preserved lib that still exists,
# plus edges from each lib to its current consumers.
2737 for cpv, plibs in plib_dict.items():
2739 path_cpv_map[f] = cpv
2740 preserved_node = path_to_node(f)
2741 if not preserved_node.file_exists():
2743 lib_graph.add(preserved_node, None)
2744 preserved_paths.add(f)
2745 preserved_nodes.add(preserved_node)
2746 for c in self.vartree.dbapi._linkmap.findConsumers(f):
2747 consumer_node = path_to_node(c)
2748 if not consumer_node.file_exists():
2750 # Note that consumers may also be providers.
2751 lib_graph.add(preserved_node, consumer_node)
2753 # Eliminate consumers having providers with the same soname as an
2754 # installed library that is not preserved. This eliminates
2755 # libraries that are erroneously preserved due to a move from one
2756 # directory to another.
2757 # Also eliminate consumers that are going to be unmerged if
2758 # unmerge_no_replacement is True.
2760 for preserved_node in preserved_nodes:
2761 soname = linkmap.getSoname(preserved_node)
2762 for consumer_node in lib_graph.parent_nodes(preserved_node):
2763 if consumer_node in preserved_nodes:
2765 if unmerge_no_replacement:
# A consumer is "going away" only if every alt path is owned
# by the package being unmerged.
2766 will_be_unmerged = True
2767 for path in consumer_node.alt_paths:
2768 if not self.isowner(path):
2769 will_be_unmerged = False
2771 if will_be_unmerged:
2772 # This consumer is not preserved and it is
2773 # being unmerged, so drop this edge.
2774 lib_graph.remove_edge(preserved_node, consumer_node)
# Memoize findProviders() per consumer; it is comparatively expensive.
2777 providers = provider_cache.get(consumer_node)
2778 if providers is None:
2779 providers = linkmap.findProviders(consumer_node)
2780 provider_cache[consumer_node] = providers
2781 providers = providers.get(soname)
2782 if providers is None:
2784 for provider in providers:
2785 if provider in preserved_paths:
2787 provider_node = path_to_node(provider)
2788 if not provider_node.file_exists():
2790 if provider_node in preserved_nodes:
2792 # An alternative provider seems to be
2793 # installed, so drop this edge.
2794 lib_graph.remove_edge(preserved_node, consumer_node)
# Preserved libs that are now roots (no consumers) are unused; iterate
# so removals can cascade to libs they themselves consumed.
2799 root_nodes = preserved_nodes.intersection(lib_graph.root_nodes())
2802 lib_graph.difference_update(root_nodes)
2804 for node in root_nodes:
2805 unlink_list.update(node.alt_paths)
2806 unlink_list = sorted(unlink_list)
2807 for obj in unlink_list:
2808 cpv = path_cpv_map.get(obj)
2810 # This means that a symlink is in the preserved libs
2811 # registry, but the actual lib it points to is not.
2812 self._display_merge(_("!!! symlink to lib is preserved, "
2813 "but not the lib itself:\n!!! '%s'\n") % (obj,),
2814 level=logging.ERROR, noiselevel=-1)
# Group the removable paths by the package (cpv) that registered them.
2816 removed = cpv_lib_map.get(cpv)
2819 cpv_lib_map[cpv] = removed
# Physically delete the unused preserved libraries computed by
# _find_unused_preserved_libs(), then prune now-empty parent dirs and
# stale registry entries. NOTE(review): sampled extract; the `try:`
# around unlink and several loop lines are missing below.
2824 def _remove_preserved_libs(self, cpv_lib_map):
2826 Remove files returned from _find_unused_preserved_libs().
# Flatten the per-cpv lists into one sorted, de-duplicated worklist.
2831 files_to_remove = set()
2832 for files in cpv_lib_map.values():
2833 files_to_remove.update(files)
2834 files_to_remove = sorted(files_to_remove)
2835 showMessage = self._display_merge
2836 root = self.settings['ROOT']
2839 for obj in files_to_remove:
2840 obj = os.path.join(root, obj.lstrip(os.sep))
# Remember parents so empty directories can be cleaned up afterwards.
2841 parent_dirs.add(os.path.dirname(obj))
2842 if os.path.islink(obj):
# ENOENT is tolerated: the file may already be gone.
2848 except OSError as e:
2849 if e.errno != errno.ENOENT:
2853 showMessage(_("<<< !needed %s %s\n") % (obj_type, obj),
2856 # Remove empty parent directories if possible.
2858 x = parent_dirs.pop()
# Climb toward ROOT, removing each directory that becomes empty.
2865 x = os.path.dirname(x)
# Drop registry entries whose files no longer exist on disk.
2869 self.vartree.dbapi._plib_registry.pruneNonExisting()
# Scan the files/symlinks about to be merged for collisions with files
# already installed by other packages, with preserved libraries, or with
# directories (symlink-over-dir). Returns (collisions,
# symlink_collisions, plib_collisions). NOTE(review): sampled extract;
# `try:`/`continue`/initialization lines are missing below.
2871 def _collision_protect(self, srcroot, destroot, mypkglist,
2872 file_list, symlink_list):
# COLLISION_IGNORE entries are normalized so prefix matching works.
2876 collision_ignore = set([normalize_path(myignore) for myignore in \
2877 portage.util.shlex_split(
2878 self.settings.get("COLLISION_IGNORE", ""))])
2880 # For collisions with preserved libraries, the current package
2881 # will assume ownership and the libraries will be unregistered.
2882 if self.vartree.dbapi._plib_registry is None:
2883 # preserve-libs is entirely disabled
# Build a path->cpv map and an inode map for all preserved libs so
# collisions with them can be detected even through hardlinks.
2888 plib_dict = self.vartree.dbapi._plib_registry.getPreservedLibs()
2891 for cpv, paths in plib_dict.items():
2892 plib_paths.update(paths)
2894 plib_cpv_map[f] = cpv
2895 plib_inodes = self._lstat_inode_map(plib_paths)
2897 plib_collisions = {}
2899 showMessage = self._display_merge
2902 symlink_collisions = []
# destroot parameter is ignored; ROOT from settings is authoritative.
2903 destroot = self.settings['ROOT']
2904 showMessage(_(" %s checking %d files for package collisions\n") % \
2905 (colorize("GOOD", "*"), len(file_list) + len(symlink_list)))
# Single pass over regular files then symlinks, tagged with a type.
2906 for i, (f, f_type) in enumerate(chain(
2907 ((f, "reg") for f in file_list),
2908 ((f, "sym") for f in symlink_list))):
# Periodic progress output for large merges.
2909 if i % 1000 == 0 and i != 0:
2910 showMessage(_("%d files checked ...\n") % i)
2912 dest_path = normalize_path(
2913 os.path.join(destroot, f.lstrip(os.path.sep)))
2915 dest_lstat = os.lstat(dest_path)
2916 except EnvironmentError as e:
2917 if e.errno == errno.ENOENT:
2920 elif e.errno == errno.ENOTDIR:
2922 # A non-directory is in a location where this package
2923 # expects to have a directory.
# Walk up until the offending non-directory ancestor is found,
# then treat that ancestor as the colliding path.
2925 parent_path = dest_path
2926 while len(parent_path) > len(destroot):
2927 parent_path = os.path.dirname(parent_path)
2929 dest_lstat = os.lstat(parent_path)
2931 except EnvironmentError as e:
2932 if e.errno != errno.ENOTDIR:
2936 raise AssertionError(
2937 "unable to find non-directory " + \
2938 "parent for '%s'" % dest_path)
2939 dest_path = parent_path
2940 f = os.path.sep + dest_path[len(destroot):]
2948 if stat.S_ISDIR(dest_lstat.st_mode):
2950 # This case is explicitly banned
2951 # by PMS (see bug #326685).
2952 symlink_collisions.append(f)
2953 collisions.append(f)
# Collision with a preserved lib (matched by inode): record it in
# plib_collisions and exclude it from the normal collision list.
2956 plibs = plib_inodes.get((dest_lstat.st_dev, dest_lstat.st_ino))
2959 cpv = plib_cpv_map[path]
2960 paths = plib_collisions.get(cpv)
2963 plib_collisions[cpv] = paths
2965 # The current package will assume ownership and the
2966 # libraries will be unregistered, so exclude this
2967 # path from the normal collisions.
# Ownership by a package in mypkglist, or an unprotected path,
# determines whether this counts as a real collision.
2971 full_path = os.path.join(destroot, f.lstrip(os.path.sep))
2972 for ver in mypkglist:
2976 if not isowned and self.isprotected(full_path):
# COLLISION_IGNORE suppresses matches by exact path or prefix.
2980 if collision_ignore:
2981 if f in collision_ignore:
2984 for myignore in collision_ignore:
2985 if f.startswith(myignore + os.path.sep):
2989 collisions.append(f)
2990 return collisions, symlink_collisions, plib_collisions
# Build an inode -> path-set map for a collection of ROOT-relative
# paths, ignoring paths that do not exist. NOTE(review): sampled
# extract; the `try:`, `continue`, `raise`, set construction and
# `return inode_map` lines are missing below.
2992 def _lstat_inode_map(self, path_iter):
2994 Use lstat to create a map of the form:
2995 {(st_dev, st_ino) : set([path1, path2, ...])}
2996 Multiple paths may reference the same inode due to hardlinks.
2997 All lstat() calls are relative to self.myroot.
3002 root = self.settings['ROOT']
3005 path = os.path.join(root, f.lstrip(os.sep))
# ENOENT/ENOTDIR simply mean the path is gone; anything else is real.
3008 except OSError as e:
3009 if e.errno not in (errno.ENOENT, errno.ENOTDIR):
3013 key = (st.st_dev, st.st_ino)
3014 paths = inode_map.get(key)
3017 inode_map[key] = paths
# Warn (via preinst eerror) about suid/sgid files whose hardlink count
# exceeds the links accounted for by the installed instances — a
# possible hardlink-based privilege-escalation vector. NOTE(review):
# sampled extract; `try:`/`continue`/`return` lines are missing below.
3021 def _security_check(self, installed_instances):
3022 if not installed_instances:
3027 showMessage = self._display_merge
# Union of all CONTENTS paths across the installed instances.
3030 for dblnk in installed_instances:
3031 file_paths.update(dblnk.getcontents())
3034 for i, path in enumerate(file_paths):
# Encoding probe, mirroring _match_contents().
3038 _unicode_encode(path,
3039 encoding=_encodings['merge'], errors='strict')
3040 except UnicodeEncodeError:
3041 # The package appears to have been merged with a
3042 # different value of sys.getfilesystemencoding(),
3043 # so fall back to utf_8 if appropriate.
3045 _unicode_encode(path,
3046 encoding=_encodings['fs'], errors='strict')
3047 except UnicodeEncodeError:
3054 except OSError as e:
3055 if e.errno not in (errno.ENOENT, errno.ENOTDIR):
# Only regular files can carry suid/sgid bits of interest.
3059 if not stat.S_ISREG(s.st_mode):
# De-duplicate via realpath so aliased paths are counted once.
3061 path = os.path.realpath(path)
3062 if path in real_paths:
3064 real_paths.add(path)
# Candidate: multi-linked AND suid or sgid.
3065 if s.st_nlink > 1 and \
3066 s.st_mode & (stat.S_ISUID | stat.S_ISGID):
3067 k = (s.st_dev, s.st_ino)
3068 inode_map.setdefault(k, []).append((path, s))
3069 suspicious_hardlinks = []
3070 for path_list in inode_map.values():
3071 path, s = path_list[0]
3072 if len(path_list) == s.st_nlink:
3073 # All hardlinks seem to be owned by this package.
# Otherwise some link to this inode lives outside the package.
3075 suspicious_hardlinks.append(path_list)
3076 if not suspicious_hardlinks:
3080 msg.append(_("suid/sgid file(s) "
3081 "with suspicious hardlink(s):"))
3083 for path_list in suspicious_hardlinks:
3084 for path, s in path_list:
3085 msg.append("\t%s" % path)
3087 msg.append(_("See the Gentoo Security Handbook "
3088 "guide for advice on how to proceed."))
# Reported as an error in the preinst phase; merge is not aborted here.
3090 self._eerror("preinst", msg)
def _eqawarn(self, phase, lines):
	"""Emit QA-warning message lines for the given phase via _elog()."""
	self._elog("eqawarn", phase, lines)
def _eerror(self, phase, lines):
	"""Emit error message lines for the given phase via _elog()."""
	self._elog("eerror", phase, lines)
# Dispatch message lines to the named portage.elog.messages function,
# either directly or — when a scheduler is present — captured and routed
# through the scheduler's output handling. NOTE(review): sampled
# extract; the `for l in lines:` loop headers and the StringIO setup
# are among the missing original lines.
3100 def _elog(self, funcname, phase, lines):
3101 func = getattr(portage.elog.messages, funcname)
# No scheduler: log each line directly.
3102 if self._scheduler is None:
3104 func(l, phase=phase, key=self.mycpv)
3106 background = self.settings.get("PORTAGE_BACKGROUND") == "1"
# In "subprocess" mode the log file is managed elsewhere; presumably
# log_path stays unset in that case — TODO confirm against missing lines.
3108 if self.settings.get("PORTAGE_BACKGROUND") != "subprocess":
3109 log_path = self.settings.get("PORTAGE_LOG_FILE")
# Capture the elog output in `out` and hand it to the scheduler.
3112 func(line, phase=phase, key=self.mycpv, out=out)
3113 msg = out.getvalue()
3114 self._scheduler.output(msg,
3115 background=background, log_path=log_path)
# Flush accumulated elog messages: process them locally when there is no
# pipe, otherwise serialize them as text records and write them to the
# parent process through self._pipe. NOTE(review): sampled extract;
# the `cpv = ...` assignment, funcnames dict, and loop headers are
# among the missing original lines.
3117 def _elog_process(self, phasefilter=None):
3119 if self._pipe is None:
# Direct path: run the normal elog processing in this process.
3120 elog_process(cpv, self.settings, phasefilter=phasefilter)
# Merge ebuild-generated log entries with python-generated ones.
3122 logdir = os.path.join(self.settings["T"], "logging")
3123 ebuild_logentries = collect_ebuild_messages(logdir)
3124 py_logentries = collect_messages(key=cpv).get(cpv, {})
3125 logentries = _merge_logentries(py_logentries, ebuild_logentries)
3134 for phase, messages in logentries.items():
3135 for key, lines in messages:
3136 funcname = funcnames[key]
# Normalize a bare string into an iterable of lines.
3137 if isinstance(lines, basestring):
# One space-separated record per line: funcname phase cpv line.
3140 for line in line.split('\n'):
3141 fields = (funcname, phase, cpv, line)
3142 str_buffer.append(' '.join(fields))
3143 str_buffer.append('\n')
# Single write keeps the records together on the pipe.
3145 os.write(self._pipe, _unicode_encode(''.join(str_buffer)))
def _emerge_log(self, msg):
	"""Append *msg* to the emerge log (non-xterm-title variant)."""
	emergelog(False, msg)
3150 def treewalk(self, srcroot, destroot, inforoot, myebuild, cleanup=0,
3151 mydbapi=None, prev_mtimes=None, counter=None):
3154 This function does the following:
3156 calls self._preserve_libs if FEATURES=preserve-libs
3157 calls self._collision_protect if FEATURES=collision-protect
3158 calls doebuild(mydo=pkg_preinst)
3159 Merges the package to the livefs
3160 unmerges old version (if required)
3161 calls doebuild(mydo=pkg_postinst)
3164 @param srcroot: Typically this is ${D}
3165 @type srcroot: String (Path)
3166 @param destroot: ignored, self.settings['ROOT'] is used instead
3167 @type destroot: String (Path)
3168 @param inforoot: root of the vardb entry ?
3169 @type inforoot: String (Path)
3170 @param myebuild: path to the ebuild that we are processing
3171 @type myebuild: String (Path)
3172 @param mydbapi: dbapi which is handed to doebuild.
3173 @type mydbapi: portdbapi instance
3174 @param prev_mtimes: { Filename:mtime } mapping for env_update
3175 @type prev_mtimes: Dictionary
3181 secondhand is a list of symlinks that have been skipped due to their target
3182 not existing; we will merge these symlinks at a later time.
3187 srcroot = _unicode_decode(srcroot,
3188 encoding=_encodings['content'], errors='strict')
3189 destroot = self.settings['ROOT']
3190 inforoot = _unicode_decode(inforoot,
3191 encoding=_encodings['content'], errors='strict')
3192 myebuild = _unicode_decode(myebuild,
3193 encoding=_encodings['content'], errors='strict')
3195 showMessage = self._display_merge
3196 srcroot = normalize_path(srcroot).rstrip(os.path.sep) + os.path.sep
3198 if not os.path.isdir(srcroot):
3199 showMessage(_("!!! Directory Not Found: D='%s'\n") % srcroot,
3200 level=logging.ERROR, noiselevel=-1)
3204 for var_name in ('CHOST', 'SLOT'):
3205 if var_name == 'CHOST' and self.cat == 'virtual':
3207 os.unlink(os.path.join(inforoot, var_name))
3214 f = io.open(_unicode_encode(
3215 os.path.join(inforoot, var_name),
3216 encoding=_encodings['fs'], errors='strict'),
3217 mode='r', encoding=_encodings['repo.content'],
3219 val = f.readline().strip()
3220 except EnvironmentError as e:
3221 if e.errno != errno.ENOENT:
3229 if var_name == 'SLOT':
3232 if not slot.strip():
3233 slot = self.settings.get(var_name, '')
3234 if not slot.strip():
3235 showMessage(_("!!! SLOT is undefined\n"),
3236 level=logging.ERROR, noiselevel=-1)
3238 write_atomic(os.path.join(inforoot, var_name), slot + '\n')
3240 if val != self.settings.get(var_name, ''):
3241 self._eqawarn('preinst',
3242 [_("QA Notice: Expected %(var_name)s='%(expected_value)s', got '%(actual_value)s'\n") % \
3243 {"var_name":var_name, "expected_value":self.settings.get(var_name, ''), "actual_value":val}])
3246 self._eerror("preinst", lines)
3248 if not os.path.exists(self.dbcatdir):
3249 ensure_dirs(self.dbcatdir)
3251 cp = self.mysplit[0]
3252 slot_atom = "%s:%s" % (cp, slot)
3254 # filter any old-style virtual matches
3255 slot_matches = [cpv for cpv in self.vartree.dbapi.match(slot_atom) \
3256 if cpv_getkey(cpv) == cp]
3258 if self.mycpv not in slot_matches and \
3259 self.vartree.dbapi.cpv_exists(self.mycpv):
3260 # handle multislot or unapplied slotmove
3261 slot_matches.append(self.mycpv)
3264 from portage import config
3265 for cur_cpv in slot_matches:
3266 # Clone the config in case one of these has to be unmerged since
3267 # we need it to have private ${T} etc... for things like elog.
3268 settings_clone = config(clone=self.settings)
3269 settings_clone.pop("PORTAGE_BUILDIR_LOCKED", None)
3270 settings_clone.reset()
3271 others_in_slot.append(dblink(self.cat, catsplit(cur_cpv)[1],
3272 settings=settings_clone,
3273 vartree=self.vartree, treetype="vartree",
3274 scheduler=self._scheduler, pipe=self._pipe))
3276 retval = self._security_check(others_in_slot)
3281 # Used by self.isprotected().
3284 for dblnk in others_in_slot:
3285 cur_counter = self.vartree.dbapi.cpv_counter(dblnk.mycpv)
3286 if cur_counter > max_counter:
3287 max_counter = cur_counter
3289 self._installed_instance = max_dblnk
3291 # We check for unicode encoding issues after src_install. However,
3292 # the check must be repeated here for binary packages (it's
3293 # inexpensive since we call os.walk() here anyway).
3295 line_ending_re = re.compile('[\n\r]')
3299 unicode_error = False
3303 paths_with_newlines = []
3304 srcroot_len = len(srcroot)
3307 for parent, dirs, files in os.walk(srcroot, onerror=onerror):
3309 parent = _unicode_decode(parent,
3310 encoding=_encodings['merge'], errors='strict')
3311 except UnicodeDecodeError:
3312 new_parent = _unicode_decode(parent,
3313 encoding=_encodings['merge'], errors='replace')
3314 new_parent = _unicode_encode(new_parent,
3315 encoding='ascii', errors='backslashreplace')
3316 new_parent = _unicode_decode(new_parent,
3317 encoding=_encodings['merge'], errors='replace')
3318 os.rename(parent, new_parent)
3319 unicode_error = True
3320 unicode_errors.append(new_parent[srcroot_len:])
3325 fname = _unicode_decode(fname,
3326 encoding=_encodings['merge'], errors='strict')
3327 except UnicodeDecodeError:
3328 fpath = portage._os.path.join(
3329 parent.encode(_encodings['merge']), fname)
3330 new_fname = _unicode_decode(fname,
3331 encoding=_encodings['merge'], errors='replace')
3332 new_fname = _unicode_encode(new_fname,
3333 encoding='ascii', errors='backslashreplace')
3334 new_fname = _unicode_decode(new_fname,
3335 encoding=_encodings['merge'], errors='replace')
3336 new_fpath = os.path.join(parent, new_fname)
3337 os.rename(fpath, new_fpath)
3338 unicode_error = True
3339 unicode_errors.append(new_fpath[srcroot_len:])
3343 fpath = os.path.join(parent, fname)
3345 relative_path = fpath[srcroot_len:]
3347 if line_ending_re.search(relative_path) is not None:
3348 paths_with_newlines.append(relative_path)
3350 file_mode = os.lstat(fpath).st_mode
3351 if stat.S_ISREG(file_mode):
3352 myfilelist.append(relative_path)
3353 elif stat.S_ISLNK(file_mode):
3354 # Note: os.walk puts symlinks to directories in the "dirs"
3355 # list and it does not traverse them since that could lead
3356 # to an infinite recursion loop.
3357 mylinklist.append(relative_path)
3362 if not unicode_error:
3366 eerror(_merge_unicode_error(unicode_errors))
3368 if paths_with_newlines:
3370 msg.append(_("This package installs one or more files containing line ending characters:"))
3372 paths_with_newlines.sort()
3373 for f in paths_with_newlines:
3374 msg.append("\t/%s" % (f.replace("\n", "\\n").replace("\r", "\\r")))
3376 msg.append(_("package %s NOT merged") % self.mycpv)
3381 # If there are no files to merge, and an installed package in the same
3382 # slot has files, it probably means that something went wrong.
3383 if self.settings.get("PORTAGE_PACKAGE_EMPTY_ABORT") == "1" and \
3384 not myfilelist and not mylinklist and others_in_slot:
3385 installed_files = None
3386 for other_dblink in others_in_slot:
3387 installed_files = other_dblink.getcontents()
3388 if not installed_files:
3390 from textwrap import wrap
3394 "new_cpv":self.mycpv,
3395 "old_cpv":other_dblink.mycpv
3397 msg.extend(wrap(_("The '%(new_cpv)s' package will not install "
3398 "any files, but the currently installed '%(old_cpv)s'"
3399 " package has the following files: ") % d, wrap_width))
3401 msg.extend(sorted(installed_files))
3403 msg.append(_("package %s NOT merged") % self.mycpv)
3406 _("Manually run `emerge --unmerge =%s` if you "
3407 "really want to remove the above files. Set "
3408 "PORTAGE_PACKAGE_EMPTY_ABORT=\"0\" in "
3409 "/etc/make.conf if you do not want to "
3410 "abort in cases like this.") % other_dblink.mycpv,
3416 # check for package collisions
3417 blockers = self._blockers
3418 if blockers is None:
3420 collisions, symlink_collisions, plib_collisions = \
3421 self._collision_protect(srcroot, destroot,
3422 others_in_slot + blockers, myfilelist, mylinklist)
3424 # Make sure the ebuild environment is initialized and that ${T}/elog
3425 # exists for logging of collision-protect eerror messages.
3426 if myebuild is None:
3427 myebuild = os.path.join(inforoot, self.pkg + ".ebuild")
3428 doebuild_environment(myebuild, "preinst",
3429 settings=self.settings, db=mydbapi)
3430 self.settings["REPLACING_VERSIONS"] = " ".join(
3431 [portage.versions.cpv_getversion(other.mycpv)
3432 for other in others_in_slot])
3433 prepare_build_dirs(settings=self.settings, cleanup=cleanup)
3436 collision_protect = "collision-protect" in self.settings.features
3437 protect_owned = "protect-owned" in self.settings.features
3438 msg = _("This package will overwrite one or more files that"
3439 " may belong to other packages (see list below).")
3440 if not (collision_protect or protect_owned):
3441 msg += _(" Add either \"collision-protect\" or"
3442 " \"protect-owned\" to FEATURES in"
3443 " make.conf if you would like the merge to abort"
3444 " in cases like this. See the make.conf man page for"
3445 " more information about these features.")
3446 if self.settings.get("PORTAGE_QUIET") != "1":
3447 msg += _(" You can use a command such as"
3448 " `portageq owners / <filename>` to identify the"
3449 " installed package that owns a file. If portageq"
3450 " reports that only one package owns a file then do NOT"
3451 " file a bug report. A bug report is only useful if it"
3452 " identifies at least two or more packages that are known"
3453 " to install the same file(s)."
3454 " If a collision occurs and you"
3455 " can not explain where the file came from then you"
3456 " should simply ignore the collision since there is not"
3457 " enough information to determine if a real problem"
3458 " exists. Please do NOT file a bug report at"
3459 " http://bugs.gentoo.org unless you report exactly which"
3460 " two packages install the same file(s). Once again,"
3461 " please do NOT file a bug report unless you have"
3462 " completely understood the above message.")
3464 self.settings["EBUILD_PHASE"] = "preinst"
3465 from textwrap import wrap
3467 if collision_protect:
3469 msg.append(_("package %s NOT merged") % self.settings.mycpv)
3471 msg.append(_("Detected file collision(s):"))
3474 for f in collisions:
3475 msg.append("\t%s" % \
3476 os.path.join(destroot, f.lstrip(os.path.sep)))
3481 if collision_protect or protect_owned or symlink_collisions:
3484 msg.append(_("Searching all installed"
3485 " packages for file collisions..."))
3487 msg.append(_("Press Ctrl-C to Stop"))
3491 if len(collisions) > 20:
3492 # get_owners is slow for large numbers of files, so
3493 # don't look them all up.
3494 collisions = collisions[:20]
3497 owners = self.vartree.dbapi._owners.get_owners(collisions)
3498 self.vartree.dbapi.flush_cache()
3502 for pkg, owned_files in owners.items():
3505 msg.append("%s" % cpv)
3506 for f in sorted(owned_files):
3507 msg.append("\t%s" % os.path.join(destroot,
3508 f.lstrip(os.path.sep)))
3513 eerror([_("None of the installed"
3514 " packages claim the file(s)."), ""])
3516 # The explanation about the collision and how to solve
3517 # it may not be visible via a scrollback buffer, especially
3518 # if the number of file collisions is large. Therefore,
3519 # show a summary at the end.
3521 if collision_protect:
3523 msg = _("Package '%s' NOT merged due to file collisions.") % \
3525 elif protect_owned and owners:
3527 msg = _("Package '%s' NOT merged due to file collisions.") % \
3529 elif symlink_collisions:
3531 msg = _("Package '%s' NOT merged due to collision " + \
3532 "between a symlink and a directory which is explicitly " + \
3533 "forbidden by PMS (see bug #326685).") % \
3534 (self.settings.mycpv,)
3536 msg = _("Package '%s' merged despite file collisions.") % \
3538 msg += _(" If necessary, refer to your elog "
3539 "messages for the whole content of the above message.")
3540 eerror(wrap(msg, 70))
3545 # The merge process may move files out of the image directory,
3546 # which causes invalidation of the .installed flag.
3548 os.unlink(os.path.join(
3549 os.path.dirname(normalize_path(srcroot)), ".installed"))
3550 except OSError as e:
3551 if e.errno != errno.ENOENT:
3555 self.dbdir = self.dbtmpdir
3557 ensure_dirs(self.dbtmpdir)
3559 # run preinst script
3560 showMessage(_(">>> Merging %(cpv)s to %(destroot)s\n") % \
3561 {"cpv":self.mycpv, "destroot":destroot})
3562 phase = EbuildPhase(background=False, phase="preinst",
3563 scheduler=self._scheduler, settings=self.settings)
3567 # XXX: Decide how to handle failures here.
3569 showMessage(_("!!! FAILED preinst: ")+str(a)+"\n",
3570 level=logging.ERROR, noiselevel=-1)
3573 # copy "info" files (like SLOT, CFLAGS, etc.) into the database
3574 for x in os.listdir(inforoot):
3575 self.copyfile(inforoot+"/"+x)
3577 # write local package counter for recording
3579 counter = self.vartree.dbapi.counter_tick(mycpv=self.mycpv)
3580 f = io.open(_unicode_encode(os.path.join(self.dbtmpdir, 'COUNTER'),
3581 encoding=_encodings['fs'], errors='strict'),
3582 mode='w', encoding=_encodings['repo.content'],
3583 errors='backslashreplace')
3584 f.write(_unicode_decode(str(counter)))
3587 self.updateprotect()
3589 #if we have a file containing previously-merged config file md5sums, grab it.
3590 self.vartree.dbapi._fs_lock()
3592 cfgfiledict = grabdict(self.vartree.dbapi._conf_mem_file)
3593 if "NOCONFMEM" in self.settings:
3594 cfgfiledict["IGNORE"]=1
3596 cfgfiledict["IGNORE"]=0
3598 # Always behave like --noconfmem is enabled for downgrades
3599 # so that people who don't know about this option are less
3600 # likely to get confused when doing upgrade/downgrade cycles.
3601 pv_split = catpkgsplit(self.mycpv)[1:]
3602 for other in others_in_slot:
3603 if pkgcmp(pv_split, catpkgsplit(other.mycpv)[1:]) < 0:
3604 cfgfiledict["IGNORE"] = 1
3607 rval = self._merge_contents(srcroot, destroot, cfgfiledict)
3608 if rval != os.EX_OK:
3611 self.vartree.dbapi._fs_unlock()
3613 # These caches are populated during collision-protect and the data
3614 # they contain is now invalid. It's very important to invalidate
3615 # the contents_inodes cache so that FEATURES=unmerge-orphans
3616 # doesn't unmerge anything that belongs to this package that has
3618 for dblnk in others_in_slot:
3619 dblnk._clear_contents_cache()
3620 self._clear_contents_cache()
3622 linkmap = self.vartree.dbapi._linkmap
3623 plib_registry = self.vartree.dbapi._plib_registry
3624 # We initialize preserve_paths to an empty set rather
3625 # than None here because it plays an important role
3626 # in prune_plib_registry logic by serving to indicate
3627 # that we have a replacement for a package that's
3630 preserve_paths = set()
3632 if not (self._linkmap_broken or linkmap is None or
3633 plib_registry is None):
3634 self.vartree.dbapi._fs_lock()
3635 plib_registry.lock()
3637 plib_registry.load()
3638 needed = os.path.join(inforoot, linkmap._needed_aux_key)
3639 self._linkmap_rebuild(include_file=needed)
3641 # Preserve old libs if they are still in use
3642 # TODO: Handle cases where the previous instance
3643 # has already been uninstalled but it still has some
3644 # preserved libraries in the registry that we may
3645 # want to preserve here.
3646 preserve_paths = self._find_libs_to_preserve()
3648 plib_registry.unlock()
3649 self.vartree.dbapi._fs_unlock()
3652 self._add_preserve_libs_to_contents(preserve_paths)
3654 # If portage is reinstalling itself, remove the old
3655 # version now since we want to use the temporary
3656 # PORTAGE_BIN_PATH that will be removed when we return.
3657 reinstall_self = False
3658 if self.myroot == "/" and \
3659 match_from_list(PORTAGE_PACKAGE_ATOM, [self.mycpv]):
3660 reinstall_self = True
3662 emerge_log = self._emerge_log
3664 # If we have any preserved libraries then autoclean
3665 # is forced so that preserve-libs logic doesn't have
3666 # to account for the additional complexity of the
3667 # AUTOCLEAN=no mode.
3668 autoclean = self.settings.get("AUTOCLEAN", "yes") == "yes" \
3672 emerge_log(_(" >>> AUTOCLEAN: %s") % (slot_atom,))
3674 others_in_slot.append(self) # self has just been merged
3675 for dblnk in list(others_in_slot):
3678 if not (autoclean or dblnk.mycpv == self.mycpv or reinstall_self):
3680 showMessage(_(">>> Safely unmerging already-installed instance...\n"))
3681 emerge_log(_(" === Unmerging... (%s)") % (dblnk.mycpv,))
3682 others_in_slot.remove(dblnk) # dblnk will unmerge itself now
3683 dblnk._linkmap_broken = self._linkmap_broken
3684 dblnk.settings["REPLACED_BY_VERSION"] = portage.versions.cpv_getversion(self.mycpv)
3685 dblnk.settings.backup_changes("REPLACED_BY_VERSION")
3686 unmerge_rval = dblnk.unmerge(ldpath_mtimes=prev_mtimes,
3687 others_in_slot=others_in_slot, needed=needed,
3688 preserve_paths=preserve_paths)
3689 dblnk.settings.pop("REPLACED_BY_VERSION", None)
3691 if unmerge_rval == os.EX_OK:
3692 emerge_log(_(" >>> unmerge success: %s") % (dblnk.mycpv,))
3694 emerge_log(_(" !!! unmerge FAILURE: %s") % (dblnk.mycpv,))
3698 # TODO: Check status and abort if necessary.
3702 showMessage(_(">>> Original instance of package unmerged safely.\n"))
3704 if len(others_in_slot) > 1:
3705 showMessage(colorize("WARN", _("WARNING:"))
3706 + _(" AUTOCLEAN is disabled. This can cause serious"
3707 " problems due to overlapping packages.\n"),
3708 level=logging.WARN, noiselevel=-1)
3710 # We hold both directory locks.
3711 self.dbdir = self.dbpkgdir
3715 _movefile(self.dbtmpdir, self.dbpkgdir, mysettings=self.settings)
3719 # Check for file collisions with blocking packages
3720 # and remove any colliding files from their CONTENTS
3721 # since they now belong to this package.
3722 self._clear_contents_cache()
3723 contents = self.getcontents()
3724 destroot_len = len(destroot) - 1
3727 for blocker in blockers:
3728 self.vartree.dbapi.removeFromContents(blocker, iter(contents),
3729 relative_paths=False)
3733 plib_registry = self.vartree.dbapi._plib_registry
3735 self.vartree.dbapi._fs_lock()
3736 plib_registry.lock()
3738 plib_registry.load()
3741 # keep track of the libs we preserved
3742 plib_registry.register(self.mycpv, slot, counter,
3743 sorted(preserve_paths))
3745 # Unregister any preserved libs that this package has overwritten
3746 # and update the contents of the packages that owned them.
3747 plib_dict = plib_registry.getPreservedLibs()
3748 for cpv, paths in plib_collisions.items():
3749 if cpv not in plib_dict:
3751 has_vdb_entry = False
3752 if cpv != self.mycpv:
3753 # If we've replaced another instance with the
3754 # same cpv then the vdb entry no longer belongs
3755 # to it, so we'll have to get the slot and counter
3756 # from plib_registry._data instead.
3757 self.vartree.dbapi.lock()
3760 slot, counter = self.vartree.dbapi.aux_get(
3761 cpv, ["SLOT", "COUNTER"])
3765 has_vdb_entry = True
3766 self.vartree.dbapi.removeFromContents(
3769 self.vartree.dbapi.unlock()
3771 if not has_vdb_entry:
3772 # It's possible for previously unmerged packages
3773 # to have preserved libs in the registry, so try
3774 # to retrieve the slot and counter from there.
3775 has_registry_entry = False
3776 for plib_cps, (plib_cpv, plib_counter, plib_paths) in \
3777 plib_registry._data.items():
3781 cp, slot = plib_cps.split(":", 1)
3784 counter = plib_counter
3785 has_registry_entry = True
3788 if not has_registry_entry:
3791 remaining = [f for f in plib_dict[cpv] if f not in paths]
3792 plib_registry.register(cpv, slot, counter, remaining)
3794 plib_registry.store()
3796 plib_registry.unlock()
3797 self.vartree.dbapi._fs_unlock()
3799 self.vartree.dbapi._add(self)
3800 contents = self.getcontents()
3803 self.settings["PORTAGE_UPDATE_ENV"] = \
3804 os.path.join(self.dbpkgdir, "environment.bz2")
3805 self.settings.backup_changes("PORTAGE_UPDATE_ENV")
3807 phase = EbuildPhase(background=False, phase="postinst",
3808 scheduler=self._scheduler, settings=self.settings)
3812 showMessage(_(">>> %s merged.\n") % self.mycpv)
3814 self.settings.pop("PORTAGE_UPDATE_ENV", None)
3817 # It's stupid to bail out here, so keep going regardless of
3818 # phase return code.
3819 showMessage(_("!!! FAILED postinst: ")+str(a)+"\n",
3820 level=logging.ERROR, noiselevel=-1)
3822 #update environment settings, library paths. DO NOT change symlinks.
3824 target_root=self.settings['ROOT'], prev_mtimes=prev_mtimes,
3825 contents=contents, env=self.settings,
3826 writemsg_level=self._display_merge, vardbapi=self.vartree.dbapi)
3828 # For gcc upgrades, preserved libs have to be removed after the
3829 # the library path has been updated.
3830 self._prune_plib_registry()
# Generate a not-yet-existing backup destination for path *p*, of the form
# p + '.backup.' + NNNN.  NOTE(review): this excerpt is sampled — the probe
# loop and return statement are not visible here.
3834 def _new_backup_path(self, p):
3836 This works for any type of path, such as a regular file, symlink,
3837 or directory. The parent directory is assumed to exist.
3838 The returned filename is of the form p + '.backup.' + x, where
3839 x guarantees that the returned path does not exist yet.
# Zero-pad the counter to four digits so backup names sort lexicographically.
3846 backup_p = p + '.backup.' + str(x).rjust(4, '0')
# Drive the multi-pass merge of ${D} (srcroot) into the live filesystem
# (destroot), recording every merged entry in the temporary vdb CONTENTS
# file and persisting config-protect md5 memory ("confmem") afterwards.
# NOTE(review): sampled excerpt — several statements (umask restore,
# outfile.close(), return values) are not visible here.
3854 def _merge_contents(self, srcroot, destroot, cfgfiledict):
# Keep a snapshot so we only rewrite the confmem file when it changed.
3856 cfgfiledict_orig = cfgfiledict.copy()
3858 # open CONTENTS file (possibly overwriting old one) for recording
3859 # Use atomic_ofstream for automatic coercion of raw bytes to
3860 # unicode, in order to prevent TypeError when writing raw bytes
3861 # to TextIOWrapper with python2.
3862 outfile = atomic_ofstream(_unicode_encode(
3863 os.path.join(self.dbtmpdir, 'CONTENTS'),
3864 encoding=_encodings['fs'], errors='strict'),
3865 mode='w', encoding=_encodings['repo.content'],
3866 errors='backslashreplace')
3868 # Don't bump mtimes on merge since some application require
3869 # preservation of timestamps. This means that the unmerge phase must
3870 # check to see if file belongs to an installed instance in the same
3874 # set umask to 0 for merging; back up umask, save old one in prevmask (since this is a global change)
3875 prevmask = os.umask(0)
3878 # we do a first merge; this will recurse through all files in our srcroot but also build up a
3879 # "second hand" of symlinks to merge later
3880 if self.mergeme(srcroot, destroot, outfile, secondhand,
3881 self.settings["EPREFIX"].lstrip(os.sep), cfgfiledict, mymtime):
3884 # now, it's time for dealing our second hand; we'll loop until we can't merge anymore. The rest are
3885 # broken symlinks. We'll merge them too.
# Loop terminates when a pass makes no progress (length stops shrinking).
3887 while len(secondhand) and len(secondhand)!=lastlen:
3888 # clear the thirdhand. Anything from our second hand that
3889 # couldn't get merged will be added to thirdhand.
3892 if self.mergeme(srcroot, destroot, outfile, thirdhand,
3893 secondhand, cfgfiledict, mymtime):
3897 lastlen = len(secondhand)
3899 # our thirdhand now becomes our secondhand. It's ok to throw
3900 # away secondhand since thirdhand contains all the stuff that
3901 # couldn't be merged.
3902 secondhand = thirdhand
3905 # force merge of remaining symlinks (broken or circular; oh well)
# Passing secondhand=None puts mergeme into "force" mode (no third pass).
3906 if self.mergeme(srcroot, destroot, outfile, None,
3907 secondhand, cfgfiledict, mymtime):
3913 #if we opened it, close it
3917 # write out our collection of md5sums
3918 if cfgfiledict != cfgfiledict_orig:
# "IGNORE" is a runtime-only flag; never persist it to the confmem file.
3919 cfgfiledict.pop("IGNORE", None)
3921 writedict(cfgfiledict, self.vartree.dbapi._conf_mem_file)
# If the private dir is missing, create it and retry the write once.
3922 except InvalidLocation:
3923 self.settings._init_dirs()
3924 writedict(cfgfiledict, self.vartree.dbapi._conf_mem_file)
# Core per-entry merge routine: walks one directory level (or an explicit
# list) of ${D}, dispatching on lstat type — symlink / directory / regular
# file / fifo-or-device — and appending one CONTENTS line per merged entry
# ("sym", "dir", "obj", "fif", "dev").  Recurses into subdirectories.
# NOTE(review): sampled excerpt — try/else scaffolding and several
# statements are not visible; statement order here is load-bearing, so the
# code is left byte-identical.
3928 def mergeme(self, srcroot, destroot, outfile, secondhand, stufftomerge, cfgfiledict, thismtime):
3931 This function handles actual merging of the package contents to the livefs.
3932 It also handles config protection.
3934 @param srcroot: Where are we copying files from (usually ${D})
3935 @type srcroot: String (Path)
3936 @param destroot: Typically ${ROOT}
3937 @type destroot: String (Path)
3938 @param outfile: File to log operations to
3939 @type outfile: File Object
3940 @param secondhand: A set of items to merge in pass two (usually
3941 or symlinks that point to non-existing files that may get merged later)
3942 @type secondhand: List
3943 @param stufftomerge: Either a diretory to merge, or a list of items.
3944 @type stufftomerge: String or List
3945 @param cfgfiledict: { File:mtime } mapping for config_protected files
3946 @type cfgfiledict: Dictionary
3947 @param thismtime: The current time (typically long(time.time())
3948 @type thismtime: Long
3949 @rtype: None or Boolean
3956 showMessage = self._display_merge
3957 writemsg = self._display_merge
# Normalize both roots to exactly one trailing separator so prefix
# comparisons and len()-based slicing below are consistent.
3962 srcroot = normalize_path(srcroot).rstrip(sep) + sep
3963 destroot = normalize_path(destroot).rstrip(sep) + sep
3964 calc_prelink = "prelink-checksums" in self.settings.features
3966 # this is supposed to merge a list of files. There will be 2 forms of argument passing.
3967 if isinstance(stufftomerge, basestring):
3968 #A directory is specified. Figure out protection paths, listdir() it and process it.
3969 mergelist = os.listdir(join(srcroot, stufftomerge))
3970 offset = stufftomerge
3972 mergelist = stufftomerge
3975 for i, x in enumerate(mergelist):
3977 mysrc = join(srcroot, offset, x)
3978 mydest = join(destroot, offset, x)
3979 # myrealdest is mydest without the $ROOT prefix (makes a difference if ROOT!="/")
3980 myrealdest = join(sep, offset, x)
3981 # stat file once, test using S_* macros many times (faster that way)
3982 mystat = os.lstat(mysrc)
3983 mymode = mystat[stat.ST_MODE]
3984 # handy variables; mydest is the target object on the live filesystems;
3985 # mysrc is the source object in the temporary install dir
3987 mydstat = os.lstat(mydest)
3988 mydmode = mydstat.st_mode
3989 except OSError as e:
3990 if e.errno != errno.ENOENT:
3993 #dest file doesn't exist
3997 if stat.S_ISLNK(mymode):
3998 # we are merging a symbolic link
3999 # The file name of mysrc and the actual file that it points to
4000 # will have earlier been forcefully converted to the 'merge'
4001 # encoding if necessary, but the content of the symbolic link
4002 # may need to be forcefully converted here.
4003 myto = _os.readlink(_unicode_encode(mysrc,
4004 encoding=_encodings['merge'], errors='strict'))
4006 myto = _unicode_decode(myto,
4007 encoding=_encodings['merge'], errors='strict')
4008 except UnicodeDecodeError:
# Decode failed: round-trip through ascii with backslashreplace so the
# link target is representable, then rewrite the symlink in place.
4009 myto = _unicode_decode(myto, encoding=_encodings['merge'],
4011 myto = _unicode_encode(myto, encoding='ascii',
4012 errors='backslashreplace')
4013 myto = _unicode_decode(myto, encoding=_encodings['merge'],
4016 os.symlink(myto, mysrc)
4018 # Pass in the symlink target in order to bypass the
4019 # os.readlink() call inside abssymlink(), since that
4020 # call is unsafe if the merge encoding is not ascii
4021 # or utf_8 (see bug #382021).
4022 myabsto = abssymlink(mysrc, target=myto)
4024 if myabsto.startswith(srcroot):
4025 myabsto = myabsto[len(srcroot):]
4026 myabsto = myabsto.lstrip(sep)
4027 if self.settings and self.settings["D"]:
4028 if myto.startswith(self.settings["D"]):
4029 myto = myto[len(self.settings["D"]):]
4030 # myrealto contains the path of the real file to which this symlink points.
4031 # we can simply test for existence of this file to see if the target has been merged yet
4032 myrealto = normalize_path(os.path.join(destroot, myabsto))
4035 if stat.S_ISDIR(mydmode):
4036 # we can't merge a symlink over a directory
4037 newdest = self._new_backup_path(mydest)
4040 msg.append(_("Installation of a symlink is blocked by a directory:"))
4041 msg.append(" '%s'" % mydest)
4042 msg.append(_("This symlink will be merged with a different name:"))
4043 msg.append(" '%s'" % newdest)
4045 self._eerror("preinst", msg)
4048 elif not stat.S_ISLNK(mydmode):
4049 if os.path.exists(mysrc) and stat.S_ISDIR(os.stat(mysrc)[stat.ST_MODE]):
4050 # Kill file blocking installation of symlink to dir #71787
4052 elif self.isprotected(mydest):
4053 # Use md5 of the target in ${D} if it exists...
4055 newmd5 = perform_md5(join(srcroot, myabsto))
4056 except FileNotFound:
4057 # Maybe the target is merged already.
4059 newmd5 = perform_md5(myrealto)
4060 except FileNotFound:
4062 mydest = new_protect_filename(mydest, newmd5=newmd5)
4064 # if secondhand is None it means we're operating in "force" mode and should not create a second hand.
4065 if (secondhand != None) and (not os.path.exists(myrealto)):
4066 # either the target directory doesn't exist yet or the target file doesn't exist -- or
4067 # the target is a broken symlink. We will add this file to our "second hand" and merge
4069 secondhand.append(mysrc[len(srcroot):])
4071 # unlinking no longer necessary; "movefile" will overwrite symlinks atomically and correctly
4072 mymtime = movefile(mysrc, mydest, newmtime=thismtime,
4073 sstat=mystat, mysettings=self.settings,
4074 encoding=_encodings['merge'])
4076 showMessage(">>> %s -> %s\n" % (mydest, myto))
# CONTENTS record: "sym <dest> -> <target> <mtime>"
4077 outfile.write("sym "+myrealdest+" -> "+myto+" "+str(mymtime)+"\n")
4079 showMessage(_("!!! Failed to move file.\n"),
4080 level=logging.ERROR, noiselevel=-1)
4081 showMessage("!!! %s -> %s\n" % (mydest, myto),
4082 level=logging.ERROR, noiselevel=-1)
4084 elif stat.S_ISDIR(mymode):
4085 # we are merging a directory
4087 # destination exists
4090 # Save then clear flags on dest.
# BSD file flags (e.g. immutable) would block the merge; restore later.
4091 dflags = mydstat.st_flags
4093 bsd_chflags.lchflags(mydest, 0)
4095 if not os.access(mydest, os.W_OK):
4096 pkgstuff = pkgsplit(self.pkg)
4097 writemsg(_("\n!!! Cannot write to '%s'.\n") % mydest, noiselevel=-1)
4098 writemsg(_("!!! Please check permissions and directories for broken symlinks.\n"))
4099 writemsg(_("!!! You may start the merge process again by using ebuild:\n"))
4100 writemsg("!!! ebuild "+self.settings["PORTDIR"]+"/"+self.cat+"/"+pkgstuff[0]+"/"+self.pkg+".ebuild merge\n")
4101 writemsg(_("!!! And finish by running this: env-update\n\n"))
4104 if stat.S_ISDIR(mydmode) or \
4105 (stat.S_ISLNK(mydmode) and os.path.isdir(mydest)):
4106 # a symlink to an existing directory will work for us; keep it:
4107 showMessage("--- %s/\n" % mydest)
4109 bsd_chflags.lchflags(mydest, dflags)
4111 # a non-directory and non-symlink-to-directory. Won't work for us. Move out of the way.
4112 backup_dest = self._new_backup_path(mydest)
4115 msg.append(_("Installation of a directory is blocked by a file:"))
4116 msg.append(" '%s'" % mydest)
4117 msg.append(_("This file will be renamed to a different name:"))
4118 msg.append(" '%s'" % backup_dest)
4120 self._eerror("preinst", msg)
4121 if movefile(mydest, backup_dest,
4122 mysettings=self.settings,
4123 encoding=_encodings['merge']) is None:
4125 showMessage(_("bak %s %s.backup\n") % (mydest, mydest),
4126 level=logging.ERROR, noiselevel=-1)
4127 #now create our directory
4129 if self.settings.selinux_enabled():
4130 _selinux_merge.mkdir(mydest, mysrc)
4133 except OSError as e:
4134 # Error handling should be equivalent to
4135 # portage.util.ensure_dirs() for cases
4137 if e.errno in (errno.EEXIST,):
4139 elif os.path.isdir(mydest):
4146 bsd_chflags.lchflags(mydest, dflags)
# Propagate mode and ownership from the image dir to the live dir.
4147 os.chmod(mydest, mystat[0])
4148 os.chown(mydest, mystat[4], mystat[5])
4149 showMessage(">>> %s/\n" % mydest)
4152 #destination doesn't exist
4153 if self.settings.selinux_enabled():
4154 _selinux_merge.mkdir(mydest, mysrc)
4157 except OSError as e:
4158 # Error handling should be equivalent to
4159 # portage.util.ensure_dirs() for cases
4161 if e.errno in (errno.EEXIST,):
4163 elif os.path.isdir(mydest):
4168 os.chmod(mydest, mystat[0])
4169 os.chown(mydest, mystat[4], mystat[5])
4170 showMessage(">>> %s/\n" % mydest)
4171 outfile.write("dir "+myrealdest+"\n")
4172 # recurse and merge this directory
4173 if self.mergeme(srcroot, destroot, outfile, secondhand,
4174 join(offset, x), cfgfiledict, thismtime):
4176 elif stat.S_ISREG(mymode):
4177 # we are merging a regular file
4178 mymd5 = perform_md5(mysrc, calc_prelink=calc_prelink)
4179 # calculate config file protection stuff
4180 mydestdir = os.path.dirname(mydest)
4184 protected = self.isprotected(mydest)
4186 # destination file exists
4188 if stat.S_ISDIR(mydmode):
4189 # install of destination is blocked by an existing directory with the same name
4190 newdest = self._new_backup_path(mydest)
4193 msg.append(_("Installation of a regular file is blocked by a directory:"))
4194 msg.append(" '%s'" % mydest)
4195 msg.append(_("This file will be merged with a different name:"))
4196 msg.append(" '%s'" % newdest)
4198 self._eerror("preinst", msg)
4201 elif stat.S_ISREG(mydmode) or (stat.S_ISLNK(mydmode) and os.path.exists(mydest) and stat.S_ISREG(os.stat(mydest)[stat.ST_MODE])):
4202 # install of destination is blocked by an existing regular file,
4203 # or by a symlink to an existing regular file;
4204 # now, config file management may come into play.
4205 # we only need to tweak mydest if cfg file management is in play.
4207 # we have a protection path; enable config file management.
4209 destmd5 = perform_md5(mydest, calc_prelink=calc_prelink)
4210 if mymd5 == destmd5:
4211 #file already in place; simply update mtimes of destination
4214 if mymd5 == cfgfiledict.get(myrealdest, [None])[0]:
4215 """ An identical update has previously been
4216 merged. Skip it unless the user has chosen
4218 moveme = cfgfiledict["IGNORE"]
4219 cfgprot = cfgfiledict["IGNORE"]
4222 mymtime = mystat[stat.ST_MTIME]
4227 # Merging a new file, so update confmem.
4228 cfgfiledict[myrealdest] = [mymd5]
4229 elif destmd5 == cfgfiledict.get(myrealdest, [None])[0]:
4230 """A previously remembered update has been
4231 accepted, so it is removed from confmem."""
4232 del cfgfiledict[myrealdest]
# Redirect the write to a ._cfgNNNN_ protected filename.
4235 mydest = new_protect_filename(mydest, newmd5=mymd5)
4237 # whether config protection or not, we merge the new file the
4238 # same way. Unless moveme=0 (blocking directory)
4240 # Create hardlinks only for source files that already exist
4241 # as hardlinks (having identical st_dev and st_ino).
4242 hardlink_key = (mystat.st_dev, mystat.st_ino)
4244 hardlink_candidates = self._md5_merge_map.get(hardlink_key)
4245 if hardlink_candidates is None:
4246 hardlink_candidates = []
4247 self._md5_merge_map[hardlink_key] = hardlink_candidates
4249 mymtime = movefile(mysrc, mydest, newmtime=thismtime,
4250 sstat=mystat, mysettings=self.settings,
4251 hardlink_candidates=hardlink_candidates,
4252 encoding=_encodings['merge'])
4255 if hardlink_candidates is not None:
4256 hardlink_candidates.append(mydest)
# CONTENTS record: "obj <dest> <md5> <mtime>"
4260 outfile.write("obj "+myrealdest+" "+mymd5+" "+str(mymtime)+"\n")
4261 showMessage("%s %s\n" % (zing,mydest))
4263 # we are merging a fifo or device node
4266 # destination doesn't exist
4267 if movefile(mysrc, mydest, newmtime=thismtime,
4268 sstat=mystat, mysettings=self.settings,
4269 encoding=_encodings['merge']) is not None:
4273 if stat.S_ISFIFO(mymode):
4274 outfile.write("fif %s\n" % myrealdest)
4276 outfile.write("dev %s\n" % myrealdest)
4277 showMessage(zing + " " + mydest + "\n")
# Public entry point of dblink: lock (unless parallel-install), run
# treewalk() to do the actual merge, fire success/die hooks, process elog
# messages, run the "clean" phase, and clear linkmap caches.
# NOTE(review): sampled excerpt — lock/unlock and try/finally lines are
# not visible here.
4279 def merge(self, mergeroot, inforoot, myroot=None, myebuild=None, cleanup=0,
4280 mydbapi=None, prev_mtimes=None, counter=None):
4282 @param myroot: ignored, self._eroot is used instead
4286 parallel_install = "parallel-install" in self.settings.features
4287 if not parallel_install:
4289 self.vartree.dbapi._bump_mtime(self.mycpv)
4290 if self._scheduler is None:
4291 self._scheduler = PollScheduler().sched_iface
4293 retval = self.treewalk(mergeroot, myroot, inforoot, myebuild,
4294 cleanup=cleanup, mydbapi=mydbapi, prev_mtimes=prev_mtimes,
4297 # If PORTAGE_BUILDDIR doesn't exist, then it probably means
4298 # fail-clean is enabled, and the success/die hooks have
4299 # already been called by EbuildPhase.
4300 if os.path.isdir(self.settings['PORTAGE_BUILDDIR']):
# Select which hook set to run based on the treewalk result.
4302 if retval == os.EX_OK:
4303 phase = 'success_hooks'
4307 ebuild_phase = MiscFunctionsProcess(
4308 background=False, commands=[phase],
4309 scheduler=self._scheduler, settings=self.settings)
4310 ebuild_phase.start()
4312 self._elog_process()
# Run the "clean" phase unless FEATURES=noclean, or the merge failed
# without fail-clean enabled.
4314 if 'noclean' not in self.settings.features and \
4315 (retval == os.EX_OK or \
4316 'fail-clean' in self.settings.features):
4317 if myebuild is None:
4318 myebuild = os.path.join(inforoot, self.pkg + ".ebuild")
4320 doebuild_environment(myebuild, "clean",
4321 settings=self.settings, db=mydbapi)
4322 phase = EbuildPhase(background=False, phase="clean",
4323 scheduler=self._scheduler, settings=self.settings)
4327 self.settings.pop('REPLACING_VERSIONS', None)
4328 if self.vartree.dbapi._linkmap is None:
4329 # preserve-libs is entirely disabled
4332 self.vartree.dbapi._linkmap._clear_cache()
4333 self.vartree.dbapi._bump_mtime(self.mycpv)
4334 if not parallel_install:
# Read a vdb metadata file (e.g. SLOT, USE) and collapse all whitespace
# runs to single spaces.  NOTE(review): sampled excerpt — the early return
# for a missing file and the read/split lines are not visible here.
4338 def getstring(self,name):
4339 "returns contents of a file with whitespace converted to spaces"
4340 if not os.path.exists(self.dbdir+"/"+name):
4343 _unicode_encode(os.path.join(self.dbdir, name),
4344 encoding=_encodings['fs'], errors='strict'),
4345 mode='r', encoding=_encodings['repo.content'], errors='replace'
4347 return " ".join(mydata)
def copyfile(self, fname):
	"""Copy *fname* into this package's vdb directory, keeping its basename."""
	destination = self.dbdir + "/" + os.path.basename(fname)
	shutil.copyfile(fname, destination)
# Return the full text of a vdb metadata file.  NOTE(review): sampled
# excerpt — the missing-file early return and the trailing ).read() /
# close are not visible here.
4352 def getfile(self,fname):
4353 if not os.path.exists(self.dbdir+"/"+fname):
# Decode with errors='replace' so a corrupt entry never raises here.
4355 return io.open(_unicode_encode(os.path.join(self.dbdir, fname),
4356 encoding=_encodings['fs'], errors='strict'),
4357 mode='r', encoding=_encodings['repo.content'], errors='replace'
# Atomically write *data* to a vdb metadata file.  Binary mode is used for
# environment.bz2 and for any non-string payload; text mode otherwise.
# NOTE(review): sampled excerpt — the kwargs initialization and the else:
# line are not visible here.
4360 def setfile(self,fname,data):
4362 if fname == 'environment.bz2' or not isinstance(data, basestring):
4363 kwargs['mode'] = 'wb'
4365 kwargs['mode'] = 'w'
4366 kwargs['encoding'] = _encodings['repo.content']
# write_atomic replaces the target in one step, avoiding torn writes.
4367 write_atomic(os.path.join(self.dbdir, fname), data, **kwargs)
# Read a whitespace-separated vdb file (e.g. NEEDED) and return its tokens
# as a list.  NOTE(review): sampled excerpt — the empty-list early return,
# the outer loop over lines, and the append/return are not visible here.
4369 def getelements(self,ename):
4370 if not os.path.exists(self.dbdir+"/"+ename):
4372 mylines = io.open(_unicode_encode(
4373 os.path.join(self.dbdir, ename),
4374 encoding=_encodings['fs'], errors='strict'),
4375 mode='r', encoding=_encodings['repo.content'], errors='replace'
# x[:-1] strips the trailing newline before tokenizing each line.
4379 for y in x[:-1].split():
# Write *mylist* to a vdb file, one element per line.  NOTE(review):
# sampled excerpt — the loop header over mylist and the file close are not
# visible here.
4383 def setelements(self,mylist,ename):
4384 myelement = io.open(_unicode_encode(
4385 os.path.join(self.dbdir, ename),
4386 encoding=_encodings['fs'], errors='strict'),
4387 mode='w', encoding=_encodings['repo.content'],
4388 errors='backslashreplace')
4390 myelement.write(_unicode_decode(x+"\n"))
def isregular(self):
	"""Return True if this package has a CATEGORY file in its vdb directory.

	A dblink can be virtual *and* regular at the same time.
	"""
	category_file = os.path.join(self.dbdir, "CATEGORY")
	return os.path.exists(category_file)
# Module-level merge entry point: validates EROOT writability, then hands
# the actual work off to a MergeProcess task and waits for it.
# NOTE(review): sampled excerpt — the remaining parameters of the
# signature, the docstring delimiters, the permission-denied return, and
# the final return are not visible here.
4397 def merge(mycat, mypkg, pkgloc, infloc,
4398 myroot=None, settings=None, myebuild=None,
4399 mytree=None, mydbapi=None, vartree=None, prev_mtimes=None, blockers=None,
4402 @param myroot: ignored, settings['EROOT'] is used instead
4405 if settings is None:
4406 raise TypeError("settings argument is required")
4407 if not os.access(settings['EROOT'], os.W_OK):
4408 writemsg(_("Permission denied: access('%s', W_OK)\n") % settings['EROOT'],
4411 background = (settings.get('PORTAGE_BACKGROUND') == '1')
4412 merge_task = MergeProcess(
4413 mycat=mycat, mypkg=mypkg, settings=settings,
4414 treetype=mytree, vartree=vartree,
# Fall back to a fresh PollScheduler interface when none was supplied.
4415 scheduler=(scheduler or PollScheduler().sched_iface),
4416 background=background, blockers=blockers, pkgloc=pkgloc,
4417 infloc=infloc, myebuild=myebuild, mydbapi=mydbapi,
4418 prev_mtimes=prev_mtimes, logfile=settings.get('PORTAGE_LOG_FILE'))
# Block until the merge subprocess finishes.
4420 retcode = merge_task.wait()
# Module-level unmerge entry point: builds a dblink for cat/pkg and calls
# its unmerge(), with locking unless parallel-install is enabled.
# NOTE(review): sampled excerpt — lock/unlock, try/finally, and the
# return statements are not visible here.
4423 def unmerge(cat, pkg, myroot=None, settings=None,
4424 mytrimworld=None, vartree=None,
4425 ldpath_mtimes=None, scheduler=None):
4427 @param myroot: ignored, settings['EROOT'] is used instead
4428 @param mytrimworld: ignored
4431 if settings is None:
4432 raise TypeError("settings argument is required")
4433 mylink = dblink(cat, pkg, settings=settings, treetype="vartree",
4434 vartree=vartree, scheduler=scheduler)
4435 vartree = mylink.vartree
4436 parallel_install = "parallel-install" in settings.features
4437 if not parallel_install:
4441 retval = mylink.unmerge(ldpath_mtimes=ldpath_mtimes)
4442 if retval == os.EX_OK:
4451 if vartree.dbapi._linkmap is None:
4452 # preserve-libs is entirely disabled
4455 vartree.dbapi._linkmap._clear_cache()
4456 if not parallel_install:
def write_contents(contents, root, f):
	"""
	Write contents to any file like object. The file will be left open.

	@param contents: mapping of absolute path -> entry data, where entry
		data is ("obj", mtime, md5sum), ("sym", mtime, link target), or
		a 1-tuple/list of type for "dir", "dev" and "fif" entries
	@param root: root prefix to strip from each path (one trailing slash
		of it is kept, so entries stay absolute relative to root)
	@param f: writable text-file-like object receiving CONTENTS lines
	"""
	# Keep the trailing separator of root so relative paths start with "/".
	root_len = len(root) - 1
	for filename in sorted(contents):
		entry_data = contents[filename]
		entry_type = entry_data[0]
		relative_filename = filename[root_len:]
		if entry_type == "obj":
			entry_type, mtime, md5sum = entry_data
			# CONTENTS format: "obj <path> <md5> <mtime>"
			line = "%s %s %s %s\n" % \
				(entry_type, relative_filename, md5sum, mtime)
		elif entry_type == "sym":
			entry_type, mtime, link = entry_data
			# CONTENTS format: "sym <path> -> <target> <mtime>"
			line = "%s %s -> %s %s\n" % \
				(entry_type, relative_filename, link, mtime)
		else: # dir, dev, fif
			line = "%s %s\n" % (entry_type, relative_filename)
		# BUGFIX: the computed line was never emitted in the previous
		# version of this block; without this write the function is a no-op.
		f.write(line)
# Add every entry of a package's CONTENTS to an open tarfile, reading the
# live filesystem under *root*.  Optionally replaces config-protected
# regular files with placeholder stubs and reports progress via
# onProgress(maxval, curval).  NOTE(review): sampled excerpt — the
# encoding probe loop, the per-path loop header, curval bookkeeping, and
# several close() calls are not visible here.
4480 def tar_contents(contents, root, tar, protect=None, onProgress=None):
4482 encoding = _encodings['merge']
4487 encoding=_encodings['merge'],
4489 except UnicodeEncodeError:
4490 # The package appears to have been merged with a
4491 # different value of sys.getfilesystemencoding(),
4492 # so fall back to utf_8 if appropriate.
4496 encoding=_encodings['fs'],
4498 except UnicodeEncodeError:
4502 encoding = _encodings['fs']
# Normalize root to exactly one trailing separator for prefix slicing.
4504 root = normalize_path(root).rstrip(os.path.sep) + os.path.sep
4506 maxval = len(contents)
4509 onProgress(maxval, 0)
4510 paths = list(contents)
4515 lst = os.lstat(path)
# A vanished file (ENOENT) is skipped; any other error propagates.
4516 except OSError as e:
4517 if e.errno != errno.ENOENT:
4521 onProgress(maxval, curval)
4523 contents_type = contents[path][0]
4524 if path.startswith(root):
4525 arcname = path[len(root):]
4527 raise ValueError("invalid root argument: '%s'" % root)
4529 if 'dir' == contents_type and \
4530 not stat.S_ISDIR(lst.st_mode) and \
4531 os.path.isdir(live_path):
4532 # Even though this was a directory in the original ${D}, it exists
4533 # as a symlink to a directory in the live filesystem. It must be
4534 # recorded as a real directory in the tar file to ensure that tar
4535 # can properly extract it's children.
4536 live_path = os.path.realpath(live_path)
4537 tarinfo = tar.gettarinfo(live_path, arcname)
4539 if stat.S_ISREG(lst.st_mode):
4540 if protect and protect(path):
4541 # Create an empty file as a place holder in order to avoid
4542 # potential collision-protect issues.
4543 f = tempfile.TemporaryFile()
4544 f.write(_unicode_encode(
4545 "# empty file because --include-config=n " + \
4546 "when `quickpkg` was used\n"))
# Size the tar header from the placeholder, not the original file.
4549 tarinfo.size = os.fstat(f.fileno()).st_size
4550 tar.addfile(tarinfo, f)
4553 f = open(_unicode_encode(path,
4555 errors='strict'), 'rb')
4557 tar.addfile(tarinfo, f)
# Non-regular entries (dir/sym/dev/fif) carry no data payload.
4561 tar.addfile(tarinfo)
4563 onProgress(maxval, curval)