1 # Copyright 1998-2012 Gentoo Foundation
2 # Distributed under the terms of the GNU General Public License v2
5 "vardbapi", "vartree", "dblink"] + \
6 ["write_contents", "tar_contents"]
9 portage.proxy.lazyimport.lazyimport(globals(),
10 'portage.checksum:_perform_md5_merge@perform_md5',
11 'portage.data:portage_gid,portage_uid,secpass',
12 'portage.dbapi.dep_expand:dep_expand',
13 'portage.dbapi._MergeProcess:MergeProcess',
14 'portage.dep:dep_getkey,isjustname,match_from_list,' + \
15 'use_reduce,_slot_re',
16 'portage.elog:collect_ebuild_messages,collect_messages,' + \
17 'elog_process,_merge_logentries',
18 'portage.locks:lockdir,unlockdir,lockfile,unlockfile',
19 'portage.output:bold,colorize',
20 'portage.package.ebuild.doebuild:doebuild_environment,' + \
21 '_merge_unicode_error', '_spawn_phase',
22 'portage.package.ebuild.prepare_build_dirs:prepare_build_dirs',
23 'portage.update:fixdbentries',
24 'portage.util:apply_secpass_permissions,ConfigProtect,ensure_dirs,' + \
25 'writemsg,writemsg_level,write_atomic,atomic_ofstream,writedict,' + \
26 'grabdict,normalize_path,new_protect_filename',
27 'portage.util.digraph:digraph',
28 'portage.util.env_update:env_update',
29 'portage.util.listdir:dircache,listdir',
30 'portage.util.movefile:movefile',
31 'portage.util._dyn_libs.PreservedLibsRegistry:PreservedLibsRegistry',
32 'portage.util._dyn_libs.LinkageMapELF:LinkageMapELF@LinkageMap',
33 'portage.versions:best,catpkgsplit,catsplit,cpv_getkey,pkgcmp,' + \
34 '_pkgsplit@pkgsplit,_pkg_str',
39 from portage.const import CACHE_PATH, CONFIG_MEMORY_FILE, \
40 PORTAGE_PACKAGE_ATOM, PRIVATE_PATH, VDB_PATH
41 from portage.const import _ENABLE_DYN_LINK_MAP, _ENABLE_PRESERVE_LIBS
42 from portage.dbapi import dbapi
43 from portage.exception import CommandNotFound, \
44 InvalidData, InvalidLocation, InvalidPackageName, \
45 FileNotFound, PermissionDenied, UnsupportedAPIException
46 from portage.localization import _
48 from portage import abssymlink, _movefile, bsd_chflags
50 # This is a special version of the os module, wrapped for unicode support.
51 from portage import os
52 from portage import shutil
53 from portage import _encodings
54 from portage import _os_merge
55 from portage import _selinux_merge
56 from portage import _unicode_decode
57 from portage import _unicode_encode
59 from _emerge.EbuildBuildDir import EbuildBuildDir
60 from _emerge.EbuildPhase import EbuildPhase
61 from _emerge.emergelog import emergelog
62 from _emerge.PollScheduler import PollScheduler
63 from _emerge.MiscFunctionsProcess import MiscFunctionsProcess
70 from itertools import chain
83 import cPickle as pickle
87 if sys.hexversion >= 0x3000000:
94 class vardbapi(dbapi):
96 _excluded_dirs = ["CVS", "lost+found"]
97 _excluded_dirs = [re.escape(x) for x in _excluded_dirs]
98 _excluded_dirs = re.compile(r'^(\..*|-MERGING-.*|' + \
99 "|".join(_excluded_dirs) + r')$')
101 _aux_cache_version = "1"
102 _owners_cache_version = "1"
104 # Number of uncached packages to trigger cache update, since
105 # it's wasteful to update it for every vdb change.
106 _aux_cache_threshold = 5
108 _aux_cache_keys_re = re.compile(r'^NEEDED\..*$')
109 _aux_multi_line_re = re.compile(r'^(CONTENTS|NEEDED\..*)$')
111 def __init__(self, _unused_param=None, categories=None, settings=None, vartree=None):
113 The categories parameter is unused since the dbapi class
114 now has a categories property that is generated from the
118 # Used by emerge to check whether any packages
119 # have been added or removed.
120 self._pkgs_changed = False
122 # The _aux_cache_threshold doesn't work as designed
123 # if the cache is flushed from a subprocess, so we
124 # use this to avoid waste vdb cache updates.
125 self._flush_cache_enabled = True
127 #cache for category directory mtimes
130 #cache for dependency checks
133 #cache for cp_list results
138 settings = portage.settings
139 self.settings = settings
141 if _unused_param is not None and _unused_param != settings['ROOT']:
142 warnings.warn("The first parameter of the "
143 "portage.dbapi.vartree.vardbapi"
144 " constructor is now unused. Use "
145 "settings['ROOT'] instead.",
146 DeprecationWarning, stacklevel=2)
148 self._eroot = settings['EROOT']
149 self._dbroot = self._eroot + VDB_PATH
153 self._conf_mem_file = self._eroot + CONFIG_MEMORY_FILE
154 self._fs_lock_obj = None
155 self._fs_lock_count = 0
158 vartree = portage.db[settings['EROOT']]['vartree']
159 self.vartree = vartree
160 self._aux_cache_keys = set(
161 ["BUILD_TIME", "CHOST", "COUNTER", "DEPEND", "DESCRIPTION",
162 "EAPI", "HOMEPAGE", "IUSE", "KEYWORDS",
163 "LICENSE", "PDEPEND", "PROPERTIES", "PROVIDE", "RDEPEND",
164 "repository", "RESTRICT" , "SLOT", "USE", "DEFINED_PHASES",
166 self._aux_cache_obj = None
167 self._aux_cache_filename = os.path.join(self._eroot,
168 CACHE_PATH, "vdb_metadata.pickle")
169 self._counter_path = os.path.join(self._eroot,
170 CACHE_PATH, "counter")
172 self._plib_registry = None
173 if _ENABLE_PRESERVE_LIBS:
174 self._plib_registry = PreservedLibsRegistry(settings["ROOT"],
175 os.path.join(self._eroot, PRIVATE_PATH,
176 "preserved_libs_registry"))
179 if _ENABLE_DYN_LINK_MAP:
180 self._linkmap = LinkageMap(self)
181 self._owners = self._owners_db(self)
183 self._cached_counter = None
187 warnings.warn("The root attribute of "
188 "portage.dbapi.vartree.vardbapi"
189 " is deprecated. Use "
190 "settings['ROOT'] instead.",
191 DeprecationWarning, stacklevel=3)
192 return self.settings['ROOT']
194 def getpath(self, mykey, filename=None):
195 # This is an optimized hotspot, so don't use unicode-wrapped
196 # os module and don't use os.path.join().
197 rValue = self._eroot + VDB_PATH + _os.sep + mykey
198 if filename is not None:
199 # If filename is always relative, we can do just
200 # rValue += _os.sep + filename
201 rValue = _os.path.join(rValue, filename)
206 Acquire a reentrant lock, blocking, for cooperation with concurrent
207 processes. State is inherited by subprocesses, allowing subprocesses
208 to reenter a lock that was acquired by a parent process. However,
209 a lock can be released only by the same process that acquired it.
212 self._lock_count += 1
214 if self._lock is not None:
215 raise AssertionError("already locked")
216 # At least the parent needs to exist for the lock file.
217 ensure_dirs(self._dbroot)
218 self._lock = lockdir(self._dbroot)
219 self._lock_count += 1
223 Release a lock, decrementing the recursion level. Each unlock() call
224 must be matched with a prior lock() call, or else an AssertionError
225 will be raised if unlock() is called while not locked.
227 if self._lock_count > 1:
228 self._lock_count -= 1
230 if self._lock is None:
231 raise AssertionError("not locked")
233 unlockdir(self._lock)
238 Acquire a reentrant lock, blocking, for cooperation with concurrent
241 if self._fs_lock_count < 1:
242 if self._fs_lock_obj is not None:
243 raise AssertionError("already locked")
245 self._fs_lock_obj = lockfile(self._conf_mem_file)
246 except InvalidLocation:
247 self.settings._init_dirs()
248 self._fs_lock_obj = lockfile(self._conf_mem_file)
249 self._fs_lock_count += 1
251 def _fs_unlock(self):
253 Release a lock, decrementing the recursion level.
255 if self._fs_lock_count <= 1:
256 if self._fs_lock_obj is None:
257 raise AssertionError("not locked")
258 unlockfile(self._fs_lock_obj)
259 self._fs_lock_obj = None
260 self._fs_lock_count -= 1
262 def _bump_mtime(self, cpv):
264 This is called before an after any modifications, so that consumers
265 can use directory mtimes to validate caches. See bug #290428.
267 base = self._eroot + VDB_PATH
268 cat = catsplit(cpv)[0]
269 catdir = base + _os.sep + cat
273 for x in (catdir, base):
278 def cpv_exists(self, mykey, myrepo=None):
279 "Tells us whether an actual ebuild exists on disk (no masking)"
280 return os.path.exists(self.getpath(mykey))
282 def cpv_counter(self, mycpv):
283 "This method will grab the COUNTER. Returns a counter value."
285 return long(self.aux_get(mycpv, ["COUNTER"])[0])
286 except (KeyError, ValueError):
288 writemsg_level(_("portage: COUNTER for %s was corrupted; " \
289 "resetting to value of 0\n") % (mycpv,),
290 level=logging.ERROR, noiselevel=-1)
293 def cpv_inject(self, mycpv):
294 "injects a real package into our on-disk database; assumes mycpv is valid and doesn't already exist"
295 ensure_dirs(self.getpath(mycpv))
296 counter = self.counter_tick(mycpv=mycpv)
297 # write local package counter so that emerge clean does the right thing
298 write_atomic(self.getpath(mycpv, filename="COUNTER"), str(counter))
300 def isInjected(self, mycpv):
301 if self.cpv_exists(mycpv):
302 if os.path.exists(self.getpath(mycpv, filename="INJECTED")):
304 if not os.path.exists(self.getpath(mycpv, filename="CONTENTS")):
308 def move_ent(self, mylist, repo_match=None):
313 for atom in (origcp, newcp):
314 if not isjustname(atom):
315 raise InvalidPackageName(str(atom))
316 origmatches = self.match(origcp, use_cache=0)
320 for mycpv in origmatches:
321 mycpv_cp = cpv_getkey(mycpv)
322 if mycpv_cp != origcp:
323 # Ignore PROVIDE virtual match.
325 if repo_match is not None \
326 and not repo_match(self.aux_get(mycpv, ['repository'])[0]):
328 mynewcpv = mycpv.replace(mycpv_cp, str(newcp), 1)
329 mynewcat = catsplit(newcp)[0]
330 origpath = self.getpath(mycpv)
331 if not os.path.exists(origpath):
334 if not os.path.exists(self.getpath(mynewcat)):
335 #create the directory
336 ensure_dirs(self.getpath(mynewcat))
337 newpath = self.getpath(mynewcpv)
338 if os.path.exists(newpath):
339 #dest already exists; keep this puppy where it is.
341 _movefile(origpath, newpath, mysettings=self.settings)
342 self._clear_pkg_cache(self._dblink(mycpv))
343 self._clear_pkg_cache(self._dblink(mynewcpv))
345 # We need to rename the ebuild now.
346 old_pf = catsplit(mycpv)[1]
347 new_pf = catsplit(mynewcpv)[1]
350 os.rename(os.path.join(newpath, old_pf + ".ebuild"),
351 os.path.join(newpath, new_pf + ".ebuild"))
352 except EnvironmentError as e:
353 if e.errno != errno.ENOENT:
356 write_atomic(os.path.join(newpath, "PF"), new_pf+"\n")
357 write_atomic(os.path.join(newpath, "CATEGORY"), mynewcat+"\n")
358 fixdbentries([mylist], newpath)
361 def cp_list(self, mycp, use_cache=1):
362 mysplit=catsplit(mycp)
363 if mysplit[0] == '*':
364 mysplit[0] = mysplit[0][1:]
366 mystat = os.stat(self.getpath(mysplit[0])).st_mtime
369 if use_cache and mycp in self.cpcache:
370 cpc = self.cpcache[mycp]
373 cat_dir = self.getpath(mysplit[0])
375 dir_list = os.listdir(cat_dir)
376 except EnvironmentError as e:
377 if e.errno == PermissionDenied.errno:
378 raise PermissionDenied(cat_dir)
384 if self._excluded_dirs.match(x) is not None:
388 self.invalidentry(os.path.join(self.getpath(mysplit[0]), x))
391 if ps[0] == mysplit[1]:
392 returnme.append(_pkg_str(mysplit[0]+"/"+x))
393 self._cpv_sort_ascending(returnme)
395 self.cpcache[mycp] = [mystat, returnme[:]]
396 elif mycp in self.cpcache:
397 del self.cpcache[mycp]
400 def cpv_all(self, use_cache=1):
402 Set use_cache=0 to bypass the portage.cachedir() cache in cases
403 when the accuracy of mtime staleness checks should not be trusted
404 (generally this is only necessary in critical sections that
405 involve merge or unmerge of packages).
408 basepath = os.path.join(self._eroot, VDB_PATH) + os.path.sep
411 from portage import listdir
413 def listdir(p, **kwargs):
415 return [x for x in os.listdir(p) \
416 if os.path.isdir(os.path.join(p, x))]
417 except EnvironmentError as e:
418 if e.errno == PermissionDenied.errno:
419 raise PermissionDenied(p)
423 for x in listdir(basepath, EmptyOnError=1, ignorecvs=1, dirsonly=1):
424 if self._excluded_dirs.match(x) is not None:
426 if not self._category_re.match(x):
428 for y in listdir(basepath + x, EmptyOnError=1, dirsonly=1):
429 if self._excluded_dirs.match(y) is not None:
431 subpath = x + "/" + y
432 # -MERGING- should never be a cpv, nor should files.
434 if catpkgsplit(subpath) is None:
435 self.invalidentry(self.getpath(subpath))
438 self.invalidentry(self.getpath(subpath))
440 returnme.append(subpath)
444 def cp_all(self, use_cache=1):
445 mylist = self.cpv_all(use_cache=use_cache)
451 mysplit = catpkgsplit(y)
453 self.invalidentry(self.getpath(y))
456 self.invalidentry(self.getpath(y))
458 d[mysplit[0]+"/"+mysplit[1]] = None
461 def checkblockers(self, origdep):
464 def _clear_cache(self):
465 self.mtdircache.clear()
466 self.matchcache.clear()
468 self._aux_cache_obj = None
470 def _add(self, pkg_dblink):
471 self._pkgs_changed = True
472 self._clear_pkg_cache(pkg_dblink)
474 def _remove(self, pkg_dblink):
475 self._pkgs_changed = True
476 self._clear_pkg_cache(pkg_dblink)
478 def _clear_pkg_cache(self, pkg_dblink):
479 # Due to 1 second mtime granularity in <python-2.5, mtime checks
480 # are not always sufficient to invalidate vardbapi caches. Therefore,
481 # the caches need to be actively invalidated here.
482 self.mtdircache.pop(pkg_dblink.cat, None)
483 self.matchcache.pop(pkg_dblink.cat, None)
484 self.cpcache.pop(pkg_dblink.mysplit[0], None)
485 dircache.pop(pkg_dblink.dbcatdir, None)
487 def match(self, origdep, use_cache=1):
488 "caching match function"
490 origdep, mydb=self, use_cache=use_cache, settings=self.settings)
491 cache_key = (mydep, mydep.unevaluated_atom)
492 mykey = dep_getkey(mydep)
493 mycat = catsplit(mykey)[0]
495 if mycat in self.matchcache:
496 del self.mtdircache[mycat]
497 del self.matchcache[mycat]
498 return list(self._iter_match(mydep,
499 self.cp_list(mydep.cp, use_cache=use_cache)))
501 curmtime = os.stat(os.path.join(self._eroot, VDB_PATH, mycat)).st_mtime
502 except (IOError, OSError):
505 if mycat not in self.matchcache or \
506 self.mtdircache[mycat] != curmtime:
508 self.mtdircache[mycat] = curmtime
509 self.matchcache[mycat] = {}
510 if mydep not in self.matchcache[mycat]:
511 mymatch = list(self._iter_match(mydep,
512 self.cp_list(mydep.cp, use_cache=use_cache)))
513 self.matchcache[mycat][cache_key] = mymatch
514 return self.matchcache[mycat][cache_key][:]
516 def findname(self, mycpv, myrepo=None):
517 return self.getpath(str(mycpv), filename=catsplit(mycpv)[1]+".ebuild")
519 def flush_cache(self):
520 """If the current user has permission and the internal aux_get cache has
521 been updated, save it to disk and mark it unmodified. This is called
522 by emerge after it has loaded the full vdb for use in dependency
523 calculations. Currently, the cache is only written if the user has
524 superuser privileges (since that's required to obtain a lock), but all
525 users have read access and benefit from faster metadata lookups (as
526 long as at least part of the cache is still valid)."""
527 if self._flush_cache_enabled and \
528 self._aux_cache is not None and \
529 len(self._aux_cache["modified"]) >= self._aux_cache_threshold and \
531 self._owners.populate() # index any unindexed contents
532 valid_nodes = set(self.cpv_all())
533 for cpv in list(self._aux_cache["packages"]):
534 if cpv not in valid_nodes:
535 del self._aux_cache["packages"][cpv]
536 del self._aux_cache["modified"]
538 f = atomic_ofstream(self._aux_cache_filename, 'wb')
539 pickle.dump(self._aux_cache, f, protocol=2)
541 apply_secpass_permissions(
542 self._aux_cache_filename, gid=portage_gid, mode=0o644)
543 except (IOError, OSError) as e:
545 self._aux_cache["modified"] = set()
548 def _aux_cache(self):
549 if self._aux_cache_obj is None:
550 self._aux_cache_init()
551 return self._aux_cache_obj
553 def _aux_cache_init(self):
556 if sys.hexversion >= 0x3000000:
557 # Buffered io triggers extreme performance issues in
558 # Unpickler.load() (problem observed with python-3.0.1).
559 # Unfortunately, performance is still poor relative to
560 # python-2.x, but buffering makes it much worse.
561 open_kwargs["buffering"] = 0
563 f = open(_unicode_encode(self._aux_cache_filename,
564 encoding=_encodings['fs'], errors='strict'),
565 mode='rb', **open_kwargs)
566 mypickle = pickle.Unpickler(f)
568 mypickle.find_global = None
569 except AttributeError:
570 # TODO: If py3k, override Unpickler.find_class().
572 aux_cache = mypickle.load()
575 except (AttributeError, EOFError, EnvironmentError, ValueError, pickle.UnpicklingError) as e:
576 if isinstance(e, EnvironmentError) and \
577 getattr(e, 'errno', None) in (errno.ENOENT, errno.EACCES):
580 writemsg(_unicode_decode(_("!!! Error loading '%s': %s\n")) % \
581 (self._aux_cache_filename, e), noiselevel=-1)
584 if not aux_cache or \
585 not isinstance(aux_cache, dict) or \
586 aux_cache.get("version") != self._aux_cache_version or \
587 not aux_cache.get("packages"):
588 aux_cache = {"version": self._aux_cache_version}
589 aux_cache["packages"] = {}
591 owners = aux_cache.get("owners")
592 if owners is not None:
593 if not isinstance(owners, dict):
595 elif "version" not in owners:
597 elif owners["version"] != self._owners_cache_version:
599 elif "base_names" not in owners:
601 elif not isinstance(owners["base_names"], dict):
607 "version" : self._owners_cache_version
609 aux_cache["owners"] = owners
611 aux_cache["modified"] = set()
612 self._aux_cache_obj = aux_cache
614 def aux_get(self, mycpv, wants, myrepo = None):
615 """This automatically caches selected keys that are frequently needed
616 by emerge for dependency calculations. The cached metadata is
617 considered valid if the mtime of the package directory has not changed
618 since the data was cached. The cache is stored in a pickled dict
619 object with the following format:
621 {version:"1", "packages":{cpv1:(mtime,{k1,v1, k2,v2, ...}), cpv2...}}
623 If an error occurs while loading the cache pickle or the version is
624 unrecognized, the cache will simple be recreated from scratch (it is
625 completely disposable).
627 cache_these_wants = self._aux_cache_keys.intersection(wants)
629 if self._aux_cache_keys_re.match(x) is not None:
630 cache_these_wants.add(x)
632 if not cache_these_wants:
633 mydata = self._aux_get(mycpv, wants)
634 return [mydata[x] for x in wants]
636 cache_these = set(self._aux_cache_keys)
637 cache_these.update(cache_these_wants)
639 mydir = self.getpath(mycpv)
642 mydir_stat = os.stat(mydir)
644 if e.errno != errno.ENOENT:
646 raise KeyError(mycpv)
647 mydir_mtime = mydir_stat[stat.ST_MTIME]
648 pkg_data = self._aux_cache["packages"].get(mycpv)
649 pull_me = cache_these.union(wants)
650 mydata = {"_mtime_" : mydir_mtime}
652 cache_incomplete = False
655 if pkg_data is not None:
656 if not isinstance(pkg_data, tuple) or len(pkg_data) != 2:
659 cache_mtime, metadata = pkg_data
660 if not isinstance(cache_mtime, (long, int)) or \
661 not isinstance(metadata, dict):
665 cache_mtime, metadata = pkg_data
666 cache_valid = cache_mtime == mydir_mtime
668 # Migrate old metadata to unicode.
669 for k, v in metadata.items():
670 metadata[k] = _unicode_decode(v,
671 encoding=_encodings['repo.content'], errors='replace')
673 mydata.update(metadata)
674 pull_me.difference_update(mydata)
677 # pull any needed data and cache it
678 aux_keys = list(pull_me)
679 mydata.update(self._aux_get(mycpv, aux_keys, st=mydir_stat))
680 if not cache_valid or cache_these.difference(metadata):
682 if cache_valid and metadata:
683 cache_data.update(metadata)
684 for aux_key in cache_these:
685 cache_data[aux_key] = mydata[aux_key]
686 self._aux_cache["packages"][_unicode(mycpv)] = \
687 (mydir_mtime, cache_data)
688 self._aux_cache["modified"].add(mycpv)
690 if _slot_re.match(mydata['SLOT']) is None:
691 # Empty or invalid slot triggers InvalidAtom exceptions when
692 # generating slot atoms for packages, so translate it to '0' here.
693 mydata['SLOT'] = _unicode_decode('0')
695 return [mydata[x] for x in wants]
697 def _aux_get(self, mycpv, wants, st=None):
698 mydir = self.getpath(mycpv)
703 if e.errno == errno.ENOENT:
704 raise KeyError(mycpv)
705 elif e.errno == PermissionDenied.errno:
706 raise PermissionDenied(mydir)
709 if not stat.S_ISDIR(st.st_mode):
710 raise KeyError(mycpv)
715 results[x] = st[stat.ST_MTIME]
719 _unicode_encode(os.path.join(mydir, x),
720 encoding=_encodings['fs'], errors='strict'),
721 mode='r', encoding=_encodings['repo.content'],
728 if x not in self._aux_cache_keys and \
729 self._aux_cache_keys_re.match(x) is None:
732 myd = _unicode_decode('')
734 # Preserve \n for metadata that is known to
735 # contain multiple lines.
736 if self._aux_multi_line_re.match(x) is None:
737 myd = " ".join(myd.split())
742 env_results = self._aux_env_search(mycpv, env_keys)
744 v = env_results.get(k)
746 v = _unicode_decode('')
747 if self._aux_multi_line_re.match(k) is None:
748 v = " ".join(v.split())
751 if results.get("EAPI") == "":
752 results[_unicode_decode("EAPI")] = _unicode_decode('0')
756 def _aux_env_search(self, cpv, variables):
758 Search environment.bz2 for the specified variables. Returns
759 a dict mapping variables to values, and any variables not
760 found in the environment will not be included in the dict.
761 This is useful for querying variables like ${SRC_URI} and
762 ${A}, which are not saved in separate files but are available
763 in environment.bz2 (see bug #395463).
765 env_file = self.getpath(cpv, filename="environment.bz2")
766 if not os.path.isfile(env_file):
768 bunzip2_cmd = portage.util.shlex_split(
769 self.settings.get("PORTAGE_BUNZIP2_COMMAND", ""))
771 bunzip2_cmd = portage.util.shlex_split(
772 self.settings["PORTAGE_BZIP2_COMMAND"])
773 bunzip2_cmd.append("-d")
774 args = bunzip2_cmd + ["-c", env_file]
776 proc = subprocess.Popen(args, stdout=subprocess.PIPE)
777 except EnvironmentError as e:
778 if e.errno != errno.ENOENT:
780 raise portage.exception.CommandNotFound(args[0])
782 # Parts of the following code are borrowed from
783 # filter-bash-environment.py (keep them in sync).
784 var_assign_re = re.compile(r'(^|^declare\s+-\S+\s+|^declare\s+|^export\s+)([^=\s]+)=("|\')?(.*)$')
785 close_quote_re = re.compile(r'(\\"|"|\')\s*$')
786 def have_end_quote(quote, line):
787 close_quote_match = close_quote_re.search(line)
788 return close_quote_match is not None and \
789 close_quote_match.group(1) == quote
791 variables = frozenset(variables)
793 for line in proc.stdout:
794 line = _unicode_decode(line,
795 encoding=_encodings['content'], errors='replace')
796 var_assign_match = var_assign_re.match(line)
797 if var_assign_match is not None:
798 key = var_assign_match.group(2)
799 quote = var_assign_match.group(3)
800 if quote is not None:
801 if have_end_quote(quote,
802 line[var_assign_match.end(2)+2:]):
803 value = var_assign_match.group(4)
805 value = [var_assign_match.group(4)]
806 for line in proc.stdout:
807 line = _unicode_decode(line,
808 encoding=_encodings['content'],
811 if have_end_quote(quote, line):
813 value = ''.join(value)
814 # remove trailing quote and whitespace
815 value = value.rstrip()[:-1]
817 value = var_assign_match.group(4).rstrip()
826 def aux_update(self, cpv, values):
827 mylink = self._dblink(cpv)
828 if not mylink.exists():
830 self._bump_mtime(cpv)
831 self._clear_pkg_cache(mylink)
832 for k, v in values.items():
837 os.unlink(os.path.join(self.getpath(cpv), k))
838 except EnvironmentError:
840 self._bump_mtime(cpv)
842 def counter_tick(self, myroot=None, mycpv=None):
844 @param myroot: ignored, self._eroot is used instead
846 return self.counter_tick_core(incrementing=1, mycpv=mycpv)
848 def get_counter_tick_core(self, myroot=None, mycpv=None):
850 Use this method to retrieve the counter instead
851 of having to trust the value of a global counter
852 file that can lead to invalid COUNTER
853 generation. When cache is valid, the package COUNTER
854 files are not read and we rely on the timestamp of
855 the package directory to validate cache. The stat
856 calls should only take a short time, so performance
857 is sufficient without having to rely on a potentially
858 corrupt global counter file.
860 The global counter file located at
861 $CACHE_PATH/counter serves to record the
862 counter of the last installed package and
863 it also corresponds to the total number of
864 installation actions that have occurred in
865 the history of this package database.
867 @param myroot: ignored, self._eroot is used instead
873 _unicode_encode(self._counter_path,
874 encoding=_encodings['fs'], errors='strict'),
875 mode='r', encoding=_encodings['repo.content'],
877 except EnvironmentError as e:
878 # Silently allow ENOENT since files under
879 # /var/cache/ are allowed to disappear.
880 if e.errno != errno.ENOENT:
881 writemsg(_("!!! Unable to read COUNTER file: '%s'\n") % \
882 self._counter_path, noiselevel=-1)
883 writemsg("!!! %s\n" % str(e), noiselevel=-1)
888 counter = long(cfile.readline().strip())
891 except (OverflowError, ValueError) as e:
892 writemsg(_("!!! COUNTER file is corrupt: '%s'\n") % \
893 self._counter_path, noiselevel=-1)
894 writemsg("!!! %s\n" % str(e), noiselevel=-1)
897 if self._cached_counter == counter:
898 max_counter = counter
900 # We must ensure that we return a counter
901 # value that is at least as large as the
902 # highest one from the installed packages,
903 # since having a corrupt value that is too low
904 # can trigger incorrect AUTOCLEAN behavior due
905 # to newly installed packages having lower
906 # COUNTERs than the previous version in the
908 max_counter = counter
909 for cpv in self.cpv_all():
911 pkg_counter = int(self.aux_get(cpv, ["COUNTER"])[0])
912 except (KeyError, OverflowError, ValueError):
914 if pkg_counter > max_counter:
915 max_counter = pkg_counter
917 return max_counter + 1
919 def counter_tick_core(self, myroot=None, incrementing=1, mycpv=None):
921 This method will grab the next COUNTER value and record it back
922 to the global file. Note that every package install must have
923 a unique counter, since a slotmove update can move two packages
924 into the same SLOT and in that case it's important that both
925 packages have different COUNTER metadata.
927 @param myroot: ignored, self._eroot is used instead
928 @param mycpv: ignored
930 @return: new counter value
936 counter = self.get_counter_tick_core() - 1
940 # update new global counter file
942 write_atomic(self._counter_path, str(counter))
943 except InvalidLocation:
944 self.settings._init_dirs()
945 write_atomic(self._counter_path, str(counter))
946 self._cached_counter = counter
948 # Since we hold a lock, this is a good opportunity
949 # to flush the cache. Note that this will only
950 # flush the cache periodically in the main process
951 # when _aux_cache_threshold is exceeded.
958 def _dblink(self, cpv):
959 category, pf = catsplit(cpv)
960 return dblink(category, pf, settings=self.settings,
961 vartree=self.vartree, treetype="vartree")
963 def removeFromContents(self, pkg, paths, relative_paths=True):
965 @param pkg: cpv for an installed package
967 @param paths: paths of files to remove from contents
968 @type paths: iterable
970 if not hasattr(pkg, "getcontents"):
971 pkg = self._dblink(pkg)
972 root = self.settings['ROOT']
973 root_len = len(root) - 1
974 new_contents = pkg.getcontents().copy()
977 for filename in paths:
978 filename = _unicode_decode(filename,
979 encoding=_encodings['content'], errors='strict')
980 filename = normalize_path(filename)
982 relative_filename = filename
984 relative_filename = filename[root_len:]
985 contents_key = pkg._match_contents(relative_filename)
987 del new_contents[contents_key]
991 self._bump_mtime(pkg.mycpv)
992 f = atomic_ofstream(os.path.join(pkg.dbdir, "CONTENTS"))
993 write_contents(new_contents, root, f)
995 self._bump_mtime(pkg.mycpv)
996 pkg._clear_contents_cache()
998 class _owners_cache(object):
1000 This class maintains an hash table that serves to index package
1001 contents by mapping the basename of file to a list of possible
1002 packages that own it. This is used to optimize owner lookups
1003 by narrowing the search down to a smaller number of packages.
1006 from hashlib import md5 as _new_hash
1008 from md5 import new as _new_hash
1011 _hex_chars = int(_hash_bits / 4)
1013 def __init__(self, vardb):
1017 eroot_len = len(self._vardb._eroot)
1018 contents = self._vardb._dblink(cpv).getcontents()
1019 pkg_hash = self._hash_pkg(cpv)
1021 # Empty path is a code used to represent empty contents.
1022 self._add_path("", pkg_hash)
1025 self._add_path(x[eroot_len:], pkg_hash)
1027 self._vardb._aux_cache["modified"].add(cpv)
1029 def _add_path(self, path, pkg_hash):
1031 Empty path is a code that represents empty contents.
1034 name = os.path.basename(path.rstrip(os.path.sep))
1039 name_hash = self._hash_str(name)
1040 base_names = self._vardb._aux_cache["owners"]["base_names"]
1041 pkgs = base_names.get(name_hash)
1044 base_names[name_hash] = pkgs
1045 pkgs[pkg_hash] = None
1047 def _hash_str(self, s):
1048 h = self._new_hash()
1049 # Always use a constant utf_8 encoding here, since
1050 # the "default" encoding can change.
1051 h.update(_unicode_encode(s,
1052 encoding=_encodings['repo.content'],
1053 errors='backslashreplace'))
1055 h = h[-self._hex_chars:]
1059 def _hash_pkg(self, cpv):
1060 counter, mtime = self._vardb.aux_get(
1061 cpv, ["COUNTER", "_mtime_"])
1063 counter = int(counter)
1066 return (_unicode(cpv), counter, mtime)
1068 class _owners_db(object):
1070 def __init__(self, vardb):
1076 def _populate(self):
1077 owners_cache = vardbapi._owners_cache(self._vardb)
1078 cached_hashes = set()
1079 base_names = self._vardb._aux_cache["owners"]["base_names"]
1081 # Take inventory of all cached package hashes.
1082 for name, hash_values in list(base_names.items()):
1083 if not isinstance(hash_values, dict):
1084 del base_names[name]
1086 cached_hashes.update(hash_values)
1088 # Create sets of valid package hashes and uncached packages.
1089 uncached_pkgs = set()
1090 hash_pkg = owners_cache._hash_pkg
1091 valid_pkg_hashes = set()
1092 for cpv in self._vardb.cpv_all():
1093 hash_value = hash_pkg(cpv)
1094 valid_pkg_hashes.add(hash_value)
1095 if hash_value not in cached_hashes:
1096 uncached_pkgs.add(cpv)
1098 # Cache any missing packages.
1099 for cpv in uncached_pkgs:
1100 owners_cache.add(cpv)
1102 # Delete any stale cache.
1103 stale_hashes = cached_hashes.difference(valid_pkg_hashes)
1105 for base_name_hash, bucket in list(base_names.items()):
1106 for hash_value in stale_hashes.intersection(bucket):
1107 del bucket[hash_value]
1109 del base_names[base_name_hash]
1113 def get_owners(self, path_iter):
1115 @return the owners as a dblink -> set(files) mapping.
1118 for owner, f in self.iter_owners(path_iter):
1119 owned_files = owners.get(owner)
1120 if owned_files is None:
1122 owners[owner] = owned_files
1126 def getFileOwnerMap(self, path_iter):
1127 owners = self.get_owners(path_iter)
1129 for pkg_dblink, files in owners.items():
1131 owner_set = file_owners.get(f)
1132 if owner_set is None:
1134 file_owners[f] = owner_set
1135 owner_set.add(pkg_dblink)
# Yield (dblink, relative_path) pairs for the packages owning the given paths,
# using the hashed base-name owners cache, and falling back to the low-memory
# full scan when the dblink cache grows too large.
1138 def iter_owners(self, path_iter):
1140 Iterate over tuples of (dblink, path). In order to avoid
1141 consuming too many resources for too much time, resources
1142 are only allocated for the duration of a given iter_owners()
1143 call. Therefore, to maximize reuse of resources when searching
1144 for multiple files, it's best to search for them all in a single
# Materialize the iterable so paths can be popped (and pushed back on fallback).
1148 if not isinstance(path_iter, list):
1149 path_iter = list(path_iter)
1150 owners_cache = self._populate()
1153 hash_pkg = owners_cache._hash_pkg
1154 hash_str = owners_cache._hash_str
1155 base_names = self._vardb._aux_cache["owners"]["base_names"]
# Local dblink() helper: memoize dblink instances per cpv, but abort via
# StopIteration once the cache exceeds 20 entries so the low-memory path
# below takes over.
1160 x = dblink_cache.get(cpv)
1162 if len(dblink_cache) > 20:
1163 # Ensure that we don't run out of memory.
1164 raise StopIteration()
1165 x = self._vardb._dblink(cpv)
1166 dblink_cache[cpv] = x
1171 path = path_iter.pop()
# A query not starting with os.sep is a bare basename match.
1172 is_basename = os.sep != path[:1]
1176 name = os.path.basename(path.rstrip(os.path.sep))
# Look up candidate packages via the hashed basename index.
1181 name_hash = hash_str(name)
1182 pkgs = base_names.get(name_hash)
1184 if pkgs is not None:
1186 for hash_value in pkgs:
# Skip malformed cache entries (must be a (cpv, counter, mtime) triple).
1187 if not isinstance(hash_value, tuple) or \
1188 len(hash_value) != 3:
1190 cpv, counter, mtime = hash_value
1191 if not isinstance(cpv, basestring):
# Ignore stale entries whose current (counter, mtime) no longer matches.
1194 current_hash = hash_pkg(cpv)
1198 if current_hash != hash_value:
1202 for p in dblink(cpv).getcontents():
1203 if os.path.basename(p) == name:
# Record the path relative to root.
1204 owners.append((cpv, p[len(root):]))
1206 if dblink(cpv).isowner(path):
1207 owners.append((cpv, path))
# dblink cache overflow: push the current path back and re-search all
# remaining paths with the low-memory full scan.
1209 except StopIteration:
1210 path_iter.append(path)
1212 dblink_cache.clear()
1214 for x in self._iter_owners_low_mem(path_iter):
1218 for cpv, p in owners:
1219 yield (dblink(cpv), p)
# Fallback owner search that trades speed for memory: it instantiates a
# short-lived dblink per installed package instead of relying on the
# in-memory owners cache.
1221 def _iter_owners_low_mem(self, path_list):
1223 This implementation will make a short-lived dblink instance (and
1224 parse CONTENTS) for every single installed package. This is
1225 slower but uses less memory than the method which uses the
# Pre-compute (path, basename, is_basename) for each query so the per-package
# loop below does no repeated string work.
1233 for path in path_list:
# A path not starting with os.sep is treated as a bare basename query.
1234 is_basename = os.sep != path[:1]
1238 name = os.path.basename(path.rstrip(os.path.sep))
1239 path_info_list.append((path, name, is_basename))
1241 root = self._vardb._eroot
# Scan every installed package once, checking all queries against it.
1242 for cpv in self._vardb.cpv_all():
1243 dblnk = self._vardb._dblink(cpv)
1245 for path, name, is_basename in path_info_list:
1247 for p in dblnk.getcontents():
1248 if os.path.basename(p) == name:
# Strip the root prefix so yielded paths are root-relative.
1249 yield dblnk, p[len(root):]
1251 if dblnk.isowner(path):
# Thin tree wrapper around vardbapi for the installed-packages database
# (/var/db/pkg). Most methods delegate directly to self.dbapi.
# NOTE(review): several method bodies (zap, inject, parts of get_provide and
# the except clauses) are missing from this extraction; lines are verbatim.
1254 class vartree(object):
1255 "this tree will scan a var/db/pkg database located at root (passed to init)"
1256 def __init__(self, root=None, virtual=DeprecationWarning, categories=None,
1259 if settings is None:
# Fall back to the global settings instance when none is supplied.
1260 settings = portage.settings
1262 if root is not None and root != settings['ROOT']:
1263 warnings.warn("The 'root' parameter of the "
1264 "portage.dbapi.vartree.vartree"
1265 " constructor is now unused. Use "
1266 "settings['ROOT'] instead.",
1267 DeprecationWarning, stacklevel=2)
1269 if virtual is not DeprecationWarning:
1270 warnings.warn("The 'virtual' parameter of the "
1271 "portage.dbapi.vartree.vartree"
1272 " constructor is unused",
1273 DeprecationWarning, stacklevel=2)
1275 self.settings = settings
1276 self.dbapi = vardbapi(settings=settings, vartree=self)
# Deprecated `root` property body: warn, then report settings['ROOT'].
1281 warnings.warn("The root attribute of "
1282 "portage.dbapi.vartree.vartree"
1283 " is deprecated. Use "
1284 "settings['ROOT'] instead.",
1285 DeprecationWarning, stacklevel=3)
1286 return self.settings['ROOT']
1288 def getpath(self, mykey, filename=None):
1289 return self.dbapi.getpath(mykey, filename=filename)
# NOTE(review): bodies of zap() and inject() are not present in this extraction.
1291 def zap(self, mycpv):
1294 def inject(self, mycpv):
# Compute the virtuals provided by an installed package from its PROVIDE
# metadata, conditionalized on its recorded USE flags.
1297 def get_provide(self, mycpv):
1301 mylines, myuse = self.dbapi.aux_get(mycpv, ["PROVIDE", "USE"])
1303 myuse = myuse.split()
1304 mylines = use_reduce(mylines, uselist=myuse, flat=True)
1305 for myprovide in mylines:
1306 mys = catpkgsplit(myprovide)
# catpkgsplit failed (no version component); split category/name manually.
1308 mys = myprovide.split("/")
1309 myprovides += [mys[0] + "/" + mys[1]]
1311 except SystemExit as e:
# Any other parse failure is reported but not fatal.
1313 except Exception as e:
1314 mydir = self.dbapi.getpath(mycpv)
1315 writemsg(_("\nParse Error reading PROVIDE and USE in '%s'\n") % mydir,
1318 writemsg(_("Possibly Invalid: '%s'\n") % str(mylines),
1320 writemsg(_("Exception: %s\n\n") % str(e), noiselevel=-1)
# Aggregate get_provide() over all installed packages into a
# virtual -> [providers] mapping.
1323 def get_all_provides(self):
1325 for node in self.getallcpv():
1326 for mykey in self.get_provide(node):
1327 if mykey in myprovides:
1328 myprovides[mykey] += [node]
1330 myprovides[mykey] = [node]
1333 def dep_bestmatch(self, mydep, use_cache=1):
1334 "compatibility method -- all matches, not just visible ones"
1335 #mymatch=best(match(dep_expand(mydep,self.dbapi),self.dbapi))
1336 mymatch = best(self.dbapi.match(
1337 dep_expand(mydep, mydb=self.dbapi, settings=self.settings),
1338 use_cache=use_cache))
1344 def dep_match(self, mydep, use_cache=1):
1345 "compatibility method -- we want to see all matches, not just visible ones"
1346 #mymatch = match(mydep,self.dbapi)
1347 mymatch = self.dbapi.match(mydep, use_cache=use_cache)
1353 def exists_specific(self, cpv):
1354 return self.dbapi.cpv_exists(cpv)
1356 def getallcpv(self):
1357 """temporary function, probably to be renamed --- Gets a list of all
1358 category/package-versions installed on the system."""
1359 return self.dbapi.cpv_all()
1361 def getallnodes(self):
1362 """new behavior: these are all *unmasked* nodes. There may or may not be available
1363 masked package for nodes in this nodes list."""
1364 return self.dbapi.cp_all()
1366 def getebuildpath(self, fullpackage):
1367 cat, package = catsplit(fullpackage)
1368 return self.getpath(fullpackage, filename=package+".ebuild")
1370 def getslot(self, mycatpkg):
1371 "Get a slot for a catpkg; assume it exists."
1373 return self.dbapi.aux_get(mycatpkg, ["SLOT"])[0]
1380 class dblink(object):
1382 This class provides an interface to the installed package database
1383 At present this is implemented as a text backend in /var/db/pkg.
# Detects CONTENTS paths that need normalize_path(): double slashes, missing
# leading slash, trailing "./", or "."/".." components.
1387 _normalize_needed = re.compile(r'//|^[^/]|./$|(^|/)\.\.?(/|$)')
# Parses one CONTENTS line into named groups: dir/dev/fif entries, obj entries
# (path, md5, mtime) and sym entries (source -> target, mtime). The "oldsym"
# alternative matches a legacy os.lstat-tuple format (see bug #351814 comment
# in getcontents below).
1389 _contents_re = re.compile(r'^(' + \
1390 r'(?P<dir>(dev|dir|fif) (.+))|' + \
1391 r'(?P<obj>(obj) (.+) (\S+) (\d+))|' + \
1392 r'(?P<sym>(sym) (.+) -> (.+) ((\d+)|(?P<oldsym>(' + \
1393 r'\(\d+, \d+L, \d+L, \d+, \d+, \d+, \d+L, \d+, (\d+), \d+\)))))' + \
1397 # These files are generated by emerge, so we need to remove
1398 # them when they are the only thing left in a directory.
1399 _infodir_cleanup = frozenset(["dir", "dir.old"])
# Errno values that are silently tolerated when unlinking files during
# unmerge (file already gone, busy, or path shape changed underneath us).
1401 _ignored_unlink_errnos = (
1402 errno.EBUSY, errno.ENOENT,
1403 errno.ENOTDIR, errno.EISDIR)
# Errno values tolerated when removing directories (notably non-empty dirs).
1405 _ignored_rmdir_errnos = (
1406 errno.EEXIST, errno.ENOTEMPTY,
1407 errno.EBUSY, errno.ENOENT,
1408 errno.ENOTDIR, errno.EISDIR,
1411 def __init__(self, cat, pkg, myroot=None, settings=None, treetype=None,
1412 vartree=None, blockers=None, scheduler=None, pipe=None):
1414 Creates a DBlink object for a given CPV.
1415 The given CPV may not be present in the database already.
1417 @param cat: Category
1419 @param pkg: Package (PV)
1421 @param myroot: ignored, settings['ROOT'] is used instead
1422 @type myroot: String (Path)
1423 @param settings: Typically portage.settings
1424 @type settings: portage.config
1425 @param treetype: one of ['porttree','bintree','vartree']
1426 @type treetype: String
1427 @param vartree: an instance of vartree corresponding to myroot.
1428 @type vartree: vartree
# settings is effectively mandatory despite the keyword default.
1431 if settings is None:
1432 raise TypeError("settings argument is required")
1434 mysettings = settings
1435 self._eroot = mysettings['EROOT']
# NOTE(review): the assignments of self.cat and self.pkg (original lines
# 1436-1437) are missing from this extraction; mycpv below relies on them.
1438 self.mycpv = self.cat + "/" + self.pkg
1439 self.mysplit = list(catpkgsplit(self.mycpv)[1:])
# Rewrite the split's first element as "category/pn".
1440 self.mysplit[0] = "%s/%s" % (self.cat, self.mysplit[0])
1441 self.treetype = treetype
# Default to the global vartree for this EROOT when none was passed.
1443 vartree = portage.db[self._eroot]["vartree"]
1444 self.vartree = vartree
1445 self._blockers = blockers
1446 self._scheduler = scheduler
# Paths into the VDB: category dir, final package dir, and the temporary
# "-MERGING-" dir used while a merge is in progress.
1447 self.dbroot = normalize_path(os.path.join(self._eroot, VDB_PATH))
1448 self.dbcatdir = self.dbroot+"/"+cat
1449 self.dbpkgdir = self.dbcatdir+"/"+pkg
1450 self.dbtmpdir = self.dbcatdir+"/-MERGING-"+pkg
1451 self.dbdir = self.dbpkgdir
1452 self.settings = mysettings
1453 self._verbose = self.settings.get("PORTAGE_VERBOSE") == "1"
1455 self.myroot = self.settings['ROOT']
1456 self._installed_instance = None
# Lazily-populated caches for CONTENTS data (see getcontents /
# _clear_contents_cache).
1457 self.contentscache = None
1458 self._contents_inodes = None
1459 self._contents_basenames = None
1460 self._linkmap_broken = False
1461 self._hardlink_merge_map = {}
# Identity used by __hash__/__eq__: (EROOT, cpv).
1462 self._hash_key = (self._eroot, self.mycpv)
1463 self._protect_obj = None
1467 return hash(self._hash_key)
# Two dblink instances are equal iff they refer to the same (EROOT, cpv).
1469 def __eq__(self, other):
1470 return isinstance(other, dblink) and \
1471 self._hash_key == other._hash_key
# Lazily construct (and cache) the ConfigProtect helper from the current
# CONFIG_PROTECT / CONFIG_PROTECT_MASK settings.
1473 def _get_protect_obj(self):
1475 if self._protect_obj is None:
1476 self._protect_obj = ConfigProtect(self._eroot,
1477 portage.util.shlex_split(
1478 self.settings.get("CONFIG_PROTECT", "")),
1479 portage.util.shlex_split(
1480 self.settings.get("CONFIG_PROTECT_MASK", "")))
1482 return self._protect_obj
# True if the given path falls under CONFIG_PROTECT (and not its mask).
1484 def isprotected(self, obj):
1485 return self._get_protect_obj().isprotected(obj)
# Refresh the ConfigProtect helper's view of protected directories on disk.
1487 def updateprotect(self):
1488 self._get_protect_obj().updateprotect()
1491 self.vartree.dbapi.lock()
1494 self.vartree.dbapi.unlock()
1497 "return path to location of db information (for >>> informational display)"
1501 "does the db entry exist? boolean."
1502 return os.path.exists(self.dbdir)
1506 Remove this entry from the database
1508 if not os.path.exists(self.dbdir):
1511 # Check validity of self.dbdir before attempting to remove it.
1512 if not self.dbdir.startswith(self.dbroot):
1513 writemsg(_("portage.dblink.delete(): invalid dbdir: %s\n") % \
1514 self.dbdir, noiselevel=-1)
1517 shutil.rmtree(self.dbdir)
1518 # If empty, remove parent category directory.
1520 os.rmdir(os.path.dirname(self.dbdir))
1523 self.vartree.dbapi._remove(self)
# Remove this entry's CONTENTS file from the VDB.
# NOTE(review): the surrounding lock/unlock lines appear to be missing from
# this extraction; the lines below are verbatim.
1525 def clearcontents(self):
1527 For a given db entry (self), erase the CONTENTS values.
1531 if os.path.exists(self.dbdir+"/CONTENTS"):
1532 os.unlink(self.dbdir+"/CONTENTS")
# Invalidate all in-memory CONTENTS caches so the next getcontents() call
# re-reads from disk.
1536 def _clear_contents_cache(self):
1537 self.contentscache = None
1538 self._contents_inodes = None
1539 self._contents_basenames = None
# Parse this package's CONTENTS file into a {path: entry-tuple} dict, caching
# the result in self.contentscache. Entry tuples are ("dir",) for directories,
# (type, mtime, md5) for objects and (type, mtime, dest) for symlinks.
1541 def getcontents(self):
1543 Get the installed files of a given package (aka what that package installed)
1545 contents_file = os.path.join(self.dbdir, "CONTENTS")
1546 if self.contentscache is not None:
1547 return self.contentscache
1550 myc = io.open(_unicode_encode(contents_file,
1551 encoding=_encodings['fs'], errors='strict'),
1552 mode='r', encoding=_encodings['repo.content'],
# Missing CONTENTS is treated as an empty package; other errors propagate.
1554 except EnvironmentError as e:
1555 if e.errno != errno.ENOENT:
1558 self.contentscache = pkgfiles
1560 mylines = myc.readlines()
# Bind class-level regexes/indices to locals for the parse loop.
1563 normalize_needed = self._normalize_needed
1564 contents_re = self._contents_re
1565 obj_index = contents_re.groupindex['obj']
1566 dir_index = contents_re.groupindex['dir']
1567 sym_index = contents_re.groupindex['sym']
1568 # The old symlink format may exist on systems that have packages
1569 # which were installed many years ago (see bug #351814).
1570 oldsym_index = contents_re.groupindex['oldsym']
1571 # CONTENTS files already contain EPREFIX
1572 myroot = self.settings['ROOT']
1573 if myroot == os.path.sep:
1575 # used to generate parent dir entries
1576 dir_entry = (_unicode_decode("dir"),)
# Depth of EROOT in path components; used below to stop synthesizing
# parent-directory entries once we reach EROOT itself.
1577 eroot_split_len = len(self.settings["EROOT"].split(os.sep)) - 1
1580 for pos, line in enumerate(mylines):
1581 if null_byte in line:
1582 # Null bytes are a common indication of corruption.
1583 errors.append((pos + 1, _("Null byte found in CONTENTS entry")))
1585 line = line.rstrip("\n")
1586 m = contents_re.match(line)
1588 errors.append((pos + 1, _("Unrecognized CONTENTS entry")))
# Dispatch on which named group matched; `base` is the group offset.
1591 if m.group(obj_index) is not None:
1593 #format: type, mtime, md5sum
1594 data = (m.group(base+1), m.group(base+4), m.group(base+3))
1595 elif m.group(dir_index) is not None:
1598 data = (m.group(base+1),)
1599 elif m.group(sym_index) is not None:
1601 if m.group(oldsym_index) is None:
1602 mtime = m.group(base+5)
# Legacy lstat-tuple symlink format: mtime lives at a different group.
1604 mtime = m.group(base+8)
1605 #format: type, mtime, dest
1606 data = (m.group(base+1), mtime, m.group(base+3))
1608 # This won't happen as long as the regular expression
1609 # is written to only match valid entries.
1610 raise AssertionError(_("required group not found " + \
1611 "in CONTENTS entry: '%s'") % line)
1613 path = m.group(base+2)
# Normalize only when needed (cheap regex test first).
1614 if normalize_needed.search(path) is not None:
1615 path = normalize_path(path)
1616 if not path.startswith(os.path.sep):
1617 path = os.path.sep + path
1619 if myroot is not None:
1620 path = os.path.join(myroot, path.lstrip(os.path.sep))
1622 # Implicitly add parent directories, since we can't necessarily
1623 # assume that they are explicitly listed in CONTENTS, and it's
1624 # useful for callers if they can rely on parent directory entries
1625 # being generated here (crucial for things like dblink.isowner()).
1626 path_split = path.split(os.sep)
1628 while len(path_split) > eroot_split_len:
1629 parent = os.sep.join(path_split)
1630 if parent in pkgfiles:
1632 pkgfiles[parent] = dir_entry
1635 pkgfiles[path] = data
# Report all accumulated parse errors at once, with 1-based line numbers.
1638 writemsg(_("!!! Parse error in '%s'\n") % contents_file, noiselevel=-1)
1639 for pos, e in errors:
1640 writemsg(_("!!! line %d: %s\n") % (pos, e), noiselevel=-1)
1641 self.contentscache = pkgfiles
# Update the preserved-libs registry around an unmerge: register libs that
# must be preserved, then drop preserved libs that no longer have consumers.
# Whole body runs under the vardbapi filesystem lock and the registry lock.
1644 def _prune_plib_registry(self, unmerge=False,
1645 needed=None, preserve_paths=None):
1646 # remove preserved libraries that don't have any consumers left
# Skip entirely when the linkmap is broken/disabled or there is no registry.
1647 if not (self._linkmap_broken or
1648 self.vartree.dbapi._linkmap is None or
1649 self.vartree.dbapi._plib_registry is None):
1650 self.vartree.dbapi._fs_lock()
1651 plib_registry = self.vartree.dbapi._plib_registry
1652 plib_registry.lock()
1654 plib_registry.load()
1656 unmerge_with_replacement = \
1657 unmerge and preserve_paths is not None
1658 if unmerge_with_replacement:
1659 # If self.mycpv is about to be unmerged and we
1660 # have a replacement package, we want to exclude
1661 # the irrelevant NEEDED data that belongs to
1662 # files which are being unmerged now.
1663 exclude_pkgs = (self.mycpv,)
1667 self._linkmap_rebuild(exclude_pkgs=exclude_pkgs,
1668 include_file=needed, preserve_paths=preserve_paths)
1671 unmerge_preserve = None
# Only compute libs to preserve when there is no replacement package.
1672 if not unmerge_with_replacement:
1673 unmerge_preserve = \
1674 self._find_libs_to_preserve(unmerge=True)
1675 counter = self.vartree.dbapi.cpv_counter(self.mycpv)
1676 plib_registry.unregister(self.mycpv,
1677 self.settings["SLOT"], counter)
1678 if unmerge_preserve:
1679 for path in sorted(unmerge_preserve):
1680 contents_key = self._match_contents(path)
1681 if not contents_key:
1683 obj_type = self.getcontents()[contents_key][0]
1684 self._display_merge(_(">>> needed %s %s\n") % \
1685 (obj_type, contents_key), noiselevel=-1)
1686 plib_registry.register(self.mycpv,
1687 self.settings["SLOT"], counter, unmerge_preserve)
1688 # Remove the preserved files from our contents
1689 # so that they won't be unmerged.
1690 self.vartree.dbapi.removeFromContents(self,
1693 unmerge_no_replacement = \
1694 unmerge and not unmerge_with_replacement
1695 cpv_lib_map = self._find_unused_preserved_libs(
1696 unmerge_no_replacement)
1698 self._remove_preserved_libs(cpv_lib_map)
1699 self.vartree.dbapi.lock()
# Drop the removed libs from each surviving package's CONTENTS.
1701 for cpv, removed in cpv_lib_map.items():
1702 if not self.vartree.dbapi.cpv_exists(cpv):
1704 self.vartree.dbapi.removeFromContents(cpv, removed)
1706 self.vartree.dbapi.unlock()
1708 plib_registry.store()
1710 plib_registry.unlock()
1711 self.vartree.dbapi._fs_unlock()
# Full unmerge driver: runs pkg_prerm, removes the package's files from the
# live filesystem, runs pkg_postrm and cleanrm, manages logs/locks, then
# updates the environment and the preserved-libs registry.
1713 def unmerge(self, pkgfiles=None, trimworld=None, cleanup=True,
1714 ldpath_mtimes=None, others_in_slot=None, needed=None,
1715 preserve_paths=None):
1718 Unmerges a given package (CPV)
1723 @param pkgfiles: files to unmerge (generally self.getcontents() )
1724 @type pkgfiles: Dictionary
1725 @param trimworld: Unused
1726 @type trimworld: Boolean
1727 @param cleanup: cleanup to pass to doebuild (see doebuild)
1728 @type cleanup: Boolean
1729 @param ldpath_mtimes: mtimes to pass to env_update (see env_update)
1730 @type ldpath_mtimes: Dictionary
1731 @param others_in_slot: all dblink instances in this slot, excluding self
1732 @type others_in_slot: list
1733 @param needed: Filename containing libraries needed after unmerge.
1734 @type needed: String
1735 @param preserve_paths: Libraries preserved by a package instance that
1736 is currently being merged. They need to be explicitly passed to the
1737 LinkageMap, since they are not registered in the
1738 PreservedLibsRegistry yet.
1739 @type preserve_paths: set
1742 1. os.EX_OK if everything went well.
1743 2. return code of the failed phase (for prerm, postrm, cleanrm)
# The trimworld parameter is accepted only for backward compatibility.
1746 if trimworld is not None:
1747 warnings.warn("The trimworld parameter of the " + \
1748 "portage.dbapi.vartree.dblink.unmerge()" + \
1749 " method is now unused.",
1750 DeprecationWarning, stacklevel=2)
1753 log_path = self.settings.get("PORTAGE_LOG_FILE")
1754 if self._scheduler is None:
1755 # We create a scheduler instance and use it to
1756 # log unmerge output separately from merge output.
1757 self._scheduler = PollScheduler().sched_iface
# Resolve the effective background setting, honoring the unmerge-specific
# PORTAGE_BACKGROUND_UNMERGE override when running as a subprocess.
1758 if self.settings.get("PORTAGE_BACKGROUND") == "subprocess":
1759 if self.settings.get("PORTAGE_BACKGROUND_UNMERGE") == "1":
1760 self.settings["PORTAGE_BACKGROUND"] = "1"
1761 self.settings.backup_changes("PORTAGE_BACKGROUND")
1763 elif self.settings.get("PORTAGE_BACKGROUND_UNMERGE") == "0":
1764 self.settings["PORTAGE_BACKGROUND"] = "0"
1765 self.settings.backup_changes("PORTAGE_BACKGROUND")
1766 elif self.settings.get("PORTAGE_BACKGROUND") == "1":
1769 self.vartree.dbapi._bump_mtime(self.mycpv)
1770 showMessage = self._display_merge
# Invalidate the cached category list since the VDB is about to change.
1771 if self.vartree.dbapi._categories is not None:
1772 self.vartree.dbapi._categories = None
1773 # When others_in_slot is supplied, the security check has already been
1774 # done for this slot, so it shouldn't be repeated until the next
1775 # replacement or unmerge operation.
1776 if others_in_slot is None:
1777 slot = self.vartree.dbapi.aux_get(self.mycpv, ["SLOT"])[0]
1778 slot_matches = self.vartree.dbapi.match(
1779 "%s:%s" % (portage.cpv_getkey(self.mycpv), slot))
1781 for cur_cpv in slot_matches:
1782 if cur_cpv == self.mycpv:
1784 others_in_slot.append(dblink(self.cat, catsplit(cur_cpv)[1],
1785 settings=self.settings, vartree=self.vartree,
1786 treetype="vartree", pipe=self._pipe))
1788 retval = self._security_check([self] + others_in_slot)
1792 contents = self.getcontents()
1793 # Now, don't assume that the name of the ebuild is the same as the
1794 # name of the dir; the package may have been moved.
1795 myebuildpath = os.path.join(self.dbdir, self.pkg + ".ebuild")
1797 ebuild_phase = "prerm"
1798 mystuff = os.listdir(self.dbdir)
1800 if x.endswith(".ebuild"):
1801 if x[:-7] != self.pkg:
1802 # Clean up after vardbapi.move_ent() breakage in
1803 # portage versions before 2.1.2
1804 os.rename(os.path.join(self.dbdir, x), myebuildpath)
1805 write_atomic(os.path.join(self.dbdir, "PF"), self.pkg+"\n")
1808 if self.mycpv != self.settings.mycpv or \
1809 "EAPI" not in self.settings.configdict["pkg"]:
1810 # We avoid a redundant setcpv call here when
1811 # the caller has already taken care of it.
1812 self.settings.setcpv(self.mycpv, mydb=self.vartree.dbapi)
# Probe the EAPI up front; an unsupported EAPI disables the ebuild phases
# below but the file removal still proceeds.
1814 eapi_unsupported = False
1816 doebuild_environment(myebuildpath, "prerm",
1817 settings=self.settings, db=self.vartree.dbapi)
1818 except UnsupportedAPIException as e:
1819 eapi_unsupported = e
1821 self._prune_plib_registry(unmerge=True, needed=needed,
1822 preserve_paths=preserve_paths)
1824 builddir_lock = None
1825 scheduler = self._scheduler
1828 # Only create builddir_lock if the caller
1829 # has not already acquired the lock.
1830 if "PORTAGE_BUILDIR_LOCKED" not in self.settings:
1831 builddir_lock = EbuildBuildDir(
1832 scheduler=scheduler,
1833 settings=self.settings)
1834 builddir_lock.lock()
1835 prepare_build_dirs(settings=self.settings, cleanup=True)
1836 log_path = self.settings.get("PORTAGE_LOG_FILE")
1838 # Log the error after PORTAGE_LOG_FILE is initialized
1839 # by prepare_build_dirs above.
1840 if eapi_unsupported:
1841 # Sometimes this happens due to corruption of the EAPI file.
1843 showMessage(_("!!! FAILED prerm: %s\n") % \
1844 os.path.join(self.dbdir, "EAPI"),
1845 level=logging.ERROR, noiselevel=-1)
1846 showMessage(_unicode_decode("%s\n") % (eapi_unsupported,),
1847 level=logging.ERROR, noiselevel=-1)
# Run pkg_prerm only when the ebuild file exists and EAPI is supported.
1848 elif os.path.isfile(myebuildpath):
1849 phase = EbuildPhase(background=background,
1850 phase=ebuild_phase, scheduler=scheduler,
1851 settings=self.settings)
1853 retval = phase.wait()
1855 # XXX: Decide how to handle failures here.
1856 if retval != os.EX_OK:
1858 showMessage(_("!!! FAILED prerm: %s\n") % retval,
1859 level=logging.ERROR, noiselevel=-1)
# Remove the package's files from the live filesystem under the fs lock.
1861 self.vartree.dbapi._fs_lock()
1863 self._unmerge_pkgfiles(pkgfiles, others_in_slot)
1865 self.vartree.dbapi._fs_unlock()
1866 self._clear_contents_cache()
1868 if not eapi_unsupported and os.path.isfile(myebuildpath):
1869 ebuild_phase = "postrm"
1870 phase = EbuildPhase(background=background,
1871 phase=ebuild_phase, scheduler=scheduler,
1872 settings=self.settings)
1874 retval = phase.wait()
1876 # XXX: Decide how to handle failures here.
1877 if retval != os.EX_OK:
1879 showMessage(_("!!! FAILED postrm: %s\n") % retval,
1880 level=logging.ERROR, noiselevel=-1)
1883 self.vartree.dbapi._bump_mtime(self.mycpv)
# On a phase failure, emit detailed recovery instructions via eerror.
1885 if not eapi_unsupported and os.path.isfile(myebuildpath):
1886 if retval != os.EX_OK:
1888 msg = _("The '%(ebuild_phase)s' "
1889 "phase of the '%(cpv)s' package "
1890 "has failed with exit value %(retval)s.") % \
1891 {"ebuild_phase":ebuild_phase, "cpv":self.mycpv,
1893 from textwrap import wrap
1894 msg_lines.extend(wrap(msg, 72))
1895 msg_lines.append("")
1897 ebuild_name = os.path.basename(myebuildpath)
1898 ebuild_dir = os.path.dirname(myebuildpath)
1899 msg = _("The problem occurred while executing "
1900 "the ebuild file named '%(ebuild_name)s' "
1901 "located in the '%(ebuild_dir)s' directory. "
1902 "If necessary, manually remove "
1903 "the environment.bz2 file and/or the "
1904 "ebuild file located in that directory.") % \
1905 {"ebuild_name":ebuild_name, "ebuild_dir":ebuild_dir}
1906 msg_lines.extend(wrap(msg, 72))
1907 msg_lines.append("")
1910 "of the environment.bz2 file is "
1911 "preferred since it may allow the "
1912 "removal phases to execute successfully. "
1913 "The ebuild will be "
1914 "sourced and the eclasses "
1915 "from the current portage tree will be used "
1916 "when necessary. Removal of "
1917 "the ebuild file will cause the "
1918 "pkg_prerm() and pkg_postrm() removal "
1919 "phases to be skipped entirely.")
1920 msg_lines.extend(wrap(msg, 72))
1922 self._eerror(ebuild_phase, msg_lines)
1924 self._elog_process(phasefilter=("prerm", "postrm"))
# Only run the cleanrm phase when the earlier phases succeeded.
1926 if retval == os.EX_OK:
1928 doebuild_environment(myebuildpath, "cleanrm",
1929 settings=self.settings, db=self.vartree.dbapi)
1930 except UnsupportedAPIException:
1932 phase = EbuildPhase(background=background,
1933 phase="cleanrm", scheduler=scheduler,
1934 settings=self.settings)
1936 retval = phase.wait()
1938 if builddir_lock is not None:
1939 builddir_lock.unlock()
# Log cleanup: discard the log on success unless 'unmerge-logs' is enabled.
1941 if log_path is not None:
1943 if not failures and 'unmerge-logs' not in self.settings.features:
1950 st = os.stat(log_path)
1960 if log_path is not None and os.path.exists(log_path):
1961 # Restore this since it gets lost somewhere above and it
1962 # needs to be set for _display_merge() to be able to log.
1963 # Note that the log isn't necessarily supposed to exist
1964 # since if PORT_LOGDIR is unset then it's a temp file
1965 # so it gets cleaned above.
1966 self.settings["PORTAGE_LOG_FILE"] = log_path
1968 self.settings.pop("PORTAGE_LOG_FILE", None)
# Refresh ld.so cache, environment files, etc. for the removed contents.
1970 env_update(target_root=self.settings['ROOT'],
1971 prev_mtimes=ldpath_mtimes,
1972 contents=contents, env=self.settings,
1973 writemsg_level=self._display_merge, vardbapi=self.vartree.dbapi)
1975 unmerge_with_replacement = preserve_paths is not None
1976 if not unmerge_with_replacement:
1977 # When there's a replacement package which calls us via treewalk,
1978 # treewalk will automatically call _prune_plib_registry for us.
1979 # Otherwise, we need to call _prune_plib_registry ourselves.
1980 # Don't pass in the "unmerge=True" flag here, since that flag
1981 # is intended to be used _prior_ to unmerge, not after.
1982 self._prune_plib_registry()
# Route a merge/unmerge message to the right sink: stdout via writemsg_level
# when there is no scheduler, otherwise through the scheduler's output
# facility (which handles log files and background mode).
1986 def _display_merge(self, msg, level=0, noiselevel=0):
# Suppress informational output unless PORTAGE_VERBOSE is enabled.
1987 if not self._verbose and noiselevel >= 0 and level < logging.WARN:
1989 if self._scheduler is None:
1990 writemsg_level(msg, level=level, noiselevel=noiselevel)
1993 if self.settings.get("PORTAGE_BACKGROUND") != "subprocess":
1994 log_path = self.settings.get("PORTAGE_LOG_FILE")
1995 background = self.settings.get("PORTAGE_BACKGROUND") == "1"
# Background with no log file: only warnings and above reach the user.
1997 if background and log_path is None:
1998 if level >= logging.WARN:
1999 writemsg_level(msg, level=level, noiselevel=noiselevel)
2001 self._scheduler.output(msg,
2002 log_path=log_path, background=background,
2003 level=level, noiselevel=noiselevel)
# Emit one formatted unmerge progress line, e.g. "<<< obj /usr/bin/foo".
2005 def _show_unmerge(self, zing, desc, file_type, file_name):
2006 self._display_merge("%s %s %s %s\n" % \
2007 (zing, desc.ljust(8), file_type, file_name))
2009 def _unmerge_pkgfiles(self, pkgfiles, others_in_slot):
2012 Unmerges the contents of a package from the liveFS
2013 Removes the VDB entry for self
2015 @param pkgfiles: typically self.getcontents()
2016 @type pkgfiles: Dictionary { filename: [ 'type', '?', 'md5sum' ] }
2017 @param others_in_slot: all dblink instances in this slot, excluding self
2018 @type others_in_slot: list
2023 perf_md5 = perform_md5
2024 showMessage = self._display_merge
2025 show_unmerge = self._show_unmerge
2026 ignored_unlink_errnos = self._ignored_unlink_errnos
2027 ignored_rmdir_errnos = self._ignored_rmdir_errnos
2030 showMessage(_("No package files given... Grabbing a set.\n"))
2031 pkgfiles = self.getcontents()
2033 if others_in_slot is None:
2035 slot = self.vartree.dbapi.aux_get(self.mycpv, ["SLOT"])[0]
2036 slot_matches = self.vartree.dbapi.match(
2037 "%s:%s" % (portage.cpv_getkey(self.mycpv), slot))
2038 for cur_cpv in slot_matches:
2039 if cur_cpv == self.mycpv:
2041 others_in_slot.append(dblink(self.cat, catsplit(cur_cpv)[1],
2042 settings=self.settings,
2043 vartree=self.vartree, treetype="vartree", pipe=self._pipe))
2045 cfgfiledict = grabdict(self.vartree.dbapi._conf_mem_file)
2047 protected_symlinks = {}
2049 unmerge_orphans = "unmerge-orphans" in self.settings.features
2050 calc_prelink = "prelink-checksums" in self.settings.features
2053 self.updateprotect()
2054 mykeys = list(pkgfiles)
2058 #process symlinks second-to-last, directories last.
2060 modprotect = os.path.join(self._eroot, "lib/modules/")
2062 def unlink(file_name, lstatobj):
2064 if lstatobj.st_flags != 0:
2065 bsd_chflags.lchflags(file_name, 0)
2066 parent_name = os.path.dirname(file_name)
2067 # Use normal stat/chflags for the parent since we want to
2068 # follow any symlinks to the real parent directory.
2069 pflags = os.stat(parent_name).st_flags
2071 bsd_chflags.chflags(parent_name, 0)
2073 if not stat.S_ISLNK(lstatobj.st_mode):
2074 # Remove permissions to ensure that any hardlinks to
2075 # suid/sgid files are rendered harmless.
2076 os.chmod(file_name, 0)
2077 os.unlink(file_name)
2078 except OSError as ose:
2079 # If the chmod or unlink fails, you are in trouble.
2080 # With Prefix this can be because the file is owned
2081 # by someone else (a screwup by root?), on a normal
2082 # system maybe filesystem corruption. In any case,
2083 # if we backtrace and die here, we leave the system
2084 # in a totally undefined state, hence we just bleed
2085 # like hell and continue to hopefully finish all our
2086 # administrative and pkg_postinst stuff.
2087 self._eerror("postrm",
2088 ["Could not chmod or unlink '%s': %s" % \
2091 if bsd_chflags and pflags != 0:
2092 # Restore the parent flags we saved before unlinking
2093 bsd_chflags.chflags(parent_name, pflags)
2096 unmerge_desc["cfgpro"] = _("cfgpro")
2097 unmerge_desc["replaced"] = _("replaced")
2098 unmerge_desc["!dir"] = _("!dir")
2099 unmerge_desc["!empty"] = _("!empty")
2100 unmerge_desc["!fif"] = _("!fif")
2101 unmerge_desc["!found"] = _("!found")
2102 unmerge_desc["!md5"] = _("!md5")
2103 unmerge_desc["!mtime"] = _("!mtime")
2104 unmerge_desc["!obj"] = _("!obj")
2105 unmerge_desc["!sym"] = _("!sym")
2106 unmerge_desc["!prefix"] = _("!prefix")
2108 real_root = self.settings['ROOT']
2109 real_root_len = len(real_root) - 1
2110 eroot = self.settings["EROOT"]
2112 infodirs = frozenset(infodir for infodir in chain(
2113 self.settings.get("INFOPATH", "").split(":"),
2114 self.settings.get("INFODIR", "").split(":")) if infodir)
2115 infodirs_inodes = set()
2116 for infodir in infodirs:
2117 infodir = os.path.join(real_root, infodir.lstrip(os.sep))
2119 statobj = os.stat(infodir)
2123 infodirs_inodes.add((statobj.st_dev, statobj.st_ino))
2125 for i, objkey in enumerate(mykeys):
2127 obj = normalize_path(objkey)
2130 _unicode_encode(obj,
2131 encoding=_encodings['merge'], errors='strict')
2132 except UnicodeEncodeError:
2133 # The package appears to have been merged with a
2134 # different value of sys.getfilesystemencoding(),
2135 # so fall back to utf_8 if appropriate.
2137 _unicode_encode(obj,
2138 encoding=_encodings['fs'], errors='strict')
2139 except UnicodeEncodeError:
2143 perf_md5 = portage.checksum.perform_md5
2145 file_data = pkgfiles[objkey]
2146 file_type = file_data[0]
2148 # don't try to unmerge the prefix offset itself
2149 if len(obj) <= len(eroot) or not obj.startswith(eroot):
2150 show_unmerge("---", unmerge_desc["!prefix"], file_type, obj)
2155 statobj = os.stat(obj)
2160 lstatobj = os.lstat(obj)
2161 except (OSError, AttributeError):
2163 islink = lstatobj is not None and stat.S_ISLNK(lstatobj.st_mode)
2164 if lstatobj is None:
2165 show_unmerge("---", unmerge_desc["!found"], file_type, obj)
2167 # don't use EROOT, CONTENTS entries already contain EPREFIX
2168 if obj.startswith(real_root):
2169 relative_path = obj[real_root_len:]
2171 for dblnk in others_in_slot:
2172 if dblnk.isowner(relative_path):
2176 if file_type == "sym" and is_owned and \
2177 (islink and statobj and stat.S_ISDIR(statobj.st_mode)):
2178 # A new instance of this package claims the file, so
2179 # don't unmerge it. If the file is symlink to a
2180 # directory and the unmerging package installed it as
2181 # a symlink, but the new owner has it listed as a
2182 # directory, then we'll produce a warning since the
2183 # symlink is a sort of orphan in this case (see
2185 symlink_orphan = False
2186 for dblnk in others_in_slot:
2187 parent_contents_key = \
2188 dblnk._match_contents(relative_path)
2189 if not parent_contents_key:
2191 if not parent_contents_key.startswith(
2194 if dblnk.getcontents()[
2195 parent_contents_key][0] == "dir":
2196 symlink_orphan = True
2200 protected_symlinks.setdefault(
2201 (statobj.st_dev, statobj.st_ino),
2202 []).append(relative_path)
2205 show_unmerge("---", unmerge_desc["replaced"], file_type, obj)
2207 elif relative_path in cfgfiledict:
2208 stale_confmem.append(relative_path)
2209 # next line includes a tweak to protect modules from being unmerged,
2210 # but we don't protect modules from being overwritten if they are
2211 # upgraded. We effectively only want one half of the config protection
2212 # functionality for /lib/modules. For portage-ng both capabilities
2213 # should be able to be independently specified.
2214 # TODO: For rebuilds, re-parent previous modules to the new
2215 # installed instance (so they are not orphans). For normal
2216 # uninstall (not rebuild/reinstall), remove the modules along
2217 # with all other files (leave no orphans).
2218 if obj.startswith(modprotect):
2219 show_unmerge("---", unmerge_desc["cfgpro"], file_type, obj)
2222 # Don't unlink symlinks to directories here since that can
2223 # remove /lib and /usr/lib symlinks.
2224 if unmerge_orphans and \
2225 lstatobj and not stat.S_ISDIR(lstatobj.st_mode) and \
2226 not (islink and statobj and stat.S_ISDIR(statobj.st_mode)) and \
2227 not self.isprotected(obj):
2229 unlink(obj, lstatobj)
2230 except EnvironmentError as e:
2231 if e.errno not in ignored_unlink_errnos:
2234 show_unmerge("<<<", "", file_type, obj)
2237 lmtime = str(lstatobj[stat.ST_MTIME])
2238 if (pkgfiles[objkey][0] not in ("dir", "fif", "dev")) and (lmtime != pkgfiles[objkey][1]):
2239 show_unmerge("---", unmerge_desc["!mtime"], file_type, obj)
2242 if pkgfiles[objkey][0] == "dir":
2243 if lstatobj is None or not stat.S_ISDIR(lstatobj.st_mode):
2244 show_unmerge("---", unmerge_desc["!dir"], file_type, obj)
2246 mydirs.add((obj, (lstatobj.st_dev, lstatobj.st_ino)))
2247 elif pkgfiles[objkey][0] == "sym":
2249 show_unmerge("---", unmerge_desc["!sym"], file_type, obj)
2252 # If this symlink points to a directory then we don't want
2253 # to unmerge it if there are any other packages that
2254 # installed files into the directory via this symlink
2255 # (see bug #326685).
2256 # TODO: Resolving a symlink to a directory will require
2257 # simulation if $ROOT != / and the link is not relative.
2258 if islink and statobj and stat.S_ISDIR(statobj.st_mode) \
2259 and obj.startswith(real_root):
2261 relative_path = obj[real_root_len:]
2263 target_dir_contents = os.listdir(obj)
2267 if target_dir_contents:
2268 # If all the children are regular files owned
2269 # by this package, then the symlink should be
2272 for child in target_dir_contents:
2273 child = os.path.join(relative_path, child)
2274 if not self.isowner(child):
2278 child_lstat = os.lstat(os.path.join(
2279 real_root, child.lstrip(os.sep)))
2283 if not stat.S_ISREG(child_lstat.st_mode):
2284 # Nested symlinks or directories make
2285 # the issue very complex, so just
2286 # preserve the symlink in order to be
2292 protected_symlinks.setdefault(
2293 (statobj.st_dev, statobj.st_ino),
2294 []).append(relative_path)
2295 show_unmerge("---", unmerge_desc["!empty"],
2299 # Go ahead and unlink symlinks to directories here when
2300 # they're actually recorded as symlinks in the contents.
2301 # Normally, symlinks such as /lib -> lib64 are not recorded
2302 # as symlinks in the contents of a package. If a package
2303 # installs something into ${D}/lib/, it is recorded in the
2304 # contents as a directory even if it happens to correspond
2305 # to a symlink when it's merged to the live filesystem.
2307 unlink(obj, lstatobj)
2308 show_unmerge("<<<", "", file_type, obj)
2309 except (OSError, IOError) as e:
2310 if e.errno not in ignored_unlink_errnos:
2313 show_unmerge("!!!", "", file_type, obj)
2314 elif pkgfiles[objkey][0] == "obj":
2315 if statobj is None or not stat.S_ISREG(statobj.st_mode):
2316 show_unmerge("---", unmerge_desc["!obj"], file_type, obj)
2320 mymd5 = perf_md5(obj, calc_prelink=calc_prelink)
2321 except FileNotFound as e:
2322 # the file has disappeared between now and our stat call
2323 show_unmerge("---", unmerge_desc["!obj"], file_type, obj)
2326 # string.lower is needed because db entries used to be in upper-case. The
2327 # string.lower allows for backwards compatibility.
2328 if mymd5 != pkgfiles[objkey][2].lower():
2329 show_unmerge("---", unmerge_desc["!md5"], file_type, obj)
2332 unlink(obj, lstatobj)
2333 except (OSError, IOError) as e:
2334 if e.errno not in ignored_unlink_errnos:
2337 show_unmerge("<<<", "", file_type, obj)
2338 elif pkgfiles[objkey][0] == "fif":
2339 if not stat.S_ISFIFO(lstatobj[stat.ST_MODE]):
2340 show_unmerge("---", unmerge_desc["!fif"], file_type, obj)
2342 show_unmerge("---", "", file_type, obj)
2343 elif pkgfiles[objkey][0] == "dev":
2344 show_unmerge("---", "", file_type, obj)
2346 self._unmerge_dirs(mydirs, infodirs_inodes,
2347 protected_symlinks, unmerge_desc, unlink, os)
2350 if protected_symlinks:
2351 self._unmerge_protected_symlinks(others_in_slot, infodirs_inodes,
2352 protected_symlinks, unmerge_desc, unlink, os)
2354 if protected_symlinks:
2355 msg = "One or more symlinks to directories have been " + \
2356 "preserved in order to ensure that files installed " + \
2357 "via these symlinks remain accessible:"
2358 lines = textwrap.wrap(msg, 72)
2361 flat_list.update(*protected_symlinks.values())
2362 flat_list = sorted(flat_list)
2364 lines.append("\t%s" % (os.path.join(real_root,
2367 self._elog("eerror", "postrm", lines)
2369 # Remove stale entries from config memory.
2371 for filename in stale_confmem:
2372 del cfgfiledict[filename]
2373 writedict(cfgfiledict, self.vartree.dbapi._conf_mem_file)
2375 #remove self from vartree database so that our own virtual gets zapped if we're the last node
2376 self.vartree.zap(self.mycpv)
# Decide whether directory symlinks that were preserved during unmerge
# (because other packages installed files "through" them) can now be
# removed.  Queries the vardb owners index for every path reachable via
# the protected symlinks; if no other package owns any of them, the
# symlinks are unlinked and parent directories are handed back to
# _unmerge_dirs for cleanup.
# NOTE(review): this extract elides lines (gaps in embedded numbering),
# so some control flow (try/continue/returns) is not visible here.
2378 def _unmerge_protected_symlinks(self, others_in_slot, infodirs_inodes,
2379 protected_symlinks, unmerge_desc, unlink, os):
2381 real_root = self.settings['ROOT']
2382 show_unmerge = self._show_unmerge
2383 ignored_unlink_errnos = self._ignored_unlink_errnos
# Flatten all relative paths recorded under the protected symlinks.
2386 flat_list.update(*protected_symlinks.values())
2387 flat_list = sorted(flat_list)
# Fast path: if another package in the same slot owns any of these
# paths, the symlinks must stay protected.
2390 for dblnk in others_in_slot:
2391 if dblnk.isowner(f):
2392 # If another package in the same slot installed
2393 # a file via a protected symlink, return early
2394 # and don't bother searching for any other owners.
2399 msg.append(_("Directory symlink(s) may need protection:"))
2403 msg.append("\t%s" % \
2404 os.path.join(real_root, f.lstrip(os.path.sep)))
2407 msg.append(_("Searching all installed"
2408 " packages for files installed via above symlink(s)..."))
2410 self._elog("elog", "postrm", msg)
# Expensive global search across all installed packages.
2414 owners = self.vartree.dbapi._owners.get_owners(flat_list)
2415 self.vartree.dbapi.flush_cache()
# Ownership by the package being unmerged does not count.
2419 for owner in list(owners):
2420 if owner.mycpv == self.mycpv:
2421 owners.pop(owner, None)
2425 msg.append(_("The above directory symlink(s) are all "
2426 "safe to remove. Removing them now..."))
2428 self._elog("elog", "postrm", msg)
# Unlink each symlink and collect its parent directories (up to,
# but not including, self._eroot) as rmdir candidates.
2430 for unmerge_syms in protected_symlinks.values():
2431 for relative_path in unmerge_syms:
2432 obj = os.path.join(real_root,
2433 relative_path.lstrip(os.sep))
2434 parent = os.path.dirname(obj)
2435 while len(parent) > len(self._eroot):
2437 lstatobj = os.lstat(parent)
2442 (lstatobj.st_dev, lstatobj.st_ino)))
2443 parent = os.path.dirname(parent)
2445 unlink(obj, os.lstat(obj))
2446 show_unmerge("<<<", "", "sym", obj)
2447 except (OSError, IOError) as e:
# Unexpected unlink failures are reported; "expected" errnos
# (self._ignored_unlink_errnos) are silently tolerated.
2448 if e.errno not in ignored_unlink_errnos:
2451 show_unmerge("!!!", "", "sym", obj)
2453 protected_symlinks.clear()
2454 self._unmerge_dirs(dirs, infodirs_inodes,
2455 protected_symlinks, unmerge_desc, unlink, os)
# Remove directories recorded in the package's CONTENTS.  Handles three
# special cases visible below: GNU info directories (auto-generated
# metadata files are unlinked first so the dir can become empty), BSD
# file flags (cleared before rmdir and restored on the parent after),
# and protected symlinks pointing at a directory that was successfully
# removed (the symlink protection is dropped and the links unlinked).
# NOTE(review): this extract elides lines (gaps in embedded numbering);
# surrounding try/except and loop scaffolding is partly invisible.
2458 def _unmerge_dirs(self, dirs, infodirs_inodes,
2459 protected_symlinks, unmerge_desc, unlink, os):
2461 show_unmerge = self._show_unmerge
2462 infodir_cleanup = self._infodir_cleanup
2463 ignored_unlink_errnos = self._ignored_unlink_errnos
2464 ignored_rmdir_errnos = self._ignored_rmdir_errnos
2465 real_root = self.settings['ROOT']
2470 for obj, inode_key in dirs:
2471 # Treat any directory named "info" as a candidate here,
2472 # since it might have been in INFOPATH previously even
2473 # though it may not be there now.
2474 if inode_key in infodirs_inodes or \
2475 os.path.basename(obj) == "info":
2477 remaining = os.listdir(obj)
2481 cleanup_info_dir = ()
# Only clean up when everything left is a known, regenerable
# info-metadata file (subset of self._infodir_cleanup).
2483 len(remaining) <= len(infodir_cleanup):
2484 if not set(remaining).difference(infodir_cleanup):
2485 cleanup_info_dir = remaining
2487 for child in cleanup_info_dir:
2488 child = os.path.join(obj, child)
2490 lstatobj = os.lstat(child)
2491 if stat.S_ISREG(lstatobj.st_mode):
2492 unlink(child, lstatobj)
2493 show_unmerge("<<<", "", "obj", child)
2494 except EnvironmentError as e:
2495 if e.errno not in ignored_unlink_errnos:
2498 show_unmerge("!!!", "", "obj", child)
# BSD chflags handling: immutable/append-only flags must be
# cleared on the dir (and its real parent) before rmdir works.
2501 lstatobj = os.lstat(obj)
2502 if lstatobj.st_flags != 0:
2503 bsd_chflags.lchflags(obj, 0)
2504 parent_name = os.path.dirname(obj)
2505 # Use normal stat/chflags for the parent since we want to
2506 # follow any symlinks to the real parent directory.
2507 pflags = os.stat(parent_name).st_flags
2509 bsd_chflags.chflags(parent_name, 0)
2513 if bsd_chflags and pflags != 0:
2514 # Restore the parent flags we saved before unlinking
2515 bsd_chflags.chflags(parent_name, pflags)
2516 show_unmerge("<<<", "", "dir", obj)
2517 except EnvironmentError as e:
2518 if e.errno not in ignored_rmdir_errnos:
# ENOENT means someone else removed it; anything else is
# reported as a non-empty/undeletable directory.
2520 if e.errno != errno.ENOENT:
2521 show_unmerge("---", unmerge_desc["!empty"], "dir", obj)
2524 # When a directory is successfully removed, there's
2525 # no need to protect symlinks that point to it.
2526 unmerge_syms = protected_symlinks.pop(inode_key, None)
2527 if unmerge_syms is not None:
2528 for relative_path in unmerge_syms:
2529 obj = os.path.join(real_root,
2530 relative_path.lstrip(os.sep))
2532 unlink(obj, os.lstat(obj))
2533 show_unmerge("<<<", "", "sym", obj)
2534 except (OSError, IOError) as e:
2535 if e.errno not in ignored_unlink_errnos:
2538 show_unmerge("!!!", "", "sym", obj)
# Public ownership predicate: thin boolean wrapper over
# self._match_contents().  The destroot parameter is retained only for
# backward compatibility and triggers a DeprecationWarning when it
# differs from self._eroot.
2540 def isowner(self, filename, destroot=None):
2542 Check if a file belongs to this package. This may
2543 result in a stat call for the parent directory of
2544 every installed file, since the inode numbers are
2545 used to work around the problem of ambiguous paths
2546 caused by symlinked directories. The results of
2547 stat calls are cached to optimize multiple calls
2556 1. True if this package owns the file.
2557 2. False if this package does not own the file.
2560 if destroot is not None and destroot != self._eroot:
2561 warnings.warn("The second parameter of the " + \
2562 "portage.dbapi.vartree.dblink.isowner()" + \
2563 " is now unused. Instead " + \
2564 "self.settings['EROOT'] will be used.",
2565 DeprecationWarning, stacklevel=2)
# _match_contents returns the matching CONTENTS entry or False.
2567 return bool(self._match_contents(filename))
# Core ownership lookup.  Returns the CONTENTS entry for *filename*
# (which may differ from the caller's spelling, e.g. via a symlinked
# parent directory) or False when this package does not own it.
# Strategy visible below: (1) exact-path hit in getcontents(),
# (2) basename shortcut via the cached _contents_basenames set,
# (3) inode comparison of the real parent directory against the cached
# _contents_inodes map, to resolve paths that traverse dir symlinks.
# NOTE(review): this extract elides lines; several try/except bodies
# and fallbacks are only partially visible.
2569 def _match_contents(self, filename, destroot=None):
2571 The matching contents entry is returned, which is useful
2572 since the path may differ from the one given by the caller,
2576 @return: the contents entry corresponding to the given path, or False
2577 if the file is not owned by this package.
2580 filename = _unicode_decode(filename,
2581 encoding=_encodings['content'], errors='strict')
2583 if destroot is not None and destroot != self._eroot:
2584 warnings.warn("The second parameter of the " + \
2585 "portage.dbapi.vartree.dblink._match_contents()" + \
2586 " is now unused. Instead " + \
2587 "self.settings['ROOT'] will be used.",
2588 DeprecationWarning, stacklevel=2)
2590 # don't use EROOT here, image already contains EPREFIX
2591 destroot = self.settings['ROOT']
2593 # The given filename argument might have a different encoding than the
2594 # the filenames contained in the contents, so use separate wrapped os
2595 # modules for each. The basename is more likely to contain non-ascii
2596 # characters than the directory path, so use os_filename_arg for all
2597 # operations involving the basename of the filename arg.
2598 os_filename_arg = _os_merge
# Probe whether the argument round-trips through the 'merge'
# encoding; fall back to the 'fs' encoding, then to portage.os.
2602 _unicode_encode(filename,
2603 encoding=_encodings['merge'], errors='strict')
2604 except UnicodeEncodeError:
2605 # The package appears to have been merged with a
2606 # different value of sys.getfilesystemencoding(),
2607 # so fall back to utf_8 if appropriate.
2609 _unicode_encode(filename,
2610 encoding=_encodings['fs'], errors='strict')
2611 except UnicodeEncodeError:
2614 os_filename_arg = portage.os
2616 destfile = normalize_path(
2617 os_filename_arg.path.join(destroot,
2618 filename.lstrip(os_filename_arg.path.sep)))
# Cheapest case: the normalized path is listed verbatim.
2620 pkgfiles = self.getcontents()
2621 if pkgfiles and destfile in pkgfiles:
2624 basename = os_filename_arg.path.basename(destfile)
2625 if self._contents_basenames is None:
# Lazily build the set of basenames of all owned files,
# with the same merge->fs encoding fallback as above.
2630 encoding=_encodings['merge'],
2632 except UnicodeEncodeError:
2633 # The package appears to have been merged with a
2634 # different value of sys.getfilesystemencoding(),
2635 # so fall back to utf_8 if appropriate.
2639 encoding=_encodings['fs'],
2641 except UnicodeEncodeError:
2646 self._contents_basenames = set(
2647 os.path.basename(x) for x in pkgfiles)
2648 if basename not in self._contents_basenames:
2649 # This is a shortcut that, in most cases, allows us to
2650 # eliminate this package as an owner without the need
2651 # to examine inode numbers of parent directories.
2654 # Use stat rather than lstat since we want to follow
2655 # any symlinks to the real parent directory.
2656 parent_path = os_filename_arg.path.dirname(destfile)
2658 parent_stat = os_filename_arg.stat(parent_path)
2659 except EnvironmentError as e:
2660 if e.errno != errno.ENOENT:
2664 if self._contents_inodes is None:
# Lazily build {(st_dev, st_ino): [parent_path, ...]} for the
# parent directory of every owned file.
2670 encoding=_encodings['merge'],
2672 except UnicodeEncodeError:
2673 # The package appears to have been merged with a
2674 # different value of sys.getfilesystemencoding(),
2675 # so fall back to utf_8 if appropriate.
2679 encoding=_encodings['fs'],
2681 except UnicodeEncodeError:
2686 self._contents_inodes = {}
2687 parent_paths = set()
2689 p_path = os.path.dirname(x)
2690 if p_path in parent_paths:
2692 parent_paths.add(p_path)
2698 inode_key = (s.st_dev, s.st_ino)
2699 # Use lists of paths in case multiple
2700 # paths reference the same inode.
2701 p_path_list = self._contents_inodes.get(inode_key)
2702 if p_path_list is None:
2704 self._contents_inodes[inode_key] = p_path_list
2705 if p_path not in p_path_list:
2706 p_path_list.append(p_path)
# If the argument's real parent dir shares an inode with an owned
# parent dir, re-check basename joined onto each aliased path.
2708 p_path_list = self._contents_inodes.get(
2709 (parent_stat.st_dev, parent_stat.st_ino))
2711 for p_path in p_path_list:
2712 x = os_filename_arg.path.join(p_path, basename)
# Rebuild the shared-library linkage map unless it is unavailable or
# unnecessary.  On a missing scanelf binary (CommandNotFound) the
# preserve-libs machinery is disabled for the rest of this dblink's
# lifetime via self._linkmap_broken.
2718 def _linkmap_rebuild(self, **kwargs):
2720 Rebuild the self._linkmap if it's not broken due to missing
2721 scanelf binary. Also, return early if preserve-libs is disabled
2722 and the preserve-libs registry is empty.
2724 if self._linkmap_broken or \
2725 self.vartree.dbapi._linkmap is None or \
2726 self.vartree.dbapi._plib_registry is None or \
2727 ("preserve-libs" not in self.settings.features and \
2728 not self.vartree.dbapi._plib_registry.hasEntries()):
2731 self.vartree.dbapi._linkmap.rebuild(**kwargs)
2732 except CommandNotFound as e:
# Permanently disable preserve-libs rather than failing the merge.
2733 self._linkmap_broken = True
2734 self._display_merge(_("!!! Disabling preserve-libs " \
2735 "due to error: Command Not Found: %s\n") % (e,),
2736 level=logging.ERROR, noiselevel=-1)
# Compute the set of root-relative library paths that must be preserved
# because external consumers still link against them.  Builds a digraph
# of provider/consumer nodes from the linkage map, then walks it from
# non-provider consumers to collect reachable providers.  Hardlinks and
# the soname symlink of each preserved lib are preserved alongside it.
# NOTE(review): this extract elides lines; some guards/returns between
# the visible statements are not shown.
2738 def _find_libs_to_preserve(self, unmerge=False):
2740 Get set of relative paths for libraries to be preserved. When
2741 unmerge is False, file paths to preserve are selected from
2742 self._installed_instance. Otherwise, paths are selected from
# Bail out when the linkmap is broken/absent, preserve-libs is off,
# or (for replacement merges) there is no installed instance.
2745 if self._linkmap_broken or \
2746 self.vartree.dbapi._linkmap is None or \
2747 self.vartree.dbapi._plib_registry is None or \
2748 (not unmerge and self._installed_instance is None) or \
2749 "preserve-libs" not in self.settings.features:
2753 linkmap = self.vartree.dbapi._linkmap
2755 installed_instance = self
2757 installed_instance = self._installed_instance
2758 old_contents = installed_instance.getcontents()
2759 root = self.settings['ROOT']
2760 root_len = len(root) - 1
2761 lib_graph = digraph()
# Map a path to its (deduplicated) graph node; paths that resolve
# to the same object key share one node via alt_paths.
2764 def path_to_node(path):
2765 node = path_node_map.get(path)
2767 node = LinkageMap._LibGraphNode(linkmap._obj_key(path))
2768 alt_path_node = lib_graph.get(node)
2769 if alt_path_node is not None:
2770 node = alt_path_node
2771 node.alt_paths.add(path)
2772 path_node_map[path] = node
2776 provider_nodes = set()
2777 # Create provider nodes and add them to the graph.
2778 for f_abs in old_contents:
# Same merge->fs encoding fallback used elsewhere in this class.
2782 _unicode_encode(f_abs,
2783 encoding=_encodings['merge'], errors='strict')
2784 except UnicodeEncodeError:
2785 # The package appears to have been merged with a
2786 # different value of sys.getfilesystemencoding(),
2787 # so fall back to utf_8 if appropriate.
2789 _unicode_encode(f_abs,
2790 encoding=_encodings['fs'], errors='strict')
2791 except UnicodeEncodeError:
2796 f = f_abs[root_len:]
2797 if not unmerge and self.isowner(f):
2798 # We have an indentically named replacement file,
2799 # so we don't try to preserve the old copy.
# Only files with consumers outside the installed instance
# itself become provider nodes.
2802 consumers = linkmap.findConsumers(f,
2803 exclude_providers=(installed_instance.isowner,))
2808 provider_node = path_to_node(f)
2809 lib_graph.add(provider_node, None)
2810 provider_nodes.add(provider_node)
2811 consumer_map[provider_node] = consumers
2813 # Create consumer nodes and add them to the graph.
2814 # Note that consumers can also be providers.
2815 for provider_node, consumers in consumer_map.items():
2817 consumer_node = path_to_node(c)
2818 if installed_instance.isowner(c) and \
2819 consumer_node not in provider_nodes:
2820 # This is not a provider, so it will be uninstalled.
2822 lib_graph.add(provider_node, consumer_node)
2824 # Locate nodes which should be preserved. They consist of all
2825 # providers that are reachable from consumers that are not
2826 # providers themselves.
2827 preserve_nodes = set()
2828 for consumer_node in lib_graph.root_nodes():
2829 if consumer_node in provider_nodes:
2831 # Preserve all providers that are reachable from this consumer.
# Iterative DFS over child (provider) nodes.
2832 node_stack = lib_graph.child_nodes(consumer_node)
2834 provider_node = node_stack.pop()
2835 if provider_node in preserve_nodes:
2837 preserve_nodes.add(provider_node)
2838 node_stack.extend(lib_graph.child_nodes(provider_node))
2840 preserve_paths = set()
2841 for preserve_node in preserve_nodes:
2842 # Preserve the library itself, and also preserve the
2843 # soname symlink which is the only symlink that is
2844 # strictly required.
2846 soname_symlinks = set()
2847 soname = linkmap.getSoname(next(iter(preserve_node.alt_paths)))
2848 for f in preserve_node.alt_paths:
2849 f_abs = os.path.join(root, f.lstrip(os.sep))
2851 if stat.S_ISREG(os.lstat(f_abs).st_mode):
2853 elif os.path.basename(f) == soname:
2854 soname_symlinks.add(f)
2859 preserve_paths.update(hardlinks)
2860 preserve_paths.update(soname_symlinks)
2862 return preserve_paths
# Copy CONTENTS entries for preserved libraries (and their parent
# directories) from the old installed instance into this package's new
# CONTENTS, then rewrite the CONTENTS file atomically.  Paths with no
# old contents entry are dropped from preserve_paths (mutated in place)
# with an error message, since they may belong to another package.
2864 def _add_preserve_libs_to_contents(self, preserve_paths):
2866 Preserve libs returned from _find_libs_to_preserve().
2869 if not preserve_paths:
2873 showMessage = self._display_merge
2874 root = self.settings['ROOT']
2876 # Copy contents entries from the old package to the new one.
2877 new_contents = self.getcontents().copy()
2878 old_contents = self._installed_instance.getcontents()
2879 for f in sorted(preserve_paths):
2880 f = _unicode_decode(f,
2881 encoding=_encodings['content'], errors='strict')
2882 f_abs = os.path.join(root, f.lstrip(os.sep))
2883 contents_entry = old_contents.get(f_abs)
2884 if contents_entry is None:
2885 # This will probably never happen, but it might if one of the
2886 # paths returned from findConsumers() refers to one of the libs
2887 # that should be preserved yet the path is not listed in the
2888 # contents. Such a path might belong to some other package, so
2889 # it shouldn't be preserved here.
2890 showMessage(_("!!! File '%s' will not be preserved "
2891 "due to missing contents entry\n") % (f_abs,),
2892 level=logging.ERROR, noiselevel=-1)
2893 preserve_paths.remove(f)
2895 new_contents[f_abs] = contents_entry
2896 obj_type = contents_entry[0]
2897 showMessage(_(">>> needed %s %s\n") % (obj_type, f_abs),
2899 # Add parent directories to contents if necessary.
# Walk up toward root, registering each ancestor as a "dir"
# entry; the prev/parent_dir comparison stops the walk once
# dirname() no longer shortens the path.
2900 parent_dir = os.path.dirname(f_abs)
2901 while len(parent_dir) > len(root):
2902 new_contents[parent_dir] = ["dir"]
2904 parent_dir = os.path.dirname(parent_dir)
2905 if prev == parent_dir:
# Atomic rewrite so a crash cannot leave a truncated CONTENTS.
2907 outfile = atomic_ofstream(os.path.join(self.dbtmpdir, "CONTENTS"))
2908 write_contents(new_contents, root, outfile)
2910 self._clear_contents_cache()
# Identify preserved libraries that no longer have any live consumers,
# so they can be removed.  Rebuilds a consumer graph over the preserved-
# libs registry, prunes edges whose consumers have an alternative
# (non-preserved) provider with the same soname — or, when
# unmerge_no_replacement is set, consumers that are themselves being
# unmerged — then returns the orphaned libs grouped per owning cpv.
# NOTE(review): this extract elides lines; the returned cpv_lib_map
# construction at the end is only partially visible.
2912 def _find_unused_preserved_libs(self, unmerge_no_replacement):
2914 Find preserved libraries that don't have any consumers left.
2917 if self._linkmap_broken or \
2918 self.vartree.dbapi._linkmap is None or \
2919 self.vartree.dbapi._plib_registry is None or \
2920 not self.vartree.dbapi._plib_registry.hasEntries():
2923 # Since preserved libraries can be consumers of other preserved
2924 # libraries, use a graph to track consumer relationships.
2925 plib_dict = self.vartree.dbapi._plib_registry.getPreservedLibs()
2926 linkmap = self.vartree.dbapi._linkmap
2927 lib_graph = digraph()
2928 preserved_nodes = set()
2929 preserved_paths = set()
2932 root = self.settings['ROOT']
# Same node-deduplication helper as in _find_libs_to_preserve.
2934 def path_to_node(path):
2935 node = path_node_map.get(path)
2937 node = LinkageMap._LibGraphNode(linkmap._obj_key(path))
2938 alt_path_node = lib_graph.get(node)
2939 if alt_path_node is not None:
2940 node = alt_path_node
2941 node.alt_paths.add(path)
2942 path_node_map[path] = node
# Seed the graph with every still-existing preserved lib and its
# still-existing consumers.
2945 for cpv, plibs in plib_dict.items():
2947 path_cpv_map[f] = cpv
2948 preserved_node = path_to_node(f)
2949 if not preserved_node.file_exists():
2951 lib_graph.add(preserved_node, None)
2952 preserved_paths.add(f)
2953 preserved_nodes.add(preserved_node)
2954 for c in self.vartree.dbapi._linkmap.findConsumers(f):
2955 consumer_node = path_to_node(c)
2956 if not consumer_node.file_exists():
2958 # Note that consumers may also be providers.
2959 lib_graph.add(preserved_node, consumer_node)
2961 # Eliminate consumers having providers with the same soname as an
2962 # installed library that is not preserved. This eliminates
2963 # libraries that are erroneously preserved due to a move from one
2964 # directory to another.
2965 # Also eliminate consumers that are going to be unmerged if
2966 # unmerge_no_replacement is True.
2968 for preserved_node in preserved_nodes:
2969 soname = linkmap.getSoname(preserved_node)
2970 for consumer_node in lib_graph.parent_nodes(preserved_node):
2971 if consumer_node in preserved_nodes:
2973 if unmerge_no_replacement:
# A consumer is "being unmerged" only when every one of
# its alt paths is owned by this package.
2974 will_be_unmerged = True
2975 for path in consumer_node.alt_paths:
2976 if not self.isowner(path):
2977 will_be_unmerged = False
2979 if will_be_unmerged:
2980 # This consumer is not preserved and it is
2981 # being unmerged, so drop this edge.
2982 lib_graph.remove_edge(preserved_node, consumer_node)
# findProviders results are memoized per consumer node.
2985 providers = provider_cache.get(consumer_node)
2986 if providers is None:
2987 providers = linkmap.findProviders(consumer_node)
2988 provider_cache[consumer_node] = providers
2989 providers = providers.get(soname)
2990 if providers is None:
2992 for provider in providers:
2993 if provider in preserved_paths:
2995 provider_node = path_to_node(provider)
2996 if not provider_node.file_exists():
2998 if provider_node in preserved_nodes:
3000 # An alternative provider seems to be
3001 # installed, so drop this edge.
3002 lib_graph.remove_edge(preserved_node, consumer_node)
# Preserved libs that are now graph roots have no remaining
# consumers; collect their paths and group them by owning cpv.
3007 root_nodes = preserved_nodes.intersection(lib_graph.root_nodes())
3010 lib_graph.difference_update(root_nodes)
3012 for node in root_nodes:
3013 unlink_list.update(node.alt_paths)
3014 unlink_list = sorted(unlink_list)
3015 for obj in unlink_list:
3016 cpv = path_cpv_map.get(obj)
3018 # This means that a symlink is in the preserved libs
3019 # registry, but the actual lib it points to is not.
3020 self._display_merge(_("!!! symlink to lib is preserved, "
3021 "but not the lib itself:\n!!! '%s'\n") % (obj,),
3022 level=logging.ERROR, noiselevel=-1)
3024 removed = cpv_lib_map.get(cpv)
3027 cpv_lib_map[cpv] = removed
# Physically delete the unused preserved libraries found by
# _find_unused_preserved_libs(), then try to remove any parent
# directories that became empty, and finally prune stale entries from
# the preserved-libs registry.
3032 def _remove_preserved_libs(self, cpv_lib_map):
3034 Remove files returned from _find_unused_preserved_libs().
3039 files_to_remove = set()
3040 for files in cpv_lib_map.values():
3041 files_to_remove.update(files)
3042 files_to_remove = sorted(files_to_remove)
3043 showMessage = self._display_merge
3044 root = self.settings['ROOT']
3047 for obj in files_to_remove:
3048 obj = os.path.join(root, obj.lstrip(os.sep))
# Parent dirs are collected so empty ones can be rmdir'd below.
3049 parent_dirs.add(os.path.dirname(obj))
3050 if os.path.islink(obj):
3056 except OSError as e:
# A vanished file (ENOENT) is fine; anything else propagates
# past the visible handling here.
3057 if e.errno != errno.ENOENT:
3061 showMessage(_("<<< !needed %s %s\n") % (obj_type, obj),
3064 # Remove empty parent directories if possible.
3066 x = parent_dirs.pop()
3073 x = os.path.dirname(x)
3077 self.vartree.dbapi._plib_registry.pruneNonExisting()
# Detect file collisions between the image being merged (file_list /
# symlink_list, relative to srcroot) and files already on the live
# filesystem that are not owned by packages in mypkglist.  Returns
# (collisions, symlink_collisions, plib_collisions): plain collisions,
# symlinks that would shadow a live directory (banned by PMS, bug
# #326685), and collisions with preserved libraries grouped by cpv.
# COLLISION_IGNORE patterns (fnmatch) suppress matching collisions.
3079 def _collision_protect(self, srcroot, destroot, mypkglist,
3080 file_list, symlink_list):
3084 collision_ignore = []
3085 for x in portage.util.shlex_split(
3086 self.settings.get("COLLISION_IGNORE", "")):
# A bare directory entry is normalized into a recursive glob.
3087 if os.path.isdir(os.path.join(self._eroot, x.lstrip(os.sep))):
3088 x = normalize_path(x)
3090 collision_ignore.append(x)
3092 # For collisions with preserved libraries, the current package
3093 # will assume ownership and the libraries will be unregistered.
3094 if self.vartree.dbapi._plib_registry is None:
3095 # preserve-libs is entirely disabled
3100 plib_dict = self.vartree.dbapi._plib_registry.getPreservedLibs()
3103 for cpv, paths in plib_dict.items():
3104 plib_paths.update(paths)
3106 plib_cpv_map[f] = cpv
# (st_dev, st_ino) -> paths map lets hardlinked preserved libs be
# recognized under any of their names.
3107 plib_inodes = self._lstat_inode_map(plib_paths)
3109 plib_collisions = {}
3111 showMessage = self._display_merge
3114 symlink_collisions = []
3115 destroot = self.settings['ROOT']
3116 showMessage(_(" %s checking %d files for package collisions\n") % \
3117 (colorize("GOOD", "*"), len(file_list) + len(symlink_list)))
3118 for i, (f, f_type) in enumerate(chain(
3119 ((f, "reg") for f in file_list),
3120 ((f, "sym") for f in symlink_list))):
# Progress message every 1000 files.
3121 if i % 1000 == 0 and i != 0:
3122 showMessage(_("%d files checked ...\n") % i)
3124 dest_path = normalize_path(
3125 os.path.join(destroot, f.lstrip(os.path.sep)))
3127 dest_lstat = os.lstat(dest_path)
3128 except EnvironmentError as e:
3129 if e.errno == errno.ENOENT:
3132 elif e.errno == errno.ENOTDIR:
3134 # A non-directory is in a location where this package
3135 # expects to have a directory.
# Walk up until the offending non-directory ancestor is
# found; that ancestor becomes the colliding path.
3137 parent_path = dest_path
3138 while len(parent_path) > len(destroot):
3139 parent_path = os.path.dirname(parent_path)
3141 dest_lstat = os.lstat(parent_path)
3143 except EnvironmentError as e:
3144 if e.errno != errno.ENOTDIR:
3148 raise AssertionError(
3149 "unable to find non-directory " + \
3150 "parent for '%s'" % dest_path)
3151 dest_path = parent_path
3152 f = os.path.sep + dest_path[len(destroot):]
3160 if stat.S_ISDIR(dest_lstat.st_mode):
3162 # This case is explicitly banned
3163 # by PMS (see bug #326685).
3164 symlink_collisions.append(f)
3165 collisions.append(f)
# Preserved-lib collisions are recorded separately and excluded
# from the normal collision list.
3168 plibs = plib_inodes.get((dest_lstat.st_dev, dest_lstat.st_ino))
3171 cpv = plib_cpv_map[path]
3172 paths = plib_collisions.get(cpv)
3175 plib_collisions[cpv] = paths
3177 # The current package will assume ownership and the
3178 # libraries will be unregistered, so exclude this
3179 # path from the normal collisions.
3183 full_path = os.path.join(destroot, f.lstrip(os.path.sep))
3184 for ver in mypkglist:
# Files owned by a same-slot package, or protected but ignored
# by a COLLISION_IGNORE pattern, are not collisions.
3188 if not isowned and self.isprotected(full_path):
3191 f_match = full_path[len(self._eroot)-1:]
3193 for pattern in collision_ignore:
3194 if fnmatch.fnmatch(f_match, pattern):
3198 collisions.append(f)
3199 return collisions, symlink_collisions, plib_collisions
# Build {(st_dev, st_ino): set(paths)} for the given root-relative
# paths; hardlinked files collapse onto one key.  Paths that do not
# exist (ENOENT/ENOTDIR) are skipped.
3201 def _lstat_inode_map(self, path_iter):
3203 Use lstat to create a map of the form:
3204 {(st_dev, st_ino) : set([path1, path2, ...])}
3205 Multiple paths may reference the same inode due to hardlinks.
3206 All lstat() calls are relative to self.myroot.
3211 root = self.settings['ROOT']
3214 path = os.path.join(root, f.lstrip(os.sep))
3217 except OSError as e:
3218 if e.errno not in (errno.ENOENT, errno.ENOTDIR):
3222 key = (st.st_dev, st.st_ino)
3223 paths = inode_map.get(key)
3226 inode_map[key] = paths
# Security check run before preinst: scan the contents of the given
# installed instances for suid/sgid regular files whose hardlink count
# exceeds the number of links accounted for by the package itself —
# such extra links could let an attacker retain a vulnerable binary.
# Findings are reported via eerror; realpath deduplication avoids
# re-examining the same file reached through symlinked paths.
3230 def _security_check(self, installed_instances):
3231 if not installed_instances:
3236 showMessage = self._display_merge
3239 for dblnk in installed_instances:
3240 file_paths.update(dblnk.getcontents())
3243 for i, path in enumerate(file_paths):
# Same merge->fs encoding fallback used elsewhere in this class.
3247 _unicode_encode(path,
3248 encoding=_encodings['merge'], errors='strict')
3249 except UnicodeEncodeError:
3250 # The package appears to have been merged with a
3251 # different value of sys.getfilesystemencoding(),
3252 # so fall back to utf_8 if appropriate.
3254 _unicode_encode(path,
3255 encoding=_encodings['fs'], errors='strict')
3256 except UnicodeEncodeError:
3263 except OSError as e:
3264 if e.errno not in (errno.ENOENT, errno.ENOTDIR):
3268 if not stat.S_ISREG(s.st_mode):
3270 path = os.path.realpath(path)
3271 if path in real_paths:
3273 real_paths.add(path)
# Only multi-linked suid/sgid files are of interest.
3274 if s.st_nlink > 1 and \
3275 s.st_mode & (stat.S_ISUID | stat.S_ISGID):
3276 k = (s.st_dev, s.st_ino)
3277 inode_map.setdefault(k, []).append((path, s))
3278 suspicious_hardlinks = []
3279 for path_list in inode_map.values():
3280 path, s = path_list[0]
3281 if len(path_list) == s.st_nlink:
3282 # All hardlinks seem to be owned by this package.
3284 suspicious_hardlinks.append(path_list)
3285 if not suspicious_hardlinks:
3289 msg.append(_("suid/sgid file(s) "
3290 "with suspicious hardlink(s):"))
3292 for path_list in suspicious_hardlinks:
3293 for path, s in path_list:
3294 msg.append("\t%s" % path)
3296 msg.append(_("See the Gentoo Security Handbook "
3297 "guide for advice on how to proceed."))
3299 self._eerror("preinst", msg)
# Convenience wrapper: emit the given lines as QA warnings for *phase*.
3303 def _eqawarn(self, phase, lines):
3304 self._elog("eqawarn", phase, lines)
# Convenience wrapper: emit the given lines as errors for *phase*.
3306 def _eerror(self, phase, lines):
3307 self._elog("eerror", phase, lines)
# Dispatch log lines to portage.elog.messages.<funcname>.  Without a
# scheduler the messages go directly through the elog function; with a
# scheduler they are captured (via the out= keyword) and forwarded to
# scheduler.output(), honoring PORTAGE_BACKGROUND / PORTAGE_LOG_FILE.
3309 def _elog(self, funcname, phase, lines):
3310 func = getattr(portage.elog.messages, funcname)
3311 if self._scheduler is None:
3313 func(l, phase=phase, key=self.mycpv)
3315 background = self.settings.get("PORTAGE_BACKGROUND") == "1"
# In "subprocess" mode the log path is left unset here.
3317 if self.settings.get("PORTAGE_BACKGROUND") != "subprocess":
3318 log_path = self.settings.get("PORTAGE_LOG_FILE")
3321 func(line, phase=phase, key=self.mycpv, out=out)
3322 msg = out.getvalue()
3323 self._scheduler.output(msg,
3324 background=background, log_path=log_path)
# Flush accumulated elog messages for this package.  Without an IPC
# pipe, messages are processed in-process via elog_process(); with a
# pipe, ebuild-phase and python-side messages are merged and streamed
# to the parent process as newline-terminated "funcname phase cpv line"
# records.
3326 def _elog_process(self, phasefilter=None):
3328 if self._pipe is None:
3329 elog_process(cpv, self.settings, phasefilter=phasefilter)
3331 logdir = os.path.join(self.settings["T"], "logging")
3332 ebuild_logentries = collect_ebuild_messages(logdir)
3333 py_logentries = collect_messages(key=cpv).get(cpv, {})
3334 logentries = _merge_logentries(py_logentries, ebuild_logentries)
3343 for phase, messages in logentries.items():
3344 for key, lines in messages:
# funcnames maps message keys to elog function names
# (assigned in elided lines above).
3345 funcname = funcnames[key]
# basestring: Python 2 compatibility alias used by portage.
3346 if isinstance(lines, basestring):
3349 for line in line.split('\n'):
3350 fields = (funcname, phase, cpv, line)
3351 str_buffer.append(' '.join(fields))
3352 str_buffer.append('\n')
3354 os.write(self._pipe, _unicode_encode(''.join(str_buffer)))
# Forward a message to the emerge log (first argument disables xterm
# title updates in emergelog).
3356 def _emerge_log(self, msg):
3357 emergelog(False, msg)
3359 def treewalk(self, srcroot, destroot, inforoot, myebuild, cleanup=0,
3360 mydbapi=None, prev_mtimes=None, counter=None):
3363 This function does the following:
3365 calls self._preserve_libs if FEATURES=preserve-libs
3366 calls self._collision_protect if FEATURES=collision-protect
3367 calls doebuild(mydo=pkg_preinst)
3368 Merges the package to the livefs
3369 unmerges old version (if required)
3370 calls doebuild(mydo=pkg_postinst)
3373 @param srcroot: Typically this is ${D}
3374 @type srcroot: String (Path)
3375 @param destroot: ignored, self.settings['ROOT'] is used instead
3376 @type destroot: String (Path)
3377 @param inforoot: root of the vardb entry ?
3378 @type inforoot: String (Path)
3379 @param myebuild: path to the ebuild that we are processing
3380 @type myebuild: String (Path)
3381 @param mydbapi: dbapi which is handed to doebuild.
3382 @type mydbapi: portdbapi instance
3383 @param prev_mtimes: { Filename:mtime } mapping for env_update
3384 @type prev_mtimes: Dictionary
3390 secondhand is a list of symlinks that have been skipped due to their target
3391 not existing; we will merge these symlinks at a later time.
3396 srcroot = _unicode_decode(srcroot,
3397 encoding=_encodings['content'], errors='strict')
3398 destroot = self.settings['ROOT']
3399 inforoot = _unicode_decode(inforoot,
3400 encoding=_encodings['content'], errors='strict')
3401 myebuild = _unicode_decode(myebuild,
3402 encoding=_encodings['content'], errors='strict')
3404 showMessage = self._display_merge
3405 srcroot = normalize_path(srcroot).rstrip(os.path.sep) + os.path.sep
3407 if not os.path.isdir(srcroot):
3408 showMessage(_("!!! Directory Not Found: D='%s'\n") % srcroot,
3409 level=logging.ERROR, noiselevel=-1)
3413 for var_name in ('CHOST', 'SLOT'):
3414 if var_name == 'CHOST' and self.cat == 'virtual':
3416 os.unlink(os.path.join(inforoot, var_name))
3423 f = io.open(_unicode_encode(
3424 os.path.join(inforoot, var_name),
3425 encoding=_encodings['fs'], errors='strict'),
3426 mode='r', encoding=_encodings['repo.content'],
3428 val = f.readline().strip()
3429 except EnvironmentError as e:
3430 if e.errno != errno.ENOENT:
3438 if var_name == 'SLOT':
3441 if not slot.strip():
3442 slot = self.settings.get(var_name, '')
3443 if not slot.strip():
3444 showMessage(_("!!! SLOT is undefined\n"),
3445 level=logging.ERROR, noiselevel=-1)
3447 write_atomic(os.path.join(inforoot, var_name), slot + '\n')
3449 if val != self.settings.get(var_name, ''):
3450 self._eqawarn('preinst',
3451 [_("QA Notice: Expected %(var_name)s='%(expected_value)s', got '%(actual_value)s'\n") % \
3452 {"var_name":var_name, "expected_value":self.settings.get(var_name, ''), "actual_value":val}])
3455 self._eerror("preinst", lines)
3457 if not os.path.exists(self.dbcatdir):
3458 ensure_dirs(self.dbcatdir)
3460 cp = self.mysplit[0]
3461 slot_atom = "%s:%s" % (cp, slot)
3463 # filter any old-style virtual matches
3464 slot_matches = [cpv for cpv in self.vartree.dbapi.match(slot_atom) \
3465 if cpv_getkey(cpv) == cp]
3467 if self.mycpv not in slot_matches and \
3468 self.vartree.dbapi.cpv_exists(self.mycpv):
3469 # handle multislot or unapplied slotmove
3470 slot_matches.append(self.mycpv)
3473 from portage import config
3474 for cur_cpv in slot_matches:
3475 # Clone the config in case one of these has to be unmerged since
3476 # we need it to have private ${T} etc... for things like elog.
3477 settings_clone = config(clone=self.settings)
3478 settings_clone.pop("PORTAGE_BUILDIR_LOCKED", None)
3479 settings_clone.reset()
3480 others_in_slot.append(dblink(self.cat, catsplit(cur_cpv)[1],
3481 settings=settings_clone,
3482 vartree=self.vartree, treetype="vartree",
3483 scheduler=self._scheduler, pipe=self._pipe))
3485 retval = self._security_check(others_in_slot)
3490 # Used by self.isprotected().
3493 for dblnk in others_in_slot:
3494 cur_counter = self.vartree.dbapi.cpv_counter(dblnk.mycpv)
3495 if cur_counter > max_counter:
3496 max_counter = cur_counter
3498 self._installed_instance = max_dblnk
3500 if self.settings.get("INSTALL_MASK") or \
3501 "nodoc" in self.settings.features or \
3502 "noinfo" in self.settings.features or \
3503 "noman" in self.settings.features:
3504 # Apply INSTALL_MASK before collision-protect, since it may
3505 # be useful to avoid collisions in some scenarios.
3506 phase = MiscFunctionsProcess(background=False,
3507 commands=["preinst_mask"], phase="preinst",
3508 scheduler=self._scheduler, settings=self.settings)
3512 # We check for unicode encoding issues after src_install. However,
3513 # the check must be repeated here for binary packages (it's
3514 # inexpensive since we call os.walk() here anyway).
3516 line_ending_re = re.compile('[\n\r]')
3517 srcroot_len = len(srcroot)
3518 ed_len = len(self.settings["ED"])
3522 unicode_error = False
3523 eagain_error = False
3527 paths_with_newlines = []
3530 walk_iter = os.walk(srcroot, onerror=onerror)
3533 parent, dirs, files = next(walk_iter)
3534 except StopIteration:
3536 except OSError as e:
3537 if e.errno != errno.EAGAIN:
3539 # Observed with PyPy 1.8.
3544 parent = _unicode_decode(parent,
3545 encoding=_encodings['merge'], errors='strict')
3546 except UnicodeDecodeError:
3547 new_parent = _unicode_decode(parent,
3548 encoding=_encodings['merge'], errors='replace')
3549 new_parent = _unicode_encode(new_parent,
3550 encoding='ascii', errors='backslashreplace')
3551 new_parent = _unicode_decode(new_parent,
3552 encoding=_encodings['merge'], errors='replace')
3553 os.rename(parent, new_parent)
3554 unicode_error = True
3555 unicode_errors.append(new_parent[ed_len:])
3560 fname = _unicode_decode(fname,
3561 encoding=_encodings['merge'], errors='strict')
3562 except UnicodeDecodeError:
3563 fpath = portage._os.path.join(
3564 parent.encode(_encodings['merge']), fname)
3565 new_fname = _unicode_decode(fname,
3566 encoding=_encodings['merge'], errors='replace')
3567 new_fname = _unicode_encode(new_fname,
3568 encoding='ascii', errors='backslashreplace')
3569 new_fname = _unicode_decode(new_fname,
3570 encoding=_encodings['merge'], errors='replace')
3571 new_fpath = os.path.join(parent, new_fname)
3572 os.rename(fpath, new_fpath)
3573 unicode_error = True
3574 unicode_errors.append(new_fpath[ed_len:])
3578 fpath = os.path.join(parent, fname)
3580 relative_path = fpath[srcroot_len:]
3582 if line_ending_re.search(relative_path) is not None:
3583 paths_with_newlines.append(relative_path)
3585 file_mode = os.lstat(fpath).st_mode
3586 if stat.S_ISREG(file_mode):
3587 myfilelist.append(relative_path)
3588 elif stat.S_ISLNK(file_mode):
3589 # Note: os.walk puts symlinks to directories in the "dirs"
3590 # list and it does not traverse them since that could lead
3591 # to an infinite recursion loop.
3592 mylinklist.append(relative_path)
3597 if not (unicode_error or eagain_error):
3601 self._elog("eqawarn", "preinst",
3602 _merge_unicode_error(unicode_errors))
3604 if paths_with_newlines:
3606 msg.append(_("This package installs one or more files containing line ending characters:"))
3608 paths_with_newlines.sort()
3609 for f in paths_with_newlines:
3610 msg.append("\t/%s" % (f.replace("\n", "\\n").replace("\r", "\\r")))
3612 msg.append(_("package %s NOT merged") % self.mycpv)
3617 # If there are no files to merge, and an installed package in the same
3618 # slot has files, it probably means that something went wrong.
3619 if self.settings.get("PORTAGE_PACKAGE_EMPTY_ABORT") == "1" and \
3620 not myfilelist and not mylinklist and others_in_slot:
3621 installed_files = None
3622 for other_dblink in others_in_slot:
3623 installed_files = other_dblink.getcontents()
3624 if not installed_files:
3626 from textwrap import wrap
3630 "new_cpv":self.mycpv,
3631 "old_cpv":other_dblink.mycpv
3633 msg.extend(wrap(_("The '%(new_cpv)s' package will not install "
3634 "any files, but the currently installed '%(old_cpv)s'"
3635 " package has the following files: ") % d, wrap_width))
3637 msg.extend(sorted(installed_files))
3639 msg.append(_("package %s NOT merged") % self.mycpv)
3642 _("Manually run `emerge --unmerge =%s` if you "
3643 "really want to remove the above files. Set "
3644 "PORTAGE_PACKAGE_EMPTY_ABORT=\"0\" in "
3645 "/etc/make.conf if you do not want to "
3646 "abort in cases like this.") % other_dblink.mycpv,
3652 # Make sure the ebuild environment is initialized and that ${T}/elog
3653 # exists for logging of collision-protect eerror messages.
3654 if myebuild is None:
3655 myebuild = os.path.join(inforoot, self.pkg + ".ebuild")
3656 doebuild_environment(myebuild, "preinst",
3657 settings=self.settings, db=mydbapi)
3658 self.settings["REPLACING_VERSIONS"] = " ".join(
3659 [portage.versions.cpv_getversion(other.mycpv)
3660 for other in others_in_slot])
3661 prepare_build_dirs(settings=self.settings, cleanup=cleanup)
3663 # check for package collisions
3664 blockers = self._blockers
3665 if blockers is None:
3667 collisions, symlink_collisions, plib_collisions = \
3668 self._collision_protect(srcroot, destroot,
3669 others_in_slot + blockers, myfilelist, mylinklist)
3671 if symlink_collisions:
3672 # Symlink collisions need to be distinguished from other types
3673 # of collisions, in order to avoid confusion (see bug #409359).
3674 msg = _("Package '%s' has one or more collisions "
3675 "between symlinks and directories, which is explicitly "
3676 "forbidden by PMS section 13.4 (see bug #326685):") % \
3677 (self.settings.mycpv,)
3678 msg = textwrap.wrap(msg, 70)
3680 for f in symlink_collisions:
3681 msg.append("\t%s" % os.path.join(destroot,
3682 f.lstrip(os.path.sep)))
3684 self._elog("eerror", "preinst", msg)
3687 collision_protect = "collision-protect" in self.settings.features
3688 protect_owned = "protect-owned" in self.settings.features
3689 msg = _("This package will overwrite one or more files that"
3690 " may belong to other packages (see list below).")
3691 if not (collision_protect or protect_owned):
3692 msg += _(" Add either \"collision-protect\" or"
3693 " \"protect-owned\" to FEATURES in"
3694 " make.conf if you would like the merge to abort"
3695 " in cases like this. See the make.conf man page for"
3696 " more information about these features.")
3697 if self.settings.get("PORTAGE_QUIET") != "1":
3698 msg += _(" You can use a command such as"
3699 " `portageq owners / <filename>` to identify the"
3700 " installed package that owns a file. If portageq"
3701 " reports that only one package owns a file then do NOT"
3702 " file a bug report. A bug report is only useful if it"
3703 " identifies at least two or more packages that are known"
3704 " to install the same file(s)."
3705 " If a collision occurs and you"
3706 " can not explain where the file came from then you"
3707 " should simply ignore the collision since there is not"
3708 " enough information to determine if a real problem"
3709 " exists. Please do NOT file a bug report at"
3710 " http://bugs.gentoo.org unless you report exactly which"
3711 " two packages install the same file(s). Once again,"
3712 " please do NOT file a bug report unless you have"
3713 " completely understood the above message.")
3715 self.settings["EBUILD_PHASE"] = "preinst"
3716 from textwrap import wrap
3718 if collision_protect:
3720 msg.append(_("package %s NOT merged") % self.settings.mycpv)
3722 msg.append(_("Detected file collision(s):"))
3725 for f in collisions:
3726 msg.append("\t%s" % \
3727 os.path.join(destroot, f.lstrip(os.path.sep)))
3732 if collision_protect or protect_owned or symlink_collisions:
3735 msg.append(_("Searching all installed"
3736 " packages for file collisions..."))
3738 msg.append(_("Press Ctrl-C to Stop"))
3742 if len(collisions) > 20:
3743 # get_owners is slow for large numbers of files, so
3744 # don't look them all up.
3745 collisions = collisions[:20]
3748 owners = self.vartree.dbapi._owners.get_owners(collisions)
3749 self.vartree.dbapi.flush_cache()
3753 for pkg, owned_files in owners.items():
3756 msg.append("%s" % cpv)
3757 for f in sorted(owned_files):
3758 msg.append("\t%s" % os.path.join(destroot,
3759 f.lstrip(os.path.sep)))
3764 eerror([_("None of the installed"
3765 " packages claim the file(s)."), ""])
3767 symlink_abort_msg =_("Package '%s' NOT merged since it has "
3768 "one or more collisions between symlinks and directories, "
3769 "which is explicitly forbidden by PMS section 13.4 "
3770 "(see bug #326685).")
3772 # The explanation about the collision and how to solve
3773 # it may not be visible via a scrollback buffer, especially
3774 # if the number of file collisions is large. Therefore,
3775 # show a summary at the end.
3777 if symlink_collisions:
3779 msg = symlink_abort_msg % (self.settings.mycpv,)
3780 elif collision_protect:
3782 msg = _("Package '%s' NOT merged due to file collisions.") % \
3784 elif protect_owned and owners:
3786 msg = _("Package '%s' NOT merged due to file collisions.") % \
3789 msg = _("Package '%s' merged despite file collisions.") % \
3791 msg += _(" If necessary, refer to your elog "
3792 "messages for the whole content of the above message.")
3793 eerror(wrap(msg, 70))
3798 # The merge process may move files out of the image directory,
3799 # which causes invalidation of the .installed flag.
3801 os.unlink(os.path.join(
3802 os.path.dirname(normalize_path(srcroot)), ".installed"))
3803 except OSError as e:
3804 if e.errno != errno.ENOENT:
3808 self.dbdir = self.dbtmpdir
3810 ensure_dirs(self.dbtmpdir)
3812 # run preinst script
3813 showMessage(_(">>> Merging %(cpv)s to %(destroot)s\n") % \
3814 {"cpv":self.mycpv, "destroot":destroot})
3815 phase = EbuildPhase(background=False, phase="preinst",
3816 scheduler=self._scheduler, settings=self.settings)
3820 # XXX: Decide how to handle failures here.
3822 showMessage(_("!!! FAILED preinst: ")+str(a)+"\n",
3823 level=logging.ERROR, noiselevel=-1)
3826 # copy "info" files (like SLOT, CFLAGS, etc.) into the database
3827 for x in os.listdir(inforoot):
3828 self.copyfile(inforoot+"/"+x)
3830 # write local package counter for recording
3832 counter = self.vartree.dbapi.counter_tick(mycpv=self.mycpv)
3833 f = io.open(_unicode_encode(os.path.join(self.dbtmpdir, 'COUNTER'),
3834 encoding=_encodings['fs'], errors='strict'),
3835 mode='w', encoding=_encodings['repo.content'],
3836 errors='backslashreplace')
3837 f.write(_unicode_decode(str(counter)))
3840 self.updateprotect()
3842 #if we have a file containing previously-merged config file md5sums, grab it.
3843 self.vartree.dbapi._fs_lock()
3845 cfgfiledict = grabdict(self.vartree.dbapi._conf_mem_file)
3846 if "NOCONFMEM" in self.settings:
3847 cfgfiledict["IGNORE"]=1
3849 cfgfiledict["IGNORE"]=0
3851 # Always behave like --noconfmem is enabled for downgrades
3852 # so that people who don't know about this option are less
3853 # likely to get confused when doing upgrade/downgrade cycles.
3854 pv_split = catpkgsplit(self.mycpv)[1:]
3855 for other in others_in_slot:
3856 if pkgcmp(pv_split, catpkgsplit(other.mycpv)[1:]) < 0:
3857 cfgfiledict["IGNORE"] = 1
3860 rval = self._merge_contents(srcroot, destroot, cfgfiledict)
3861 if rval != os.EX_OK:
3864 self.vartree.dbapi._fs_unlock()
3866 # These caches are populated during collision-protect and the data
3867 # they contain is now invalid. It's very important to invalidate
3868 # the contents_inodes cache so that FEATURES=unmerge-orphans
3869 # doesn't unmerge anything that belongs to this package that has
3871 for dblnk in others_in_slot:
3872 dblnk._clear_contents_cache()
3873 self._clear_contents_cache()
3875 linkmap = self.vartree.dbapi._linkmap
3876 plib_registry = self.vartree.dbapi._plib_registry
3877 # We initialize preserve_paths to an empty set rather
3878 # than None here because it plays an important role
3879 # in prune_plib_registry logic by serving to indicate
3880 # that we have a replacement for a package that's
3883 preserve_paths = set()
3885 if not (self._linkmap_broken or linkmap is None or
3886 plib_registry is None):
3887 self.vartree.dbapi._fs_lock()
3888 plib_registry.lock()
3890 plib_registry.load()
3891 needed = os.path.join(inforoot, linkmap._needed_aux_key)
3892 self._linkmap_rebuild(include_file=needed)
3894 # Preserve old libs if they are still in use
3895 # TODO: Handle cases where the previous instance
3896 # has already been uninstalled but it still has some
3897 # preserved libraries in the registry that we may
3898 # want to preserve here.
3899 preserve_paths = self._find_libs_to_preserve()
3901 plib_registry.unlock()
3902 self.vartree.dbapi._fs_unlock()
3905 self._add_preserve_libs_to_contents(preserve_paths)
3907 # If portage is reinstalling itself, remove the old
3908 # version now since we want to use the temporary
3909 # PORTAGE_BIN_PATH that will be removed when we return.
3910 reinstall_self = False
3911 if self.myroot == "/" and \
3912 match_from_list(PORTAGE_PACKAGE_ATOM, [self.mycpv]):
3913 reinstall_self = True
3915 emerge_log = self._emerge_log
3917 # If we have any preserved libraries then autoclean
3918 # is forced so that preserve-libs logic doesn't have
3919 # to account for the additional complexity of the
3920 # AUTOCLEAN=no mode.
3921 autoclean = self.settings.get("AUTOCLEAN", "yes") == "yes" \
3925 emerge_log(_(" >>> AUTOCLEAN: %s") % (slot_atom,))
3927 others_in_slot.append(self) # self has just been merged
3928 for dblnk in list(others_in_slot):
3931 if not (autoclean or dblnk.mycpv == self.mycpv or reinstall_self):
3933 showMessage(_(">>> Safely unmerging already-installed instance...\n"))
3934 emerge_log(_(" === Unmerging... (%s)") % (dblnk.mycpv,))
3935 others_in_slot.remove(dblnk) # dblnk will unmerge itself now
3936 dblnk._linkmap_broken = self._linkmap_broken
3937 dblnk.settings["REPLACED_BY_VERSION"] = portage.versions.cpv_getversion(self.mycpv)
3938 dblnk.settings.backup_changes("REPLACED_BY_VERSION")
3939 unmerge_rval = dblnk.unmerge(ldpath_mtimes=prev_mtimes,
3940 others_in_slot=others_in_slot, needed=needed,
3941 preserve_paths=preserve_paths)
3942 dblnk.settings.pop("REPLACED_BY_VERSION", None)
3944 if unmerge_rval == os.EX_OK:
3945 emerge_log(_(" >>> unmerge success: %s") % (dblnk.mycpv,))
3947 emerge_log(_(" !!! unmerge FAILURE: %s") % (dblnk.mycpv,))
3951 # TODO: Check status and abort if necessary.
3955 showMessage(_(">>> Original instance of package unmerged safely.\n"))
3957 if len(others_in_slot) > 1:
3958 showMessage(colorize("WARN", _("WARNING:"))
3959 + _(" AUTOCLEAN is disabled. This can cause serious"
3960 " problems due to overlapping packages.\n"),
3961 level=logging.WARN, noiselevel=-1)
3963 # We hold both directory locks.
3964 self.dbdir = self.dbpkgdir
3968 _movefile(self.dbtmpdir, self.dbpkgdir, mysettings=self.settings)
3972 # Check for file collisions with blocking packages
3973 # and remove any colliding files from their CONTENTS
3974 # since they now belong to this package.
3975 self._clear_contents_cache()
3976 contents = self.getcontents()
3977 destroot_len = len(destroot) - 1
3980 for blocker in blockers:
3981 self.vartree.dbapi.removeFromContents(blocker, iter(contents),
3982 relative_paths=False)
3986 plib_registry = self.vartree.dbapi._plib_registry
3988 self.vartree.dbapi._fs_lock()
3989 plib_registry.lock()
3991 plib_registry.load()
3994 # keep track of the libs we preserved
3995 plib_registry.register(self.mycpv, slot, counter,
3996 sorted(preserve_paths))
3998 # Unregister any preserved libs that this package has overwritten
3999 # and update the contents of the packages that owned them.
4000 plib_dict = plib_registry.getPreservedLibs()
4001 for cpv, paths in plib_collisions.items():
4002 if cpv not in plib_dict:
4004 has_vdb_entry = False
4005 if cpv != self.mycpv:
4006 # If we've replaced another instance with the
4007 # same cpv then the vdb entry no longer belongs
4008 # to it, so we'll have to get the slot and counter
4009 # from plib_registry._data instead.
4010 self.vartree.dbapi.lock()
4013 slot, counter = self.vartree.dbapi.aux_get(
4014 cpv, ["SLOT", "COUNTER"])
4018 has_vdb_entry = True
4019 self.vartree.dbapi.removeFromContents(
4022 self.vartree.dbapi.unlock()
4024 if not has_vdb_entry:
4025 # It's possible for previously unmerged packages
4026 # to have preserved libs in the registry, so try
4027 # to retrieve the slot and counter from there.
4028 has_registry_entry = False
4029 for plib_cps, (plib_cpv, plib_counter, plib_paths) in \
4030 plib_registry._data.items():
4034 cp, slot = plib_cps.split(":", 1)
4037 counter = plib_counter
4038 has_registry_entry = True
4041 if not has_registry_entry:
4044 remaining = [f for f in plib_dict[cpv] if f not in paths]
4045 plib_registry.register(cpv, slot, counter, remaining)
4047 plib_registry.store()
4049 plib_registry.unlock()
4050 self.vartree.dbapi._fs_unlock()
4052 self.vartree.dbapi._add(self)
4053 contents = self.getcontents()
4056 self.settings["PORTAGE_UPDATE_ENV"] = \
4057 os.path.join(self.dbpkgdir, "environment.bz2")
4058 self.settings.backup_changes("PORTAGE_UPDATE_ENV")
4060 phase = EbuildPhase(background=False, phase="postinst",
4061 scheduler=self._scheduler, settings=self.settings)
4065 showMessage(_(">>> %s merged.\n") % self.mycpv)
4067 self.settings.pop("PORTAGE_UPDATE_ENV", None)
4070 # It's stupid to bail out here, so keep going regardless of
4071 # phase return code.
4072 showMessage(_("!!! FAILED postinst: ")+str(a)+"\n",
4073 level=logging.ERROR, noiselevel=-1)
4075 #update environment settings, library paths. DO NOT change symlinks.
4077 target_root=self.settings['ROOT'], prev_mtimes=prev_mtimes,
4078 contents=contents, env=self.settings,
4079 writemsg_level=self._display_merge, vardbapi=self.vartree.dbapi)
4081 # For gcc upgrades, preserved libs have to be removed after the
4082 # the library path has been updated.
4083 self._prune_plib_registry()
4087 	def _new_backup_path(self, p):
# Return a backup destination for path p that does not exist yet, formed
# by appending a numeric '.backup.NNNN' suffix to p.
4089 		This works for any type of path, such as a regular file, symlink,
4090 		or directory. The parent directory is assumed to exist.
4091 		The returned filename is of the form p + '.backup.' + x, where
4092 		x guarantees that the returned path does not exist yet.
# Zero-pad the counter to four digits so successive backups sort
# lexicographically (e.g. '.backup.0001', '.backup.0002', ...).
4099 			backup_p = p + '.backup.' + str(x).rjust(4, '0')
4107 	def _merge_contents(self, srcroot, destroot, cfgfiledict):
# Merge the image at srcroot into destroot via repeated mergeme() passes,
# recording every merged entry in ${dbtmpdir}/CONTENTS. cfgfiledict is the
# config-protect md5 memory ("confmem"); it is written back to the vardb's
# conf_mem_file only if mergeme() modified it. Returns a nonzero/true value
# on failure (propagated from mergeme), per the visible early-return checks.
# Keep a snapshot so we can detect below whether mergeme() changed confmem.
4109 		cfgfiledict_orig = cfgfiledict.copy()
4111 		# open CONTENTS file (possibly overwriting old one) for recording
4112 		# Use atomic_ofstream for automatic coercion of raw bytes to
4113 		# unicode, in order to prevent TypeError when writing raw bytes
4114 		# to TextIOWrapper with python2.
4115 		outfile = atomic_ofstream(_unicode_encode(
4116 			os.path.join(self.dbtmpdir, 'CONTENTS'),
4117 			encoding=_encodings['fs'], errors='strict'),
4118 			mode='w', encoding=_encodings['repo.content'],
4119 			errors='backslashreplace')
4121 		# Don't bump mtimes on merge since some application require
4122 		# preservation of timestamps. This means that the unmerge phase must
4123 		# check to see if file belongs to an installed instance in the same
4127 		# set umask to 0 for merging; back up umask, save old one in prevmask (since this is a global change)
4128 		prevmask = os.umask(0)
4131 		# we do a first merge; this will recurse through all files in our srcroot but also build up a
4132 		# "second hand" of symlinks to merge later
4133 		if self.mergeme(srcroot, destroot, outfile, secondhand,
4134 			self.settings["EPREFIX"].lstrip(os.sep), cfgfiledict, mymtime):
4137 		# now, it's time for dealing our second hand; we'll loop until we can't merge anymore. The rest are
4138 		# broken symlinks. We'll merge them too.
# Loop until a pass makes no progress (secondhand length stops shrinking).
4140 		while len(secondhand) and len(secondhand)!=lastlen:
4141 			# clear the thirdhand. Anything from our second hand that
4142 			# couldn't get merged will be added to thirdhand.
4145 			if self.mergeme(srcroot, destroot, outfile, thirdhand,
4146 				secondhand, cfgfiledict, mymtime):
4150 			lastlen = len(secondhand)
4152 			# our thirdhand now becomes our secondhand. It's ok to throw
4153 			# away secondhand since thirdhand contains all the stuff that
4154 			# couldn't be merged.
4155 			secondhand = thirdhand
4158 			# force merge of remaining symlinks (broken or circular; oh well)
# Passing secondhand=None puts mergeme() into "force" mode: leftover
# (broken or circular) symlinks are merged as-is instead of deferred.
4159 			if self.mergeme(srcroot, destroot, outfile, None,
4160 				secondhand, cfgfiledict, mymtime):
4166 		#if we opened it, close it
4170 		# write out our collection of md5sums
4171 		if cfgfiledict != cfgfiledict_orig:
# "IGNORE" is an in-memory flag (--noconfmem / downgrade behavior), not a
# real confmem entry, so strip it before persisting.
4172 			cfgfiledict.pop("IGNORE", None)
4174 				writedict(cfgfiledict, self.vartree.dbapi._conf_mem_file)
4175 			except InvalidLocation:
# The private vardb directory may not exist yet; create it and retry once.
4176 				self.settings._init_dirs()
4177 				writedict(cfgfiledict, self.vartree.dbapi._conf_mem_file)
4181 	def mergeme(self, srcroot, destroot, outfile, secondhand, stufftomerge, cfgfiledict, thismtime):
4184 		This function handles actual merging of the package contents to the livefs.
4185 		It also handles config protection.
4187 		@param srcroot: Where are we copying files from (usually ${D})
4188 		@type srcroot: String (Path)
4189 		@param destroot: Typically ${ROOT}
4190 		@type destroot: String (Path)
4191 		@param outfile: File to log operations to
4192 		@type outfile: File Object
4193 		@param secondhand: A set of items to merge in pass two (usually
4194 		symlinks that point to non-existing files that may get merged later)
4195 		@type secondhand: List
4196 		@param stufftomerge: Either a directory to merge, or a list of items.
4197 		@type stufftomerge: String or List
4198 		@param cfgfiledict: { File:mtime } mapping for config_protected files
4199 		@type cfgfiledict: Dictionary
4200 		@param thismtime: The current time (typically long(time.time())
4201 		@type thismtime: Long
4202 		@rtype: None or Boolean
# Both message helpers route through the merge display so output is
# captured/forwarded correctly during a scheduled merge.
4209 		showMessage = self._display_merge
4210 		writemsg = self._display_merge
# Normalize both roots to exactly one trailing separator so later
# startswith()/len() prefix arithmetic is reliable.
4215 		srcroot = normalize_path(srcroot).rstrip(sep) + sep
4216 		destroot = normalize_path(destroot).rstrip(sep) + sep
4217 		calc_prelink = "prelink-checksums" in self.settings.features
4219 		protect_if_modified = \
4220 			"config-protect-if-modified" in self.settings.features and \
4221 			self._installed_instance is not None
4223 		# this is supposed to merge a list of files. There will be 2 forms of argument passing.
4224 		if isinstance(stufftomerge, basestring):
4225 			#A directory is specified. Figure out protection paths, listdir() it and process it.
4226 			mergelist = os.listdir(join(srcroot, stufftomerge))
4227 			offset = stufftomerge
4229 			mergelist = stufftomerge
4232 		for i, x in enumerate(mergelist):
4234 			mysrc = join(srcroot, offset, x)
4235 			mydest = join(destroot, offset, x)
4236 			# myrealdest is mydest without the $ROOT prefix (makes a difference if ROOT!="/")
4237 			myrealdest = join(sep, offset, x)
4238 			# stat file once, test using S_* macros many times (faster that way)
4239 			mystat = os.lstat(mysrc)
4240 			mymode = mystat[stat.ST_MODE]
4241 			# handy variables; mydest is the target object on the live filesystems;
4242 			# mysrc is the source object in the temporary install dir
4244 				mydstat = os.lstat(mydest)
4245 				mydmode = mydstat.st_mode
4246 			except OSError as e:
4247 				if e.errno != errno.ENOENT:
4250 				#dest file doesn't exist
4254 			if stat.S_ISLNK(mymode):
4255 				# we are merging a symbolic link
4256 				# The file name of mysrc and the actual file that it points to
4257 				# will have earlier been forcefully converted to the 'merge'
4258 				# encoding if necessary, but the content of the symbolic link
4259 				# may need to be forcefully converted here.
4260 				myto = _os.readlink(_unicode_encode(mysrc,
4261 					encoding=_encodings['merge'], errors='strict'))
4263 					myto = _unicode_decode(myto,
4264 						encoding=_encodings['merge'], errors='strict')
4265 				except UnicodeDecodeError:
# Undecodable link target: round-trip through ascii/backslashreplace to
# produce a printable, deterministic substitute target.
4266 					myto = _unicode_decode(myto, encoding=_encodings['merge'],
4268 					myto = _unicode_encode(myto, encoding='ascii',
4269 						errors='backslashreplace')
4270 					myto = _unicode_decode(myto, encoding=_encodings['merge'],
4273 					os.symlink(myto, mysrc)
4275 				# Pass in the symlink target in order to bypass the
4276 				# os.readlink() call inside abssymlink(), since that
4277 				# call is unsafe if the merge encoding is not ascii
4278 				# or utf_8 (see bug #382021).
4279 				myabsto = abssymlink(mysrc, target=myto)
4281 				if myabsto.startswith(srcroot):
4282 					myabsto = myabsto[len(srcroot):]
4283 				myabsto = myabsto.lstrip(sep)
4284 				if self.settings and self.settings["D"]:
4285 					if myto.startswith(self.settings["D"]):
# Strip the ${D} image prefix so the installed link points into the
# live filesystem rather than back into the temporary install dir.
4286 						myto = myto[len(self.settings["D"]):]
4287 				# myrealto contains the path of the real file to which this symlink points.
4288 				# we can simply test for existence of this file to see if the target has been merged yet
4289 				myrealto = normalize_path(os.path.join(destroot, myabsto))
4292 					if stat.S_ISDIR(mydmode):
4293 						# we can't merge a symlink over a directory
# Merge the symlink under a unique backup name instead, and warn via elog.
4294 						newdest = self._new_backup_path(mydest)
4297 						msg.append(_("Installation of a symlink is blocked by a directory:"))
4298 						msg.append(" '%s'" % mydest)
4299 						msg.append(_("This symlink will be merged with a different name:"))
4300 						msg.append(" '%s'" % newdest)
4302 						self._eerror("preinst", msg)
4305 					elif not stat.S_ISLNK(mydmode):
4306 						if os.path.exists(mysrc) and stat.S_ISDIR(os.stat(mysrc)[stat.ST_MODE]):
4307 							# Kill file blocking installation of symlink to dir #71787
4309 						elif self.isprotected(mydest):
4310 							# Use md5 of the target in ${D} if it exists...
4312 								newmd5 = perform_md5(join(srcroot, myabsto))
4313 							except FileNotFound:
4314 								# Maybe the target is merged already.
4316 									newmd5 = perform_md5(myrealto)
4317 								except FileNotFound:
4319 							mydest = new_protect_filename(mydest, newmd5=newmd5)
4321 				# if secondhand is None it means we're operating in "force" mode and should not create a second hand.
4322 				if (secondhand != None) and (not os.path.exists(myrealto)):
4323 					# either the target directory doesn't exist yet or the target file doesn't exist -- or
4324 					# the target is a broken symlink. We will add this file to our "second hand" and merge
4326 					secondhand.append(mysrc[len(srcroot):])
4328 				# unlinking no longer necessary; "movefile" will overwrite symlinks atomically and correctly
4329 				mymtime = movefile(mysrc, mydest, newmtime=thismtime,
4330 					sstat=mystat, mysettings=self.settings,
4331 					encoding=_encodings['merge'])
4333 					showMessage(">>> %s -> %s\n" % (mydest, myto))
# CONTENTS record format for symlinks: "sym <dest> -> <target> <mtime>".
4334 					outfile.write("sym "+myrealdest+" -> "+myto+" "+str(mymtime)+"\n")
4336 					showMessage(_("!!! Failed to move file.\n"),
4337 						level=logging.ERROR, noiselevel=-1)
4338 					showMessage("!!! %s -> %s\n" % (mydest, myto),
4339 						level=logging.ERROR, noiselevel=-1)
4341 			elif stat.S_ISDIR(mymode):
4342 				# we are merging a directory
4344 					# destination exists
4347 						# Save then clear flags on dest.
# BSD file flags (chflags) can block modification; clear them for the
# duration of the merge and restore afterward.
4348 						dflags = mydstat.st_flags
4350 							bsd_chflags.lchflags(mydest, 0)
4352 					if not os.access(mydest, os.W_OK):
4353 						pkgstuff = pkgsplit(self.pkg)
4354 						writemsg(_("\n!!! Cannot write to '%s'.\n") % mydest, noiselevel=-1)
4355 						writemsg(_("!!! Please check permissions and directories for broken symlinks.\n"))
4356 						writemsg(_("!!! You may start the merge process again by using ebuild:\n"))
4357 						writemsg("!!! ebuild "+self.settings["PORTDIR"]+"/"+self.cat+"/"+pkgstuff[0]+"/"+self.pkg+".ebuild merge\n")
4358 						writemsg(_("!!! And finish by running this: env-update\n\n"))
4361 					if stat.S_ISDIR(mydmode) or \
4362 						(stat.S_ISLNK(mydmode) and os.path.isdir(mydest)):
4363 						# a symlink to an existing directory will work for us; keep it:
4364 						showMessage("--- %s/\n" % mydest)
4366 							bsd_chflags.lchflags(mydest, dflags)
4368 						# a non-directory and non-symlink-to-directory. Won't work for us. Move out of the way.
4369 						backup_dest = self._new_backup_path(mydest)
4372 						msg.append(_("Installation of a directory is blocked by a file:"))
4373 						msg.append(" '%s'" % mydest)
4374 						msg.append(_("This file will be renamed to a different name:"))
4375 						msg.append(" '%s'" % backup_dest)
4377 						self._eerror("preinst", msg)
4378 						if movefile(mydest, backup_dest,
4379 							mysettings=self.settings,
4380 							encoding=_encodings['merge']) is None:
4382 						showMessage(_("bak %s %s.backup\n") % (mydest, mydest),
4383 							level=logging.ERROR, noiselevel=-1)
4384 						#now create our directory
4386 							if self.settings.selinux_enabled():
4387 								_selinux_merge.mkdir(mydest, mysrc)
4390 						except OSError as e:
4391 							# Error handling should be equivalent to
4392 							# portage.util.ensure_dirs() for cases
4394 							if e.errno in (errno.EEXIST,):
4396 							elif os.path.isdir(mydest):
4403 							bsd_chflags.lchflags(mydest, dflags)
# Propagate mode and ownership from the source lstat (index 0 = st_mode,
# 4 = st_uid, 5 = st_gid).
4404 						os.chmod(mydest, mystat[0])
4405 						os.chown(mydest, mystat[4], mystat[5])
4406 						showMessage(">>> %s/\n" % mydest)
4409 					#destination doesn't exist
4410 						if self.settings.selinux_enabled():
4411 							_selinux_merge.mkdir(mydest, mysrc)
4414 					except OSError as e:
4415 						# Error handling should be equivalent to
4416 						# portage.util.ensure_dirs() for cases
4418 						if e.errno in (errno.EEXIST,):
4420 						elif os.path.isdir(mydest):
4425 					os.chmod(mydest, mystat[0])
4426 					os.chown(mydest, mystat[4], mystat[5])
4427 					showMessage(">>> %s/\n" % mydest)
4428 				outfile.write("dir "+myrealdest+"\n")
4429 				# recurse and merge this directory
# Recurse with the subdirectory as the new string-form stufftomerge.
4430 				if self.mergeme(srcroot, destroot, outfile, secondhand,
4431 					join(offset, x), cfgfiledict, thismtime):
4433 			elif stat.S_ISREG(mymode):
4434 				# we are merging a regular file
4435 				mymd5 = perform_md5(mysrc, calc_prelink=calc_prelink)
4436 				# calculate config file protection stuff
4437 				mydestdir = os.path.dirname(mydest)
4441 					protected = self.isprotected(mydest)
4443 					# destination file exists
4445 						if stat.S_ISDIR(mydmode):
4446 							# install of destination is blocked by an existing directory with the same name
4447 							newdest = self._new_backup_path(mydest)
4450 							msg.append(_("Installation of a regular file is blocked by a directory:"))
4451 							msg.append(" '%s'" % mydest)
4452 							msg.append(_("This file will be merged with a different name:"))
4453 							msg.append(" '%s'" % newdest)
4455 							self._eerror("preinst", msg)
4458 						elif stat.S_ISREG(mydmode) or (stat.S_ISLNK(mydmode) and os.path.exists(mydest) and stat.S_ISREG(os.stat(mydest)[stat.ST_MODE])):
4459 							# install of destination is blocked by an existing regular file,
4460 							# or by a symlink to an existing regular file;
4461 							# now, config file management may come into play.
4462 							# we only need to tweak mydest if cfg file management is in play.
4464 								destmd5 = perform_md5(mydest, calc_prelink=calc_prelink)
4465 							if protect_if_modified:
# config-protect-if-modified: if the installed instance recorded this
# exact md5 for the file, the user never modified it, so protection
# can be skipped for this entry.
4467 									self._installed_instance._match_contents(myrealdest)
4469 									inst_info = self._installed_instance.getcontents()[contents_key]
4470 									if inst_info[0] == "obj" and inst_info[2] == destmd5:
4474 								# we have a protection path; enable config file management.
4476 								if mymd5 == destmd5:
4477 									#file already in place; simply update mtimes of destination
4480 									if mymd5 == cfgfiledict.get(myrealdest, [None])[0]:
4481 										""" An identical update has previously been
4482 										merged.  Skip it unless the user has chosen
# cfgfiledict["IGNORE"] is 1 when confmem is disabled (--noconfmem or
# a downgrade); it doubles here as the moveme/cfgprot decision.
4484 										moveme = cfgfiledict["IGNORE"]
4485 										cfgprot = cfgfiledict["IGNORE"]
4488 											mymtime = mystat[stat.ST_MTIME]
4493 										# Merging a new file, so update confmem.
4494 										cfgfiledict[myrealdest] = [mymd5]
4495 								elif destmd5 == cfgfiledict.get(myrealdest, [None])[0]:
4496 									"""A previously remembered update has been
4497 									accepted, so it is removed from confmem."""
4498 									del cfgfiledict[myrealdest]
# Redirect the merge to a ._cfg#### protected filename.
4501 								mydest = new_protect_filename(mydest, newmd5=mymd5)
4503 				# whether config protection or not, we merge the new file the
4504 				# same way. Unless moveme=0 (blocking directory)
4506 					# Create hardlinks only for source files that already exist
4507 					# as hardlinks (having identical st_dev and st_ino).
4508 					hardlink_key = (mystat.st_dev, mystat.st_ino)
4510 					hardlink_candidates = self._hardlink_merge_map.get(hardlink_key)
4511 					if hardlink_candidates is None:
4512 						hardlink_candidates = []
4513 						self._hardlink_merge_map[hardlink_key] = hardlink_candidates
4515 					mymtime = movefile(mysrc, mydest, newmtime=thismtime,
4516 						sstat=mystat, mysettings=self.settings,
4517 						hardlink_candidates=hardlink_candidates,
4518 						encoding=_encodings['merge'])
# Remember this destination so later links to the same inode can be
# created as hardlinks of it.
4521 					hardlink_candidates.append(mydest)
# CONTENTS record format for regular files: "obj <dest> <md5> <mtime>".
4525 				outfile.write("obj "+myrealdest+" "+mymd5+" "+str(mymtime)+"\n")
4526 				showMessage("%s %s\n" % (zing,mydest))
4528 				# we are merging a fifo or device node
4531 					# destination doesn't exist
4532 					if movefile(mysrc, mydest, newmtime=thismtime,
4533 						sstat=mystat, mysettings=self.settings,
4534 						encoding=_encodings['merge']) is not None:
4538 				if stat.S_ISFIFO(mymode):
# CONTENTS records for special files carry no md5/mtime fields.
4539 					outfile.write("fif %s\n" % myrealdest)
4541 					outfile.write("dev %s\n" % myrealdest)
4542 				showMessage(zing + " " + mydest + "\n")
4544 def merge(self, mergeroot, inforoot, myroot=None, myebuild=None, cleanup=0,
4545 mydbapi=None, prev_mtimes=None, counter=None):
4547 @param myroot: ignored, self._eroot is used instead
# NOTE(review): several original lines are elided from this listing
# (docstring delimiters, lock acquisition, try/finally structure,
# phase.wait() calls); the comments below describe only what is visible.
# Serialize merges through the vdb unless parallel-install is enabled.
4551 parallel_install = "parallel-install" in self.settings.features
4552 if not parallel_install:
# (lock acquisition elided -- presumably self.lockdb(); confirm)
4554 self.vartree.dbapi._bump_mtime(self.mycpv)
4555 if self._scheduler is None:
4556 self._scheduler = PollScheduler().sched_iface
# treewalk() performs the actual install into the live filesystem.
4558 retval = self.treewalk(mergeroot, myroot, inforoot, myebuild,
4559 cleanup=cleanup, mydbapi=mydbapi, prev_mtimes=prev_mtimes,
4562 # If PORTAGE_BUILDDIR doesn't exist, then it probably means
4563 # fail-clean is enabled, and the success/die hooks have
4564 # already been called by EbuildPhase.
4565 if os.path.isdir(self.settings['PORTAGE_BUILDDIR']):
4567 if retval == os.EX_OK:
4568 phase = 'success_hooks'
# (else branch elided -- presumably phase = 'die_hooks'; confirm)
4572 ebuild_phase = MiscFunctionsProcess(
4573 background=False, commands=[phase],
4574 scheduler=self._scheduler, settings=self.settings)
4575 ebuild_phase.start()
# (wait on the spawned phase elided)
4577 self._elog_process()
# Run the ebuild "clean" phase unless FEATURES=noclean, but only on
# success (or unconditionally when fail-clean is enabled).
4579 if 'noclean' not in self.settings.features and \
4580 (retval == os.EX_OK or \
4581 'fail-clean' in self.settings.features):
4582 if myebuild is None:
4583 myebuild = os.path.join(inforoot, self.pkg + ".ebuild")
4585 doebuild_environment(myebuild, "clean",
4586 settings=self.settings, db=mydbapi)
4587 phase = EbuildPhase(background=False, phase="clean",
4588 scheduler=self._scheduler, settings=self.settings)
# (phase.start()/wait elided)
# Cleanup performed regardless of merge outcome.
4592 self.settings.pop('REPLACING_VERSIONS', None)
4593 if self.vartree.dbapi._linkmap is None:
4594 # preserve-libs is entirely disabled
4597 self.vartree.dbapi._linkmap._clear_cache()
4598 self.vartree.dbapi._bump_mtime(self.mycpv)
4599 if not parallel_install:
def getstring(self, name):
	"""Return the contents of db file *name* with all whitespace
	collapsed to single spaces, or "" when the file does not exist."""
	if not os.path.exists(self.dbdir+"/"+name):
		return ""
	# Use a context manager so the file handle is closed promptly
	# instead of leaking until garbage collection (the original
	# io.open(...).read() chain never closed it).
	with io.open(_unicode_encode(os.path.join(self.dbdir, name),
		encoding=_encodings['fs'], errors='strict'),
		mode='r', encoding=_encodings['repo.content'],
		errors='replace') as f:
		mydata = f.read().split()
	return " ".join(mydata)
def copyfile(self, fname):
	"""Copy *fname* into this package's database directory, keeping
	its basename."""
	dest = self.dbdir + "/" + os.path.basename(fname)
	shutil.copyfile(fname, dest)
def getfile(self, fname):
	"""Return the full text contents of db file *fname*, or "" when
	the file does not exist."""
	if not os.path.exists(self.dbdir+"/"+fname):
		return ""
	# Context manager closes the handle; the original returned
	# io.open(...).read() and leaked it until garbage collection.
	with io.open(_unicode_encode(os.path.join(self.dbdir, fname),
		encoding=_encodings['fs'], errors='strict'),
		mode='r', encoding=_encodings['repo.content'],
		errors='replace') as f:
		return f.read()
4625 def setfile(self,fname,data):
# Atomically write *data* to db file *fname*.  environment.bz2 (already
# bz2-compressed) and any non-string payload are written in binary
# mode; plain text goes through the repo.content encoding.
# (kwargs dict initialization is elided from this listing.)
4627 if fname == 'environment.bz2' or not isinstance(data, basestring):
4628 kwargs['mode'] = 'wb'
# (else: line elided -- the text-mode branch follows)
4630 kwargs['mode'] = 'w'
4631 kwargs['encoding'] = _encodings['repo.content']
4632 write_atomic(os.path.join(self.dbdir, fname), data, **kwargs)
def getelements(self, ename):
	"""Return the whitespace-separated tokens of db file *ename* as a
	list of strings, or [] when the file does not exist."""
	if not os.path.exists(self.dbdir+"/"+ename):
		return []
	# Context manager: don't leak the file handle.
	with io.open(_unicode_encode(
		os.path.join(self.dbdir, ename),
		encoding=_encodings['fs'], errors='strict'),
		mode='r', encoding=_encodings['repo.content'],
		errors='replace') as f:
		mylines = f.readlines()
	# str.split() ignores a trailing newline by itself; unlike the old
	# x[:-1] slice it does not eat the last character of a final line
	# that happens to lack a newline terminator.
	return [y for x in mylines for y in x.split()]
def setelements(self, mylist, ename):
	"""Write each element of *mylist* on its own line in db file
	*ename*, using the repo.content encoding with backslashreplace
	error handling (matching the reader side)."""
	# A context manager guarantees the handle is closed even if a
	# write raises partway through, which a trailing explicit
	# close() call would not.
	with io.open(_unicode_encode(
		os.path.join(self.dbdir, ename),
		encoding=_encodings['fs'], errors='strict'),
		mode='w', encoding=_encodings['repo.content'],
		errors='backslashreplace') as myelement:
		for x in mylist:
			myelement.write(_unicode_decode(x+"\n"))
def isregular(self):
	"""True if this is a regular package: its db directory contains a
	CATEGORY file.  (A dblink can be virtual *and* regular.)"""
	category_file = os.path.join(self.dbdir, "CATEGORY")
	return os.path.exists(category_file)
4662 def merge(mycat, mypkg, pkgloc, infloc,
4663 myroot=None, settings=None, myebuild=None,
4664 mytree=None, mydbapi=None, vartree=None, prev_mtimes=None, blockers=None,
# Module-level convenience wrapper: merge a single binary/image package
# by spawning a MergeProcess and waiting for it.
# NOTE(review): the tail of the signature (a scheduler parameter is
# referenced below) and the docstring delimiters are elided from this
# listing.
4667 @param myroot: ignored, settings['EROOT'] is used instead
4670 if settings is None:
4671 raise TypeError("settings argument is required")
# Fail fast when EROOT is not writable by this process.
4672 if not os.access(settings['EROOT'], os.W_OK):
4673 writemsg(_("Permission denied: access('%s', W_OK)\n") % settings['EROOT'],
# (writemsg keyword arguments and the error return are elided)
4676 background = (settings.get('PORTAGE_BACKGROUND') == '1')
4677 merge_task = MergeProcess(
4678 mycat=mycat, mypkg=mypkg, settings=settings,
4679 treetype=mytree, vartree=vartree,
4680 scheduler=(scheduler or PollScheduler().sched_iface),
4681 background=background, blockers=blockers, pkgloc=pkgloc,
4682 infloc=infloc, myebuild=myebuild, mydbapi=mydbapi,
4683 prev_mtimes=prev_mtimes, logfile=settings.get('PORTAGE_LOG_FILE'))
# (merge_task.start() elided) -- wait() blocks until the child merge
# process finishes and yields its exit status.
4685 retcode = merge_task.wait()
4688 def unmerge(cat, pkg, myroot=None, settings=None,
4689 mytrimworld=None, vartree=None,
4690 ldpath_mtimes=None, scheduler=None):
# Module-level convenience wrapper: unmerge one installed package via a
# dblink.  (Docstring delimiters are elided from this listing.)
4692 @param myroot: ignored, settings['EROOT'] is used instead
4693 @param mytrimworld: ignored
4696 if settings is None:
4697 raise TypeError("settings argument is required")
4698 mylink = dblink(cat, pkg, settings=settings, treetype="vartree",
4699 vartree=vartree, scheduler=scheduler)
4700 vartree = mylink.vartree
# Serialize against concurrent merges unless parallel-install is on.
4701 parallel_install = "parallel-install" in settings.features
4702 if not parallel_install:
# (lock acquisition and the exists() guard are elided)
4706 retval = mylink.unmerge(ldpath_mtimes=ldpath_mtimes)
4707 if retval == os.EX_OK:
# (dblink deletion and surrounding try/finally are elided)
4716 if vartree.dbapi._linkmap is None:
4717 # preserve-libs is entirely disabled
4720 vartree.dbapi._linkmap._clear_cache()
4721 if not parallel_install:
# (lock release elided)
def write_contents(contents, root, f):
	"""
	Write contents to any file like object. The file will be left open.
	"""
	# Strip root (minus its trailing slash) so entries are recorded
	# relative to the filesystem root, e.g. "/usr/bin/foo".
	prefix_len = len(root) - 1
	for path in sorted(contents):
		fields = contents[path]
		kind = fields[0]
		rel_path = path[prefix_len:]
		if kind == "obj":
			# obj entries carry (type, mtime, md5); written md5-first.
			_, mtime, md5sum = fields
			record = "%s %s %s %s\n" % (kind, rel_path, md5sum, mtime)
		elif kind == "sym":
			# sym entries carry (type, mtime, link target).
			_, mtime, target = fields
			record = "%s %s -> %s %s\n" % (kind, rel_path, target, mtime)
		else:
			# dir, dev and fif entries have no extra metadata.
			record = "%s %s\n" % (kind, rel_path)
		f.write(record)
4745 def tar_contents(contents, root, tar, protect=None, onProgress=None):
# Add every path recorded in *contents* to tarfile *tar*, relative to
# *root*.  protect(path) -> True substitutes an empty placeholder for a
# regular file; onProgress(maxval, curval) reports progress.
# NOTE(review): many structural lines (try: headers, the per-path loop
# header, live_path computation, else:/finally: branches) are elided
# from this listing; comments below cover only what is visible.
4747 encoding = _encodings['merge']
# Probe whether the recorded paths encode cleanly with the merge
# encoding; on UnicodeEncodeError fall back to the fs encoding.
4752 encoding=_encodings['merge'],
4754 except UnicodeEncodeError:
4755 # The package appears to have been merged with a
4756 # different value of sys.getfilesystemencoding(),
4757 # so fall back to utf_8 if appropriate.
4761 encoding=_encodings['fs'],
4763 except UnicodeEncodeError:
# (inner error handling elided)
4767 encoding = _encodings['fs']
4769 tar.encoding = encoding
# Normalize root to exactly one trailing separator.
4770 root = normalize_path(root).rstrip(os.path.sep) + os.path.sep
4772 maxval = len(contents)
4775 onProgress(maxval, 0)
4776 paths = list(contents)
# (per-path loop header and live_path setup elided)
4781 lst = os.lstat(path)
4782 except OSError as e:
4783 if e.errno != errno.ENOENT:
# (re-raise elided) -- files that vanished are skipped, with progress
# still reported below.
4787 onProgress(maxval, curval)
4789 contents_type = contents[path][0]
# Archive member names are stored relative to root as "./...".
4790 if path.startswith(root):
4791 arcname = "./" + path[len(root):]
4793 raise ValueError("invalid root argument: '%s'" % root)
4795 if 'dir' == contents_type and \
4796 not stat.S_ISDIR(lst.st_mode) and \
4797 os.path.isdir(live_path):
4798 # Even though this was a directory in the original ${D}, it exists
4799 # as a symlink to a directory in the live filesystem. It must be
4800 # recorded as a real directory in the tar file to ensure that tar
4801 # can properly extract it's children.
4802 live_path = os.path.realpath(live_path)
4803 lst = os.lstat(live_path)
4805 # Since os.lstat() inside TarFile.gettarinfo() can trigger a
4806 # UnicodeEncodeError when python has something other than utf_8
4807 # return from sys.getfilesystemencoding() (as in bug #388773),
4808 # we implement the needed functionality here, using the result
4809 # of our successful lstat call. An alternative to this would be
4810 # to pass in the fileobj argument to TarFile.gettarinfo(), so
4811 # that it could use fstat instead of lstat. However, that would
4812 # have the unwanted effect of dereferencing symlinks.
# Build the TarInfo by hand from the successful lstat result.
4814 tarinfo = tar.tarinfo()
4815 tarinfo.name = arcname
4816 tarinfo.mode = lst.st_mode
4817 tarinfo.uid = lst.st_uid
4818 tarinfo.gid = lst.st_gid
4820 tarinfo.mtime = lst.st_mtime
4821 tarinfo.linkname = ""
4822 if stat.S_ISREG(lst.st_mode):
# Hardlink handling: a second sighting of the same (ino, dev) becomes a
# LNKTYPE member pointing at the first archived name.
4823 inode = (lst.st_ino, lst.st_dev)
4824 if (lst.st_nlink > 1 and
4825 inode in tar.inodes and
4826 arcname != tar.inodes[inode]):
4827 tarinfo.type = tarfile.LNKTYPE
4828 tarinfo.linkname = tar.inodes[inode]
# (else: branch elided -- the first sighting records a regular member)
4830 tar.inodes[inode] = arcname
4831 tarinfo.type = tarfile.REGTYPE
4832 tarinfo.size = lst.st_size
4833 elif stat.S_ISDIR(lst.st_mode):
4834 tarinfo.type = tarfile.DIRTYPE
4835 elif stat.S_ISLNK(lst.st_mode):
4836 tarinfo.type = tarfile.SYMTYPE
4837 tarinfo.linkname = os.readlink(live_path)
# Resolve numeric ids to names; the KeyError fallbacks are elided here.
4841 tarinfo.uname = pwd.getpwuid(tarinfo.uid)[0]
4845 tarinfo.gname = grp.getgrgid(tarinfo.gid)[0]
4849 if stat.S_ISREG(lst.st_mode):
4850 if protect and protect(path):
4851 # Create an empty file as a place holder in order to avoid
4852 # potential collision-protect issues.
4853 f = tempfile.TemporaryFile()
4854 f.write(_unicode_encode(
4855 "# empty file because --include-config=n " + \
4856 "when `quickpkg` was used\n"))
# (f.flush()/seek(0) elided before measuring the placeholder size)
4859 tarinfo.size = os.fstat(f.fileno()).st_size
4860 tar.addfile(tarinfo, f)
# (f.close() and the else: branch opening the real file are elided)
4863 f = open(_unicode_encode(path,
4865 errors='strict'), 'rb')
4867 tar.addfile(tarinfo, f)
# (finally: f.close() elided)
# Non-regular members (dir/symlink/device) carry no file payload.
4871 tar.addfile(tarinfo)
4873 onProgress(maxval, curval)