1 # Copyright 1998-2012 Gentoo Foundation
2 # Distributed under the terms of the GNU General Public License v2
5 "vardbapi", "vartree", "dblink"] + \
6 ["write_contents", "tar_contents"]
9 portage.proxy.lazyimport.lazyimport(globals(),
10 'portage.checksum:_perform_md5_merge@perform_md5',
11 'portage.data:portage_gid,portage_uid,secpass',
12 'portage.dbapi.dep_expand:dep_expand',
13 'portage.dbapi._MergeProcess:MergeProcess',
14 'portage.dep:dep_getkey,isjustname,isvalidatom,match_from_list,' + \
15 'use_reduce,_get_slot_re',
16 'portage.eapi:_get_eapi_attrs',
17 'portage.elog:collect_ebuild_messages,collect_messages,' + \
18 'elog_process,_merge_logentries',
19 'portage.locks:lockdir,unlockdir,lockfile,unlockfile',
20 'portage.output:bold,colorize',
21 'portage.package.ebuild.doebuild:doebuild_environment,' + \
22 '_merge_unicode_error', '_spawn_phase',
23 'portage.package.ebuild.prepare_build_dirs:prepare_build_dirs',
24 'portage.package.ebuild._ipc.QueryCommand:QueryCommand',
25 'portage.update:fixdbentries',
26 'portage.util:apply_secpass_permissions,ConfigProtect,ensure_dirs,' + \
27 'writemsg,writemsg_level,write_atomic,atomic_ofstream,writedict,' + \
28 'grabdict,normalize_path,new_protect_filename',
29 'portage.util.digraph:digraph',
30 'portage.util.env_update:env_update',
31 'portage.util.listdir:dircache,listdir',
32 'portage.util.movefile:movefile',
33 'portage.util._dyn_libs.PreservedLibsRegistry:PreservedLibsRegistry',
34 'portage.util._dyn_libs.LinkageMapELF:LinkageMapELF@LinkageMap',
35 'portage.versions:best,catpkgsplit,catsplit,cpv_getkey,vercmp,' + \
36 '_pkgsplit@pkgsplit,_pkg_str',
41 from portage.const import CACHE_PATH, CONFIG_MEMORY_FILE, \
42 PORTAGE_PACKAGE_ATOM, PRIVATE_PATH, VDB_PATH
43 from portage.const import _ENABLE_DYN_LINK_MAP, _ENABLE_PRESERVE_LIBS
44 from portage.dbapi import dbapi
45 from portage.exception import CommandNotFound, \
46 InvalidData, InvalidLocation, InvalidPackageName, \
47 FileNotFound, PermissionDenied, UnsupportedAPIException
48 from portage.localization import _
50 from portage import abssymlink, _movefile, bsd_chflags
52 # This is a special version of the os module, wrapped for unicode support.
53 from portage import os
54 from portage import shutil
55 from portage import _encodings
56 from portage import _os_merge
57 from portage import _selinux_merge
58 from portage import _unicode_decode
59 from portage import _unicode_encode
61 from _emerge.EbuildBuildDir import EbuildBuildDir
62 from _emerge.EbuildPhase import EbuildPhase
63 from _emerge.emergelog import emergelog
64 from _emerge.PollScheduler import PollScheduler
65 from _emerge.MiscFunctionsProcess import MiscFunctionsProcess
66 from _emerge.SpawnProcess import SpawnProcess
73 from itertools import chain
86 import cPickle as pickle
90 if sys.hexversion >= 0x3000000:
class vardbapi(dbapi):
	"""dbapi subclass backed by the installed-packages database
	(the vdb, normally located under ${EROOT}/var/db/pkg)."""

	# Directory names inside the vdb that can never be valid categories
	# or package dirs; compiled into a single exclusion regex below.
	_excluded_dirs = ["CVS", "lost+found"]
	_excluded_dirs = [re.escape(x) for x in _excluded_dirs]
	_excluded_dirs = re.compile(r'^(\..*|-MERGING-.*|' + \
		"|".join(_excluded_dirs) + r')$')

	# Bump these to invalidate the corresponding on-disk pickle caches.
	_aux_cache_version = "1"
	_owners_cache_version = "1"

	# Number of uncached packages to trigger cache update, since
	# it's wasteful to update it for every vdb change.
	_aux_cache_threshold = 5

	# Keys matching this pattern (e.g. NEEDED.ELF.2) are cached like
	# the fixed _aux_cache_keys set.
	_aux_cache_keys_re = re.compile(r'^NEEDED\..*$')
	# Metadata keys whose values legitimately span multiple lines.
	_aux_multi_line_re = re.compile(r'^(CONTENTS|NEEDED\..*)$')
	def __init__(self, _unused_param=None, categories=None, settings=None, vartree=None):
		"""
		The categories parameter is unused since the dbapi class
		now has a categories property that is generated from the
		"""
		# Used by emerge to check whether any packages
		# have been added or removed.
		self._pkgs_changed = False

		# The _aux_cache_threshold doesn't work as designed
		# if the cache is flushed from a subprocess, so we
		# use this to avoid wasteful vdb cache updates.
		self._flush_cache_enabled = True

		#cache for category directory mtimes
		#cache for dependency checks
		#cache for cp_list results
		settings = portage.settings
		self.settings = settings

		if _unused_param is not None and _unused_param != settings['ROOT']:
			warnings.warn("The first parameter of the "
				"portage.dbapi.vartree.vardbapi"
				" constructor is now unused. Use "
				"settings['ROOT'] instead.",
				DeprecationWarning, stacklevel=2)

		self._eroot = settings['EROOT']
		self._dbroot = self._eroot + VDB_PATH
		self._conf_mem_file = self._eroot + CONFIG_MEMORY_FILE
		self._fs_lock_obj = None
		self._fs_lock_count = 0

		vartree = portage.db[settings['EROOT']]['vartree']
		self.vartree = vartree
		# Metadata keys that aux_get() caches persistently on disk.
		self._aux_cache_keys = set(
			["BUILD_TIME", "CHOST", "COUNTER", "DEPEND", "DESCRIPTION",
			"EAPI", "HOMEPAGE", "IUSE", "KEYWORDS",
			"LICENSE", "PDEPEND", "PROPERTIES", "PROVIDE", "RDEPEND",
			"repository", "RESTRICT" , "SLOT", "USE", "DEFINED_PHASES",
		self._aux_cache_obj = None
		self._aux_cache_filename = os.path.join(self._eroot,
			CACHE_PATH, "vdb_metadata.pickle")
		self._counter_path = os.path.join(self._eroot,
			CACHE_PATH, "counter")

		self._plib_registry = None
		if _ENABLE_PRESERVE_LIBS:
			self._plib_registry = PreservedLibsRegistry(settings["ROOT"],
				os.path.join(self._eroot, PRIVATE_PATH,
				"preserved_libs_registry"))

		if _ENABLE_DYN_LINK_MAP:
			self._linkmap = LinkageMap(self)
		self._owners = self._owners_db(self)

		self._cached_counter = None

		# NOTE(review): the following lines belong to the deprecated
		# `root` property; its decorator/def lines are elided in this
		# view of the file.
		warnings.warn("The root attribute of "
			"portage.dbapi.vartree.vardbapi"
			" is deprecated. Use "
			"settings['ROOT'] instead.",
			DeprecationWarning, stacklevel=3)
		return self.settings['ROOT']
	def getpath(self, mykey, filename=None):
		"""Return the vdb path for cpv/category mykey, optionally with a
		filename appended."""
		# This is an optimized hotspot, so don't use unicode-wrapped
		# os module and don't use os.path.join().
		rValue = self._eroot + VDB_PATH + _os.sep + mykey
		if filename is not None:
			# If filename is always relative, we can do just
			# rValue += _os.sep + filename
			rValue = _os.path.join(rValue, filename)

		"""
		Acquire a reentrant lock, blocking, for cooperation with concurrent
		processes. State is inherited by subprocesses, allowing subprocesses
		to reenter a lock that was acquired by a parent process. However,
		a lock can be released only by the same process that acquired it.
		"""
		self._lock_count += 1
			if self._lock is not None:
				raise AssertionError("already locked")
			# At least the parent needs to exist for the lock file.
			ensure_dirs(self._dbroot)
			self._lock = lockdir(self._dbroot)
		self._lock_count += 1

		"""
		Release a lock, decrementing the recursion level. Each unlock() call
		must be matched with a prior lock() call, or else an AssertionError
		will be raised if unlock() is called while not locked.
		"""
		if self._lock_count > 1:
			self._lock_count -= 1
			if self._lock is None:
				raise AssertionError("not locked")
			unlockdir(self._lock)

		"""
		Acquire a reentrant lock, blocking, for cooperation with concurrent
		"""
		if self._fs_lock_count < 1:
			if self._fs_lock_obj is not None:
				raise AssertionError("already locked")
				self._fs_lock_obj = lockfile(self._conf_mem_file)
			except InvalidLocation:
				# Lock dir may not exist yet; create it and retry.
				self.settings._init_dirs()
				self._fs_lock_obj = lockfile(self._conf_mem_file)
		self._fs_lock_count += 1

	def _fs_unlock(self):
		"""
		Release a lock, decrementing the recursion level.
		"""
		if self._fs_lock_count <= 1:
			if self._fs_lock_obj is None:
				raise AssertionError("not locked")
			unlockfile(self._fs_lock_obj)
			self._fs_lock_obj = None
		self._fs_lock_count -= 1

	def _bump_mtime(self, cpv):
		"""
		This is called before and after any modifications, so that consumers
		can use directory mtimes to validate caches. See bug #290428.
		"""
		base = self._eroot + VDB_PATH
		cat = catsplit(cpv)[0]
		catdir = base + _os.sep + cat

		for x in (catdir, base):
281 def cpv_exists(self, mykey, myrepo=None):
282 "Tells us whether an actual ebuild exists on disk (no masking)"
283 return os.path.exists(self.getpath(mykey))
	def cpv_counter(self, mycpv):
		"This method will grab the COUNTER. Returns a counter value."
		# NOTE(review): the try statement and the fallback return around
		# this read are elided in this view of the file.
			return long(self.aux_get(mycpv, ["COUNTER"])[0])
		except (KeyError, ValueError):
			writemsg_level(_("portage: COUNTER for %s was corrupted; " \
				"resetting to value of 0\n") % (mycpv,),
				level=logging.ERROR, noiselevel=-1)

	def cpv_inject(self, mycpv):
		"injects a real package into our on-disk database; assumes mycpv is valid and doesn't already exist"
		ensure_dirs(self.getpath(mycpv))
		counter = self.counter_tick(mycpv=mycpv)
		# write local package counter so that emerge clean does the right thing
		write_atomic(self.getpath(mycpv, filename="COUNTER"), str(counter))

	def isInjected(self, mycpv):
		# A package entry is considered injected when it carries an
		# INJECTED marker file or lacks a CONTENTS file.
		if self.cpv_exists(mycpv):
			if os.path.exists(self.getpath(mycpv, filename="INJECTED")):
			if not os.path.exists(self.getpath(mycpv, filename="CONTENTS")):
	def move_ent(self, mylist, repo_match=None):
		"""Apply a package move update: rename installed packages matching
		origcp to newcp inside the vdb. repo_match, when given, filters
		which installed instances are moved."""
		for atom in (origcp, newcp):
			if not isjustname(atom):
				raise InvalidPackageName(str(atom))
		origmatches = self.match(origcp, use_cache=0)
		for mycpv in origmatches:
				mycpv = self._pkg_str(mycpv, None)
			except (KeyError, InvalidData):
			mycpv_cp = cpv_getkey(mycpv)
			if mycpv_cp != origcp:
				# Ignore PROVIDE virtual match.
			if repo_match is not None \
				and not repo_match(mycpv.repo):

			# Use isvalidatom() to check if this move is valid for the
			# EAPI (characters allowed in package names may vary).
			if not isvalidatom(newcp, eapi=mycpv.eapi):

			mynewcpv = mycpv.replace(mycpv_cp, _unicode(newcp), 1)
			mynewcat = catsplit(newcp)[0]
			origpath = self.getpath(mycpv)
			if not os.path.exists(origpath):

			if not os.path.exists(self.getpath(mynewcat)):
				#create the directory
				ensure_dirs(self.getpath(mynewcat))
			newpath = self.getpath(mynewcpv)
			if os.path.exists(newpath):
				#dest already exists; keep this puppy where it is.
			_movefile(origpath, newpath, mysettings=self.settings)
			self._clear_pkg_cache(self._dblink(mycpv))
			self._clear_pkg_cache(self._dblink(mynewcpv))

			# We need to rename the ebuild now.
			old_pf = catsplit(mycpv)[1]
			new_pf = catsplit(mynewcpv)[1]
				os.rename(os.path.join(newpath, old_pf + ".ebuild"),
					os.path.join(newpath, new_pf + ".ebuild"))
			except EnvironmentError as e:
				# A missing ebuild is tolerated; anything else propagates.
				if e.errno != errno.ENOENT:
			write_atomic(os.path.join(newpath, "PF"), new_pf+"\n")
			write_atomic(os.path.join(newpath, "CATEGORY"), mynewcat+"\n")
			fixdbentries([mylist], newpath, eapi=mycpv.eapi)

	def cp_list(self, mycp, use_cache=1):
		"""Return the sorted list of installed cpvs for category/pn mycp,
		using a per-cp cache validated against the category dir mtime."""
		mysplit=catsplit(mycp)
		if mysplit[0] == '*':
			mysplit[0] = mysplit[0][1:]
			mystat = os.stat(self.getpath(mysplit[0])).st_mtime
		if use_cache and mycp in self.cpcache:
			cpc = self.cpcache[mycp]
			cat_dir = self.getpath(mysplit[0])
			dir_list = os.listdir(cat_dir)
		except EnvironmentError as e:
			if e.errno == PermissionDenied.errno:
				raise PermissionDenied(cat_dir)
			if self._excluded_dirs.match(x) is not None:
				self.invalidentry(os.path.join(self.getpath(mysplit[0]), x))
			if ps[0] == mysplit[1]:
				returnme.append(_pkg_str(mysplit[0]+"/"+x))
		self._cpv_sort_ascending(returnme)
			self.cpcache[mycp] = [mystat, returnme[:]]
		elif mycp in self.cpcache:
			# Cache disabled for this call: drop any stale entry.
			del self.cpcache[mycp]
	def cpv_all(self, use_cache=1):
		"""
		Set use_cache=0 to bypass the portage.cachedir() cache in cases
		when the accuracy of mtime staleness checks should not be trusted
		(generally this is only necessary in critical sections that
		involve merge or unmerge of packages).
		"""
		basepath = os.path.join(self._eroot, VDB_PATH) + os.path.sep
			from portage import listdir
			# Fallback listdir that only returns subdirectories.
			def listdir(p, **kwargs):
					return [x for x in os.listdir(p) \
						if os.path.isdir(os.path.join(p, x))]
				except EnvironmentError as e:
					if e.errno == PermissionDenied.errno:
						raise PermissionDenied(p)

		for x in listdir(basepath, EmptyOnError=1, ignorecvs=1, dirsonly=1):
			if self._excluded_dirs.match(x) is not None:
			if not self._category_re.match(x):
			for y in listdir(basepath + x, EmptyOnError=1, dirsonly=1):
				if self._excluded_dirs.match(y) is not None:
				subpath = x + "/" + y
				# -MERGING- should never be a cpv, nor should files.
				if catpkgsplit(subpath) is None:
					self.invalidentry(self.getpath(subpath))
					self.invalidentry(self.getpath(subpath))
				returnme.append(subpath)

	def cp_all(self, use_cache=1):
		# Derive the set of unique category/pn keys from cpv_all().
		mylist = self.cpv_all(use_cache=use_cache)
			mysplit = catpkgsplit(y)
				self.invalidentry(self.getpath(y))
				self.invalidentry(self.getpath(y))
			d[mysplit[0]+"/"+mysplit[1]] = None

	def checkblockers(self, origdep):

	def _clear_cache(self):
		# Drop in-memory caches; _aux_cache_obj is reset so the on-disk
		# aux cache will be lazily reloaded on next access.
		self.mtdircache.clear()
		self.matchcache.clear()
		self._aux_cache_obj = None
	def _add(self, pkg_dblink):
		# Called when a package is merged: flag the vdb as changed and
		# invalidate caches for the affected package.
		self._pkgs_changed = True
		self._clear_pkg_cache(pkg_dblink)

	def _remove(self, pkg_dblink):
		# Called when a package is unmerged: flag the vdb as changed and
		# invalidate caches for the affected package.
		self._pkgs_changed = True
		self._clear_pkg_cache(pkg_dblink)
491 def _clear_pkg_cache(self, pkg_dblink):
492 # Due to 1 second mtime granularity in <python-2.5, mtime checks
493 # are not always sufficient to invalidate vardbapi caches. Therefore,
494 # the caches need to be actively invalidated here.
495 self.mtdircache.pop(pkg_dblink.cat, None)
496 self.matchcache.pop(pkg_dblink.cat, None)
497 self.cpcache.pop(pkg_dblink.mysplit[0], None)
498 dircache.pop(pkg_dblink.dbcatdir, None)
	def match(self, origdep, use_cache=1):
		"caching match function"
			origdep, mydb=self, use_cache=use_cache, settings=self.settings)
		cache_key = (mydep, mydep.unevaluated_atom)
		mykey = dep_getkey(mydep)
		mycat = catsplit(mykey)[0]
			# Cache bypass requested: discard cached state for this
			# category and match directly.
			if mycat in self.matchcache:
				del self.mtdircache[mycat]
				del self.matchcache[mycat]
			return list(self._iter_match(mydep,
				self.cp_list(mydep.cp, use_cache=use_cache)))
			curmtime = os.stat(os.path.join(self._eroot, VDB_PATH, mycat)).st_mtime
		except (IOError, OSError):
		if mycat not in self.matchcache or \
			self.mtdircache[mycat] != curmtime:
			# Category dir changed since last cached: reset.
			self.mtdircache[mycat] = curmtime
			self.matchcache[mycat] = {}
		# NOTE(review): membership is tested with mydep but stored under
		# cache_key — confirm this asymmetry is intentional (lines are
		# elided in this view).
		if mydep not in self.matchcache[mycat]:
			mymatch = list(self._iter_match(mydep,
				self.cp_list(mydep.cp, use_cache=use_cache)))
			self.matchcache[mycat][cache_key] = mymatch
		return self.matchcache[mycat][cache_key][:]
529 def findname(self, mycpv, myrepo=None):
530 return self.getpath(str(mycpv), filename=catsplit(mycpv)[1]+".ebuild")
	def flush_cache(self):
		"""If the current user has permission and the internal aux_get cache has
		been updated, save it to disk and mark it unmodified. This is called
		by emerge after it has loaded the full vdb for use in dependency
		calculations. Currently, the cache is only written if the user has
		superuser privileges (since that's required to obtain a lock), but all
		users have read access and benefit from faster metadata lookups (as
		long as at least part of the cache is still valid)."""
		if self._flush_cache_enabled and \
			self._aux_cache is not None and \
			len(self._aux_cache["modified"]) >= self._aux_cache_threshold and \
			self._owners.populate() # index any unindexed contents
			# Prune cache entries for packages no longer installed.
			valid_nodes = set(self.cpv_all())
			for cpv in list(self._aux_cache["packages"]):
				if cpv not in valid_nodes:
					del self._aux_cache["packages"][cpv]
			# "modified" is transient state; don't persist it.
			del self._aux_cache["modified"]
				f = atomic_ofstream(self._aux_cache_filename, 'wb')
				pickle.dump(self._aux_cache, f, protocol=2)
				# World-readable so unprivileged users benefit too.
				apply_secpass_permissions(
					self._aux_cache_filename, gid=portage_gid, mode=0o644)
			except (IOError, OSError) as e:
			self._aux_cache["modified"] = set()

	def _aux_cache(self):
		# Lazily load the pickled aux cache on first access.
		if self._aux_cache_obj is None:
			self._aux_cache_init()
		return self._aux_cache_obj
	def _aux_cache_init(self):
		"""Load the on-disk aux cache pickle, falling back to a fresh
		empty cache structure if it is missing, unreadable, or has an
		unrecognized version."""
		if sys.hexversion >= 0x3000000:
			# Buffered io triggers extreme performance issues in
			# Unpickler.load() (problem observed with python-3.0.1).
			# Unfortunately, performance is still poor relative to
			# python-2.x, but buffering makes it much worse.
			open_kwargs["buffering"] = 0
			f = open(_unicode_encode(self._aux_cache_filename,
				encoding=_encodings['fs'], errors='strict'),
				mode='rb', **open_kwargs)
			mypickle = pickle.Unpickler(f)
				# Disallow arbitrary global lookups during unpickling.
				mypickle.find_global = None
			except AttributeError:
				# TODO: If py3k, override Unpickler.find_class().
			aux_cache = mypickle.load()
		except (AttributeError, EOFError, EnvironmentError, ValueError, pickle.UnpicklingError) as e:
			# ENOENT/EACCES are expected (cache missing or unreadable);
			# anything else is reported.
			if isinstance(e, EnvironmentError) and \
				getattr(e, 'errno', None) in (errno.ENOENT, errno.EACCES):
				writemsg(_unicode_decode(_("!!! Error loading '%s': %s\n")) % \
					(self._aux_cache_filename, e), noiselevel=-1)

		if not aux_cache or \
			not isinstance(aux_cache, dict) or \
			aux_cache.get("version") != self._aux_cache_version or \
			not aux_cache.get("packages"):
			aux_cache = {"version": self._aux_cache_version}
			aux_cache["packages"] = {}

		# Validate the nested "owners" structure; rebuild if malformed.
		owners = aux_cache.get("owners")
		if owners is not None:
			if not isinstance(owners, dict):
			elif "version" not in owners:
			elif owners["version"] != self._owners_cache_version:
			elif "base_names" not in owners:
			elif not isinstance(owners["base_names"], dict):
				"version" : self._owners_cache_version
			aux_cache["owners"] = owners

		aux_cache["modified"] = set()
		self._aux_cache_obj = aux_cache
	def aux_get(self, mycpv, wants, myrepo = None):
		"""This automatically caches selected keys that are frequently needed
		by emerge for dependency calculations. The cached metadata is
		considered valid if the mtime of the package directory has not changed
		since the data was cached. The cache is stored in a pickled dict
		object with the following format:

		{version:"1", "packages":{cpv1:(mtime,{k1,v1, k2,v2, ...}), cpv2...}}

		If an error occurs while loading the cache pickle or the version is
		unrecognized, the cache will simply be recreated from scratch (it is
		completely disposable).
		"""
		cache_these_wants = self._aux_cache_keys.intersection(wants)
			if self._aux_cache_keys_re.match(x) is not None:
				cache_these_wants.add(x)

		if not cache_these_wants:
			# Nothing cacheable requested: bypass the cache entirely.
			mydata = self._aux_get(mycpv, wants)
			return [mydata[x] for x in wants]

		cache_these = set(self._aux_cache_keys)
		cache_these.update(cache_these_wants)

		mydir = self.getpath(mycpv)
			mydir_stat = os.stat(mydir)
			if e.errno != errno.ENOENT:
			raise KeyError(mycpv)
		# Use float mtime when available.
		mydir_mtime = mydir_stat.st_mtime
		pkg_data = self._aux_cache["packages"].get(mycpv)
		pull_me = cache_these.union(wants)
		mydata = {"_mtime_" : mydir_mtime}

		cache_incomplete = False

		# Validate the structure of the cached entry before trusting it.
		if pkg_data is not None:
			if not isinstance(pkg_data, tuple) or len(pkg_data) != 2:
				cache_mtime, metadata = pkg_data
				if not isinstance(cache_mtime, (float, long, int)) or \
					not isinstance(metadata, dict):

			cache_mtime, metadata = pkg_data
			if isinstance(cache_mtime, float):
				cache_valid = cache_mtime == mydir_stat.st_mtime
				# Cache may contain integer mtime.
				cache_valid = cache_mtime == mydir_stat[stat.ST_MTIME]

			# Migrate old metadata to unicode.
			for k, v in metadata.items():
				metadata[k] = _unicode_decode(v,
					encoding=_encodings['repo.content'], errors='replace')

			mydata.update(metadata)
			pull_me.difference_update(mydata)

			# pull any needed data and cache it
			aux_keys = list(pull_me)
			mydata.update(self._aux_get(mycpv, aux_keys, st=mydir_stat))
			if not cache_valid or cache_these.difference(metadata):
				if cache_valid and metadata:
					cache_data.update(metadata)
				for aux_key in cache_these:
					cache_data[aux_key] = mydata[aux_key]
				self._aux_cache["packages"][_unicode(mycpv)] = \
					(mydir_mtime, cache_data)
				self._aux_cache["modified"].add(mycpv)

		eapi_attrs = _get_eapi_attrs(mydata['EAPI'])
		if _get_slot_re(eapi_attrs).match(mydata['SLOT']) is None:
			# Empty or invalid slot triggers InvalidAtom exceptions when
			# generating slot atoms for packages, so translate it to '0' here.
			mydata['SLOT'] = _unicode_decode('0')

		return [mydata[x] for x in wants]
	def _aux_get(self, mycpv, wants, st=None):
		"""Read the requested metadata keys for mycpv directly from its
		vdb directory (one file per key), falling back to an
		environment.bz2 search for keys with no dedicated file."""
		mydir = self.getpath(mycpv)
			if e.errno == errno.ENOENT:
				raise KeyError(mycpv)
			elif e.errno == PermissionDenied.errno:
				raise PermissionDenied(mydir)
		if not stat.S_ISDIR(st.st_mode):
			raise KeyError(mycpv)
				results[x] = st[stat.ST_MTIME]
				_unicode_encode(os.path.join(mydir, x),
					encoding=_encodings['fs'], errors='strict'),
					mode='r', encoding=_encodings['repo.content'],
			if x not in self._aux_cache_keys and \
				self._aux_cache_keys_re.match(x) is None:
			myd = _unicode_decode('')

			# Preserve \n for metadata that is known to
			# contain multiple lines.
			if self._aux_multi_line_re.match(x) is None:
				myd = " ".join(myd.split())

		# Keys without dedicated files are pulled from environment.bz2.
		env_results = self._aux_env_search(mycpv, env_keys)
			v = env_results.get(k)
				v = _unicode_decode('')
			if self._aux_multi_line_re.match(k) is None:
				v = " ".join(v.split())

		# Empty EAPI is normalized to '0' (the default EAPI).
		if results.get("EAPI") == "":
			results[_unicode_decode("EAPI")] = _unicode_decode('0')
	def _aux_env_search(self, cpv, variables):
		"""
		Search environment.bz2 for the specified variables. Returns
		a dict mapping variables to values, and any variables not
		found in the environment will not be included in the dict.
		This is useful for querying variables like ${SRC_URI} and
		${A}, which are not saved in separate files but are available
		in environment.bz2 (see bug #395463).
		"""
		env_file = self.getpath(cpv, filename="environment.bz2")
		if not os.path.isfile(env_file):
		# Prefer a dedicated bunzip2 command; otherwise use the
		# configured bzip2 command with -d.
		bunzip2_cmd = portage.util.shlex_split(
			self.settings.get("PORTAGE_BUNZIP2_COMMAND", ""))
			bunzip2_cmd = portage.util.shlex_split(
				self.settings["PORTAGE_BZIP2_COMMAND"])
			bunzip2_cmd.append("-d")
		args = bunzip2_cmd + ["-c", env_file]
			proc = subprocess.Popen(args, stdout=subprocess.PIPE)
		except EnvironmentError as e:
			if e.errno != errno.ENOENT:
			raise portage.exception.CommandNotFound(args[0])

		# Parts of the following code are borrowed from
		# filter-bash-environment.py (keep them in sync).
		var_assign_re = re.compile(r'(^|^declare\s+-\S+\s+|^declare\s+|^export\s+)([^=\s]+)=("|\')?(.*)$')
		close_quote_re = re.compile(r'(\\"|"|\')\s*$')
		def have_end_quote(quote, line):
			# True when this line terminates the quoted value opened
			# by `quote`.
			close_quote_match = close_quote_re.search(line)
			return close_quote_match is not None and \
				close_quote_match.group(1) == quote

		variables = frozenset(variables)
		for line in proc.stdout:
			line = _unicode_decode(line,
				encoding=_encodings['content'], errors='replace')
			var_assign_match = var_assign_re.match(line)
			if var_assign_match is not None:
				key = var_assign_match.group(2)
				quote = var_assign_match.group(3)
				if quote is not None:
					if have_end_quote(quote,
						line[var_assign_match.end(2)+2:]):
						value = var_assign_match.group(4)
						# Multi-line quoted value: accumulate until the
						# closing quote is seen.
						value = [var_assign_match.group(4)]
						for line in proc.stdout:
							line = _unicode_decode(line,
								encoding=_encodings['content'],
							if have_end_quote(quote, line):
						value = ''.join(value)
					# remove trailing quote and whitespace
					value = value.rstrip()[:-1]
					value = var_assign_match.group(4).rstrip()
	def aux_update(self, cpv, values):
		"""Write the given metadata key/value pairs into cpv's vdb entry,
		bumping the directory mtime before and after so caches are
		invalidated."""
		mylink = self._dblink(cpv)
		if not mylink.exists():
		self._bump_mtime(cpv)
		self._clear_pkg_cache(mylink)
		for k, v in values.items():
				# A falsy value removes the key's file entirely;
				# a missing file is tolerated.
				os.unlink(os.path.join(self.getpath(cpv), k))
			except EnvironmentError:
		self._bump_mtime(cpv)

	def counter_tick(self, myroot=None, mycpv=None):
		"""
		@param myroot: ignored, self._eroot is used instead
		"""
		return self.counter_tick_core(incrementing=1, mycpv=mycpv)
	def get_counter_tick_core(self, myroot=None, mycpv=None):
		"""
		Use this method to retrieve the counter instead
		of having to trust the value of a global counter
		file that can lead to invalid COUNTER
		generation. When cache is valid, the package COUNTER
		files are not read and we rely on the timestamp of
		the package directory to validate cache. The stat
		calls should only take a short time, so performance
		is sufficient without having to rely on a potentially
		corrupt global counter file.

		The global counter file located at
		$CACHE_PATH/counter serves to record the
		counter of the last installed package and
		it also corresponds to the total number of
		installation actions that have occurred in
		the history of this package database.

		@param myroot: ignored, self._eroot is used instead
		"""
			_unicode_encode(self._counter_path,
				encoding=_encodings['fs'], errors='strict'),
				mode='r', encoding=_encodings['repo.content'],
		except EnvironmentError as e:
			# Silently allow ENOENT since files under
			# /var/cache/ are allowed to disappear.
			if e.errno != errno.ENOENT:
				writemsg(_("!!! Unable to read COUNTER file: '%s'\n") % \
					self._counter_path, noiselevel=-1)
				writemsg("!!! %s\n" % str(e), noiselevel=-1)
				counter = long(cfile.readline().strip())
			except (OverflowError, ValueError) as e:
				writemsg(_("!!! COUNTER file is corrupt: '%s'\n") % \
					self._counter_path, noiselevel=-1)
				writemsg("!!! %s\n" % str(e), noiselevel=-1)

		if self._cached_counter == counter:
			max_counter = counter
			# We must ensure that we return a counter
			# value that is at least as large as the
			# highest one from the installed packages,
			# since having a corrupt value that is too low
			# can trigger incorrect AUTOCLEAN behavior due
			# to newly installed packages having lower
			# COUNTERs than the previous version in the
			max_counter = counter
			for cpv in self.cpv_all():
					pkg_counter = int(self.aux_get(cpv, ["COUNTER"])[0])
				except (KeyError, OverflowError, ValueError):
				if pkg_counter > max_counter:
					max_counter = pkg_counter

		return max_counter + 1

	def counter_tick_core(self, myroot=None, incrementing=1, mycpv=None):
		"""
		This method will grab the next COUNTER value and record it back
		to the global file. Note that every package install must have
		a unique counter, since a slotmove update can move two packages
		into the same SLOT and in that case it's important that both
		packages have different COUNTER metadata.

		@param myroot: ignored, self._eroot is used instead
		@param mycpv: ignored
		@return: new counter value
		"""
		counter = self.get_counter_tick_core() - 1
			# update new global counter file
				write_atomic(self._counter_path, str(counter))
			except InvalidLocation:
				self.settings._init_dirs()
				write_atomic(self._counter_path, str(counter))
			self._cached_counter = counter

			# Since we hold a lock, this is a good opportunity
			# to flush the cache. Note that this will only
			# flush the cache periodically in the main process
			# when _aux_cache_threshold is exceeded.
978 def _dblink(self, cpv):
979 category, pf = catsplit(cpv)
980 return dblink(category, pf, settings=self.settings,
981 vartree=self.vartree, treetype="vartree")
	def removeFromContents(self, pkg, paths, relative_paths=True):
		"""
		@param pkg: cpv for an installed package
		@param paths: paths of files to remove from contents
		@type paths: iterable
		"""
		if not hasattr(pkg, "getcontents"):
			# Accept either a dblink or a plain cpv string.
			pkg = self._dblink(pkg)
		root = self.settings['ROOT']
		root_len = len(root) - 1
		new_contents = pkg.getcontents().copy()

		for filename in paths:
			filename = _unicode_decode(filename,
				encoding=_encodings['content'], errors='strict')
			filename = normalize_path(filename)
				relative_filename = filename
				relative_filename = filename[root_len:]
			contents_key = pkg._match_contents(relative_filename)
				del new_contents[contents_key]

			# Bump before and after the rewrite so consumers relying on
			# directory mtimes notice the change (see _bump_mtime).
			self._bump_mtime(pkg.mycpv)
			f = atomic_ofstream(os.path.join(pkg.dbdir, "CONTENTS"))
			write_contents(new_contents, root, f)
			self._bump_mtime(pkg.mycpv)
			pkg._clear_contents_cache()
	class _owners_cache(object):
		"""
		This class maintains a hash table that serves to index package
		contents by mapping the basename of file to a list of possible
		packages that own it. This is used to optimize owner lookups
		by narrowing the search down to a smaller number of packages.
		"""
			from hashlib import md5 as _new_hash
			# Fallback for ancient pythons without hashlib.
			from md5 import new as _new_hash

		_hex_chars = int(_hash_bits / 4)

		def __init__(self, vardb):
			# NOTE(review): the body of __init__ and the header of add()
			# are elided in this view of the file.
			eroot_len = len(self._vardb._eroot)
			contents = self._vardb._dblink(cpv).getcontents()
			pkg_hash = self._hash_pkg(cpv)
				# Empty path is a code used to represent empty contents.
				self._add_path("", pkg_hash)
				self._add_path(x[eroot_len:], pkg_hash)
			self._vardb._aux_cache["modified"].add(cpv)

		def _add_path(self, path, pkg_hash):
			"""
			Empty path is a code that represents empty contents.
			"""
				name = os.path.basename(path.rstrip(os.path.sep))
			name_hash = self._hash_str(name)
			base_names = self._vardb._aux_cache["owners"]["base_names"]
			pkgs = base_names.get(name_hash)
				base_names[name_hash] = pkgs
			pkgs[pkg_hash] = None

		def _hash_str(self, s):
			# Hash a basename into a short hex digest key.
			h = self._new_hash()
			# Always use a constant utf_8 encoding here, since
			# the "default" encoding can change.
			h.update(_unicode_encode(s,
				encoding=_encodings['repo.content'],
				errors='backslashreplace'))
			h = h[-self._hex_chars:]

		def _hash_pkg(self, cpv):
			# A package's identity for the owners index: (cpv, COUNTER,
			# mtime), so stale entries can be detected.
			counter, mtime = self._vardb.aux_get(
				cpv, ["COUNTER", "_mtime_"])
				counter = int(counter)
			return (_unicode(cpv), counter, mtime)
1088 class _owners_db(object):
1090 def __init__(self, vardb):
1096 def _populate(self):
1097 owners_cache = vardbapi._owners_cache(self._vardb)
1098 cached_hashes = set()
1099 base_names = self._vardb._aux_cache["owners"]["base_names"]
1101 # Take inventory of all cached package hashes.
1102 for name, hash_values in list(base_names.items()):
1103 if not isinstance(hash_values, dict):
1104 del base_names[name]
1106 cached_hashes.update(hash_values)
1108 # Create sets of valid package hashes and uncached packages.
1109 uncached_pkgs = set()
1110 hash_pkg = owners_cache._hash_pkg
1111 valid_pkg_hashes = set()
1112 for cpv in self._vardb.cpv_all():
1113 hash_value = hash_pkg(cpv)
1114 valid_pkg_hashes.add(hash_value)
1115 if hash_value not in cached_hashes:
1116 uncached_pkgs.add(cpv)
1118 # Cache any missing packages.
1119 for cpv in uncached_pkgs:
1120 owners_cache.add(cpv)
1122 # Delete any stale cache.
1123 stale_hashes = cached_hashes.difference(valid_pkg_hashes)
1125 for base_name_hash, bucket in list(base_names.items()):
1126 for hash_value in stale_hashes.intersection(bucket):
1127 del bucket[hash_value]
1129 del base_names[base_name_hash]
1133 def get_owners(self, path_iter):
1135 @return the owners as a dblink -> set(files) mapping.
1138 for owner, f in self.iter_owners(path_iter):
1139 owned_files = owners.get(owner)
1140 if owned_files is None:
1142 owners[owner] = owned_files
1146 def getFileOwnerMap(self, path_iter):
1147 owners = self.get_owners(path_iter)
1149 for pkg_dblink, files in owners.items():
1151 owner_set = file_owners.get(f)
1152 if owner_set is None:
1154 file_owners[f] = owner_set
1155 owner_set.add(pkg_dblink)
	def iter_owners(self, path_iter):
		"""
		Iterate over tuples of (dblink, path). In order to avoid
		consuming too many resources for too much time, resources
		are only allocated for the duration of a given iter_owners()
		call. Therefore, to maximize reuse of resources when searching
		for multiple files, it's best to search for them all in a single
		call.
		"""
		if not isinstance(path_iter, list):
			path_iter = list(path_iter)
		owners_cache = self._populate()
		hash_pkg = owners_cache._hash_pkg
		hash_str = owners_cache._hash_str
		base_names = self._vardb._aux_cache["owners"]["base_names"]
		# Memoized dblink lookup; the cache is capped so that long
		# path lists cannot exhaust memory (overflow raises
		# StopIteration, triggering the low-memory fallback below).
		x = dblink_cache.get(cpv)
		if len(dblink_cache) > 20:
			# Ensure that we don't run out of memory.
			raise StopIteration()
		x = self._vardb._dblink(cpv)
		dblink_cache[cpv] = x
		path = path_iter.pop()
		# A path that does not start with os.sep is matched by
		# basename only.
		is_basename = os.sep != path[:1]
		name = os.path.basename(path.rstrip(os.path.sep))
		# Candidate packages come from the hashed-basename index.
		name_hash = hash_str(name)
		pkgs = base_names.get(name_hash)
		if pkgs is not None:
			for hash_value in pkgs:
				# Skip malformed cache entries.
				if not isinstance(hash_value, tuple) or \
					len(hash_value) != 3:
				cpv, counter, mtime = hash_value
				if not isinstance(cpv, basestring):
				# Skip entries whose cached package hash is stale.
				current_hash = hash_pkg(cpv)
				if current_hash != hash_value:
				for p in dblink(cpv).getcontents():
					if os.path.basename(p) == name:
						owners.append((cpv, p[len(root):]))
				if dblink(cpv).isowner(path):
					owners.append((cpv, path))
		except StopIteration:
			# dblink cache overflow: restore the unfinished path and
			# process everything remaining via the low-memory scan.
			path_iter.append(path)
			dblink_cache.clear()
			for x in self._iter_owners_low_mem(path_iter):
			for cpv, p in owners:
				yield (dblink(cpv), p)
	def _iter_owners_low_mem(self, path_list):
		"""
		This implementation will make a short-lived dblink instance (and
		parse CONTENTS) for every single installed package. This is
		slower but uses less memory than the method which uses the
		basename cache.
		"""
		# Pre-compute (path, basename, is_basename) for each query path.
		for path in path_list:
			is_basename = os.sep != path[:1]
			name = os.path.basename(path.rstrip(os.path.sep))
			path_info_list.append((path, name, is_basename))
		root = self._vardb._eroot
		# Scan every installed package's CONTENTS once, yielding a
		# (dblink, relative_path) pair for each match.
		for cpv in self._vardb.cpv_all():
			dblnk = self._vardb._dblink(cpv)
			for path, name, is_basename in path_info_list:
				for p in dblnk.getcontents():
					if os.path.basename(p) == name:
						yield dblnk, p[len(root):]
				if dblnk.isowner(path):
class vartree(object):
	"this tree will scan a var/db/pkg database located at root (passed to init)"

	def __init__(self, root=None, virtual=DeprecationWarning, categories=None,
		if settings is None:
			settings = portage.settings
		# Both 'root' and 'virtual' are retained only for backward
		# compatibility; passing either triggers a DeprecationWarning.
		if root is not None and root != settings['ROOT']:
			warnings.warn("The 'root' parameter of the "
				"portage.dbapi.vartree.vartree"
				" constructor is now unused. Use "
				"settings['ROOT'] instead.",
				DeprecationWarning, stacklevel=2)
		if virtual is not DeprecationWarning:
			warnings.warn("The 'virtual' parameter of the "
				"portage.dbapi.vartree.vartree"
				" constructor is unused",
				DeprecationWarning, stacklevel=2)
		self.settings = settings
		self.dbapi = vardbapi(settings=settings, vartree=self)

	# Deprecated accessor: warns and returns settings['ROOT'].
		warnings.warn("The root attribute of "
			"portage.dbapi.vartree.vartree"
			" is deprecated. Use "
			"settings['ROOT'] instead.",
			DeprecationWarning, stacklevel=3)
		return self.settings['ROOT']

	def getpath(self, mykey, filename=None):
		# Delegate path construction to vardbapi.
		return self.dbapi.getpath(mykey, filename=filename)

	def zap(self, mycpv):

	def inject(self, mycpv):

	def get_provide(self, mycpv):
		# Evaluate PROVIDE with the package's recorded USE flags.
		mylines, myuse = self.dbapi.aux_get(mycpv, ["PROVIDE", "USE"])
		myuse = myuse.split()
		mylines = use_reduce(mylines, uselist=myuse, flat=True)
		for myprovide in mylines:
			mys = catpkgsplit(myprovide)
			mys = myprovide.split("/")
			myprovides += [mys[0] + "/" + mys[1]]
		except SystemExit as e:
		except Exception as e:
			# Best-effort: report parse problems instead of raising.
			mydir = self.dbapi.getpath(mycpv)
			writemsg(_("\nParse Error reading PROVIDE and USE in '%s'\n") % mydir,
			writemsg(_("Possibly Invalid: '%s'\n") % str(mylines),
			writemsg(_("Exception: %s\n\n") % str(e), noiselevel=-1)

	def get_all_provides(self):
		# Map each provided virtual to the list of providing cpvs.
		for node in self.getallcpv():
			for mykey in self.get_provide(node):
				if mykey in myprovides:
					myprovides[mykey] += [node]
					myprovides[mykey] = [node]

	def dep_bestmatch(self, mydep, use_cache=1):
		"compatibility method -- all matches, not just visible ones"
		#mymatch=best(match(dep_expand(mydep,self.dbapi),self.dbapi))
		mymatch = best(self.dbapi.match(
			dep_expand(mydep, mydb=self.dbapi, settings=self.settings),
			use_cache=use_cache))

	def dep_match(self, mydep, use_cache=1):
		"compatibility method -- we want to see all matches, not just visible ones"
		#mymatch = match(mydep,self.dbapi)
		mymatch = self.dbapi.match(mydep, use_cache=use_cache)

	def exists_specific(self, cpv):
		# True when the exact cpv is installed.
		return self.dbapi.cpv_exists(cpv)

	def getallcpv(self):
		"""temporary function, probably to be renamed --- Gets a list of all
		category/package-versions installed on the system."""
		return self.dbapi.cpv_all()

	def getallnodes(self):
		"""new behavior: these are all *unmasked* nodes. There may or may not be available
		masked package for nodes in this nodes list."""
		return self.dbapi.cp_all()

	def getebuildpath(self, fullpackage):
		# Path to the vdb copy of the installed package's ebuild.
		cat, package = catsplit(fullpackage)
		return self.getpath(fullpackage, filename=package+".ebuild")

	def getslot(self, mycatpkg):
		"Get a slot for a catpkg; assume it exists."
		return self.dbapi.aux_get(mycatpkg, ["SLOT"])[0]
class dblink(object):
	"""
	This class provides an interface to the installed package database
	At present this is implemented as a text backend in /var/db/pkg.
	"""

	# Matches paths that require normalize_path(): doubled slashes,
	# missing leading slash, trailing "/." style components, or
	# "." / ".." path segments.
	_normalize_needed = re.compile(r'//|^[^/]|./$|(^|/)\.\.?(/|$)')

	# Parses one CONTENTS line into dir/obj/sym named groups.
	_contents_re = re.compile(r'^(' + \
		r'(?P<dir>(dev|dir|fif) (.+))|' + \
		r'(?P<obj>(obj) (.+) (\S+) (\d+))|' + \
		r'(?P<sym>(sym) (.+) -> (.+) ((\d+)|(?P<oldsym>(' + \
		r'\(\d+, \d+L, \d+L, \d+, \d+, \d+, \d+L, \d+, (\d+), \d+\)))))' + \

	# These files are generated by emerge, so we need to remove
	# them when they are the only thing left in a directory.
	_infodir_cleanup = frozenset(["dir", "dir.old"])

	# unlink() errnos that are safe to ignore during unmerge.
	_ignored_unlink_errnos = (
		errno.EBUSY, errno.ENOENT,
		errno.ENOTDIR, errno.EISDIR)

	# rmdir() errnos that are safe to ignore during unmerge.
	_ignored_rmdir_errnos = (
		errno.EEXIST, errno.ENOTEMPTY,
		errno.EBUSY, errno.ENOENT,
		errno.ENOTDIR, errno.EISDIR,
	def __init__(self, cat, pkg, myroot=None, settings=None, treetype=None,
		vartree=None, blockers=None, scheduler=None, pipe=None):
		"""
		Creates a DBlink object for a given CPV.
		The given CPV may not be present in the database already.

		@param cat: Category
		@param pkg: Package (PV)
		@param myroot: ignored, settings['ROOT'] is used instead
		@type myroot: String (Path)
		@param settings: Typically portage.settings
		@type settings: portage.config
		@param treetype: one of ['porttree','bintree','vartree']
		@type treetype: String
		@param vartree: an instance of vartree corresponding to myroot.
		@type vartree: vartree
		"""
		if settings is None:
			raise TypeError("settings argument is required")
		mysettings = settings
		self._eroot = mysettings['EROOT']
		self.mycpv = self.cat + "/" + self.pkg
		# Reuse the caller's _pkg_str instance when possible so that
		# attached metadata (slot, repo, etc.) is shared.
		if self.mycpv == settings.mycpv and \
			isinstance(settings.mycpv, _pkg_str):
			self.mycpv = settings.mycpv
			self.mycpv = _pkg_str(self.mycpv)
		self.mysplit = list(self.mycpv.cpv_split[1:])
		self.mysplit[0] = self.mycpv.cp
		self.treetype = treetype
			vartree = portage.db[self._eroot]["vartree"]
		self.vartree = vartree
		self._blockers = blockers
		self._scheduler = scheduler
		# Paths into the installed-package database (/var/db/pkg):
		# dbtmpdir is the staging dir used while a merge is in progress.
		self.dbroot = normalize_path(os.path.join(self._eroot, VDB_PATH))
		self.dbcatdir = self.dbroot+"/"+cat
		self.dbpkgdir = self.dbcatdir+"/"+pkg
		self.dbtmpdir = self.dbcatdir+"/-MERGING-"+pkg
		self.dbdir = self.dbpkgdir
		self.settings = mysettings
		self._verbose = self.settings.get("PORTAGE_VERBOSE") == "1"
		self.myroot = self.settings['ROOT']
		self._installed_instance = None
		# CONTENTS-derived caches, populated lazily by getcontents().
		self.contentscache = None
		self._contents_inodes = None
		self._contents_basenames = None
		self._linkmap_broken = False
		self._hardlink_merge_map = {}
		# Identity used by __hash__/__eq__.
		self._hash_key = (self._eroot, self.mycpv)
		self._protect_obj = None
		return hash(self._hash_key)  # keyed on (EROOT, cpv), consistent with __eq__
1494 def __eq__(self, other):
1495 return isinstance(other, dblink) and \
1496 self._hash_key == other._hash_key
	def _get_protect_obj(self):
		# Lazily build the ConfigProtect helper from the current
		# CONFIG_PROTECT / CONFIG_PROTECT_MASK settings; the instance
		# is cached for the lifetime of this dblink.
		if self._protect_obj is None:
			self._protect_obj = ConfigProtect(self._eroot,
				portage.util.shlex_split(
					self.settings.get("CONFIG_PROTECT", "")),
				portage.util.shlex_split(
					self.settings.get("CONFIG_PROTECT_MASK", "")))
		return self._protect_obj
1509 def isprotected(self, obj):
1510 return self._get_protect_obj().isprotected(obj)
1512 def updateprotect(self):
1513 self._get_protect_obj().updateprotect()
		# Acquire the vardbapi lock (lockdb).
		self.vartree.dbapi.lock()
		# Release the vardbapi lock (unlockdb).
		self.vartree.dbapi.unlock()
		"return path to location of db information (for >>> informational display)"
		"does the db entry exist? boolean."
		return os.path.exists(self.dbdir)
		"""
		Remove this entry from the database
		"""
		if not os.path.exists(self.dbdir):
		# Check validity of self.dbdir before attempting to remove it.
		if not self.dbdir.startswith(self.dbroot):
			writemsg(_("portage.dblink.delete(): invalid dbdir: %s\n") % \
				self.dbdir, noiselevel=-1)
		shutil.rmtree(self.dbdir)
		# If empty, remove parent category directory.
		os.rmdir(os.path.dirname(self.dbdir))
		# Drop this entry from vardbapi's caches as well.
		self.vartree.dbapi._remove(self)
	def clearcontents(self):
		"""
		For a given db entry (self), erase the CONTENTS values.
		"""
		if os.path.exists(self.dbdir+"/CONTENTS"):
			os.unlink(self.dbdir+"/CONTENTS")
1561 def _clear_contents_cache(self):
1562 self.contentscache = None
1563 self._contents_inodes = None
1564 self._contents_basenames = None
	def getcontents(self):
		"""
		Get the installed files of a given package (aka what that package installed)

		Returns a mapping of absolute path -> entry data tuple, cached
		in self.contentscache after the first parse.
		"""
		contents_file = os.path.join(self.dbdir, "CONTENTS")
		if self.contentscache is not None:
			return self.contentscache
		myc = io.open(_unicode_encode(contents_file,
			encoding=_encodings['fs'], errors='strict'),
			mode='r', encoding=_encodings['repo.content'],
		except EnvironmentError as e:
			# A missing CONTENTS file is treated as an empty package.
			if e.errno != errno.ENOENT:
			self.contentscache = pkgfiles
		mylines = myc.readlines()
		# Bind class-level helpers to locals for the parse loop.
		normalize_needed = self._normalize_needed
		contents_re = self._contents_re
		obj_index = contents_re.groupindex['obj']
		dir_index = contents_re.groupindex['dir']
		sym_index = contents_re.groupindex['sym']
		# The old symlink format may exist on systems that have packages
		# which were installed many years ago (see bug #351814).
		oldsym_index = contents_re.groupindex['oldsym']
		# CONTENTS files already contain EPREFIX
		myroot = self.settings['ROOT']
		if myroot == os.path.sep:
		# used to generate parent dir entries
		dir_entry = (_unicode_decode("dir"),)
		eroot_split_len = len(self.settings["EROOT"].split(os.sep)) - 1
		for pos, line in enumerate(mylines):
			if null_byte in line:
				# Null bytes are a common indication of corruption.
				errors.append((pos + 1, _("Null byte found in CONTENTS entry")))
			line = line.rstrip("\n")
			m = contents_re.match(line)
				errors.append((pos + 1, _("Unrecognized CONTENTS entry")))
			if m.group(obj_index) is not None:
				#format: type, mtime, md5sum
				data = (m.group(base+1), m.group(base+4), m.group(base+3))
			elif m.group(dir_index) is not None:
				data = (m.group(base+1),)
			elif m.group(sym_index) is not None:
				if m.group(oldsym_index) is None:
					mtime = m.group(base+5)
					mtime = m.group(base+8)
				#format: type, mtime, dest
				data = (m.group(base+1), mtime, m.group(base+3))
				# This won't happen as long the regular expression
				# is written to only match valid entries.
				raise AssertionError(_("required group not found " + \
					"in CONTENTS entry: '%s'") % line)
			path = m.group(base+2)
			if normalize_needed.search(path) is not None:
				path = normalize_path(path)
				if not path.startswith(os.path.sep):
					path = os.path.sep + path
			if myroot is not None:
				path = os.path.join(myroot, path.lstrip(os.path.sep))
			# Implicitly add parent directories, since we can't necessarily
			# assume that they are explicitly listed in CONTENTS, and it's
			# useful for callers if they can rely on parent directory entries
			# being generated here (crucial for things like dblink.isowner()).
			path_split = path.split(os.sep)
			while len(path_split) > eroot_split_len:
				parent = os.sep.join(path_split)
				if parent in pkgfiles:
				pkgfiles[parent] = dir_entry
			pkgfiles[path] = data
			# Report accumulated parse errors once, after the loop.
			writemsg(_("!!! Parse error in '%s'\n") % contents_file, noiselevel=-1)
			for pos, e in errors:
				writemsg(_("!!! line %d: %s\n") % (pos, e), noiselevel=-1)
		self.contentscache = pkgfiles
	def _prune_plib_registry(self, unmerge=False,
		needed=None, preserve_paths=None):
		# remove preserved libraries that don't have any consumers left
		# Only proceed when the linkmap and the preserved-libs registry
		# are both available and the linkmap is not known to be broken.
		if not (self._linkmap_broken or
			self.vartree.dbapi._linkmap is None or
			self.vartree.dbapi._plib_registry is None):
			self.vartree.dbapi._fs_lock()
			plib_registry = self.vartree.dbapi._plib_registry
			plib_registry.lock()
				plib_registry.load()
				unmerge_with_replacement = \
					unmerge and preserve_paths is not None
				if unmerge_with_replacement:
					# If self.mycpv is about to be unmerged and we
					# have a replacement package, we want to exclude
					# the irrelevant NEEDED data that belongs to
					# files which are being unmerged now.
					exclude_pkgs = (self.mycpv,)
				self._linkmap_rebuild(exclude_pkgs=exclude_pkgs,
					include_file=needed, preserve_paths=preserve_paths)
				unmerge_preserve = None
				if not unmerge_with_replacement:
					unmerge_preserve = \
						self._find_libs_to_preserve(unmerge=True)
				counter = self.vartree.dbapi.cpv_counter(self.mycpv)
				plib_registry.unregister(self.mycpv,
					self.settings["SLOT"], counter)
				if unmerge_preserve:
					for path in sorted(unmerge_preserve):
						contents_key = self._match_contents(path)
						if not contents_key:
						obj_type = self.getcontents()[contents_key][0]
						self._display_merge(_(">>> needed %s %s\n") % \
							(obj_type, contents_key), noiselevel=-1)
					plib_registry.register(self.mycpv,
						self.settings["SLOT"], counter, unmerge_preserve)
					# Remove the preserved files from our contents
					# so that they won't be unmerged.
					self.vartree.dbapi.removeFromContents(self,
				unmerge_no_replacement = \
					unmerge and not unmerge_with_replacement
				cpv_lib_map = self._find_unused_preserved_libs(
					unmerge_no_replacement)
					self._remove_preserved_libs(cpv_lib_map)
					self.vartree.dbapi.lock()
						# Prune the removed libs from each surviving
						# package's CONTENTS.
						for cpv, removed in cpv_lib_map.items():
							if not self.vartree.dbapi.cpv_exists(cpv):
							self.vartree.dbapi.removeFromContents(cpv, removed)
						self.vartree.dbapi.unlock()
				plib_registry.store()
				plib_registry.unlock()
				self.vartree.dbapi._fs_unlock()
	def unmerge(self, pkgfiles=None, trimworld=None, cleanup=True,
		ldpath_mtimes=None, others_in_slot=None, needed=None,
		preserve_paths=None):
		"""
		Unmerges a given package (CPV)

		@param pkgfiles: files to unmerge (generally self.getcontents() )
		@type pkgfiles: Dictionary
		@param trimworld: Unused
		@type trimworld: Boolean
		@param cleanup: cleanup to pass to doebuild (see doebuild)
		@type cleanup: Boolean
		@param ldpath_mtimes: mtimes to pass to env_update (see env_update)
		@type ldpath_mtimes: Dictionary
		@param others_in_slot: all dblink instances in this slot, excluding self
		@type others_in_slot: list
		@param needed: Filename containing libraries needed after unmerge.
		@type needed: String
		@param preserve_paths: Libraries preserved by a package instance that
			is currently being merged. They need to be explicitly passed to the
			LinkageMap, since they are not registered in the
			PreservedLibsRegistry yet.
		@type preserve_paths: set
		@return:
		1. os.EX_OK if everything went well.
		2. return code of the failed phase (for prerm, postrm, cleanrm)
		"""
		if trimworld is not None:
			warnings.warn("The trimworld parameter of the " + \
				"portage.dbapi.vartree.dblink.unmerge()" + \
				" method is now unused.",
				DeprecationWarning, stacklevel=2)
		log_path = self.settings.get("PORTAGE_LOG_FILE")
		if self._scheduler is None:
			# We create a scheduler instance and use it to
			# log unmerge output separately from merge output.
			self._scheduler = PollScheduler().sched_iface
		# PORTAGE_BACKGROUND_UNMERGE overrides PORTAGE_BACKGROUND for
		# the duration of the unmerge when running as a subprocess.
		if self.settings.get("PORTAGE_BACKGROUND") == "subprocess":
			if self.settings.get("PORTAGE_BACKGROUND_UNMERGE") == "1":
				self.settings["PORTAGE_BACKGROUND"] = "1"
				self.settings.backup_changes("PORTAGE_BACKGROUND")
			elif self.settings.get("PORTAGE_BACKGROUND_UNMERGE") == "0":
				self.settings["PORTAGE_BACKGROUND"] = "0"
				self.settings.backup_changes("PORTAGE_BACKGROUND")
		elif self.settings.get("PORTAGE_BACKGROUND") == "1":
		self.vartree.dbapi._bump_mtime(self.mycpv)
		showMessage = self._display_merge
		# Invalidate the category cache since entries may be removed.
		if self.vartree.dbapi._categories is not None:
			self.vartree.dbapi._categories = None
		# When others_in_slot is not None, the backup has already been
		# handled by the caller.
		caller_handles_backup = others_in_slot is not None
		# When others_in_slot is supplied, the security check has already been
		# done for this slot, so it shouldn't be repeated until the next
		# replacement or unmerge operation.
		if others_in_slot is None:
			slot = self.vartree.dbapi.aux_get(self.mycpv, ["SLOT"])[0]
			slot_matches = self.vartree.dbapi.match(
				"%s:%s" % (portage.cpv_getkey(self.mycpv), slot))
			for cur_cpv in slot_matches:
				if cur_cpv == self.mycpv:
				others_in_slot.append(dblink(self.cat, catsplit(cur_cpv)[1],
					settings=self.settings, vartree=self.vartree,
					treetype="vartree", pipe=self._pipe))
			retval = self._security_check([self] + others_in_slot)
		contents = self.getcontents()
		# Now, don't assume that the name of the ebuild is the same as the
		# name of the dir; the package may have been moved.
		myebuildpath = os.path.join(self.dbdir, self.pkg + ".ebuild")
		ebuild_phase = "prerm"
		mystuff = os.listdir(self.dbdir)
			if x.endswith(".ebuild"):
				if x[:-7] != self.pkg:
					# Clean up after vardbapi.move_ent() breakage in
					# portage versions before 2.1.2
					os.rename(os.path.join(self.dbdir, x), myebuildpath)
					write_atomic(os.path.join(self.dbdir, "PF"), self.pkg+"\n")
		if self.mycpv != self.settings.mycpv or \
			"EAPI" not in self.settings.configdict["pkg"]:
			# We avoid a redundant setcpv call here when
			# the caller has already taken care of it.
			self.settings.setcpv(self.mycpv, mydb=self.vartree.dbapi)
		eapi_unsupported = False
			doebuild_environment(myebuildpath, "prerm",
				settings=self.settings, db=self.vartree.dbapi)
		except UnsupportedAPIException as e:
			# Remember the exception so it can be reported after the
			# build dirs (and log file) are prepared below.
			eapi_unsupported = e
		builddir_lock = None
		scheduler = self._scheduler
			# Only create builddir_lock if the caller
			# has not already acquired the lock.
			if "PORTAGE_BUILDIR_LOCKED" not in self.settings:
				builddir_lock = EbuildBuildDir(
					scheduler=scheduler,
					settings=self.settings)
				builddir_lock.lock()
			prepare_build_dirs(settings=self.settings, cleanup=True)
			log_path = self.settings.get("PORTAGE_LOG_FILE")
			# Do this before the following _prune_plib_registry call, since
			# that removes preserved libraries from our CONTENTS, and we
			# may want to backup those libraries first.
			if not caller_handles_backup:
				retval = self._pre_unmerge_backup(background)
				if retval != os.EX_OK:
					showMessage(_("!!! FAILED prerm: quickpkg: %s\n") % retval,
						level=logging.ERROR, noiselevel=-1)
			self._prune_plib_registry(unmerge=True, needed=needed,
				preserve_paths=preserve_paths)
			# Log the error after PORTAGE_LOG_FILE is initialized
			# by prepare_build_dirs above.
			if eapi_unsupported:
				# Sometimes this happens due to corruption of the EAPI file.
				showMessage(_("!!! FAILED prerm: %s\n") % \
					os.path.join(self.dbdir, "EAPI"),
					level=logging.ERROR, noiselevel=-1)
				showMessage(_unicode_decode("%s\n") % (eapi_unsupported,),
					level=logging.ERROR, noiselevel=-1)
			elif os.path.isfile(myebuildpath):
				phase = EbuildPhase(background=background,
					phase=ebuild_phase, scheduler=scheduler,
					settings=self.settings)
				retval = phase.wait()
				# XXX: Decide how to handle failures here.
				if retval != os.EX_OK:
					showMessage(_("!!! FAILED prerm: %s\n") % retval,
						level=logging.ERROR, noiselevel=-1)
			# Remove the installed files while holding the vdb fs lock.
			self.vartree.dbapi._fs_lock()
				self._unmerge_pkgfiles(pkgfiles, others_in_slot)
				self.vartree.dbapi._fs_unlock()
			self._clear_contents_cache()
			if not eapi_unsupported and os.path.isfile(myebuildpath):
				ebuild_phase = "postrm"
				phase = EbuildPhase(background=background,
					phase=ebuild_phase, scheduler=scheduler,
					settings=self.settings)
				retval = phase.wait()
				# XXX: Decide how to handle failures here.
				if retval != os.EX_OK:
					showMessage(_("!!! FAILED postrm: %s\n") % retval,
						level=logging.ERROR, noiselevel=-1)
			self.vartree.dbapi._bump_mtime(self.mycpv)
			# On phase failure, explain to the user how to recover.
			if not eapi_unsupported and os.path.isfile(myebuildpath):
				if retval != os.EX_OK:
					msg = _("The '%(ebuild_phase)s' "
						"phase of the '%(cpv)s' package "
						"has failed with exit value %(retval)s.") % \
						{"ebuild_phase":ebuild_phase, "cpv":self.mycpv,
					from textwrap import wrap
					msg_lines.extend(wrap(msg, 72))
					msg_lines.append("")
					ebuild_name = os.path.basename(myebuildpath)
					ebuild_dir = os.path.dirname(myebuildpath)
					msg = _("The problem occurred while executing "
						"the ebuild file named '%(ebuild_name)s' "
						"located in the '%(ebuild_dir)s' directory. "
						"If necessary, manually remove "
						"the environment.bz2 file and/or the "
						"ebuild file located in that directory.") % \
						{"ebuild_name":ebuild_name, "ebuild_dir":ebuild_dir}
					msg_lines.extend(wrap(msg, 72))
					msg_lines.append("")
						"of the environment.bz2 file is "
						"preferred since it may allow the "
						"removal phases to execute successfully. "
						"The ebuild will be "
						"sourced and the eclasses "
						"from the current portage tree will be used "
						"when necessary. Removal of "
						"the ebuild file will cause the "
						"pkg_prerm() and pkg_postrm() removal "
						"phases to be skipped entirely.")
					msg_lines.extend(wrap(msg, 72))
					self._eerror(ebuild_phase, msg_lines)
			self._elog_process(phasefilter=("prerm", "postrm"))
			if retval == os.EX_OK:
					doebuild_environment(myebuildpath, "cleanrm",
						settings=self.settings, db=self.vartree.dbapi)
				except UnsupportedAPIException:
				phase = EbuildPhase(background=background,
					phase="cleanrm", scheduler=scheduler,
					settings=self.settings)
				retval = phase.wait()
			if builddir_lock is not None:
				builddir_lock.unlock()
		if log_path is not None:
			# Discard empty/unwanted unmerge logs.
			if not failures and 'unmerge-logs' not in self.settings.features:
				st = os.stat(log_path)
		if log_path is not None and os.path.exists(log_path):
			# Restore this since it gets lost somewhere above and it
			# needs to be set for _display_merge() to be able to log.
			# Note that the log isn't necessarily supposed to exist
			# since if PORT_LOGDIR is unset then it's a temp file
			# so it gets cleaned above.
			self.settings["PORTAGE_LOG_FILE"] = log_path
			self.settings.pop("PORTAGE_LOG_FILE", None)
		env_update(target_root=self.settings['ROOT'],
			prev_mtimes=ldpath_mtimes,
			contents=contents, env=self.settings,
			writemsg_level=self._display_merge, vardbapi=self.vartree.dbapi)
		unmerge_with_replacement = preserve_paths is not None
		if not unmerge_with_replacement:
			# When there's a replacement package which calls us via treewalk,
			# treewalk will automatically call _prune_plib_registry for us.
			# Otherwise, we need to call _prune_plib_registry ourselves.
			# Don't pass in the "unmerge=True" flag here, since that flag
			# is intended to be used _prior_ to unmerge, not after.
			self._prune_plib_registry()
	def _display_merge(self, msg, level=0, noiselevel=0):
		# Suppress informational output unless PORTAGE_VERBOSE is set
		# (warnings and negative-noiselevel messages always pass).
		if not self._verbose and noiselevel >= 0 and level < logging.WARN:
		if self._scheduler is None:
			# No scheduler: write directly to the console.
			writemsg_level(msg, level=level, noiselevel=noiselevel)
		if self.settings.get("PORTAGE_BACKGROUND") != "subprocess":
			log_path = self.settings.get("PORTAGE_LOG_FILE")
		background = self.settings.get("PORTAGE_BACKGROUND") == "1"
		if background and log_path is None:
			# Background with no log file: only warnings are shown.
			if level >= logging.WARN:
				writemsg_level(msg, level=level, noiselevel=noiselevel)
		# Route output through the scheduler so merge and unmerge
		# logs are handled consistently.
		self._scheduler.output(msg,
			log_path=log_path, background=background,
			level=level, noiselevel=noiselevel)
2045 def _show_unmerge(self, zing, desc, file_type, file_name):
2046 self._display_merge("%s %s %s %s\n" % \
2047 (zing, desc.ljust(8), file_type, file_name))
2049 def _unmerge_pkgfiles(self, pkgfiles, others_in_slot):
2052 Unmerges the contents of a package from the liveFS
2053 Removes the VDB entry for self
2055 @param pkgfiles: typically self.getcontents()
2056 @type pkgfiles: Dictionary { filename: [ 'type', '?', 'md5sum' ] }
2057 @param others_in_slot: all dblink instances in this slot, excluding self
2058 @type others_in_slot: list
2063 perf_md5 = perform_md5
2064 showMessage = self._display_merge
2065 show_unmerge = self._show_unmerge
2066 ignored_unlink_errnos = self._ignored_unlink_errnos
2067 ignored_rmdir_errnos = self._ignored_rmdir_errnos
2070 showMessage(_("No package files given... Grabbing a set.\n"))
2071 pkgfiles = self.getcontents()
2073 if others_in_slot is None:
2075 slot = self.vartree.dbapi.aux_get(self.mycpv, ["SLOT"])[0]
2076 slot_matches = self.vartree.dbapi.match(
2077 "%s:%s" % (portage.cpv_getkey(self.mycpv), slot))
2078 for cur_cpv in slot_matches:
2079 if cur_cpv == self.mycpv:
2081 others_in_slot.append(dblink(self.cat, catsplit(cur_cpv)[1],
2082 settings=self.settings,
2083 vartree=self.vartree, treetype="vartree", pipe=self._pipe))
2085 cfgfiledict = grabdict(self.vartree.dbapi._conf_mem_file)
2087 protected_symlinks = {}
2089 unmerge_orphans = "unmerge-orphans" in self.settings.features
2090 calc_prelink = "prelink-checksums" in self.settings.features
2093 self.updateprotect()
2094 mykeys = list(pkgfiles)
2098 #process symlinks second-to-last, directories last.
2101 uninstall_ignore = portage.util.shlex_split(
2102 self.settings.get("UNINSTALL_IGNORE", ""))
2104 def unlink(file_name, lstatobj):
2106 if lstatobj.st_flags != 0:
2107 bsd_chflags.lchflags(file_name, 0)
2108 parent_name = os.path.dirname(file_name)
2109 # Use normal stat/chflags for the parent since we want to
2110 # follow any symlinks to the real parent directory.
2111 pflags = os.stat(parent_name).st_flags
2113 bsd_chflags.chflags(parent_name, 0)
2115 if not stat.S_ISLNK(lstatobj.st_mode):
2116 # Remove permissions to ensure that any hardlinks to
2117 # suid/sgid files are rendered harmless.
2118 os.chmod(file_name, 0)
2119 os.unlink(file_name)
2120 except OSError as ose:
2121 # If the chmod or unlink fails, you are in trouble.
2122 # With Prefix this can be because the file is owned
2123 # by someone else (a screwup by root?), on a normal
2124 # system maybe filesystem corruption. In any case,
2125 # if we backtrace and die here, we leave the system
2126 # in a totally undefined state, hence we just bleed
2127 # like hell and continue to hopefully finish all our
2128 # administrative and pkg_postinst stuff.
2129 self._eerror("postrm",
2130 ["Could not chmod or unlink '%s': %s" % \
2133 if bsd_chflags and pflags != 0:
2134 # Restore the parent flags we saved before unlinking
2135 bsd_chflags.chflags(parent_name, pflags)
2138 unmerge_desc["cfgpro"] = _("cfgpro")
2139 unmerge_desc["replaced"] = _("replaced")
2140 unmerge_desc["!dir"] = _("!dir")
2141 unmerge_desc["!empty"] = _("!empty")
2142 unmerge_desc["!fif"] = _("!fif")
2143 unmerge_desc["!found"] = _("!found")
2144 unmerge_desc["!md5"] = _("!md5")
2145 unmerge_desc["!mtime"] = _("!mtime")
2146 unmerge_desc["!obj"] = _("!obj")
2147 unmerge_desc["!sym"] = _("!sym")
2148 unmerge_desc["!prefix"] = _("!prefix")
2150 real_root = self.settings['ROOT']
2151 real_root_len = len(real_root) - 1
2152 eroot = self.settings["EROOT"]
2154 infodirs = frozenset(infodir for infodir in chain(
2155 self.settings.get("INFOPATH", "").split(":"),
2156 self.settings.get("INFODIR", "").split(":")) if infodir)
2157 infodirs_inodes = set()
2158 for infodir in infodirs:
2159 infodir = os.path.join(real_root, infodir.lstrip(os.sep))
2161 statobj = os.stat(infodir)
2165 infodirs_inodes.add((statobj.st_dev, statobj.st_ino))
2167 for i, objkey in enumerate(mykeys):
2169 obj = normalize_path(objkey)
2172 _unicode_encode(obj,
2173 encoding=_encodings['merge'], errors='strict')
2174 except UnicodeEncodeError:
2175 # The package appears to have been merged with a
2176 # different value of sys.getfilesystemencoding(),
2177 # so fall back to utf_8 if appropriate.
2179 _unicode_encode(obj,
2180 encoding=_encodings['fs'], errors='strict')
2181 except UnicodeEncodeError:
2185 perf_md5 = portage.checksum.perform_md5
2187 file_data = pkgfiles[objkey]
2188 file_type = file_data[0]
2190 # don't try to unmerge the prefix offset itself
2191 if len(obj) <= len(eroot) or not obj.startswith(eroot):
2192 show_unmerge("---", unmerge_desc["!prefix"], file_type, obj)
2197 statobj = os.stat(obj)
2202 lstatobj = os.lstat(obj)
2203 except (OSError, AttributeError):
2205 islink = lstatobj is not None and stat.S_ISLNK(lstatobj.st_mode)
2206 if lstatobj is None:
2207 show_unmerge("---", unmerge_desc["!found"], file_type, obj)
2210 f_match = obj[len(eroot)-1:]
2212 for pattern in uninstall_ignore:
2213 if fnmatch.fnmatch(f_match, pattern):
2218 if islink and f_match in \
2219 ("/lib", "/usr/lib", "/usr/local/lib"):
2220 # Ignore libdir symlinks for bug #423127.
2224 show_unmerge("---", unmerge_desc["cfgpro"], file_type, obj)
2227 # don't use EROOT, CONTENTS entries already contain EPREFIX
2228 if obj.startswith(real_root):
2229 relative_path = obj[real_root_len:]
2231 for dblnk in others_in_slot:
2232 if dblnk.isowner(relative_path):
2236 if is_owned and islink and \
2237 file_type in ("sym", "dir") and \
2238 statobj and stat.S_ISDIR(statobj.st_mode):
2239 # A new instance of this package claims the file, so
2240 # don't unmerge it. If the file is symlink to a
2241 # directory and the unmerging package installed it as
2242 # a symlink, but the new owner has it listed as a
2243 # directory, then we'll produce a warning since the
2244 # symlink is a sort of orphan in this case (see
2246 symlink_orphan = False
2247 for dblnk in others_in_slot:
2248 parent_contents_key = \
2249 dblnk._match_contents(relative_path)
2250 if not parent_contents_key:
2252 if not parent_contents_key.startswith(
2255 if dblnk.getcontents()[
2256 parent_contents_key][0] == "dir":
2257 symlink_orphan = True
2261 protected_symlinks.setdefault(
2262 (statobj.st_dev, statobj.st_ino),
2263 []).append(relative_path)
2266 show_unmerge("---", unmerge_desc["replaced"], file_type, obj)
2268 elif relative_path in cfgfiledict:
2269 stale_confmem.append(relative_path)
2271 # Don't unlink symlinks to directories here since that can
2272 # remove /lib and /usr/lib symlinks.
2273 if unmerge_orphans and \
2274 lstatobj and not stat.S_ISDIR(lstatobj.st_mode) and \
2275 not (islink and statobj and stat.S_ISDIR(statobj.st_mode)) and \
2276 not self.isprotected(obj):
2278 unlink(obj, lstatobj)
2279 except EnvironmentError as e:
2280 if e.errno not in ignored_unlink_errnos:
2283 show_unmerge("<<<", "", file_type, obj)
2286 lmtime = str(lstatobj[stat.ST_MTIME])
2287 if (pkgfiles[objkey][0] not in ("dir", "fif", "dev")) and (lmtime != pkgfiles[objkey][1]):
2288 show_unmerge("---", unmerge_desc["!mtime"], file_type, obj)
2291 if file_type == "dir" and not islink:
2292 if lstatobj is None or not stat.S_ISDIR(lstatobj.st_mode):
2293 show_unmerge("---", unmerge_desc["!dir"], file_type, obj)
2295 mydirs.add((obj, (lstatobj.st_dev, lstatobj.st_ino)))
2296 elif file_type == "sym" or (file_type == "dir" and islink):
2298 show_unmerge("---", unmerge_desc["!sym"], file_type, obj)
2301 # If this symlink points to a directory then we don't want
2302 # to unmerge it if there are any other packages that
2303 # installed files into the directory via this symlink
2304 # (see bug #326685).
2305 # TODO: Resolving a symlink to a directory will require
2306 # simulation if $ROOT != / and the link is not relative.
2307 if islink and statobj and stat.S_ISDIR(statobj.st_mode) \
2308 and obj.startswith(real_root):
2310 relative_path = obj[real_root_len:]
2312 target_dir_contents = os.listdir(obj)
2316 if target_dir_contents:
2317 # If all the children are regular files owned
2318 # by this package, then the symlink should be
2321 for child in target_dir_contents:
2322 child = os.path.join(relative_path, child)
2323 if not self.isowner(child):
2327 child_lstat = os.lstat(os.path.join(
2328 real_root, child.lstrip(os.sep)))
2332 if not stat.S_ISREG(child_lstat.st_mode):
2333 # Nested symlinks or directories make
2334 # the issue very complex, so just
2335 # preserve the symlink in order to be
2341 protected_symlinks.setdefault(
2342 (statobj.st_dev, statobj.st_ino),
2343 []).append(relative_path)
2344 show_unmerge("---", unmerge_desc["!empty"],
2348 # Go ahead and unlink symlinks to directories here when
2349 # they're actually recorded as symlinks in the contents.
2350 # Normally, symlinks such as /lib -> lib64 are not recorded
2351 # as symlinks in the contents of a package. If a package
2352 # installs something into ${D}/lib/, it is recorded in the
2353 # contents as a directory even if it happens to correspond
2354 # to a symlink when it's merged to the live filesystem.
2356 unlink(obj, lstatobj)
2357 show_unmerge("<<<", "", file_type, obj)
2358 except (OSError, IOError) as e:
2359 if e.errno not in ignored_unlink_errnos:
2362 show_unmerge("!!!", "", file_type, obj)
2363 elif pkgfiles[objkey][0] == "obj":
2364 if statobj is None or not stat.S_ISREG(statobj.st_mode):
2365 show_unmerge("---", unmerge_desc["!obj"], file_type, obj)
2369 mymd5 = perf_md5(obj, calc_prelink=calc_prelink)
2370 except FileNotFound as e:
2371 # the file has disappeared between now and our stat call
2372 show_unmerge("---", unmerge_desc["!obj"], file_type, obj)
2375 # string.lower is needed because db entries used to be in upper-case. The
2376 # string.lower allows for backwards compatibility.
2377 if mymd5 != pkgfiles[objkey][2].lower():
2378 show_unmerge("---", unmerge_desc["!md5"], file_type, obj)
2381 unlink(obj, lstatobj)
2382 except (OSError, IOError) as e:
2383 if e.errno not in ignored_unlink_errnos:
2386 show_unmerge("<<<", "", file_type, obj)
2387 elif pkgfiles[objkey][0] == "fif":
2388 if not stat.S_ISFIFO(lstatobj[stat.ST_MODE]):
2389 show_unmerge("---", unmerge_desc["!fif"], file_type, obj)
2391 show_unmerge("---", "", file_type, obj)
2392 elif pkgfiles[objkey][0] == "dev":
2393 show_unmerge("---", "", file_type, obj)
2395 self._unmerge_dirs(mydirs, infodirs_inodes,
2396 protected_symlinks, unmerge_desc, unlink, os)
2399 if protected_symlinks:
2400 self._unmerge_protected_symlinks(others_in_slot, infodirs_inodes,
2401 protected_symlinks, unmerge_desc, unlink, os)
2403 if protected_symlinks:
2404 msg = "One or more symlinks to directories have been " + \
2405 "preserved in order to ensure that files installed " + \
2406 "via these symlinks remain accessible. " + \
2407 "This indicates that the mentioned symlink(s) may " + \
2408 "be obsolete remnants of an old install, and it " + \
2409 "may be appropriate to replace a given symlink " + \
2410 "with the directory that it points to."
2411 lines = textwrap.wrap(msg, 72)
2414 flat_list.update(*protected_symlinks.values())
2415 flat_list = sorted(flat_list)
2417 lines.append("\t%s" % (os.path.join(real_root,
2420 self._elog("elog", "postrm", lines)
2422 # Remove stale entries from config memory.
2424 for filename in stale_confmem:
2425 del cfgfiledict[filename]
2426 writedict(cfgfiledict, self.vartree.dbapi._conf_mem_file)
2428 #remove self from vartree database so that our own virtual gets zapped if we're the last node
2429 self.vartree.zap(self.mycpv)
# Purpose: for symlinks-to-directories that were protected during unmerge,
# search all installed packages for files installed via those symlinks; if
# no other owner remains, remove the symlinks and retry directory removal.
# NOTE(review): this listing is elided — the embedded numbering has gaps, so
# original source lines are missing between entries; comments describe only
# what the visible lines establish.
2431 def _unmerge_protected_symlinks(self, others_in_slot, infodirs_inodes,
2432 protected_symlinks, unmerge_desc, unlink, os):
2434 real_root = self.settings['ROOT']
2435 show_unmerge = self._show_unmerge
2436 ignored_unlink_errnos = self._ignored_unlink_errnos
# Flatten all protected relative paths into one sorted list.
2439 flat_list.update(*protected_symlinks.values())
2440 flat_list = sorted(flat_list)
# Early-out if another package in this slot owns any of the files
# (elided lines presumably return here — TODO confirm).
2443 for dblnk in others_in_slot:
2444 if dblnk.isowner(f):
2445 # If another package in the same slot installed
2446 # a file via a protected symlink, return early
2447 # and don't bother searching for any other owners.
2452 msg.append(_("Directory symlink(s) may need protection:"))
2456 msg.append("\t%s" % \
2457 os.path.join(real_root, f.lstrip(os.path.sep)))
2460 msg.append(_("Searching all installed"
2461 " packages for files installed via above symlink(s)..."))
2463 self._elog("elog", "postrm", msg)
# Full-vardb owner search for the flattened path list.
2467 owners = self.vartree.dbapi._owners.get_owners(flat_list)
2468 self.vartree.dbapi.flush_cache()
# Ignore ownership by the package currently being unmerged.
2472 for owner in list(owners):
2473 if owner.mycpv == self.mycpv:
2474 owners.pop(owner, None)
2478 msg.append(_("The above directory symlink(s) are all "
2479 "safe to remove. Removing them now..."))
2481 self._elog("elog", "postrm", msg)
2483 for unmerge_syms in protected_symlinks.values():
2484 for relative_path in unmerge_syms:
2485 obj = os.path.join(real_root,
2486 relative_path.lstrip(os.sep))
# Walk up collecting parent directories (ancestors recorded
# via lstat inode keys; exact use of the elided container
# is not visible here).
2487 parent = os.path.dirname(obj)
2488 while len(parent) > len(self._eroot):
2490 lstatobj = os.lstat(parent)
2495 (lstatobj.st_dev, lstatobj.st_ino)))
2496 parent = os.path.dirname(parent)
2498 unlink(obj, os.lstat(obj))
2499 show_unmerge("<<<", "", "sym", obj)
2500 except (OSError, IOError) as e:
2501 if e.errno not in ignored_unlink_errnos:
2504 show_unmerge("!!!", "", "sym", obj)
# All protections resolved; retry removal of now-unreferenced dirs.
2506 protected_symlinks.clear()
2507 self._unmerge_dirs(dirs, infodirs_inodes,
2508 protected_symlinks, unmerge_desc, unlink, os)
# Purpose: remove directories recorded in CONTENTS, with special handling
# for GNU info directories (cleanup of stale index files), BSD file flags,
# and symlinks that were protected only because they pointed at these dirs.
# NOTE(review): elided listing — original lines are missing between the
# numbered entries (e.g. the try:/rmdir call itself is not visible).
2511 def _unmerge_dirs(self, dirs, infodirs_inodes,
2512 protected_symlinks, unmerge_desc, unlink, os):
2514 show_unmerge = self._show_unmerge
2515 infodir_cleanup = self._infodir_cleanup
2516 ignored_unlink_errnos = self._ignored_unlink_errnos
2517 ignored_rmdir_errnos = self._ignored_rmdir_errnos
2518 real_root = self.settings['ROOT']
2523 for obj, inode_key in dirs:
2524 # Treat any directory named "info" as a candidate here,
2525 # since it might have been in INFOPATH previously even
2526 # though it may not be there now.
2527 if inode_key in infodirs_inodes or \
2528 os.path.basename(obj) == "info":
2530 remaining = os.listdir(obj)
2534 cleanup_info_dir = ()
# Only clean up when everything left is a known info index file.
2536 len(remaining) <= len(infodir_cleanup):
2537 if not set(remaining).difference(infodir_cleanup):
2538 cleanup_info_dir = remaining
2540 for child in cleanup_info_dir:
2541 child = os.path.join(obj, child)
2543 lstatobj = os.lstat(child)
2544 if stat.S_ISREG(lstatobj.st_mode):
2545 unlink(child, lstatobj)
2546 show_unmerge("<<<", "", "obj", child)
2547 except EnvironmentError as e:
2548 if e.errno not in ignored_unlink_errnos:
2551 show_unmerge("!!!", "", "obj", child)
# BSD chflags handling: clear immutable-style flags on the dir and
# its parent before removal (bsd_chflags presumably None on Linux —
# TODO confirm against elided guard).
2554 lstatobj = os.lstat(obj)
2555 if lstatobj.st_flags != 0:
2556 bsd_chflags.lchflags(obj, 0)
2557 parent_name = os.path.dirname(obj)
2558 # Use normal stat/chflags for the parent since we want to
2559 # follow any symlinks to the real parent directory.
2560 pflags = os.stat(parent_name).st_flags
2562 bsd_chflags.chflags(parent_name, 0)
2566 if bsd_chflags and pflags != 0:
2567 # Restore the parent flags we saved before unlinking
2568 bsd_chflags.chflags(parent_name, pflags)
2569 show_unmerge("<<<", "", "dir", obj)
2570 except EnvironmentError as e:
2571 if e.errno not in ignored_rmdir_errnos:
2573 if e.errno != errno.ENOENT:
2574 show_unmerge("---", unmerge_desc["!empty"], "dir", obj)
2577 # When a directory is successfully removed, there's
2578 # no need to protect symlinks that point to it.
2579 unmerge_syms = protected_symlinks.pop(inode_key, None)
2580 if unmerge_syms is not None:
2581 for relative_path in unmerge_syms:
2582 obj = os.path.join(real_root,
2583 relative_path.lstrip(os.sep))
2585 unlink(obj, os.lstat(obj))
2586 show_unmerge("<<<", "", "sym", obj)
2587 except (OSError, IOError) as e:
2588 if e.errno not in ignored_unlink_errnos:
2591 show_unmerge("!!!", "", "sym", obj)
def isowner(self, filename, destroot=None):
	"""Return True if this package's contents include *filename*.

	Delegates the lookup to _match_contents(), which may stat the
	parent directory of installed files (with caching) to resolve
	ambiguous paths caused by symlinked directories.

	@param filename: path to test
	@param destroot: deprecated and unused; self.settings['EROOT']
		is used instead
	@return: True if this package owns the file, False otherwise
	"""
	deprecated_destroot = (
		destroot is not None and destroot != self._eroot)
	if deprecated_destroot:
		warnings.warn(
			"The second parameter of the "
			"portage.dbapi.vartree.dblink.isowner()"
			" is now unused. Instead "
			"self.settings['EROOT'] will be used.",
			DeprecationWarning, stacklevel=2)

	return bool(self._match_contents(filename))
# Purpose: return the CONTENTS entry that corresponds to the given path, or
# (per the visible docstring) False when this package does not own the file.
# Handles filesystem-encoding mismatches between the argument and the stored
# contents, and falls back to inode comparison of parent directories to
# resolve paths that differ only via symlinked directories.
# NOTE(review): elided listing — original lines are missing between entries.
2622 def _match_contents(self, filename, destroot=None):
2624 The matching contents entry is returned, which is useful
2625 since the path may differ from the one given by the caller,
2629 @return: the contents entry corresponding to the given path, or False
2630 if the file is not owned by this package.
2633 filename = _unicode_decode(filename,
2634 encoding=_encodings['content'], errors='strict')
2636 if destroot is not None and destroot != self._eroot:
2637 warnings.warn("The second parameter of the " + \
2638 "portage.dbapi.vartree.dblink._match_contents()" + \
2639 " is now unused. Instead " + \
2640 "self.settings['ROOT'] will be used.",
2641 DeprecationWarning, stacklevel=2)
2643 # don't use EROOT here, image already contains EPREFIX
2644 destroot = self.settings['ROOT']
2646 # The given filename argument might have a different encoding than
2647 # the filenames contained in the contents, so use separate wrapped os
2648 # modules for each. The basename is more likely to contain non-ascii
2649 # characters than the directory path, so use os_filename_arg for all
2650 # operations involving the basename of the filename arg.
2651 os_filename_arg = _os_merge
2655 _unicode_encode(filename,
2656 encoding=_encodings['merge'], errors='strict')
2657 except UnicodeEncodeError:
2658 # The package appears to have been merged with a
2659 # different value of sys.getfilesystemencoding(),
2660 # so fall back to utf_8 if appropriate.
2662 _unicode_encode(filename,
2663 encoding=_encodings['fs'], errors='strict')
2664 except UnicodeEncodeError:
2667 os_filename_arg = portage.os
2669 destfile = normalize_path(
2670 os_filename_arg.path.join(destroot,
2671 filename.lstrip(os_filename_arg.path.sep)))
# Fast path: exact match in CONTENTS.
2673 pkgfiles = self.getcontents()
2674 if pkgfiles and destfile in pkgfiles:
# Basename shortcut: lazily build the set of contents basenames so
# most non-owned paths can be rejected without any stat calls.
2677 basename = os_filename_arg.path.basename(destfile)
2678 if self._contents_basenames is None:
2683 encoding=_encodings['merge'],
2685 except UnicodeEncodeError:
2686 # The package appears to have been merged with a
2687 # different value of sys.getfilesystemencoding(),
2688 # so fall back to utf_8 if appropriate.
2692 encoding=_encodings['fs'],
2694 except UnicodeEncodeError:
2699 self._contents_basenames = set(
2700 os.path.basename(x) for x in pkgfiles)
2701 if basename not in self._contents_basenames:
2702 # This is a shortcut that, in most cases, allows us to
2703 # eliminate this package as an owner without the need
2704 # to examine inode numbers of parent directories.
2707 # Use stat rather than lstat since we want to follow
2708 # any symlinks to the real parent directory.
2709 parent_path = os_filename_arg.path.dirname(destfile)
2711 parent_stat = os_filename_arg.stat(parent_path)
2712 except EnvironmentError as e:
2713 if e.errno != errno.ENOENT:
# Lazily build the (st_dev, st_ino) -> [parent paths] cache for all
# distinct parent directories appearing in CONTENTS.
2717 if self._contents_inodes is None:
2723 encoding=_encodings['merge'],
2725 except UnicodeEncodeError:
2726 # The package appears to have been merged with a
2727 # different value of sys.getfilesystemencoding(),
2728 # so fall back to utf_8 if appropriate.
2732 encoding=_encodings['fs'],
2734 except UnicodeEncodeError:
2739 self._contents_inodes = {}
2740 parent_paths = set()
2742 p_path = os.path.dirname(x)
2743 if p_path in parent_paths:
2745 parent_paths.add(p_path)
2751 inode_key = (s.st_dev, s.st_ino)
2752 # Use lists of paths in case multiple
2753 # paths reference the same inode.
2754 p_path_list = self._contents_inodes.get(inode_key)
2755 if p_path_list is None:
2757 self._contents_inodes[inode_key] = p_path_list
2758 if p_path not in p_path_list:
2759 p_path_list.append(p_path)
# Re-join the basename against every contents parent dir that shares
# the argument parent's inode (elided lines presumably test each
# candidate against pkgfiles — TODO confirm).
2761 p_path_list = self._contents_inodes.get(
2762 (parent_stat.st_dev, parent_stat.st_ino))
2764 for p_path in p_path_list:
2765 x = os_filename_arg.path.join(p_path, basename)
# Purpose: rebuild self.vartree.dbapi._linkmap, skipping work when the map
# is broken (missing scanelf), unavailable, or preserve-libs is disabled
# with an empty registry; a CommandNotFound permanently disables it.
# NOTE(review): elided listing — the early `return` and `try:` lines fall
# in the numbering gaps.
2771 def _linkmap_rebuild(self, **kwargs):
2773 Rebuild the self._linkmap if it's not broken due to missing
2774 scanelf binary. Also, return early if preserve-libs is disabled
2775 and the preserve-libs registry is empty.
2777 if self._linkmap_broken or \
2778 self.vartree.dbapi._linkmap is None or \
2779 self.vartree.dbapi._plib_registry is None or \
2780 ("preserve-libs" not in self.settings.features and \
2781 not self.vartree.dbapi._plib_registry.hasEntries()):
2784 self.vartree.dbapi._linkmap.rebuild(**kwargs)
2785 except CommandNotFound as e:
# Latch the broken state so future calls return immediately.
2786 self._linkmap_broken = True
2787 self._display_merge(_("!!! Disabling preserve-libs " \
2788 "due to error: Command Not Found: %s\n") % (e,),
2789 level=logging.ERROR, noiselevel=-1)
# Purpose: compute the set of relative library paths to preserve, by
# building a provider/consumer digraph from the old package contents and
# the linkage map, then collecting providers reachable from non-provider
# consumers. Returns preserve_paths (a set).
# NOTE(review): elided listing — original lines are missing between entries.
2791 def _find_libs_to_preserve(self, unmerge=False):
2793 Get set of relative paths for libraries to be preserved. When
2794 unmerge is False, file paths to preserve are selected from
2795 self._installed_instance. Otherwise, paths are selected from
# Bail out when the linkmap is unusable or preserve-libs is off.
2798 if self._linkmap_broken or \
2799 self.vartree.dbapi._linkmap is None or \
2800 self.vartree.dbapi._plib_registry is None or \
2801 (not unmerge and self._installed_instance is None) or \
2802 "preserve-libs" not in self.settings.features:
2806 linkmap = self.vartree.dbapi._linkmap
2808 installed_instance = self
2810 installed_instance = self._installed_instance
2811 old_contents = installed_instance.getcontents()
2812 root = self.settings['ROOT']
2813 root_len = len(root) - 1
2814 lib_graph = digraph()
# Memoized path -> graph node constructor; merges alternative paths
# that resolve to the same object key into one node.
2817 def path_to_node(path):
2818 node = path_node_map.get(path)
2820 node = LinkageMap._LibGraphNode(linkmap._obj_key(path))
2821 alt_path_node = lib_graph.get(node)
2822 if alt_path_node is not None:
2823 node = alt_path_node
2824 node.alt_paths.add(path)
2825 path_node_map[path] = node
2829 provider_nodes = set()
2830 # Create provider nodes and add them to the graph.
2831 for f_abs in old_contents:
2835 _unicode_encode(f_abs,
2836 encoding=_encodings['merge'], errors='strict')
2837 except UnicodeEncodeError:
2838 # The package appears to have been merged with a
2839 # different value of sys.getfilesystemencoding(),
2840 # so fall back to utf_8 if appropriate.
2842 _unicode_encode(f_abs,
2843 encoding=_encodings['fs'], errors='strict')
2844 except UnicodeEncodeError:
2849 f = f_abs[root_len:]
2850 if not unmerge and self.isowner(f):
2851 # We have an identically named replacement file,
2852 # so we don't try to preserve the old copy.
2855 consumers = linkmap.findConsumers(f,
2856 exclude_providers=(installed_instance.isowner,))
2861 provider_node = path_to_node(f)
2862 lib_graph.add(provider_node, None)
2863 provider_nodes.add(provider_node)
2864 consumer_map[provider_node] = consumers
2866 # Create consumer nodes and add them to the graph.
2867 # Note that consumers can also be providers.
2868 for provider_node, consumers in consumer_map.items():
2870 consumer_node = path_to_node(c)
2871 if installed_instance.isowner(c) and \
2872 consumer_node not in provider_nodes:
2873 # This is not a provider, so it will be uninstalled.
2875 lib_graph.add(provider_node, consumer_node)
2877 # Locate nodes which should be preserved. They consist of all
2878 # providers that are reachable from consumers that are not
2879 # providers themselves.
2880 preserve_nodes = set()
2881 for consumer_node in lib_graph.root_nodes():
2882 if consumer_node in provider_nodes:
2884 # Preserve all providers that are reachable from this consumer.
# Iterative DFS over child (provider) nodes.
2885 node_stack = lib_graph.child_nodes(consumer_node)
2887 provider_node = node_stack.pop()
2888 if provider_node in preserve_nodes:
2890 preserve_nodes.add(provider_node)
2891 node_stack.extend(lib_graph.child_nodes(provider_node))
2893 preserve_paths = set()
2894 for preserve_node in preserve_nodes:
2895 # Preserve the library itself, and also preserve the
2896 # soname symlink which is the only symlink that is
2897 # strictly required.
2899 soname_symlinks = set()
2900 soname = linkmap.getSoname(next(iter(preserve_node.alt_paths)))
2901 for f in preserve_node.alt_paths:
2902 f_abs = os.path.join(root, f.lstrip(os.sep))
2904 if stat.S_ISREG(os.lstat(f_abs).st_mode):
2906 elif os.path.basename(f) == soname:
2907 soname_symlinks.add(f)
2912 preserve_paths.update(hardlinks)
2913 preserve_paths.update(soname_symlinks)
2915 return preserve_paths
# Purpose: copy CONTENTS entries for preserved library paths from the old
# installed instance into this package's CONTENTS (plus any missing parent
# directory entries), then atomically rewrite dbtmpdir/CONTENTS.
# NOTE(review): elided listing — original lines are missing between entries.
2917 def _add_preserve_libs_to_contents(self, preserve_paths):
2919 Preserve libs returned from _find_libs_to_preserve().
2922 if not preserve_paths:
2926 showMessage = self._display_merge
2927 root = self.settings['ROOT']
2929 # Copy contents entries from the old package to the new one.
2930 new_contents = self.getcontents().copy()
2931 old_contents = self._installed_instance.getcontents()
2932 for f in sorted(preserve_paths):
2933 f = _unicode_decode(f,
2934 encoding=_encodings['content'], errors='strict')
2935 f_abs = os.path.join(root, f.lstrip(os.sep))
2936 contents_entry = old_contents.get(f_abs)
2937 if contents_entry is None:
2938 # This will probably never happen, but it might if one of the
2939 # paths returned from findConsumers() refers to one of the libs
2940 # that should be preserved yet the path is not listed in the
2941 # contents. Such a path might belong to some other package, so
2942 # it shouldn't be preserved here.
2943 showMessage(_("!!! File '%s' will not be preserved "
2944 "due to missing contents entry\n") % (f_abs,),
2945 level=logging.ERROR, noiselevel=-1)
# NOTE(review): mutates the caller-supplied preserve_paths set.
2946 preserve_paths.remove(f)
2948 new_contents[f_abs] = contents_entry
2949 obj_type = contents_entry[0]
2950 showMessage(_(">>> needed %s %s\n") % (obj_type, f_abs),
2952 # Add parent directories to contents if necessary.
2953 parent_dir = os.path.dirname(f_abs)
2954 while len(parent_dir) > len(root):
2955 new_contents[parent_dir] = ["dir"]
2957 parent_dir = os.path.dirname(parent_dir)
2958 if prev == parent_dir:
# Persist the merged CONTENTS and invalidate cached copies.
2960 outfile = atomic_ofstream(os.path.join(self.dbtmpdir, "CONTENTS"))
2961 write_contents(new_contents, root, outfile)
2963 self._clear_contents_cache()
# Purpose: find preserved libraries with no remaining consumers and return
# a cpv -> [removable paths] map. Consumer relationships are tracked with a
# digraph since preserved libs can consume other preserved libs.
# NOTE(review): elided listing — original lines are missing between entries.
2965 def _find_unused_preserved_libs(self, unmerge_no_replacement):
2967 Find preserved libraries that don't have any consumers left.
2970 if self._linkmap_broken or \
2971 self.vartree.dbapi._linkmap is None or \
2972 self.vartree.dbapi._plib_registry is None or \
2973 not self.vartree.dbapi._plib_registry.hasEntries():
2976 # Since preserved libraries can be consumers of other preserved
2977 # libraries, use a graph to track consumer relationships.
2978 plib_dict = self.vartree.dbapi._plib_registry.getPreservedLibs()
2979 linkmap = self.vartree.dbapi._linkmap
2980 lib_graph = digraph()
2981 preserved_nodes = set()
2982 preserved_paths = set()
2985 root = self.settings['ROOT']
# Memoized path -> graph node constructor (same scheme as in
# _find_libs_to_preserve).
2987 def path_to_node(path):
2988 node = path_node_map.get(path)
2990 node = LinkageMap._LibGraphNode(linkmap._obj_key(path))
2991 alt_path_node = lib_graph.get(node)
2992 if alt_path_node is not None:
2993 node = alt_path_node
2994 node.alt_paths.add(path)
2995 path_node_map[path] = node
# Seed the graph with every registered preserved lib that still
# exists on disk, plus its existing consumers.
2998 for cpv, plibs in plib_dict.items():
3000 path_cpv_map[f] = cpv
3001 preserved_node = path_to_node(f)
3002 if not preserved_node.file_exists():
3004 lib_graph.add(preserved_node, None)
3005 preserved_paths.add(f)
3006 preserved_nodes.add(preserved_node)
3007 for c in self.vartree.dbapi._linkmap.findConsumers(f):
3008 consumer_node = path_to_node(c)
3009 if not consumer_node.file_exists():
3011 # Note that consumers may also be providers.
3012 lib_graph.add(preserved_node, consumer_node)
3014 # Eliminate consumers having providers with the same soname as an
3015 # installed library that is not preserved. This eliminates
3016 # libraries that are erroneously preserved due to a move from one
3017 # directory to another.
3018 # Also eliminate consumers that are going to be unmerged if
3019 # unmerge_no_replacement is True.
3021 for preserved_node in preserved_nodes:
3022 soname = linkmap.getSoname(preserved_node)
3023 for consumer_node in lib_graph.parent_nodes(preserved_node):
3024 if consumer_node in preserved_nodes:
3026 if unmerge_no_replacement:
3027 will_be_unmerged = True
3028 for path in consumer_node.alt_paths:
3029 if not self.isowner(path):
3030 will_be_unmerged = False
3032 if will_be_unmerged:
3033 # This consumer is not preserved and it is
3034 # being unmerged, so drop this edge.
3035 lib_graph.remove_edge(preserved_node, consumer_node)
# Cache findProviders results per consumer node.
3038 providers = provider_cache.get(consumer_node)
3039 if providers is None:
3040 providers = linkmap.findProviders(consumer_node)
3041 provider_cache[consumer_node] = providers
3042 providers = providers.get(soname)
3043 if providers is None:
3045 for provider in providers:
3046 if provider in preserved_paths:
3048 provider_node = path_to_node(provider)
3049 if not provider_node.file_exists():
3051 if provider_node in preserved_nodes:
3053 # An alternative provider seems to be
3054 # installed, so drop this edge.
3055 lib_graph.remove_edge(preserved_node, consumer_node)
# Repeatedly strip preserved root nodes (no remaining consumers)
# and collect their alternative paths into the unlink list.
3060 root_nodes = preserved_nodes.intersection(lib_graph.root_nodes())
3063 lib_graph.difference_update(root_nodes)
3065 for node in root_nodes:
3066 unlink_list.update(node.alt_paths)
3067 unlink_list = sorted(unlink_list)
3068 for obj in unlink_list:
3069 cpv = path_cpv_map.get(obj)
3071 # This means that a symlink is in the preserved libs
3072 # registry, but the actual lib it points to is not.
3073 self._display_merge(_("!!! symlink to lib is preserved, "
3074 "but not the lib itself:\n!!! '%s'\n") % (obj,),
3075 level=logging.ERROR, noiselevel=-1)
3077 removed = cpv_lib_map.get(cpv)
3080 cpv_lib_map[cpv] = removed
3085 return cpv_lib_map
# Purpose: unlink the no-longer-needed preserved library files collected by
# _find_unused_preserved_libs(), remove any now-empty parent directories,
# and prune stale entries from the preserved-libs registry.
# NOTE(review): elided listing — the unlink call, the parent-dir rmdir loop
# body, and obj_type assignment fall in the numbering gaps.
3085 def _remove_preserved_libs(self, cpv_lib_map):
3087 Remove files returned from _find_unused_preserved_libs().
# Deduplicate across packages, then process in sorted order.
3092 files_to_remove = set()
3093 for files in cpv_lib_map.values():
3094 files_to_remove.update(files)
3095 files_to_remove = sorted(files_to_remove)
3096 showMessage = self._display_merge
3097 root = self.settings['ROOT']
3100 for obj in files_to_remove:
3101 obj = os.path.join(root, obj.lstrip(os.sep))
3102 parent_dirs.add(os.path.dirname(obj))
3103 if os.path.islink(obj):
3109 except OSError as e:
3110 if e.errno != errno.ENOENT:
3114 showMessage(_("<<< !needed %s %s\n") % (obj_type, obj),
3117 # Remove empty parent directories if possible.
3119 x = parent_dirs.pop()
3126 x = os.path.dirname(x)
3130 self.vartree.dbapi._plib_registry.pruneNonExisting()
# Purpose: check the files/symlinks this package is about to merge against
# the live filesystem and return (collisions, symlink_collisions,
# plib_collisions): normal collisions, symlinks that would replace
# directories (banned by PMS, bug #326685), and collisions with preserved
# libraries keyed by owning cpv.
# NOTE(review): elided listing — original lines are missing between entries
# (e.g. the try:/continue statements and isowned computation).
3132 def _collision_protect(self, srcroot, destroot, mypkglist,
3133 file_list, symlink_list):
# Build COLLISION_IGNORE patterns; directories get a trailing glob.
3137 collision_ignore = []
3138 for x in portage.util.shlex_split(
3139 self.settings.get("COLLISION_IGNORE", "")):
3140 if os.path.isdir(os.path.join(self._eroot, x.lstrip(os.sep))):
3141 x = normalize_path(x)
3143 collision_ignore.append(x)
3145 # For collisions with preserved libraries, the current package
3146 # will assume ownership and the libraries will be unregistered.
3147 if self.vartree.dbapi._plib_registry is None:
3148 # preserve-libs is entirely disabled
3153 plib_dict = self.vartree.dbapi._plib_registry.getPreservedLibs()
3156 for cpv, paths in plib_dict.items():
3157 plib_paths.update(paths)
3159 plib_cpv_map[f] = cpv
3160 plib_inodes = self._lstat_inode_map(plib_paths)
3162 plib_collisions = {}
3164 showMessage = self._display_merge
3167 symlink_collisions = []
# destroot parameter is ignored in favor of settings['ROOT'].
3168 destroot = self.settings['ROOT']
3169 showMessage(_(" %s checking %d files for package collisions\n") % \
3170 (colorize("GOOD", "*"), len(file_list) + len(symlink_list)))
3171 for i, (f, f_type) in enumerate(chain(
3172 ((f, "reg") for f in file_list),
3173 ((f, "sym") for f in symlink_list))):
# Progress message every 1000 files.
3174 if i % 1000 == 0 and i != 0:
3175 showMessage(_("%d files checked ...\n") % i)
3177 dest_path = normalize_path(
3178 os.path.join(destroot, f.lstrip(os.path.sep)))
3180 dest_lstat = os.lstat(dest_path)
3181 except EnvironmentError as e:
3182 if e.errno == errno.ENOENT:
3185 elif e.errno == errno.ENOTDIR:
3187 # A non-directory is in a location where this package
3188 # expects to have a directory.
# Walk up until the offending non-directory ancestor is found.
3190 parent_path = dest_path
3191 while len(parent_path) > len(destroot):
3192 parent_path = os.path.dirname(parent_path)
3194 dest_lstat = os.lstat(parent_path)
3196 except EnvironmentError as e:
3197 if e.errno != errno.ENOTDIR:
3201 raise AssertionError(
3202 "unable to find non-directory " + \
3203 "parent for '%s'" % dest_path)
3204 dest_path = parent_path
3205 f = os.path.sep + dest_path[len(destroot):]
3213 if stat.S_ISDIR(dest_lstat.st_mode):
3215 # This case is explicitly banned
3216 # by PMS (see bug #326685).
3217 symlink_collisions.append(f)
3218 collisions.append(f)
# Collisions with preserved libs are recorded separately and
# excluded from the normal collision list.
3221 plibs = plib_inodes.get((dest_lstat.st_dev, dest_lstat.st_ino))
3224 cpv = plib_cpv_map[path]
3225 paths = plib_collisions.get(cpv)
3228 plib_collisions[cpv] = paths
3230 # The current package will assume ownership and the
3231 # libraries will be unregistered, so exclude this
3232 # path from the normal collisions.
3236 full_path = os.path.join(destroot, f.lstrip(os.path.sep))
3237 for ver in mypkglist:
3241 if not isowned and self.isprotected(full_path):
3244 f_match = full_path[len(self._eroot)-1:]
3246 for pattern in collision_ignore:
3247 if fnmatch.fnmatch(f_match, pattern):
3251 collisions.append(f)
3252 return collisions, symlink_collisions, plib_collisions
# Purpose: build {(st_dev, st_ino): set(paths)} for the given iterable of
# root-relative paths, silently skipping paths that do not exist.
# NOTE(review): elided listing — the loop header, lstat call, and final
# return fall in the numbering gaps.
3254 def _lstat_inode_map(self, path_iter):
3256 Use lstat to create a map of the form:
3257 {(st_dev, st_ino) : set([path1, path2, ...])}
3258 Multiple paths may reference the same inode due to hardlinks.
3259 All lstat() calls are relative to self.myroot.
3264 root = self.settings['ROOT']
3267 path = os.path.join(root, f.lstrip(os.sep))
# ENOENT/ENOTDIR mean the path vanished or is unreachable: skip it.
3270 except OSError as e:
3271 if e.errno not in (errno.ENOENT, errno.ENOTDIR):
3275 key = (st.st_dev, st.st_ino)
3276 paths = inode_map.get(key)
3279 inode_map[key] = paths
# Purpose: scan the contents of the given installed instances for suid/sgid
# regular files whose hardlink count exceeds the number of links owned by
# these packages — a potential attack vector — and report them via eerror.
# NOTE(review): elided listing — original lines are missing between entries
# (e.g. the lstat call that assigns `s` and several continue statements).
3283 def _security_check(self, installed_instances):
3284 if not installed_instances:
3289 showMessage = self._display_merge
3292 for dblnk in installed_instances:
3293 file_paths.update(dblnk.getcontents())
3296 for i, path in enumerate(file_paths):
3300 _unicode_encode(path,
3301 encoding=_encodings['merge'], errors='strict')
3302 except UnicodeEncodeError:
3303 # The package appears to have been merged with a
3304 # different value of sys.getfilesystemencoding(),
3305 # so fall back to utf_8 if appropriate.
3307 _unicode_encode(path,
3308 encoding=_encodings['fs'], errors='strict')
3309 except UnicodeEncodeError:
3316 except OSError as e:
3317 if e.errno not in (errno.ENOENT, errno.ENOTDIR):
3321 if not stat.S_ISREG(s.st_mode):
# Deduplicate via realpath so hardlinked entries are counted once.
3323 path = os.path.realpath(path)
3324 if path in real_paths:
3326 real_paths.add(path)
3327 if s.st_nlink > 1 and \
3328 s.st_mode & (stat.S_ISUID | stat.S_ISGID):
3329 k = (s.st_dev, s.st_ino)
3330 inode_map.setdefault(k, []).append((path, s))
# An inode is suspicious when we saw fewer of its links than st_nlink
# reports, i.e. some hardlink lives outside these packages.
3331 suspicious_hardlinks = []
3332 for path_list in inode_map.values():
3333 path, s = path_list[0]
3334 if len(path_list) == s.st_nlink:
3335 # All hardlinks seem to be owned by this package.
3337 suspicious_hardlinks.append(path_list)
3338 if not suspicious_hardlinks:
3342 msg.append(_("suid/sgid file(s) "
3343 "with suspicious hardlink(s):"))
3345 for path_list in suspicious_hardlinks:
3346 for path, s in path_list:
3347 msg.append("\t%s" % path)
3349 msg.append(_("See the Gentoo Security Handbook "
3350 "guide for advice on how to proceed."))
3352 self._eerror("preinst", msg)
3356 def _eqawarn(self, phase, lines):
3357 self._elog("eqawarn", phase, lines)
3359 def _eerror(self, phase, lines):
3360 self._elog("eerror", phase, lines)
# Purpose: dispatch log lines to the named portage.elog.messages function,
# either directly (no scheduler) or captured and routed through the
# scheduler's output facility with background/log-path settings.
# NOTE(review): elided listing — the loop headers and `out` setup fall in
# the numbering gaps.
3362 def _elog(self, funcname, phase, lines):
3363 func = getattr(portage.elog.messages, funcname)
3364 if self._scheduler is None:
# Direct path: emit each line immediately.
3366 func(l, phase=phase, key=self.mycpv)
3368 background = self.settings.get("PORTAGE_BACKGROUND") == "1"
# In "subprocess" mode the log path is presumably handled by the
# parent process — TODO confirm against elided else-branch.
3370 if self.settings.get("PORTAGE_BACKGROUND") != "subprocess":
3371 log_path = self.settings.get("PORTAGE_LOG_FILE")
3374 func(line, phase=phase, key=self.mycpv, out=out)
3375 msg = out.getvalue()
3376 self._scheduler.output(msg,
3377 background=background, log_path=log_path)
# Purpose: flush accumulated elog messages. Without a pipe, process them
# in-process via elog_process(); with a pipe, serialize merged ebuild and
# python log entries as "funcname phase cpv line" records and write them to
# the parent process through self._pipe.
# NOTE(review): elided listing — `cpv` assignment, the else:, and several
# loop lines fall in the numbering gaps. `basestring` is a Python 2 name;
# portage's compatibility shim for it is defined at the (elided) file head.
3379 def _elog_process(self, phasefilter=None):
3381 if self._pipe is None:
3382 elog_process(cpv, self.settings, phasefilter=phasefilter)
3384 logdir = os.path.join(self.settings["T"], "logging")
3385 ebuild_logentries = collect_ebuild_messages(logdir)
3386 py_logentries = collect_messages(key=cpv).get(cpv, {})
3387 logentries = _merge_logentries(py_logentries, ebuild_logentries)
3396 for phase, messages in logentries.items():
3397 for key, lines in messages:
3398 funcname = funcnames[key]
3399 if isinstance(lines, basestring):
3402 for line in line.split('\n'):
3403 fields = (funcname, phase, cpv, line)
3404 str_buffer.append(' '.join(fields))
3405 str_buffer.append('\n')
3407 os.write(self._pipe, _unicode_encode(''.join(str_buffer)))
def _emerge_log(self, msg):
	"""Write *msg* to the emerge log (xterm-titles disabled)."""
	emergelog(False, msg)
3412 def treewalk(self, srcroot, destroot, inforoot, myebuild, cleanup=0,
3413 mydbapi=None, prev_mtimes=None, counter=None):
3416 This function does the following:
3418 calls self._preserve_libs if FEATURES=preserve-libs
3419 calls self._collision_protect if FEATURES=collision-protect
3420 calls doebuild(mydo=pkg_preinst)
3421 Merges the package to the livefs
3422 unmerges old version (if required)
3423 calls doebuild(mydo=pkg_postinst)
3426 @param srcroot: Typically this is ${D}
3427 @type srcroot: String (Path)
3428 @param destroot: ignored, self.settings['ROOT'] is used instead
3429 @type destroot: String (Path)
3430 @param inforoot: root of the vardb entry ?
3431 @type inforoot: String (Path)
3432 @param myebuild: path to the ebuild that we are processing
3433 @type myebuild: String (Path)
3434 @param mydbapi: dbapi which is handed to doebuild.
3435 @type mydbapi: portdbapi instance
3436 @param prev_mtimes: { Filename:mtime } mapping for env_update
3437 @type prev_mtimes: Dictionary
3443 secondhand is a list of symlinks that have been skipped due to their target
3444 not existing; we will merge these symlinks at a later time.
# Decode path arguments to unicode up front so all later path
# operations behave consistently. Note that the destroot parameter
# is deliberately ignored in favor of settings['ROOT'] (see docstring).
3449 srcroot = _unicode_decode(srcroot,
3450 encoding=_encodings['content'], errors='strict')
3451 destroot = self.settings['ROOT']
3452 inforoot = _unicode_decode(inforoot,
3453 encoding=_encodings['content'], errors='strict')
3454 myebuild = _unicode_decode(myebuild,
3455 encoding=_encodings['content'], errors='strict')
3457 showMessage = self._display_merge
3458 srcroot = normalize_path(srcroot).rstrip(os.path.sep) + os.path.sep
3460 if not os.path.isdir(srcroot):
3461 showMessage(_("!!! Directory Not Found: D='%s'\n") % srcroot,
3462 level=logging.ERROR, noiselevel=-1)
# Sanity-check CHOST and SLOT values recorded in the image's vardb
# info directory against the current configuration; mismatches are
# reported as QA notices rather than hard failures.
3466 for var_name in ('CHOST', 'SLOT'):
# CHOST is not meaningful for old-style virtual packages, so the
# stale file is removed instead of being compared.
3467 if var_name == 'CHOST' and self.cat == 'virtual':
3469 os.unlink(os.path.join(inforoot, var_name))
3476 f = io.open(_unicode_encode(
3477 os.path.join(inforoot, var_name),
3478 encoding=_encodings['fs'], errors='strict'),
3479 mode='r', encoding=_encodings['repo.content'],
3481 val = f.readline().strip()
3482 except EnvironmentError as e:
# A missing info file is tolerated; any other error is not.
3483 if e.errno != errno.ENOENT:
3491 if var_name == 'SLOT':
# Fall back to the SLOT from settings when the recorded value is
# empty, and rewrite the info file so the vdb entry is consistent.
3494 if not slot.strip():
3495 slot = self.settings.get(var_name, '')
3496 if not slot.strip():
3497 showMessage(_("!!! SLOT is undefined\n"),
3498 level=logging.ERROR, noiselevel=-1)
3500 write_atomic(os.path.join(inforoot, var_name), slot + '\n')
3502 if val != self.settings.get(var_name, ''):
3503 self._eqawarn('preinst',
3504 [_("QA Notice: Expected %(var_name)s='%(expected_value)s', got '%(actual_value)s'\n") % \
3505 {"var_name":var_name, "expected_value":self.settings.get(var_name, ''), "actual_value":val}])
3508 self._eerror("preinst", lines)
3510 if not os.path.exists(self.dbcatdir):
3511 ensure_dirs(self.dbcatdir)
3513 # NOTE: We use SLOT obtained from the inforoot
3514 # directory, in order to support USE=multislot.
3515 # Use _pkg_str discard the sub-slot part if necessary.
3516 slot = _pkg_str(self.mycpv, slot=slot).slot
3517 cp = self.mysplit[0]
3518 slot_atom = "%s:%s" % (cp, slot)
3520 # filter any old-style virtual matches
3521 slot_matches = [cpv for cpv in self.vartree.dbapi.match(slot_atom) \
3522 if cpv_getkey(cpv) == cp]
3524 if self.mycpv not in slot_matches and \
3525 self.vartree.dbapi.cpv_exists(self.mycpv):
3526 # handle multislot or unapplied slotmove
3527 slot_matches.append(self.mycpv)
# Build a dblink for every installed package occupying the same slot;
# these are candidates for unmerge/collision handling below.
3530 from portage import config
3531 for cur_cpv in slot_matches:
3532 # Clone the config in case one of these has to be unmerged since
3533 # we need it to have private ${T} etc... for things like elog.
3534 settings_clone = config(clone=self.settings)
3535 settings_clone.pop("PORTAGE_BUILDIR_LOCKED", None)
3536 settings_clone.reset()
3537 others_in_slot.append(dblink(self.cat, catsplit(cur_cpv)[1],
3538 settings=settings_clone,
3539 vartree=self.vartree, treetype="vartree",
3540 scheduler=self._scheduler, pipe=self._pipe))
3542 retval = self._security_check(others_in_slot)
3547 # Used by self.isprotected().
# Find the same-slot instance with the highest COUNTER; presumably
# max_dblnk tracks it alongside max_counter (assignment not visible
# here — confirm) and becomes self._installed_instance.
3550 for dblnk in others_in_slot:
3551 cur_counter = self.vartree.dbapi.cpv_counter(dblnk.mycpv)
3552 if cur_counter > max_counter:
3553 max_counter = cur_counter
3555 self._installed_instance = max_dblnk
3557 if self.settings.get("INSTALL_MASK") or \
3558 "nodoc" in self.settings.features or \
3559 "noinfo" in self.settings.features or \
3560 "noman" in self.settings.features:
3561 # Apply INSTALL_MASK before collision-protect, since it may
3562 # be useful to avoid collisions in some scenarios.
3563 phase = MiscFunctionsProcess(background=False,
3564 commands=["preinst_mask"], phase="preinst",
3565 scheduler=self._scheduler, settings=self.settings)
3569 # We check for unicode encoding issues after src_install. However,
3570 # the check must be repeated here for binary packages (it's
3571 # inexpensive since we call os.walk() here anyway).
3573 line_ending_re = re.compile('[\n\r]')
3574 srcroot_len = len(srcroot)
3575 ed_len = len(self.settings["ED"])
3579 unicode_error = False
3580 eagain_error = False
3584 paths_with_newlines = []
# Walk ${D}: rename any path that is not decodable in the 'merge'
# encoding to an ascii-safe backslash-escaped name, and collect
# regular files (myfilelist) and symlinks (mylinklist) to be merged.
3587 walk_iter = os.walk(srcroot, onerror=onerror)
3590 parent, dirs, files = next(walk_iter)
3591 except StopIteration:
3593 except OSError as e:
3594 if e.errno != errno.EAGAIN:
3596 # Observed with PyPy 1.8.
3601 parent = _unicode_decode(parent,
3602 encoding=_encodings['merge'], errors='strict')
3603 except UnicodeDecodeError:
# Directory name is not valid in the merge encoding: rename it in
# place to a replacement-char/backslash-escaped form and record it.
3604 new_parent = _unicode_decode(parent,
3605 encoding=_encodings['merge'], errors='replace')
3606 new_parent = _unicode_encode(new_parent,
3607 encoding='ascii', errors='backslashreplace')
3608 new_parent = _unicode_decode(new_parent,
3609 encoding=_encodings['merge'], errors='replace')
3610 os.rename(parent, new_parent)
3611 unicode_error = True
3612 unicode_errors.append(new_parent[ed_len:])
3617 fname = _unicode_decode(fname,
3618 encoding=_encodings['merge'], errors='strict')
3619 except UnicodeDecodeError:
# Same treatment for an undecodable file name.
3620 fpath = portage._os.path.join(
3621 parent.encode(_encodings['merge']), fname)
3622 new_fname = _unicode_decode(fname,
3623 encoding=_encodings['merge'], errors='replace')
3624 new_fname = _unicode_encode(new_fname,
3625 encoding='ascii', errors='backslashreplace')
3626 new_fname = _unicode_decode(new_fname,
3627 encoding=_encodings['merge'], errors='replace')
3628 new_fpath = os.path.join(parent, new_fname)
3629 os.rename(fpath, new_fpath)
3630 unicode_error = True
3631 unicode_errors.append(new_fpath[ed_len:])
3635 fpath = os.path.join(parent, fname)
3637 relative_path = fpath[srcroot_len:]
3639 if line_ending_re.search(relative_path) is not None:
3640 paths_with_newlines.append(relative_path)
3642 file_mode = os.lstat(fpath).st_mode
3643 if stat.S_ISREG(file_mode):
3644 myfilelist.append(relative_path)
3645 elif stat.S_ISLNK(file_mode):
3646 # Note: os.walk puts symlinks to directories in the "dirs"
3647 # list and it does not traverse them since that could lead
3648 # to an infinite recursion loop.
3649 mylinklist.append(relative_path)
# After a rename the walk is restarted (loop condition not visible
# here — confirm); report all renamed paths once the walk is clean.
3654 if not (unicode_error or eagain_error):
3658 self._elog("eqawarn", "preinst",
3659 _merge_unicode_error(unicode_errors))
# File names containing \n or \r cannot be represented in CONTENTS
# (a line-oriented format), so abort the merge.
3661 if paths_with_newlines:
3663 msg.append(_("This package installs one or more files containing line ending characters:"))
3665 paths_with_newlines.sort()
3666 for f in paths_with_newlines:
3667 msg.append("\t/%s" % (f.replace("\n", "\\n").replace("\r", "\\r")))
3669 msg.append(_("package %s NOT merged") % self.mycpv)
3674 # If there are no files to merge, and an installed package in the same
3675 # slot has files, it probably means that something went wrong.
3676 if self.settings.get("PORTAGE_PACKAGE_EMPTY_ABORT") == "1" and \
3677 not myfilelist and not mylinklist and others_in_slot:
3678 installed_files = None
3679 for other_dblink in others_in_slot:
3680 installed_files = other_dblink.getcontents()
3681 if not installed_files:
3683 from textwrap import wrap
3687 "new_cpv":self.mycpv,
3688 "old_cpv":other_dblink.mycpv
3690 msg.extend(wrap(_("The '%(new_cpv)s' package will not install "
3691 "any files, but the currently installed '%(old_cpv)s'"
3692 " package has the following files: ") % d, wrap_width))
3694 msg.extend(sorted(installed_files))
3696 msg.append(_("package %s NOT merged") % self.mycpv)
3699 _("Manually run `emerge --unmerge =%s` if you "
3700 "really want to remove the above files. Set "
3701 "PORTAGE_PACKAGE_EMPTY_ABORT=\"0\" in "
3702 "/etc/make.conf if you do not want to "
3703 "abort in cases like this.") % other_dblink.mycpv,
3709 # Make sure the ebuild environment is initialized and that ${T}/elog
3710 # exists for logging of collision-protect eerror messages.
3711 if myebuild is None:
3712 myebuild = os.path.join(inforoot, self.pkg + ".ebuild")
3713 doebuild_environment(myebuild, "preinst",
3714 settings=self.settings, db=mydbapi)
3715 self.settings["REPLACING_VERSIONS"] = " ".join(
3716 [portage.versions.cpv_getversion(other.mycpv)
3717 for other in others_in_slot])
3718 prepare_build_dirs(settings=self.settings, cleanup=cleanup)
3720 # check for package collisions
3721 blockers = self._blockers
3722 if blockers is None:
3724 collisions, symlink_collisions, plib_collisions = \
3725 self._collision_protect(srcroot, destroot,
3726 others_in_slot + blockers, myfilelist, mylinklist)
3728 if symlink_collisions:
3729 # Symlink collisions need to be distinguished from other types
3730 # of collisions, in order to avoid confusion (see bug #409359).
3731 msg = _("Package '%s' has one or more collisions "
3732 "between symlinks and directories, which is explicitly "
3733 "forbidden by PMS section 13.4 (see bug #326685):") % \
3734 (self.settings.mycpv,)
3735 msg = textwrap.wrap(msg, 70)
3737 for f in symlink_collisions:
3738 msg.append("\t%s" % os.path.join(destroot,
3739 f.lstrip(os.path.sep)))
3741 self._elog("eerror", "preinst", msg)
# Report ordinary file collisions. The advice text varies with the
# active FEATURES (collision-protect / protect-owned) and verbosity.
3744 collision_protect = "collision-protect" in self.settings.features
3745 protect_owned = "protect-owned" in self.settings.features
3746 msg = _("This package will overwrite one or more files that"
3747 " may belong to other packages (see list below).")
3748 if not (collision_protect or protect_owned):
3749 msg += _(" Add either \"collision-protect\" or"
3750 " \"protect-owned\" to FEATURES in"
3751 " make.conf if you would like the merge to abort"
3752 " in cases like this. See the make.conf man page for"
3753 " more information about these features.")
3754 if self.settings.get("PORTAGE_QUIET") != "1":
3755 msg += _(" You can use a command such as"
3756 " `portageq owners / <filename>` to identify the"
3757 " installed package that owns a file. If portageq"
3758 " reports that only one package owns a file then do NOT"
3759 " file a bug report. A bug report is only useful if it"
3760 " identifies at least two or more packages that are known"
3761 " to install the same file(s)."
3762 " If a collision occurs and you"
3763 " can not explain where the file came from then you"
3764 " should simply ignore the collision since there is not"
3765 " enough information to determine if a real problem"
3766 " exists. Please do NOT file a bug report at"
3767 " http://bugs.gentoo.org unless you report exactly which"
3768 " two packages install the same file(s). Once again,"
3769 " please do NOT file a bug report unless you have"
3770 " completely understood the above message.")
3772 self.settings["EBUILD_PHASE"] = "preinst"
3773 from textwrap import wrap
3775 if collision_protect:
3777 msg.append(_("package %s NOT merged") % self.settings.mycpv)
3779 msg.append(_("Detected file collision(s):"))
3782 for f in collisions:
3783 msg.append("\t%s" % \
3784 os.path.join(destroot, f.lstrip(os.path.sep)))
3789 if collision_protect or protect_owned or symlink_collisions:
3792 msg.append(_("Searching all installed"
3793 " packages for file collisions..."))
3795 msg.append(_("Press Ctrl-C to Stop"))
3799 if len(collisions) > 20:
3800 # get_owners is slow for large numbers of files, so
3801 # don't look them all up.
3802 collisions = collisions[:20]
3805 owners = self.vartree.dbapi._owners.get_owners(collisions)
3806 self.vartree.dbapi.flush_cache()
3810 for pkg, owned_files in owners.items():
3813 msg.append("%s" % cpv)
3814 for f in sorted(owned_files):
3815 msg.append("\t%s" % os.path.join(destroot,
3816 f.lstrip(os.path.sep)))
3821 eerror([_("None of the installed"
3822 " packages claim the file(s)."), ""])
3824 symlink_abort_msg =_("Package '%s' NOT merged since it has "
3825 "one or more collisions between symlinks and directories, "
3826 "which is explicitly forbidden by PMS section 13.4 "
3827 "(see bug #326685).")
3829 # The explanation about the collision and how to solve
3830 # it may not be visible via a scrollback buffer, especially
3831 # if the number of file collisions is large. Therefore,
3832 # show a summary at the end.
3834 if symlink_collisions:
3836 msg = symlink_abort_msg % (self.settings.mycpv,)
3837 elif collision_protect:
3839 msg = _("Package '%s' NOT merged due to file collisions.") % \
3841 elif protect_owned and owners:
3843 msg = _("Package '%s' NOT merged due to file collisions.") % \
3846 msg = _("Package '%s' merged despite file collisions.") % \
3848 msg += _(" If necessary, refer to your elog "
3849 "messages for the whole content of the above message.")
3850 eerror(wrap(msg, 70))
3855 # The merge process may move files out of the image directory,
3856 # which causes invalidation of the .installed flag.
3858 os.unlink(os.path.join(
3859 os.path.dirname(normalize_path(srcroot)), ".installed"))
3860 except OSError as e:
3861 if e.errno != errno.ENOENT:
# Write the new vdb entry into a temporary directory first; it is
# moved to the final dbpkgdir only after the file merge succeeds.
3865 self.dbdir = self.dbtmpdir
3867 ensure_dirs(self.dbtmpdir)
# A strictly lower version than the installed instance marks this
# merge as a downgrade (flag assignment not visible here — confirm);
# downgrade also forces --noconfmem behavior further below.
3870 if self._installed_instance is not None and \
3871 vercmp(self.mycpv.version,
3872 self._installed_instance.mycpv.version) < 0:
3875 if self._installed_instance is not None:
3876 rval = self._pre_merge_backup(self._installed_instance, downgrade)
3877 if rval != os.EX_OK:
3878 showMessage(_("!!! FAILED preinst: ") +
3879 "quickpkg: %s\n" % rval,
3880 level=logging.ERROR, noiselevel=-1)
3883 # run preinst script
3884 showMessage(_(">>> Merging %(cpv)s to %(destroot)s\n") % \
3885 {"cpv":self.mycpv, "destroot":destroot})
3886 phase = EbuildPhase(background=False, phase="preinst",
3887 scheduler=self._scheduler, settings=self.settings)
3891 # XXX: Decide how to handle failures here.
3893 showMessage(_("!!! FAILED preinst: ")+str(a)+"\n",
3894 level=logging.ERROR, noiselevel=-1)
3897 # copy "info" files (like SLOT, CFLAGS, etc.) into the database
3898 for x in os.listdir(inforoot):
3899 self.copyfile(inforoot+"/"+x)
3901 # write local package counter for recording
3903 counter = self.vartree.dbapi.counter_tick(mycpv=self.mycpv)
3904 f = io.open(_unicode_encode(os.path.join(self.dbtmpdir, 'COUNTER'),
3905 encoding=_encodings['fs'], errors='strict'),
3906 mode='w', encoding=_encodings['repo.content'],
3907 errors='backslashreplace')
3908 f.write(_unicode_decode(str(counter)))
3911 self.updateprotect()
3913 #if we have a file containing previously-merged config file md5sums, grab it.
3914 self.vartree.dbapi._fs_lock()
3916 # Always behave like --noconfmem is enabled for downgrades
3917 # so that people who don't know about this option are less
3918 # likely to get confused when doing upgrade/downgrade cycles.
3919 cfgfiledict = grabdict(self.vartree.dbapi._conf_mem_file)
3920 if "NOCONFMEM" in self.settings or downgrade:
3921 cfgfiledict["IGNORE"]=1
3923 cfgfiledict["IGNORE"]=0
# Perform the actual livefs merge of ${D}; the vdb filesystem lock
# is held for the duration (released below).
3925 rval = self._merge_contents(srcroot, destroot, cfgfiledict)
3926 if rval != os.EX_OK:
3929 self.vartree.dbapi._fs_unlock()
3931 # These caches are populated during collision-protect and the data
3932 # they contain is now invalid. It's very important to invalidate
3933 # the contents_inodes cache so that FEATURES=unmerge-orphans
3934 # doesn't unmerge anything that belongs to this package that has
3936 for dblnk in others_in_slot:
3937 dblnk._clear_contents_cache()
3938 self._clear_contents_cache()
3940 linkmap = self.vartree.dbapi._linkmap
3941 plib_registry = self.vartree.dbapi._plib_registry
3942 # We initialize preserve_paths to an empty set rather
3943 # than None here because it plays an important role
3944 # in prune_plib_registry logic by serving to indicate
3945 # that we have a replacement for a package that's
3948 preserve_paths = set()
3950 if not (self._linkmap_broken or linkmap is None or
3951 plib_registry is None):
3952 self.vartree.dbapi._fs_lock()
3953 plib_registry.lock()
3955 plib_registry.load()
3956 needed = os.path.join(inforoot, linkmap._needed_aux_key)
3957 self._linkmap_rebuild(include_file=needed)
3959 # Preserve old libs if they are still in use
3960 # TODO: Handle cases where the previous instance
3961 # has already been uninstalled but it still has some
3962 # preserved libraries in the registry that we may
3963 # want to preserve here.
3964 preserve_paths = self._find_libs_to_preserve()
3966 plib_registry.unlock()
3967 self.vartree.dbapi._fs_unlock()
3970 self._add_preserve_libs_to_contents(preserve_paths)
3972 # If portage is reinstalling itself, remove the old
3973 # version now since we want to use the temporary
3974 # PORTAGE_BIN_PATH that will be removed when we return.
3975 reinstall_self = False
3976 if self.myroot == "/" and \
3977 match_from_list(PORTAGE_PACKAGE_ATOM, [self.mycpv]):
3978 reinstall_self = True
3980 emerge_log = self._emerge_log
3982 # If we have any preserved libraries then autoclean
3983 # is forced so that preserve-libs logic doesn't have
3984 # to account for the additional complexity of the
3985 # AUTOCLEAN=no mode.
3986 autoclean = self.settings.get("AUTOCLEAN", "yes") == "yes" \
3990 emerge_log(_(" >>> AUTOCLEAN: %s") % (slot_atom,))
# Unmerge the replaced same-slot instances (unless AUTOCLEAN is
# disabled and this is not a self-reinstall).
3992 others_in_slot.append(self) # self has just been merged
3993 for dblnk in list(others_in_slot):
3996 if not (autoclean or dblnk.mycpv == self.mycpv or reinstall_self):
3998 showMessage(_(">>> Safely unmerging already-installed instance...\n"))
3999 emerge_log(_(" === Unmerging... (%s)") % (dblnk.mycpv,))
4000 others_in_slot.remove(dblnk) # dblnk will unmerge itself now
4001 dblnk._linkmap_broken = self._linkmap_broken
4002 dblnk.settings["REPLACED_BY_VERSION"] = portage.versions.cpv_getversion(self.mycpv)
4003 dblnk.settings.backup_changes("REPLACED_BY_VERSION")
4004 unmerge_rval = dblnk.unmerge(ldpath_mtimes=prev_mtimes,
4005 others_in_slot=others_in_slot, needed=needed,
4006 preserve_paths=preserve_paths)
4007 dblnk.settings.pop("REPLACED_BY_VERSION", None)
4009 if unmerge_rval == os.EX_OK:
4010 emerge_log(_(" >>> unmerge success: %s") % (dblnk.mycpv,))
4012 emerge_log(_(" !!! unmerge FAILURE: %s") % (dblnk.mycpv,))
4016 # TODO: Check status and abort if necessary.
4020 showMessage(_(">>> Original instance of package unmerged safely.\n"))
4022 if len(others_in_slot) > 1:
4023 showMessage(colorize("WARN", _("WARNING:"))
4024 + _(" AUTOCLEAN is disabled. This can cause serious"
4025 " problems due to overlapping packages.\n"),
4026 level=logging.WARN, noiselevel=-1)
4028 # We hold both directory locks.
# Atomically promote the temporary vdb entry to its final location.
4029 self.dbdir = self.dbpkgdir
4033 _movefile(self.dbtmpdir, self.dbpkgdir, mysettings=self.settings)
4037 # Check for file collisions with blocking packages
4038 # and remove any colliding files from their CONTENTS
4039 # since they now belong to this package.
4040 self._clear_contents_cache()
4041 contents = self.getcontents()
4042 destroot_len = len(destroot) - 1
4045 for blocker in blockers:
4046 self.vartree.dbapi.removeFromContents(blocker, iter(contents),
4047 relative_paths=False)
4051 plib_registry = self.vartree.dbapi._plib_registry
4053 self.vartree.dbapi._fs_lock()
4054 plib_registry.lock()
4056 plib_registry.load()
4059 # keep track of the libs we preserved
4060 plib_registry.register(self.mycpv, slot, counter,
4061 sorted(preserve_paths))
4063 # Unregister any preserved libs that this package has overwritten
4064 # and update the contents of the packages that owned them.
4065 plib_dict = plib_registry.getPreservedLibs()
4066 for cpv, paths in plib_collisions.items():
4067 if cpv not in plib_dict:
4069 has_vdb_entry = False
4070 if cpv != self.mycpv:
4071 # If we've replaced another instance with the
4072 # same cpv then the vdb entry no longer belongs
4073 # to it, so we'll have to get the slot and counter
4074 # from plib_registry._data instead.
4075 self.vartree.dbapi.lock()
4078 slot, counter = self.vartree.dbapi.aux_get(
4079 cpv, ["SLOT", "COUNTER"])
4083 has_vdb_entry = True
4084 self.vartree.dbapi.removeFromContents(
4087 self.vartree.dbapi.unlock()
4089 if not has_vdb_entry:
4090 # It's possible for previously unmerged packages
4091 # to have preserved libs in the registry, so try
4092 # to retrieve the slot and counter from there.
4093 has_registry_entry = False
4094 for plib_cps, (plib_cpv, plib_counter, plib_paths) in \
4095 plib_registry._data.items():
4099 cp, slot = plib_cps.split(":", 1)
4102 counter = plib_counter
4103 has_registry_entry = True
4106 if not has_registry_entry:
# Re-register the owner with only the paths it still provides.
4109 remaining = [f for f in plib_dict[cpv] if f not in paths]
4110 plib_registry.register(cpv, slot, counter, remaining)
4112 plib_registry.store()
4114 plib_registry.unlock()
4115 self.vartree.dbapi._fs_unlock()
4117 self.vartree.dbapi._add(self)
4118 contents = self.getcontents()
4121 self.settings["PORTAGE_UPDATE_ENV"] = \
4122 os.path.join(self.dbpkgdir, "environment.bz2")
4123 self.settings.backup_changes("PORTAGE_UPDATE_ENV")
# Run pkg_postinst; a failure is reported but does not abort (see
# the comment below about the phase return code).
4125 phase = EbuildPhase(background=False, phase="postinst",
4126 scheduler=self._scheduler, settings=self.settings)
4130 showMessage(_(">>> %s merged.\n") % self.mycpv)
4132 self.settings.pop("PORTAGE_UPDATE_ENV", None)
4135 # It's stupid to bail out here, so keep going regardless of
4136 # phase return code.
4137 showMessage(_("!!! FAILED postinst: ")+str(a)+"\n",
4138 level=logging.ERROR, noiselevel=-1)
4140 #update environment settings, library paths. DO NOT change symlinks.
4142 target_root=self.settings['ROOT'], prev_mtimes=prev_mtimes,
4143 contents=contents, env=self.settings,
4144 writemsg_level=self._display_merge, vardbapi=self.vartree.dbapi)
4146 # For gcc upgrades, preserved libs have to be removed after the
4147 # the library path has been updated.
4148 self._prune_plib_registry()
4152 def _new_backup_path(self, p):
4154 The works for any type path, such as a regular file, symlink,
4155 or directory. The parent directory is assumed to exist.
4156 The returned filename is of the form p + '.backup.' + x, where
4157 x guarantees that the returned path does not exist yet.
# Candidate backup name: counter x is zero-padded to 4 digits
# (e.g. 'foo.backup.0000'); presumably x is advanced by a loop until
# the candidate does not exist — the loop is not visible here, confirm.
4164 backup_p = p + '.backup.' + str(x).rjust(4, '0')
4172 def _merge_contents(self, srcroot, destroot, cfgfiledict):
# Snapshot confmem so the on-disk file is rewritten below only if
# this merge actually changed it.
4174 cfgfiledict_orig = cfgfiledict.copy()
4176 # open CONTENTS file (possibly overwriting old one) for recording
4177 # Use atomic_ofstream for automatic coercion of raw bytes to
4178 # unicode, in order to prevent TypeError when writing raw bytes
4179 # to TextIOWrapper with python2.
4180 outfile = atomic_ofstream(_unicode_encode(
4181 os.path.join(self.dbtmpdir, 'CONTENTS'),
4182 encoding=_encodings['fs'], errors='strict'),
4183 mode='w', encoding=_encodings['repo.content'],
4184 errors='backslashreplace')
4186 # Don't bump mtimes on merge since some application require
4187 # preservation of timestamps. This means that the unmerge phase must
4188 # check to see if file belongs to an installed instance in the same
4192 # set umask to 0 for merging; back up umask, save old one in prevmask (since this is a global change)
4193 prevmask = os.umask(0)
4196 # we do a first merge; this will recurse through all files in our srcroot but also build up a
4197 # "second hand" of symlinks to merge later
4198 if self.mergeme(srcroot, destroot, outfile, secondhand,
4199 self.settings["EPREFIX"].lstrip(os.sep), cfgfiledict, mymtime):
4202 # now, it's time for dealing our second hand; we'll loop until we can't merge anymore. The rest are
4203 # broken symlinks. We'll merge them too.
# lastlen detects a pass that made no progress: once the secondhand
# list stops shrinking, whatever remains is broken/circular and is
# handled by the forced merge below.
4205 while len(secondhand) and len(secondhand)!=lastlen:
4206 # clear the thirdhand. Anything from our second hand that
4207 # couldn't get merged will be added to thirdhand.
4210 if self.mergeme(srcroot, destroot, outfile, thirdhand,
4211 secondhand, cfgfiledict, mymtime):
4215 lastlen = len(secondhand)
4217 # our thirdhand now becomes our secondhand. It's ok to throw
4218 # away secondhand since thirdhand contains all the stuff that
4219 # couldn't be merged.
4220 secondhand = thirdhand
4223 # force merge of remaining symlinks (broken or circular; oh well)
# Passing secondhand=None puts mergeme into "force" mode.
4224 if self.mergeme(srcroot, destroot, outfile, None,
4225 secondhand, cfgfiledict, mymtime):
4231 #if we opened it, close it
4235 # write out our collection of md5sums
4236 if cfgfiledict != cfgfiledict_orig:
# IGNORE is a runtime flag (set in treewalk), not persistent data.
4237 cfgfiledict.pop("IGNORE", None)
4239 writedict(cfgfiledict, self.vartree.dbapi._conf_mem_file)
4240 except InvalidLocation:
# The directory holding the conf-mem file may not exist yet;
# create the standard directories and retry once.
4241 self.settings._init_dirs()
4242 writedict(cfgfiledict, self.vartree.dbapi._conf_mem_file)
4246 def mergeme(self, srcroot, destroot, outfile, secondhand, stufftomerge, cfgfiledict, thismtime):
4249 This function handles actual merging of the package contents to the livefs.
4250 It also handles config protection.
4252 @param srcroot: Where are we copying files from (usually ${D})
4253 @type srcroot: String (Path)
4254 @param destroot: Typically ${ROOT}
4255 @type destroot: String (Path)
4256 @param outfile: File to log operations to
4257 @type outfile: File Object
4258 @param secondhand: A set of items to merge in pass two (usually
4259 or symlinks that point to non-existing files that may get merged later)
4260 @type secondhand: List
4261 @param stufftomerge: Either a diretory to merge, or a list of items.
4262 @type stufftomerge: String or List
4263 @param cfgfiledict: { File:mtime } mapping for config_protected files
4264 @type cfgfiledict: Dictionary
4265 @param thismtime: The current time (typically long(time.time())
4266 @type thismtime: Long
4267 @rtype: None or Boolean
4274 showMessage = self._display_merge
4275 writemsg = self._display_merge
4280 srcroot = normalize_path(srcroot).rstrip(sep) + sep
4281 destroot = normalize_path(destroot).rstrip(sep) + sep
4282 calc_prelink = "prelink-checksums" in self.settings.features
4284 protect_if_modified = \
4285 "config-protect-if-modified" in self.settings.features and \
4286 self._installed_instance is not None
4288 # this is supposed to merge a list of files. There will be 2 forms of argument passing.
4289 if isinstance(stufftomerge, basestring):
4290 #A directory is specified. Figure out protection paths, listdir() it and process it.
4291 mergelist = os.listdir(join(srcroot, stufftomerge))
4292 offset = stufftomerge
4294 mergelist = stufftomerge
4297 for i, x in enumerate(mergelist):
4299 mysrc = join(srcroot, offset, x)
4300 mydest = join(destroot, offset, x)
4301 # myrealdest is mydest without the $ROOT prefix (makes a difference if ROOT!="/")
4302 myrealdest = join(sep, offset, x)
4303 # stat file once, test using S_* macros many times (faster that way)
4304 mystat = os.lstat(mysrc)
4305 mymode = mystat[stat.ST_MODE]
4306 # handy variables; mydest is the target object on the live filesystems;
4307 # mysrc is the source object in the temporary install dir
4309 mydstat = os.lstat(mydest)
4310 mydmode = mydstat.st_mode
4311 except OSError as e:
4312 if e.errno != errno.ENOENT:
4315 #dest file doesn't exist
4319 if stat.S_ISLNK(mymode):
4320 # we are merging a symbolic link
4321 # The file name of mysrc and the actual file that it points to
4322 # will have earlier been forcefully converted to the 'merge'
4323 # encoding if necessary, but the content of the symbolic link
4324 # may need to be forcefully converted here.
4325 myto = _os.readlink(_unicode_encode(mysrc,
4326 encoding=_encodings['merge'], errors='strict'))
4328 myto = _unicode_decode(myto,
4329 encoding=_encodings['merge'], errors='strict')
4330 except UnicodeDecodeError:
4331 myto = _unicode_decode(myto, encoding=_encodings['merge'],
4333 myto = _unicode_encode(myto, encoding='ascii',
4334 errors='backslashreplace')
4335 myto = _unicode_decode(myto, encoding=_encodings['merge'],
4338 os.symlink(myto, mysrc)
4340 # Pass in the symlink target in order to bypass the
4341 # os.readlink() call inside abssymlink(), since that
4342 # call is unsafe if the merge encoding is not ascii
4343 # or utf_8 (see bug #382021).
4344 myabsto = abssymlink(mysrc, target=myto)
4346 if myabsto.startswith(srcroot):
4347 myabsto = myabsto[len(srcroot):]
4348 myabsto = myabsto.lstrip(sep)
4349 if self.settings and self.settings["D"]:
4350 if myto.startswith(self.settings["D"]):
4351 myto = myto[len(self.settings["D"])-1:]
4352 # myrealto contains the path of the real file to which this symlink points.
4353 # we can simply test for existence of this file to see if the target has been merged yet
4354 myrealto = normalize_path(os.path.join(destroot, myabsto))
4357 if stat.S_ISDIR(mydmode):
4358 # we can't merge a symlink over a directory
4359 newdest = self._new_backup_path(mydest)
4362 msg.append(_("Installation of a symlink is blocked by a directory:"))
4363 msg.append(" '%s'" % mydest)
4364 msg.append(_("This symlink will be merged with a different name:"))
4365 msg.append(" '%s'" % newdest)
4367 self._eerror("preinst", msg)
4370 elif not stat.S_ISLNK(mydmode):
4371 if os.path.exists(mysrc) and stat.S_ISDIR(os.stat(mysrc)[stat.ST_MODE]):
4372 # Kill file blocking installation of symlink to dir #71787
4374 elif self.isprotected(mydest):
4375 # Use md5 of the target in ${D} if it exists...
4377 newmd5 = perform_md5(join(srcroot, myabsto))
4378 except FileNotFound:
4379 # Maybe the target is merged already.
4381 newmd5 = perform_md5(myrealto)
4382 except FileNotFound:
4384 mydest = new_protect_filename(mydest, newmd5=newmd5)
4386 # if secondhand is None it means we're operating in "force" mode and should not create a second hand.
4387 if (secondhand != None) and (not os.path.exists(myrealto)):
4388 # either the target directory doesn't exist yet or the target file doesn't exist -- or
4389 # the target is a broken symlink. We will add this file to our "second hand" and merge
4391 secondhand.append(mysrc[len(srcroot):])
4393 # unlinking no longer necessary; "movefile" will overwrite symlinks atomically and correctly
4394 mymtime = movefile(mysrc, mydest, newmtime=thismtime,
4395 sstat=mystat, mysettings=self.settings,
4396 encoding=_encodings['merge'])
4398 showMessage(">>> %s -> %s\n" % (mydest, myto))
4399 outfile.write("sym "+myrealdest+" -> "+myto+" "+str(mymtime)+"\n")
4401 showMessage(_("!!! Failed to move file.\n"),
4402 level=logging.ERROR, noiselevel=-1)
4403 showMessage("!!! %s -> %s\n" % (mydest, myto),
4404 level=logging.ERROR, noiselevel=-1)
4406 elif stat.S_ISDIR(mymode):
4407 # we are merging a directory
4409 # destination exists
4412 # Save then clear flags on dest.
4413 dflags = mydstat.st_flags
4415 bsd_chflags.lchflags(mydest, 0)
4417 if not os.access(mydest, os.W_OK):
4418 pkgstuff = pkgsplit(self.pkg)
4419 writemsg(_("\n!!! Cannot write to '%s'.\n") % mydest, noiselevel=-1)
4420 writemsg(_("!!! Please check permissions and directories for broken symlinks.\n"))
4421 writemsg(_("!!! You may start the merge process again by using ebuild:\n"))
4422 writemsg("!!! ebuild "+self.settings["PORTDIR"]+"/"+self.cat+"/"+pkgstuff[0]+"/"+self.pkg+".ebuild merge\n")
4423 writemsg(_("!!! And finish by running this: env-update\n\n"))
4426 if stat.S_ISDIR(mydmode) or \
4427 (stat.S_ISLNK(mydmode) and os.path.isdir(mydest)):
4428 # a symlink to an existing directory will work for us; keep it:
4429 showMessage("--- %s/\n" % mydest)
4431 bsd_chflags.lchflags(mydest, dflags)
4433 # a non-directory and non-symlink-to-directory. Won't work for us. Move out of the way.
4434 backup_dest = self._new_backup_path(mydest)
4437 msg.append(_("Installation of a directory is blocked by a file:"))
4438 msg.append(" '%s'" % mydest)
4439 msg.append(_("This file will be renamed to a different name:"))
4440 msg.append(" '%s'" % backup_dest)
4442 self._eerror("preinst", msg)
4443 if movefile(mydest, backup_dest,
4444 mysettings=self.settings,
4445 encoding=_encodings['merge']) is None:
4447 showMessage(_("bak %s %s.backup\n") % (mydest, mydest),
4448 level=logging.ERROR, noiselevel=-1)
4449 #now create our directory
4451 if self.settings.selinux_enabled():
4452 _selinux_merge.mkdir(mydest, mysrc)
4455 except OSError as e:
4456 # Error handling should be equivalent to
4457 # portage.util.ensure_dirs() for cases
4459 if e.errno in (errno.EEXIST,):
4461 elif os.path.isdir(mydest):
4468 bsd_chflags.lchflags(mydest, dflags)
4469 os.chmod(mydest, mystat[0])
4470 os.chown(mydest, mystat[4], mystat[5])
4471 showMessage(">>> %s/\n" % mydest)
4474 #destination doesn't exist
4475 if self.settings.selinux_enabled():
4476 _selinux_merge.mkdir(mydest, mysrc)
4479 except OSError as e:
4480 # Error handling should be equivalent to
4481 # portage.util.ensure_dirs() for cases
4483 if e.errno in (errno.EEXIST,):
4485 elif os.path.isdir(mydest):
4490 os.chmod(mydest, mystat[0])
4491 os.chown(mydest, mystat[4], mystat[5])
4492 showMessage(">>> %s/\n" % mydest)
4493 outfile.write("dir "+myrealdest+"\n")
4494 # recurse and merge this directory
4495 if self.mergeme(srcroot, destroot, outfile, secondhand,
4496 join(offset, x), cfgfiledict, thismtime):
4498 elif stat.S_ISREG(mymode):
4499 # we are merging a regular file
4500 mymd5 = perform_md5(mysrc, calc_prelink=calc_prelink)
4501 # calculate config file protection stuff
4502 mydestdir = os.path.dirname(mydest)
4506 protected = self.isprotected(mydest)
4508 # destination file exists
4510 if stat.S_ISDIR(mydmode):
4511 # install of destination is blocked by an existing directory with the same name
4512 newdest = self._new_backup_path(mydest)
4515 msg.append(_("Installation of a regular file is blocked by a directory:"))
4516 msg.append(" '%s'" % mydest)
4517 msg.append(_("This file will be merged with a different name:"))
4518 msg.append(" '%s'" % newdest)
4520 self._eerror("preinst", msg)
4523 elif stat.S_ISREG(mydmode) or (stat.S_ISLNK(mydmode) and os.path.exists(mydest) and stat.S_ISREG(os.stat(mydest)[stat.ST_MODE])):
4524 # install of destination is blocked by an existing regular file,
4525 # or by a symlink to an existing regular file;
4526 # now, config file management may come into play.
4527 # we only need to tweak mydest if cfg file management is in play.
4529 destmd5 = perform_md5(mydest, calc_prelink=calc_prelink)
4530 if protect_if_modified:
4532 self._installed_instance._match_contents(myrealdest)
4534 inst_info = self._installed_instance.getcontents()[contents_key]
4535 if inst_info[0] == "obj" and inst_info[2] == destmd5:
4539 # we have a protection path; enable config file management.
4541 if mymd5 == destmd5:
4542 #file already in place; simply update mtimes of destination
4545 if mymd5 == cfgfiledict.get(myrealdest, [None])[0]:
4546 """ An identical update has previously been
4547 merged. Skip it unless the user has chosen
4549 moveme = cfgfiledict["IGNORE"]
4550 cfgprot = cfgfiledict["IGNORE"]
4553 mymtime = mystat[stat.ST_MTIME]
4558 # Merging a new file, so update confmem.
4559 cfgfiledict[myrealdest] = [mymd5]
4560 elif destmd5 == cfgfiledict.get(myrealdest, [None])[0]:
4561 """A previously remembered update has been
4562 accepted, so it is removed from confmem."""
4563 del cfgfiledict[myrealdest]
4566 mydest = new_protect_filename(mydest, newmd5=mymd5)
4568 # whether config protection or not, we merge the new file the
4569 # same way. Unless moveme=0 (blocking directory)
4571 # Create hardlinks only for source files that already exist
4572 # as hardlinks (having identical st_dev and st_ino).
4573 hardlink_key = (mystat.st_dev, mystat.st_ino)
4575 hardlink_candidates = self._hardlink_merge_map.get(hardlink_key)
4576 if hardlink_candidates is None:
4577 hardlink_candidates = []
4578 self._hardlink_merge_map[hardlink_key] = hardlink_candidates
4580 mymtime = movefile(mysrc, mydest, newmtime=thismtime,
4581 sstat=mystat, mysettings=self.settings,
4582 hardlink_candidates=hardlink_candidates,
4583 encoding=_encodings['merge'])
4586 hardlink_candidates.append(mydest)
4590 outfile.write("obj "+myrealdest+" "+mymd5+" "+str(mymtime)+"\n")
4591 showMessage("%s %s\n" % (zing,mydest))
4593 # we are merging a fifo or device node
4596 # destination doesn't exist
4597 if movefile(mysrc, mydest, newmtime=thismtime,
4598 sstat=mystat, mysettings=self.settings,
4599 encoding=_encodings['merge']) is not None:
4603 if stat.S_ISFIFO(mymode):
4604 outfile.write("fif %s\n" % myrealdest)
4606 outfile.write("dev %s\n" % myrealdest)
4607 showMessage(zing + " " + mydest + "\n")
# Public merge entry point for this dblink: wraps treewalk() with vdb
# locking (skipped for parallel-install), runs the success/die hook
# phase, processes elog messages, optionally runs the "clean" phase,
# and finally clears the preserve-libs linkmap cache and bumps mtimes.
# NOTE(review): listing is elided -- lock/unlock calls, try/finally and
# several else branches of the original body are not visible here.
4609 def merge(self, mergeroot, inforoot, myroot=None, myebuild=None, cleanup=0,
4610 mydbapi=None, prev_mtimes=None, counter=None):
4612 @param myroot: ignored, self._eroot is used instead
# Without FEATURES=parallel-install the whole merge runs serialized
# (presumably under an exclusive vdb lock -- the lock call is elided).
4616 parallel_install = "parallel-install" in self.settings.features
4617 if not parallel_install:
4619 self.vartree.dbapi._bump_mtime(self.mycpv)
4620 if self._scheduler is None:
4621 self._scheduler = PollScheduler().sched_iface
4623 retval = self.treewalk(mergeroot, myroot, inforoot, myebuild,
4624 cleanup=cleanup, mydbapi=mydbapi, prev_mtimes=prev_mtimes,
4627 # If PORTAGE_BUILDDIR doesn't exist, then it probably means
4628 # fail-clean is enabled, and the success/die hooks have
4629 # already been called by EbuildPhase.
4630 if os.path.isdir(self.settings['PORTAGE_BUILDDIR']):
4632 if retval == os.EX_OK:
4633 phase = 'success_hooks'
# Run the selected hook phase synchronously in the foreground.
4637 ebuild_phase = MiscFunctionsProcess(
4638 background=False, commands=[phase],
4639 scheduler=self._scheduler, settings=self.settings)
4640 ebuild_phase.start()
4642 self._elog_process()
# Clean the build directory unless FEATURES=noclean; after a failed
# merge it is only cleaned when fail-clean is enabled.
4644 if 'noclean' not in self.settings.features and \
4645 (retval == os.EX_OK or \
4646 'fail-clean' in self.settings.features):
4647 if myebuild is None:
4648 myebuild = os.path.join(inforoot, self.pkg + ".ebuild")
4650 doebuild_environment(myebuild, "clean",
4651 settings=self.settings, db=mydbapi)
4652 phase = EbuildPhase(background=False, phase="clean",
4653 scheduler=self._scheduler, settings=self.settings)
4657 self.settings.pop('REPLACING_VERSIONS', None)
4658 if self.vartree.dbapi._linkmap is None:
4659 # preserve-libs is entirely disabled
4662 self.vartree.dbapi._linkmap._clear_cache()
4663 self.vartree.dbapi._bump_mtime(self.mycpv)
4664 if not parallel_install:
# Read a vdb metadata file and return its contents as one string with
# all whitespace collapsed to single spaces. The early return for a
# missing file (presumably "") is on an elided line -- confirm.
4668 def getstring(self,name):
4669 "returns contents of a file with whitespace converted to spaces"
4670 if not os.path.exists(self.dbdir+"/"+name):
# Filenames use the filesystem encoding; file contents are decoded with
# the repo.content encoding, replacing undecodable bytes.
4673 _unicode_encode(os.path.join(self.dbdir, name),
4674 encoding=_encodings['fs'], errors='strict'),
4675 mode='r', encoding=_encodings['repo.content'], errors='replace'
4677 return " ".join(mydata)
def copyfile(self, fname):
	"""Copy *fname* into this package's vdb directory, keeping its basename."""
	destination = self.dbdir + "/" + os.path.basename(fname)
	shutil.copyfile(fname, destination)
# Return the full decoded text of a vdb metadata file. The not-found
# early return is on an elided line (presumably returns "") -- confirm.
4682 def getfile(self,fname):
4683 if not os.path.exists(self.dbdir+"/"+fname):
# Encode the path with the fs encoding, decode contents with the
# repo.content encoding; the trailing ).read() is on an elided line.
4685 return io.open(_unicode_encode(os.path.join(self.dbdir, fname),
4686 encoding=_encodings['fs'], errors='strict'),
4687 mode='r', encoding=_encodings['repo.content'], errors='replace'
# Atomically replace a vdb metadata file with *data*. Binary mode is
# used for environment.bz2 and for non-string payloads; text mode with
# the repo.content encoding otherwise.
# NOTE(review): `basestring` is Python 2 -- presumably aliased for
# Python 3 earlier in this file; verify.
4690 def setfile(self,fname,data):
4692 if fname == 'environment.bz2' or not isinstance(data, basestring):
4693 kwargs['mode'] = 'wb'
4695 kwargs['mode'] = 'w'
4696 kwargs['encoding'] = _encodings['repo.content']
# write_atomic writes via a temp file + rename so readers never see a
# partially-written metadata file.
4697 write_atomic(os.path.join(self.dbdir, fname), data, **kwargs)
# Return the whitespace-separated tokens of a vdb metadata file as a
# list (the missing-file early return, presumably [], is elided).
4699 def getelements(self,ename):
4700 if not os.path.exists(self.dbdir+"/"+ename):
4702 mylines = io.open(_unicode_encode(
4703 os.path.join(self.dbdir, ename),
4704 encoding=_encodings['fs'], errors='strict'),
4705 mode='r', encoding=_encodings['repo.content'], errors='replace'
# x[:-1] drops each line's trailing newline before tokenizing.
4709 for y in x[:-1].split():
# Write the elements of *mylist* to the named vdb file, one per line.
# backslashreplace guarantees the write never raises on undecodable
# filenames stored in the list.
4713 def setelements(self,mylist,ename):
4714 myelement = io.open(_unicode_encode(
4715 os.path.join(self.dbdir, ename),
4716 encoding=_encodings['fs'], errors='strict'),
4717 mode='w', encoding=_encodings['repo.content'],
4718 errors='backslashreplace')
4720 myelement.write(_unicode_decode(x+"\n"))
def isregular(self):
	"""Is this a regular package (does it have a CATEGORY file? A dblink can be virtual *and* regular)"""
	category_path = os.path.join(self.dbdir, "CATEGORY")
	return os.path.exists(category_path)
# Take a binary-package backup of the package about to be replaced,
# when FEATURES requests it: unmerge-backup always, downgrade-backup
# only when the new version is a downgrade.
4727 def _pre_merge_backup(self, backup_dblink, downgrade):
4729 if ("unmerge-backup" in self.settings.features or
4730 (downgrade and "downgrade-backup" in self.settings.features)):
# Foreground (background=False), no log file for the pre-merge case.
4731 return self._quickpkg_dblink(backup_dblink, False, None)
# Take a binary-package backup of this installed instance before it is
# unmerged, when FEATURES=unmerge-backup is enabled.
4735 def _pre_unmerge_backup(self, background):
4737 if "unmerge-backup" in self.settings.features :
# When running as PORTAGE_BACKGROUND=subprocess the output is already
# redirected, so only attach PORTAGE_LOG_FILE otherwise.
4739 if self.settings.get("PORTAGE_BACKGROUND") != "subprocess":
4740 logfile = self.settings.get("PORTAGE_LOG_FILE")
4741 return self._quickpkg_dblink(self, background, logfile)
# Spawn the quickpkg tool to create a binary package from an installed
# dblink instance, reusing an existing valid binpkg when present.
# Returns the spawned process's exit status (early-return values for
# the shortcut paths are on elided lines -- presumably os.EX_OK).
4745 def _quickpkg_dblink(self, backup_dblink, background, logfile):
# Look up the bintree for this EROOT via the IPC QueryCommand registry.
4747 trees = QueryCommand.get_db()[self.settings["EROOT"]]
4748 bintree = trees["bintree"]
4749 binpkg_path = bintree.getname(backup_dblink.mycpv)
# If a valid binary package already exists, no new backup is needed.
4750 if os.path.exists(binpkg_path) and \
4751 catsplit(backup_dblink.mycpv)[1] not in bintree.invalids:
4757 if not backup_dblink.exists():
4758 # It got unmerged by a concurrent process.
4761 # Call quickpkg for support of QUICKPKG_DEFAULT_OPTS and stuff.
4762 quickpkg_binary = os.path.join(self.settings["PORTAGE_BIN_PATH"],
4765 # Let quickpkg inherit the global vartree config's env.
4766 env = dict(self.vartree.settings.items())
# Tell the child it may reuse our vardb lock instead of re-acquiring.
4767 env["__PORTAGE_INHERIT_VARDB_LOCK"] = "1"
# Make sure the child imports the same portage code as this process.
4769 pythonpath = [x for x in env.get('PYTHONPATH', '').split(":") if x]
4770 if not pythonpath or \
4771 not os.path.samefile(pythonpath[0], portage._pym_path):
4772 pythonpath.insert(0, portage._pym_path)
4773 env['PYTHONPATH'] = ":".join(pythonpath)
4775 quickpkg_proc = SpawnProcess(
4776 args=[portage._python_interpreter, quickpkg_binary,
4777 "=%s" % (backup_dblink.mycpv,)],
4778 background=background, env=env,
4779 scheduler=self._scheduler, logfile=logfile)
4780 quickpkg_proc.start()
4782 return quickpkg_proc.wait()
# Module-level merge convenience: validates write access to EROOT, then
# runs the merge in a forked MergeProcess and returns its exit status
# (the final return is on an elided line).
4787 def merge(mycat, mypkg, pkgloc, infloc,
4788 myroot=None, settings=None, myebuild=None,
4789 mytree=None, mydbapi=None, vartree=None, prev_mtimes=None, blockers=None,
4792 @param myroot: ignored, settings['EROOT'] is used instead
4795 if settings is None:
4796 raise TypeError("settings argument is required")
# Refuse early when EROOT is not writable (error-return line elided).
4797 if not os.access(settings['EROOT'], os.W_OK):
4798 writemsg(_("Permission denied: access('%s', W_OK)\n") % settings['EROOT'],
4801 background = (settings.get('PORTAGE_BACKGROUND') == '1')
4802 merge_task = MergeProcess(
4803 mycat=mycat, mypkg=mypkg, settings=settings,
4804 treetype=mytree, vartree=vartree,
4805 scheduler=(scheduler or PollScheduler().sched_iface),
4806 background=background, blockers=blockers, pkgloc=pkgloc,
4807 infloc=infloc, myebuild=myebuild, mydbapi=mydbapi,
4808 prev_mtimes=prev_mtimes, logfile=settings.get('PORTAGE_LOG_FILE'))
4810 retcode = merge_task.wait()
# Module-level unmerge convenience: builds a dblink for cat/pkg and
# delegates to its unmerge(), serialized unless parallel-install.
# NOTE(review): lock/unlock calls and the return statement are on
# elided lines.
4813 def unmerge(cat, pkg, myroot=None, settings=None,
4814 mytrimworld=None, vartree=None,
4815 ldpath_mtimes=None, scheduler=None):
4817 @param myroot: ignored, settings['EROOT'] is used instead
4818 @param mytrimworld: ignored
4821 if settings is None:
4822 raise TypeError("settings argument is required")
4823 mylink = dblink(cat, pkg, settings=settings, treetype="vartree",
4824 vartree=vartree, scheduler=scheduler)
# dblink supplies a default vartree when the caller passed None.
4825 vartree = mylink.vartree
4826 parallel_install = "parallel-install" in settings.features
4827 if not parallel_install:
4831 retval = mylink.unmerge(ldpath_mtimes=ldpath_mtimes)
4832 if retval == os.EX_OK:
4841 if vartree.dbapi._linkmap is None:
4842 # preserve-libs is entirely disabled
4845 vartree.dbapi._linkmap._clear_cache()
4846 if not parallel_install:
# Serialize a CONTENTS mapping to a file-like object in vdb CONTENTS
# format, sorted by path. The final f.write(line) is on an elided line.
4849 def write_contents(contents, root, f):
4851 Write contents to any file like object. The file will be left open.
# root_len keeps the leading os.sep: stripping root minus its trailing
# separator yields a "/relative/path" style entry.
4853 root_len = len(root) - 1
4854 for filename in sorted(contents):
4855 entry_data = contents[filename]
4856 entry_type = entry_data[0]
4857 relative_filename = filename[root_len:]
# "obj" entries record md5 and mtime.
4858 if entry_type == "obj":
4859 entry_type, mtime, md5sum = entry_data
4860 line = "%s %s %s %s\n" % \
4861 (entry_type, relative_filename, md5sum, mtime)
# "sym" entries record the link target and mtime.
4862 elif entry_type == "sym":
4863 entry_type, mtime, link = entry_data
4864 line = "%s %s -> %s %s\n" % \
4865 (entry_type, relative_filename, link, mtime)
4866 else: # dir, dev, fif
4867 line = "%s %s\n" % (entry_type, relative_filename)
# Add every path from a CONTENTS mapping to an open TarFile, building
# TarInfo objects by hand (see bug #388773 note below). *protect* is an
# optional predicate selecting config files to replace with placeholder
# content; *onProgress(maxval, curval)* reports progress.
# NOTE(review): listing is elided -- several try/except/else lines and
# loop headers of the original are not visible.
4870 def tar_contents(contents, root, tar, protect=None, onProgress=None):
# Probe which encoding can represent the recorded paths, preferring the
# merge encoding and falling back to the fs encoding.
4872 encoding = _encodings['merge']
4877 encoding=_encodings['merge'],
4879 except UnicodeEncodeError:
4880 # The package appears to have been merged with a
4881 # different value of sys.getfilesystemencoding(),
4882 # so fall back to utf_8 if appropriate.
4886 encoding=_encodings['fs'],
4888 except UnicodeEncodeError:
4892 encoding = _encodings['fs']
4894 tar.encoding = encoding
4895 root = normalize_path(root).rstrip(os.path.sep) + os.path.sep
4897 maxval = len(contents)
4900 onProgress(maxval, 0)
4901 paths = list(contents)
# Paths that vanished from the live filesystem (ENOENT) are skipped.
4906 lst = os.lstat(path)
4907 except OSError as e:
4908 if e.errno != errno.ENOENT:
4912 onProgress(maxval, curval)
4914 contents_type = contents[path][0]
4915 if path.startswith(root):
4916 arcname = "./" + path[len(root):]
4918 raise ValueError("invalid root argument: '%s'" % root)
4920 if 'dir' == contents_type and \
4921 not stat.S_ISDIR(lst.st_mode) and \
4922 os.path.isdir(live_path):
4923 # Even though this was a directory in the original ${D}, it exists
4924 # as a symlink to a directory in the live filesystem. It must be
4925 # recorded as a real directory in the tar file to ensure that tar
4926 # can properly extract it's children.
4927 live_path = os.path.realpath(live_path)
4928 lst = os.lstat(live_path)
4930 # Since os.lstat() inside TarFile.gettarinfo() can trigger a
4931 # UnicodeEncodeError when python has something other than utf_8
4932 # return from sys.getfilesystemencoding() (as in bug #388773),
4933 # we implement the needed functionality here, using the result
4934 # of our successful lstat call. An alternative to this would be
4935 # to pass in the fileobj argument to TarFile.gettarinfo(), so
4936 # that it could use fstat instead of lstat. However, that would
4937 # have the unwanted effect of dereferencing symlinks.
4939 tarinfo = tar.tarinfo()
4940 tarinfo.name = arcname
4941 tarinfo.mode = lst.st_mode
4942 tarinfo.uid = lst.st_uid
4943 tarinfo.gid = lst.st_gid
4945 tarinfo.mtime = lst.st_mtime
4946 tarinfo.linkname = ""
4947 if stat.S_ISREG(lst.st_mode):
# Emit hardlinks (LNKTYPE) for inodes already stored in the archive,
# tracked via the tar.inodes map keyed on (st_ino, st_dev).
4948 inode = (lst.st_ino, lst.st_dev)
4949 if (lst.st_nlink > 1 and
4950 inode in tar.inodes and
4951 arcname != tar.inodes[inode]):
4952 tarinfo.type = tarfile.LNKTYPE
4953 tarinfo.linkname = tar.inodes[inode]
4955 tar.inodes[inode] = arcname
4956 tarinfo.type = tarfile.REGTYPE
4957 tarinfo.size = lst.st_size
4958 elif stat.S_ISDIR(lst.st_mode):
4959 tarinfo.type = tarfile.DIRTYPE
4960 elif stat.S_ISLNK(lst.st_mode):
4961 tarinfo.type = tarfile.SYMTYPE
4962 tarinfo.linkname = os.readlink(live_path)
# uname/gname lookups presumably fall back silently on KeyError (the
# surrounding try/except lines are elided) -- confirm.
4966 tarinfo.uname = pwd.getpwuid(tarinfo.uid)[0]
4970 tarinfo.gname = grp.getgrgid(tarinfo.gid)[0]
4974 if stat.S_ISREG(lst.st_mode):
4975 if protect and protect(path):
4976 # Create an empty file as a place holder in order to avoid
4977 # potential collision-protect issues.
4978 f = tempfile.TemporaryFile()
4979 f.write(_unicode_encode(
4980 "# empty file because --include-config=n " + \
4981 "when `quickpkg` was used\n"))
4984 tarinfo.size = os.fstat(f.fileno()).st_size
4985 tar.addfile(tarinfo, f)
4988 f = open(_unicode_encode(path,
4990 errors='strict'), 'rb')
4992 tar.addfile(tarinfo, f)
# Non-regular entries (dirs, symlinks, devices) carry no payload.
4996 tar.addfile(tarinfo)
4998 onProgress(maxval, curval)