1 # Copyright 1998-2011 Gentoo Foundation
2 # Distributed under the terms of the GNU General Public License v2
# Public API of this module.
__all__ = [
	"vardbapi", "vartree", "dblink"] + \
	["write_contents", "tar_contents"]
9 portage.proxy.lazyimport.lazyimport(globals(),
10 'portage.checksum:_perform_md5_merge@perform_md5',
11 'portage.data:portage_gid,portage_uid,secpass',
12 'portage.dbapi.dep_expand:dep_expand',
13 'portage.dbapi._MergeProcess:MergeProcess',
14 'portage.dep:dep_getkey,isjustname,match_from_list,' + \
15 'use_reduce,_slot_re',
16 'portage.elog:collect_ebuild_messages,collect_messages,' + \
17 'elog_process,_merge_logentries',
18 'portage.locks:lockdir,unlockdir,lockfile,unlockfile',
19 'portage.output:bold,colorize',
20 'portage.package.ebuild.doebuild:doebuild_environment,' + \
22 'portage.package.ebuild.prepare_build_dirs:prepare_build_dirs',
23 'portage.update:fixdbentries',
24 'portage.util:apply_secpass_permissions,ConfigProtect,ensure_dirs,' + \
25 'writemsg,writemsg_level,write_atomic,atomic_ofstream,writedict,' + \
26 'grabdict,normalize_path,new_protect_filename',
27 'portage.util.digraph:digraph',
28 'portage.util.env_update:env_update',
29 'portage.util.listdir:dircache,listdir',
30 'portage.util._dyn_libs.PreservedLibsRegistry:PreservedLibsRegistry',
31 'portage.util._dyn_libs.LinkageMapELF:LinkageMapELF@LinkageMap',
32 'portage.versions:best,catpkgsplit,catsplit,cpv_getkey,pkgcmp,' + \
36 from portage.const import CACHE_PATH, CONFIG_MEMORY_FILE, \
37 PORTAGE_PACKAGE_ATOM, PRIVATE_PATH, VDB_PATH
38 from portage.const import _ENABLE_DYN_LINK_MAP, _ENABLE_PRESERVE_LIBS
39 from portage.dbapi import dbapi
40 from portage.dep import _slot_separator
41 from portage.exception import CommandNotFound, \
42 InvalidData, InvalidPackageName, \
43 FileNotFound, PermissionDenied, UnsupportedAPIException
44 from portage.localization import _
45 from portage.util.movefile import movefile
47 from portage import abssymlink, _movefile, bsd_chflags
49 # This is a special version of the os module, wrapped for unicode support.
50 from portage import os
51 from portage import _encodings
52 from portage import _os_merge
53 from portage import _selinux_merge
54 from portage import _unicode_decode
55 from portage import _unicode_encode
57 from _emerge.AsynchronousLock import AsynchronousLock
58 from _emerge.EbuildBuildDir import EbuildBuildDir
59 from _emerge.EbuildPhase import EbuildPhase
60 from _emerge.PollScheduler import PollScheduler
61 from _emerge.MiscFunctionsProcess import MiscFunctionsProcess
import re, shutil, stat, errno, subprocess

try:
	# cPickle is the faster C implementation on Python 2; on Python 3 the
	# accelerated version lives in plain pickle.
	import cPickle as pickle
except ImportError:
	import pickle

if sys.hexversion >= 0x3000000:
	# Python 3 compatibility: the code below still uses the Python 2
	# names for these built-in types.
	basestring = str
	long = int
class vardbapi(dbapi):
	"""dbapi implementation for the installed-packages database (vdb)."""

	# Directory names inside the vdb that never correspond to categories
	# or packages; also excludes dotdirs and in-progress merge dirs.
	_excluded_dirs = ["CVS", "lost+found"]
	_excluded_dirs = [re.escape(x) for x in _excluded_dirs]
	_excluded_dirs = re.compile(r'^(\..*|-MERGING-.*|' + \
		"|".join(_excluded_dirs) + r')$')

	_aux_cache_version = "1"
	_owners_cache_version = "1"

	# Number of uncached packages to trigger cache update, since
	# it's wasteful to update it for every vdb change.
	_aux_cache_threshold = 5

	# NEEDED.* keys are cached in addition to _aux_cache_keys.
	_aux_cache_keys_re = re.compile(r'^NEEDED\..*$')
	# Metadata whose newlines must be preserved (multi-line values).
	_aux_multi_line_re = re.compile(r'^(CONTENTS|NEEDED\..*)$')
100 def __init__(self, _unused_param=None, categories=None, settings=None, vartree=None):
102 The categories parameter is unused since the dbapi class
103 now has a categories property that is generated from the
107 # Used by emerge to check whether any packages
108 # have been added or removed.
109 self._pkgs_changed = False
111 # The _aux_cache_threshold doesn't work as designed
112 # if the cache is flushed from a subprocess, so we
113 # use this to avoid waste vdb cache updates.
114 self._flush_cache_enabled = True
116 #cache for category directory mtimes
119 #cache for dependency checks
122 #cache for cp_list results
127 settings = portage.settings
128 self.settings = settings
129 self.root = settings['ROOT']
131 if _unused_param is not None and _unused_param != self.root:
132 warnings.warn("The first parameter of the " + \
133 "portage.dbapi.vartree.vardbapi" + \
134 " constructor is now unused. Use " + \
135 "settings['ROOT'] instead.",
136 DeprecationWarning, stacklevel=2)
138 self._eroot = settings['EROOT']
139 self._dbroot = self._eroot + VDB_PATH
144 vartree = portage.db[self.root]["vartree"]
145 self.vartree = vartree
146 self._aux_cache_keys = set(
147 ["BUILD_TIME", "CHOST", "COUNTER", "DEPEND", "DESCRIPTION",
148 "EAPI", "HOMEPAGE", "IUSE", "KEYWORDS",
149 "LICENSE", "PDEPEND", "PROPERTIES", "PROVIDE", "RDEPEND",
150 "repository", "RESTRICT" , "SLOT", "USE", "DEFINED_PHASES",
152 self._aux_cache_obj = None
153 self._aux_cache_filename = os.path.join(self._eroot,
154 CACHE_PATH, "vdb_metadata.pickle")
155 self._counter_path = os.path.join(self._eroot,
156 CACHE_PATH, "counter")
158 self._plib_registry = None
159 if _ENABLE_PRESERVE_LIBS:
160 self._plib_registry = PreservedLibsRegistry(self.root,
161 os.path.join(self._eroot, PRIVATE_PATH,
162 "preserved_libs_registry"))
165 if _ENABLE_DYN_LINK_MAP:
166 self._linkmap = LinkageMap(self)
167 self._owners = self._owners_db(self)
169 self._cached_counter = None
171 def getpath(self, mykey, filename=None):
172 # This is an optimized hotspot, so don't use unicode-wrapped
173 # os module and don't use os.path.join().
174 rValue = self._eroot + VDB_PATH + _os.sep + mykey
175 if filename is not None:
176 # If filename is always relative, we can do just
177 # rValue += _os.sep + filename
178 rValue = _os.path.join(rValue, filename)
183 Acquire a reentrant lock, blocking, for cooperation with concurrent
184 processes. State is inherited by subprocesses, allowing subprocesses
185 to reenter a lock that was acquired by a parent process. However,
186 a lock can be released only by the same process that acquired it.
189 self._lock_count += 1
191 if self._lock is not None:
192 raise AssertionError("already locked")
193 # At least the parent needs to exist for the lock file.
194 ensure_dirs(self._dbroot)
195 self._lock = lockdir(self._dbroot)
196 self._lock_count += 1
200 Release a lock, decrementing the recursion level. Each unlock() call
201 must be matched with a prior lock() call, or else an AssertionError
202 will be raised if unlock() is called while not locked.
204 if self._lock_count > 1:
205 self._lock_count -= 1
207 if self._lock is None:
208 raise AssertionError("not locked")
210 unlockdir(self._lock)
213 def _bump_mtime(self, cpv):
215 This is called before an after any modifications, so that consumers
216 can use directory mtimes to validate caches. See bug #290428.
218 base = self._eroot + VDB_PATH
219 cat = catsplit(cpv)[0]
220 catdir = base + _os.sep + cat
224 for x in (catdir, base):
229 def cpv_exists(self, mykey, myrepo=None):
230 "Tells us whether an actual ebuild exists on disk (no masking)"
231 return os.path.exists(self.getpath(mykey))
233 def cpv_counter(self, mycpv):
234 "This method will grab the COUNTER. Returns a counter value."
236 return long(self.aux_get(mycpv, ["COUNTER"])[0])
237 except (KeyError, ValueError):
239 writemsg_level(_("portage: COUNTER for %s was corrupted; " \
240 "resetting to value of 0\n") % (mycpv,),
241 level=logging.ERROR, noiselevel=-1)
244 def cpv_inject(self, mycpv):
245 "injects a real package into our on-disk database; assumes mycpv is valid and doesn't already exist"
246 ensure_dirs(self.getpath(mycpv))
247 counter = self.counter_tick(mycpv=mycpv)
248 # write local package counter so that emerge clean does the right thing
249 write_atomic(self.getpath(mycpv, filename="COUNTER"), str(counter))
251 def isInjected(self, mycpv):
252 if self.cpv_exists(mycpv):
253 if os.path.exists(self.getpath(mycpv, filename="INJECTED")):
255 if not os.path.exists(self.getpath(mycpv, filename="CONTENTS")):
259 def move_ent(self, mylist, repo_match=None):
264 for atom in (origcp, newcp):
265 if not isjustname(atom):
266 raise InvalidPackageName(str(atom))
267 origmatches = self.match(origcp, use_cache=0)
271 for mycpv in origmatches:
272 mycpv_cp = cpv_getkey(mycpv)
273 if mycpv_cp != origcp:
274 # Ignore PROVIDE virtual match.
276 if repo_match is not None \
277 and not repo_match(self.aux_get(mycpv, ['repository'])[0]):
279 mynewcpv = mycpv.replace(mycpv_cp, str(newcp), 1)
280 mynewcat = catsplit(newcp)[0]
281 origpath = self.getpath(mycpv)
282 if not os.path.exists(origpath):
285 if not os.path.exists(self.getpath(mynewcat)):
286 #create the directory
287 ensure_dirs(self.getpath(mynewcat))
288 newpath = self.getpath(mynewcpv)
289 if os.path.exists(newpath):
290 #dest already exists; keep this puppy where it is.
292 _movefile(origpath, newpath, mysettings=self.settings)
293 self._clear_pkg_cache(self._dblink(mycpv))
294 self._clear_pkg_cache(self._dblink(mynewcpv))
296 # We need to rename the ebuild now.
297 old_pf = catsplit(mycpv)[1]
298 new_pf = catsplit(mynewcpv)[1]
301 os.rename(os.path.join(newpath, old_pf + ".ebuild"),
302 os.path.join(newpath, new_pf + ".ebuild"))
303 except EnvironmentError as e:
304 if e.errno != errno.ENOENT:
307 write_atomic(os.path.join(newpath, "PF"), new_pf+"\n")
308 write_atomic(os.path.join(newpath, "CATEGORY"), mynewcat+"\n")
309 fixdbentries([mylist], newpath)
312 def cp_list(self, mycp, use_cache=1):
313 mysplit=catsplit(mycp)
314 if mysplit[0] == '*':
315 mysplit[0] = mysplit[0][1:]
317 mystat = os.stat(self.getpath(mysplit[0])).st_mtime
320 if use_cache and mycp in self.cpcache:
321 cpc = self.cpcache[mycp]
324 cat_dir = self.getpath(mysplit[0])
326 dir_list = os.listdir(cat_dir)
327 except EnvironmentError as e:
328 if e.errno == PermissionDenied.errno:
329 raise PermissionDenied(cat_dir)
335 if self._excluded_dirs.match(x) is not None:
339 self.invalidentry(os.path.join(self.getpath(mysplit[0]), x))
342 if ps[0] == mysplit[1]:
343 returnme.append(mysplit[0]+"/"+x)
344 self._cpv_sort_ascending(returnme)
346 self.cpcache[mycp] = [mystat, returnme[:]]
347 elif mycp in self.cpcache:
348 del self.cpcache[mycp]
351 def cpv_all(self, use_cache=1):
353 Set use_cache=0 to bypass the portage.cachedir() cache in cases
354 when the accuracy of mtime staleness checks should not be trusted
355 (generally this is only necessary in critical sections that
356 involve merge or unmerge of packages).
359 basepath = os.path.join(self._eroot, VDB_PATH) + os.path.sep
362 from portage import listdir
364 def listdir(p, **kwargs):
366 return [x for x in os.listdir(p) \
367 if os.path.isdir(os.path.join(p, x))]
368 except EnvironmentError as e:
369 if e.errno == PermissionDenied.errno:
370 raise PermissionDenied(p)
374 for x in listdir(basepath, EmptyOnError=1, ignorecvs=1, dirsonly=1):
375 if self._excluded_dirs.match(x) is not None:
377 if not self._category_re.match(x):
379 for y in listdir(basepath + x, EmptyOnError=1, dirsonly=1):
380 if self._excluded_dirs.match(y) is not None:
382 subpath = x + "/" + y
383 # -MERGING- should never be a cpv, nor should files.
385 if catpkgsplit(subpath) is None:
386 self.invalidentry(self.getpath(subpath))
389 self.invalidentry(self.getpath(subpath))
391 returnme.append(subpath)
395 def cp_all(self, use_cache=1):
396 mylist = self.cpv_all(use_cache=use_cache)
402 mysplit = catpkgsplit(y)
404 self.invalidentry(self.getpath(y))
407 self.invalidentry(self.getpath(y))
409 d[mysplit[0]+"/"+mysplit[1]] = None
412 def checkblockers(self, origdep):
415 def _clear_cache(self):
416 self.mtdircache.clear()
417 self.matchcache.clear()
419 self._aux_cache_obj = None
421 def _add(self, pkg_dblink):
422 self._pkgs_changed = True
423 self._clear_pkg_cache(pkg_dblink)
425 def _remove(self, pkg_dblink):
426 self._pkgs_changed = True
427 self._clear_pkg_cache(pkg_dblink)
429 def _clear_pkg_cache(self, pkg_dblink):
430 # Due to 1 second mtime granularity in <python-2.5, mtime checks
431 # are not always sufficient to invalidate vardbapi caches. Therefore,
432 # the caches need to be actively invalidated here.
433 self.mtdircache.pop(pkg_dblink.cat, None)
434 self.matchcache.pop(pkg_dblink.cat, None)
435 self.cpcache.pop(pkg_dblink.mysplit[0], None)
436 dircache.pop(pkg_dblink.dbcatdir, None)
438 def match(self, origdep, use_cache=1):
439 "caching match function"
441 origdep, mydb=self, use_cache=use_cache, settings=self.settings)
442 mykey = dep_getkey(mydep)
443 mycat = catsplit(mykey)[0]
445 if mycat in self.matchcache:
446 del self.mtdircache[mycat]
447 del self.matchcache[mycat]
448 return list(self._iter_match(mydep,
449 self.cp_list(mydep.cp, use_cache=use_cache)))
451 curmtime = os.stat(os.path.join(self._eroot, VDB_PATH, mycat)).st_mtime
452 except (IOError, OSError):
455 if mycat not in self.matchcache or \
456 self.mtdircache[mycat] != curmtime:
458 self.mtdircache[mycat] = curmtime
459 self.matchcache[mycat] = {}
460 if mydep not in self.matchcache[mycat]:
461 mymatch = list(self._iter_match(mydep,
462 self.cp_list(mydep.cp, use_cache=use_cache)))
463 self.matchcache[mycat][mydep] = mymatch
464 return self.matchcache[mycat][mydep][:]
466 def findname(self, mycpv, myrepo=None):
467 return self.getpath(str(mycpv), filename=catsplit(mycpv)[1]+".ebuild")
469 def flush_cache(self):
470 """If the current user has permission and the internal aux_get cache has
471 been updated, save it to disk and mark it unmodified. This is called
472 by emerge after it has loaded the full vdb for use in dependency
473 calculations. Currently, the cache is only written if the user has
474 superuser privileges (since that's required to obtain a lock), but all
475 users have read access and benefit from faster metadata lookups (as
476 long as at least part of the cache is still valid)."""
477 if self._flush_cache_enabled and \
478 self._aux_cache is not None and \
479 len(self._aux_cache["modified"]) >= self._aux_cache_threshold and \
481 self._owners.populate() # index any unindexed contents
482 valid_nodes = set(self.cpv_all())
483 for cpv in list(self._aux_cache["packages"]):
484 if cpv not in valid_nodes:
485 del self._aux_cache["packages"][cpv]
486 del self._aux_cache["modified"]
488 f = atomic_ofstream(self._aux_cache_filename, 'wb')
489 pickle.dump(self._aux_cache, f, protocol=2)
491 apply_secpass_permissions(
492 self._aux_cache_filename, gid=portage_gid, mode=0o644)
493 except (IOError, OSError) as e:
495 self._aux_cache["modified"] = set()
498 def _aux_cache(self):
499 if self._aux_cache_obj is None:
500 self._aux_cache_init()
501 return self._aux_cache_obj
503 def _aux_cache_init(self):
506 if sys.hexversion >= 0x3000000:
507 # Buffered io triggers extreme performance issues in
508 # Unpickler.load() (problem observed with python-3.0.1).
509 # Unfortunately, performance is still poor relative to
510 # python-2.x, but buffering makes it much worse.
511 open_kwargs["buffering"] = 0
513 f = open(_unicode_encode(self._aux_cache_filename,
514 encoding=_encodings['fs'], errors='strict'),
515 mode='rb', **open_kwargs)
516 mypickle = pickle.Unpickler(f)
518 mypickle.find_global = None
519 except AttributeError:
520 # TODO: If py3k, override Unpickler.find_class().
522 aux_cache = mypickle.load()
525 except (IOError, OSError, EOFError, ValueError, pickle.UnpicklingError) as e:
526 if isinstance(e, pickle.UnpicklingError):
527 writemsg(_unicode_decode(_("!!! Error loading '%s': %s\n")) % \
528 (self._aux_cache_filename, e), noiselevel=-1)
531 if not aux_cache or \
532 not isinstance(aux_cache, dict) or \
533 aux_cache.get("version") != self._aux_cache_version or \
534 not aux_cache.get("packages"):
535 aux_cache = {"version": self._aux_cache_version}
536 aux_cache["packages"] = {}
538 owners = aux_cache.get("owners")
539 if owners is not None:
540 if not isinstance(owners, dict):
542 elif "version" not in owners:
544 elif owners["version"] != self._owners_cache_version:
546 elif "base_names" not in owners:
548 elif not isinstance(owners["base_names"], dict):
554 "version" : self._owners_cache_version
556 aux_cache["owners"] = owners
558 aux_cache["modified"] = set()
559 self._aux_cache_obj = aux_cache
561 def aux_get(self, mycpv, wants, myrepo = None):
562 """This automatically caches selected keys that are frequently needed
563 by emerge for dependency calculations. The cached metadata is
564 considered valid if the mtime of the package directory has not changed
565 since the data was cached. The cache is stored in a pickled dict
566 object with the following format:
568 {version:"1", "packages":{cpv1:(mtime,{k1,v1, k2,v2, ...}), cpv2...}}
570 If an error occurs while loading the cache pickle or the version is
571 unrecognized, the cache will simple be recreated from scratch (it is
572 completely disposable).
574 cache_these_wants = self._aux_cache_keys.intersection(wants)
576 if self._aux_cache_keys_re.match(x) is not None:
577 cache_these_wants.add(x)
579 if not cache_these_wants:
580 return self._aux_get(mycpv, wants)
582 cache_these = set(self._aux_cache_keys)
583 cache_these.update(cache_these_wants)
585 mydir = self.getpath(mycpv)
588 mydir_stat = os.stat(mydir)
590 if e.errno != errno.ENOENT:
592 raise KeyError(mycpv)
593 mydir_mtime = mydir_stat[stat.ST_MTIME]
594 pkg_data = self._aux_cache["packages"].get(mycpv)
595 pull_me = cache_these.union(wants)
596 mydata = {"_mtime_" : mydir_mtime}
598 cache_incomplete = False
601 if pkg_data is not None:
602 if not isinstance(pkg_data, tuple) or len(pkg_data) != 2:
605 cache_mtime, metadata = pkg_data
606 if not isinstance(cache_mtime, (long, int)) or \
607 not isinstance(metadata, dict):
611 cache_mtime, metadata = pkg_data
612 cache_valid = cache_mtime == mydir_mtime
614 # Migrate old metadata to unicode.
615 for k, v in metadata.items():
616 metadata[k] = _unicode_decode(v,
617 encoding=_encodings['repo.content'], errors='replace')
619 mydata.update(metadata)
620 pull_me.difference_update(mydata)
623 # pull any needed data and cache it
624 aux_keys = list(pull_me)
625 for k, v in zip(aux_keys,
626 self._aux_get(mycpv, aux_keys, st=mydir_stat)):
628 if not cache_valid or cache_these.difference(metadata):
630 if cache_valid and metadata:
631 cache_data.update(metadata)
632 for aux_key in cache_these:
633 cache_data[aux_key] = mydata[aux_key]
634 self._aux_cache["packages"][mycpv] = (mydir_mtime, cache_data)
635 self._aux_cache["modified"].add(mycpv)
637 if _slot_re.match(mydata['SLOT']) is None:
638 # Empty or invalid slot triggers InvalidAtom exceptions when
639 # generating slot atoms for packages, so translate it to '0' here.
640 mydata['SLOT'] = _unicode_decode('0')
642 return [mydata[x] for x in wants]
644 def _aux_get(self, mycpv, wants, st=None):
645 mydir = self.getpath(mycpv)
650 if e.errno == errno.ENOENT:
651 raise KeyError(mycpv)
652 elif e.errno == PermissionDenied.errno:
653 raise PermissionDenied(mydir)
656 if not stat.S_ISDIR(st.st_mode):
657 raise KeyError(mycpv)
661 results.append(st[stat.ST_MTIME])
665 _unicode_encode(os.path.join(mydir, x),
666 encoding=_encodings['fs'], errors='strict'),
667 mode='r', encoding=_encodings['repo.content'],
673 # Preserve \n for metadata that is known to
674 # contain multiple lines.
675 if self._aux_multi_line_re.match(x) is None:
676 myd = " ".join(myd.split())
678 myd = _unicode_decode('')
679 if x == "EAPI" and not myd:
680 results.append(_unicode_decode('0'))
685 def aux_update(self, cpv, values):
686 mylink = self._dblink(cpv)
687 if not mylink.exists():
689 self._bump_mtime(cpv)
690 self._clear_pkg_cache(mylink)
691 for k, v in values.items():
696 os.unlink(os.path.join(self.getpath(cpv), k))
697 except EnvironmentError:
699 self._bump_mtime(cpv)
701 def counter_tick(self, myroot=None, mycpv=None):
703 @param myroot: ignored, self._eroot is used instead
705 return self.counter_tick_core(incrementing=1, mycpv=mycpv)
707 def get_counter_tick_core(self, myroot=None, mycpv=None):
709 Use this method to retrieve the counter instead
710 of having to trust the value of a global counter
711 file that can lead to invalid COUNTER
712 generation. When cache is valid, the package COUNTER
713 files are not read and we rely on the timestamp of
714 the package directory to validate cache. The stat
715 calls should only take a short time, so performance
716 is sufficient without having to rely on a potentially
717 corrupt global counter file.
719 The global counter file located at
720 $CACHE_PATH/counter serves to record the
721 counter of the last installed package and
722 it also corresponds to the total number of
723 installation actions that have occurred in
724 the history of this package database.
726 @param myroot: ignored, self._eroot is used instead
733 _unicode_encode(self._counter_path,
734 encoding=_encodings['fs'], errors='strict'),
735 mode='r', encoding=_encodings['repo.content'],
737 except EnvironmentError as e:
738 new_vdb = not bool(self.cpv_all())
740 writemsg(_("!!! Unable to read COUNTER file: '%s'\n") % \
741 self._counter_path, noiselevel=-1)
742 writemsg("!!! %s\n" % str(e), noiselevel=-1)
747 counter = long(cfile.readline().strip())
750 except (OverflowError, ValueError) as e:
751 writemsg(_("!!! COUNTER file is corrupt: '%s'\n") % \
752 self._counter_path, noiselevel=-1)
753 writemsg("!!! %s\n" % str(e), noiselevel=-1)
756 if self._cached_counter == counter:
757 max_counter = counter
759 # We must ensure that we return a counter
760 # value that is at least as large as the
761 # highest one from the installed packages,
762 # since having a corrupt value that is too low
763 # can trigger incorrect AUTOCLEAN behavior due
764 # to newly installed packages having lower
765 # COUNTERs than the previous version in the
767 max_counter = counter
768 for cpv in self.cpv_all():
770 pkg_counter = int(self.aux_get(cpv, ["COUNTER"])[0])
771 except (KeyError, OverflowError, ValueError):
773 if pkg_counter > max_counter:
774 max_counter = pkg_counter
776 if counter < 0 and not new_vdb:
777 writemsg(_("!!! Initializing COUNTER to " \
778 "value of %d\n") % max_counter, noiselevel=-1)
780 return max_counter + 1
782 def counter_tick_core(self, myroot=None, incrementing=1, mycpv=None):
784 This method will grab the next COUNTER value and record it back
785 to the global file. Returns new counter value.
787 @param myroot: ignored, self._eroot is used instead
788 @param mycpv: ignored
794 counter = self.get_counter_tick_core() - 1
795 if self._cached_counter != counter:
799 # use same permissions as config._init_dirs()
800 ensure_dirs(os.path.dirname(self._counter_path),
801 gid=portage_gid, mode=0o2750, mask=0o2)
802 # update new global counter file
803 write_atomic(self._counter_path, str(counter))
804 self._cached_counter = counter
810 def _dblink(self, cpv):
811 category, pf = catsplit(cpv)
812 return dblink(category, pf, settings=self.settings,
813 vartree=self.vartree, treetype="vartree")
815 def removeFromContents(self, pkg, paths, relative_paths=True):
817 @param pkg: cpv for an installed package
819 @param paths: paths of files to remove from contents
820 @type paths: iterable
822 if not hasattr(pkg, "getcontents"):
823 pkg = self._dblink(pkg)
824 root = self.settings['ROOT']
825 root_len = len(root) - 1
826 new_contents = pkg.getcontents().copy()
829 for filename in paths:
830 filename = _unicode_decode(filename,
831 encoding=_encodings['content'], errors='strict')
832 filename = normalize_path(filename)
834 relative_filename = filename
836 relative_filename = filename[root_len:]
837 contents_key = pkg._match_contents(relative_filename)
839 del new_contents[contents_key]
843 self._bump_mtime(pkg.mycpv)
844 f = atomic_ofstream(os.path.join(pkg.dbdir, "CONTENTS"))
845 write_contents(new_contents, root, f)
847 self._bump_mtime(pkg.mycpv)
848 pkg._clear_contents_cache()
850 class _owners_cache(object):
852 This class maintains an hash table that serves to index package
853 contents by mapping the basename of file to a list of possible
854 packages that own it. This is used to optimize owner lookups
855 by narrowing the search down to a smaller number of packages.
858 from hashlib import md5 as _new_hash
860 from md5 import new as _new_hash
863 _hex_chars = int(_hash_bits / 4)
865 def __init__(self, vardb):
869 eroot_len = len(self._vardb._eroot)
870 contents = self._vardb._dblink(cpv).getcontents()
871 pkg_hash = self._hash_pkg(cpv)
873 # Empty path is a code used to represent empty contents.
874 self._add_path("", pkg_hash)
877 self._add_path(x[eroot_len:], pkg_hash)
879 self._vardb._aux_cache["modified"].add(cpv)
881 def _add_path(self, path, pkg_hash):
883 Empty path is a code that represents empty contents.
886 name = os.path.basename(path.rstrip(os.path.sep))
891 name_hash = self._hash_str(name)
892 base_names = self._vardb._aux_cache["owners"]["base_names"]
893 pkgs = base_names.get(name_hash)
896 base_names[name_hash] = pkgs
897 pkgs[pkg_hash] = None
899 def _hash_str(self, s):
901 # Always use a constant utf_8 encoding here, since
902 # the "default" encoding can change.
903 h.update(_unicode_encode(s,
904 encoding=_encodings['repo.content'],
905 errors='backslashreplace'))
907 h = h[-self._hex_chars:]
911 def _hash_pkg(self, cpv):
912 counter, mtime = self._vardb.aux_get(
913 cpv, ["COUNTER", "_mtime_"])
915 counter = int(counter)
918 return (cpv, counter, mtime)
920 class _owners_db(object):
922 def __init__(self, vardb):
929 owners_cache = vardbapi._owners_cache(self._vardb)
930 cached_hashes = set()
931 base_names = self._vardb._aux_cache["owners"]["base_names"]
933 # Take inventory of all cached package hashes.
934 for name, hash_values in list(base_names.items()):
935 if not isinstance(hash_values, dict):
938 cached_hashes.update(hash_values)
940 # Create sets of valid package hashes and uncached packages.
941 uncached_pkgs = set()
942 hash_pkg = owners_cache._hash_pkg
943 valid_pkg_hashes = set()
944 for cpv in self._vardb.cpv_all():
945 hash_value = hash_pkg(cpv)
946 valid_pkg_hashes.add(hash_value)
947 if hash_value not in cached_hashes:
948 uncached_pkgs.add(cpv)
950 # Cache any missing packages.
951 for cpv in uncached_pkgs:
952 owners_cache.add(cpv)
954 # Delete any stale cache.
955 stale_hashes = cached_hashes.difference(valid_pkg_hashes)
957 for base_name_hash, bucket in list(base_names.items()):
958 for hash_value in stale_hashes.intersection(bucket):
959 del bucket[hash_value]
961 del base_names[base_name_hash]
965 def get_owners(self, path_iter):
967 @return the owners as a dblink -> set(files) mapping.
970 for owner, f in self.iter_owners(path_iter):
971 owned_files = owners.get(owner)
972 if owned_files is None:
974 owners[owner] = owned_files
978 def getFileOwnerMap(self, path_iter):
979 owners = self.get_owners(path_iter)
981 for pkg_dblink, files in owners.items():
983 owner_set = file_owners.get(f)
984 if owner_set is None:
986 file_owners[f] = owner_set
987 owner_set.add(pkg_dblink)
990 def iter_owners(self, path_iter):
992 Iterate over tuples of (dblink, path). In order to avoid
993 consuming too many resources for too much time, resources
994 are only allocated for the duration of a given iter_owners()
995 call. Therefore, to maximize reuse of resources when searching
996 for multiple files, it's best to search for them all in a single
1000 if not isinstance(path_iter, list):
1001 path_iter = list(path_iter)
1002 owners_cache = self._populate()
1005 hash_pkg = owners_cache._hash_pkg
1006 hash_str = owners_cache._hash_str
1007 base_names = self._vardb._aux_cache["owners"]["base_names"]
1012 x = dblink_cache.get(cpv)
1014 if len(dblink_cache) > 20:
1015 # Ensure that we don't run out of memory.
1016 raise StopIteration()
1017 x = self._vardb._dblink(cpv)
1018 dblink_cache[cpv] = x
1023 path = path_iter.pop()
1024 is_basename = os.sep != path[:1]
1028 name = os.path.basename(path.rstrip(os.path.sep))
1033 name_hash = hash_str(name)
1034 pkgs = base_names.get(name_hash)
1036 if pkgs is not None:
1038 for hash_value in pkgs:
1039 if not isinstance(hash_value, tuple) or \
1040 len(hash_value) != 3:
1042 cpv, counter, mtime = hash_value
1043 if not isinstance(cpv, basestring):
1046 current_hash = hash_pkg(cpv)
1050 if current_hash != hash_value:
1054 for p in dblink(cpv).getcontents():
1055 if os.path.basename(p) == name:
1056 owners.append((cpv, p[len(root):]))
1058 if dblink(cpv).isowner(path):
1059 owners.append((cpv, path))
1061 except StopIteration:
1062 path_iter.append(path)
1064 dblink_cache.clear()
1066 for x in self._iter_owners_low_mem(path_iter):
1070 for cpv, p in owners:
1071 yield (dblink(cpv), p)
1073 def _iter_owners_low_mem(self, path_list):
1075 This implemention will make a short-lived dblink instance (and
1076 parse CONTENTS) for every single installed package. This is
1077 slower and but uses less memory than the method which uses the
1085 for path in path_list:
1086 is_basename = os.sep != path[:1]
1090 name = os.path.basename(path.rstrip(os.path.sep))
1091 path_info_list.append((path, name, is_basename))
1093 root = self._vardb._eroot
1094 for cpv in self._vardb.cpv_all():
1095 dblnk = self._vardb._dblink(cpv)
1097 for path, name, is_basename in path_info_list:
1099 for p in dblnk.getcontents():
1100 if os.path.basename(p) == name:
1101 yield dblnk, p[len(root):]
1103 if dblnk.isowner(path):
class vartree(object):
	"this tree will scan a var/db/pkg database located at root (passed to init)"
	def __init__(self, root=None, virtual=None, categories=None,
		settings=None):

		if settings is None:
			settings = portage.settings

		self.root = settings['ROOT']
		if root is not None and root != self.root:
			warnings.warn("The 'root' parameter of the " + \
				"portage.dbapi.vartree.vartree" + \
				" constructor is now unused. Use " + \
				"settings['ROOT'] instead.",
				DeprecationWarning, stacklevel=2)

		self.settings = settings
		self.dbapi = vardbapi(settings=settings, vartree=self)
		self.populated = 1

	def getpath(self, mykey, filename=None):
		return self.dbapi.getpath(mykey, filename=filename)

	def zap(self, mycpv):
		return

	def inject(self, mycpv):
		return

	def get_provide(self, mycpv):
		"""Return the expanded PROVIDE entries for an installed cpv,
		or [] on parse errors (which are reported, not raised)."""
		myprovides = []
		mylines = None
		try:
			mylines, myuse = self.dbapi.aux_get(mycpv, ["PROVIDE", "USE"])
			if mylines:
				myuse = myuse.split()
				mylines = use_reduce(mylines, uselist=myuse, flat=True)
				for myprovide in mylines:
					mys = catpkgsplit(myprovide)
					if not mys:
						mys = myprovide.split("/")
					myprovides += [mys[0] + "/" + mys[1]]
			return myprovides
		except SystemExit as e:
			raise
		except Exception as e:
			mydir = self.dbapi.getpath(mycpv)
			writemsg(_("\nParse Error reading PROVIDE and USE in '%s'\n") % mydir,
				noiselevel=-1)
			if mylines:
				writemsg(_("Possibly Invalid: '%s'\n") % str(mylines),
					noiselevel=-1)
			writemsg(_("Exception: %s\n\n") % str(e), noiselevel=-1)
			return []

	def get_all_provides(self):
		"""Map each provided virtual to the list of installed cpvs
		providing it."""
		myprovides = {}
		for node in self.getallcpv():
			for mykey in self.get_provide(node):
				if mykey in myprovides:
					myprovides[mykey] += [node]
				else:
					myprovides[mykey] = [node]
		return myprovides

	def dep_bestmatch(self, mydep, use_cache=1):
		"compatibility method -- all matches, not just visible ones"
		#mymatch=best(match(dep_expand(mydep,self.dbapi),self.dbapi))
		mymatch = best(self.dbapi.match(
			dep_expand(mydep, mydb=self.dbapi, settings=self.settings),
			use_cache=use_cache))
		if mymatch is None:
			return ""
		else:
			return mymatch

	def dep_match(self, mydep, use_cache=1):
		"compatibility method -- we want to see all matches, not just visible ones"
		#mymatch = match(mydep,self.dbapi)
		mymatch = self.dbapi.match(mydep, use_cache=use_cache)
		if mymatch is None:
			return []
		else:
			return mymatch

	def exists_specific(self, cpv):
		return self.dbapi.cpv_exists(cpv)

	def getallcpv(self):
		"""temporary function, probably to be renamed --- Gets a list of all
		category/package-versions installed on the system."""
		return self.dbapi.cpv_all()

	def getallnodes(self):
		"""new behavior: these are all *unmasked* nodes. There may or may not be available
		masked package for nodes in this nodes list."""
		return self.dbapi.cp_all()

	def getebuildpath(self, fullpackage):
		cat, package = catsplit(fullpackage)
		return self.getpath(fullpackage, filename=package+".ebuild")

	def getslot(self, mycatpkg):
		"Get a slot for a catpkg; assume it exists."
		try:
			return self.dbapi.aux_get(mycatpkg, ["SLOT"])[0]
		except KeyError:
			return ""

	def populate(self):
		self.populated = 1
class dblink(object):
	This class provides an interface to the installed package database
	At present this is implemented as a text backend in /var/db/pkg.
	# Matches a path that is not in normal form: doubled slashes, a missing
	# leading slash, a trailing slash, or '.'/'..' components.
	_normalize_needed = re.compile(r'//|^[^/]|./$|(^|/)\.\.?(/|$)')
	# Parses one CONTENTS line into named groups 'dir' (dev/dir/fif),
	# 'obj' (path, md5, mtime) and 'sym' (src -> dest, mtime); 'oldsym'
	# covers the legacy stat-tuple symlink format written by very old
	# portage versions.
	_contents_re = re.compile(r'^(' + \
		r'(?P<dir>(dev|dir|fif) (.+))|' + \
		r'(?P<obj>(obj) (.+) (\S+) (\d+))|' + \
		r'(?P<sym>(sym) (.+) -> (.+) ((\d+)|(?P<oldsym>(' + \
		r'\(\d+, \d+L, \d+L, \d+, \d+, \d+, \d+L, \d+, (\d+), \d+\)))))' + \
	def __init__(self, cat, pkg, myroot=None, settings=None, treetype=None,
		vartree=None, blockers=None, scheduler=None, pipe=None):
		Creates a DBlink object for a given CPV.
		The given CPV may not be present in the database already.
		@param cat: Category
		@param pkg: Package (PV)
		@param myroot: ignored, settings['ROOT'] is used instead
		@type myroot: String (Path)
		@param settings: Typically portage.settings
		@type settings: portage.config
		@param treetype: one of ['porttree','bintree','vartree']
		@type treetype: String
		@param vartree: an instance of vartree corresponding to myroot.
		@type vartree: vartree
		# settings is mandatory despite the keyword-with-default signature.
		if settings is None:
			raise TypeError("settings argument is required")
		mysettings = settings
		# myroot parameter is deliberately ignored; the configured ROOT wins.
		myroot = settings['ROOT']
		# NOTE(review): the assignments of self.cat/self.pkg appear to have
		# been lost in extraction — they must precede self.mycpv below.
		self.mycpv = self.cat + "/" + self.pkg
		self.mysplit = list(catpkgsplit(self.mycpv)[1:])
		self.mysplit[0] = "%s/%s" % (self.cat, self.mysplit[0])
		self.treetype = treetype
		# NOTE(review): upstream guards this with 'if vartree is None:';
		# that guard seems to be missing here — verify before relying on it.
		vartree = portage.db[myroot]["vartree"]
		self.vartree = vartree
		self._blockers = blockers
		self._scheduler = scheduler
		# WARNING: EROOT support is experimental and may be incomplete
		# for cases in which EPREFIX is non-empty.
		self._eroot = mysettings['EROOT']
		# Paths of this package's VDB entry: category dir, final package
		# dir, and the temporary -MERGING- dir used during a merge.
		self.dbroot = normalize_path(os.path.join(self._eroot, VDB_PATH))
		self.dbcatdir = self.dbroot+"/"+cat
		self.dbpkgdir = self.dbcatdir+"/"+pkg
		self.dbtmpdir = self.dbcatdir+"/-MERGING-"+pkg
		self.dbdir = self.dbpkgdir
		self.settings = mysettings
		self._verbose = self.settings.get("PORTAGE_VERBOSE") == "1"
		# Lazily populated caches; see getcontents()/_clear_contents_cache().
		self._installed_instance = None
		self.contentscache = None
		self._contents_inodes = None
		self._contents_basenames = None
		self._linkmap_broken = False
		self._md5_merge_map = {}
		# Key used by __hash__/__eq__ to identify this (root, cpv) pair.
		self._hash_key = (self.myroot, self.mycpv)
		self._protect_obj = None
1295 return hash(self._hash_key)
1297 def __eq__(self, other):
1298 return isinstance(other, dblink) and \
1299 self._hash_key == other._hash_key
1301 def _get_protect_obj(self):
1303 if self._protect_obj is None:
1304 self._protect_obj = ConfigProtect(self._eroot,
1305 portage.util.shlex_split(
1306 self.settings.get("CONFIG_PROTECT", "")),
1307 portage.util.shlex_split(
1308 self.settings.get("CONFIG_PROTECT_MASK", "")))
1310 return self._protect_obj
1312 def isprotected(self, obj):
1313 return self._get_protect_obj().isprotected(obj)
1315 def updateprotect(self):
1316 self._get_protect_obj().updateprotect()
		# Bodies of lockdb()/unlockdb(); their def lines were lost in
		# extraction. Both simply delegate to the shared vardbapi lock.
		self.vartree.dbapi.lock()
		self.vartree.dbapi.unlock()
		# Fragment of getpath() — only its docstring survived extraction.
		"return path to location of db information (for >>> informational display)"
		# Fragment of exists(): True when this package's VDB dir is present.
		"does the db entry exist? boolean."
		return os.path.exists(self.dbdir)
		Remove this entry from the database
		# NOTE(review): recovered from a partial extraction — the def line,
		# early returns and locking around this body appear to be missing.
		if not os.path.exists(self.dbdir):
		# Check validity of self.dbdir before attempting to remove it.
		if not self.dbdir.startswith(self.dbroot):
			writemsg(_("portage.dblink.delete(): invalid dbdir: %s\n") % \
				self.dbdir, noiselevel=-1)
		# Recursively remove this package's VDB directory.
		shutil.rmtree(self.dbdir)
		# If empty, remove parent category directory.
		os.rmdir(os.path.dirname(self.dbdir))
		# Drop the entry from the in-memory vardbapi caches as well.
		self.vartree.dbapi._remove(self)
	def clearcontents(self):
		For a given db entry (self), erase the CONTENTS values.
		# Delete the CONTENTS file if present so the package appears to own
		# no files. NOTE(review): upstream wraps this in a dbapi lock; the
		# lock/unlock lines seem to be missing from this extraction.
		if os.path.exists(self.dbdir+"/CONTENTS"):
			os.unlink(self.dbdir+"/CONTENTS")
1364 def _clear_contents_cache(self):
1365 self.contentscache = None
1366 self._contents_inodes = None
1367 self._contents_basenames = None
	def getcontents(self):
		Get the installed files of a given package (aka what that package installed)
		# Returns (and memoizes in self.contentscache) a dict mapping each
		# installed absolute path to its CONTENTS tuple. Parent directories
		# are synthesized so callers like isowner() can rely on them.
		# NOTE(review): recovered from a partial extraction — several lines
		# (try:, pkgfiles init, myc.close(), return statements, errors list
		# init, null_byte definition) appear to be missing; verify upstream.
		contents_file = os.path.join(self.dbdir, "CONTENTS")
		if self.contentscache is not None:
			return self.contentscache
		# Open CONTENTS with the repo content encoding.
		myc = codecs.open(_unicode_encode(contents_file,
			encoding=_encodings['fs'], errors='strict'),
			mode='r', encoding=_encodings['repo.content'],
		except EnvironmentError as e:
			if e.errno != errno.ENOENT:
			# Missing CONTENTS: cache the (empty) result and bail.
			self.contentscache = pkgfiles
		mylines = myc.readlines()
		# Bind class-level regexes and group indexes to locals for the loop.
		normalize_needed = self._normalize_needed
		contents_re = self._contents_re
		obj_index = contents_re.groupindex['obj']
		dir_index = contents_re.groupindex['dir']
		sym_index = contents_re.groupindex['sym']
		# The old symlink format may exist on systems that have packages
		# which were installed many years ago (see bug #351814).
		oldsym_index = contents_re.groupindex['oldsym']
		# CONTENTS files already contain EPREFIX
		myroot = self.settings['ROOT']
		if myroot == os.path.sep:
		# used to generate parent dir entries
		dir_entry = (_unicode_decode("dir"),)
		eroot_split_len = len(self.settings["EROOT"].split(os.sep)) - 1
		for pos, line in enumerate(mylines):
			if null_byte in line:
				# Null bytes are a common indication of corruption.
				errors.append((pos + 1, _("Null byte found in CONTENTS entry")))
			line = line.rstrip("\n")
			m = contents_re.match(line)
				errors.append((pos + 1, _("Unrecognized CONTENTS entry")))
			if m.group(obj_index) is not None:
				#format: type, mtime, md5sum
				data = (m.group(base+1), m.group(base+4), m.group(base+3))
			elif m.group(dir_index) is not None:
				data = (m.group(base+1),)
			elif m.group(sym_index) is not None:
				# Old-format entries store the mtime at a different group.
				if m.group(oldsym_index) is None:
					mtime = m.group(base+5)
					mtime = m.group(base+8)
				#format: type, mtime, dest
				data = (m.group(base+1), mtime, m.group(base+3))
				# This won't happen as long the regular expression
				# is written to only match valid entries.
				raise AssertionError(_("required group not found " + \
					"in CONTENTS entry: '%s'") % line)
			path = m.group(base+2)
			if normalize_needed.search(path) is not None:
				path = normalize_path(path)
				if not path.startswith(os.path.sep):
					path = os.path.sep + path
			if myroot is not None:
				path = os.path.join(myroot, path.lstrip(os.path.sep))
			# Implicitly add parent directories, since we can't necessarily
			# assume that they are explicitly listed in CONTENTS, and it's
			# useful for callers if they can rely on parent directory entries
			# being generated here (crucial for things like dblink.isowner()).
			path_split = path.split(os.sep)
			while len(path_split) > eroot_split_len:
				parent = os.sep.join(path_split)
				if parent in pkgfiles:
				pkgfiles[parent] = dir_entry
			pkgfiles[path] = data
		# Report any malformed lines collected above, then memoize.
		writemsg(_("!!! Parse error in '%s'\n") % contents_file, noiselevel=-1)
		for pos, e in errors:
			writemsg(_("!!! line %d: %s\n") % (pos, e), noiselevel=-1)
		self.contentscache = pkgfiles
	def _prune_plib_registry(self, unmerge=False,
		needed=None, preserve_paths=None):
		# remove preserved libraries that don't have any consumers left
		# NOTE(review): recovered from a partial extraction — guard clauses,
		# try/finally around lock/unlock and some else-branches appear to be
		# missing; verify against upstream before relying on this body.
		plib_registry = self.vartree.dbapi._plib_registry
		plib_registry.lock()
		plib_registry.load()
		unmerge_with_replacement = \
			unmerge and preserve_paths is not None
		if unmerge_with_replacement:
			# If self.mycpv is about to be unmerged and we
			# have a replacement package, we want to exclude
			# the irrelevant NEEDED data that belongs to
			# files which are being unmerged now.
			exclude_pkgs = (self.mycpv,)
		self._linkmap_rebuild(exclude_pkgs=exclude_pkgs,
			include_file=needed, preserve_paths=preserve_paths)
		unmerge_preserve = None
		if unmerge and not unmerge_with_replacement:
			unmerge_preserve = \
				self._find_libs_to_preserve(unmerge=True)
		# Drop preserved libs that no longer have consumers, and scrub
		# them from the CONTENTS of still-installed packages.
		cpv_lib_map = self._find_unused_preserved_libs()
		self._remove_preserved_libs(cpv_lib_map)
		for cpv, removed in cpv_lib_map.items():
			if not self.vartree.dbapi.cpv_exists(cpv):
			self.vartree.dbapi.removeFromContents(cpv, removed)
		counter = self.vartree.dbapi.cpv_counter(self.mycpv)
		plib_registry.unregister(self.mycpv,
			self.settings["SLOT"], counter)
		if unmerge_preserve:
			plib_registry.register(self.mycpv,
				self.settings["SLOT"], counter, unmerge_preserve)
			# Remove the preserved files from our contents
			# so that they won't be unmerged.
			self.vartree.dbapi.removeFromContents(self,
		plib_registry.store()
		plib_registry.unlock()
	def unmerge(self, pkgfiles=None, trimworld=None, cleanup=True,
		ldpath_mtimes=None, others_in_slot=None, needed=None,
		preserve_paths=None):
		Unmerges a given package (CPV)
		@param pkgfiles: files to unmerge (generally self.getcontents() )
		@type pkgfiles: Dictionary
		@param trimworld: Unused
		@type trimworld: Boolean
		@param cleanup: cleanup to pass to doebuild (see doebuild)
		@type cleanup: Boolean
		@param ldpath_mtimes: mtimes to pass to env_update (see env_update)
		@type ldpath_mtimes: Dictionary
		@param others_in_slot: all dblink instances in this slot, excluding self
		@type others_in_slot: list
		@param needed: Filename containing libraries needed after unmerge.
		@type needed: String
		@param preserve_paths: Libraries preserved by a package instance that
		is currently being merged. They need to be explicitly passed to the
		LinkageMap, since they are not registered in the
		PreservedLibsRegistry yet.
		@type preserve_paths: set
		1. os.EX_OK if everything went well.
		2. return code of the failed phase (for prerm, postrm, cleanrm)
		# NOTE(review): recovered from a partial extraction — many control
		# lines (try:/finally:, else:, lock/unlock, background/failures
		# initialization, returns) appear to be missing; verify upstream.
		if trimworld is not None:
			warnings.warn("The trimworld parameter of the " + \
				"portage.dbapi.vartree.dblink.unmerge()" + \
				" method is now unused.",
				DeprecationWarning, stacklevel=2)
		if self._scheduler is None:
			# We create a scheduler instance and use it to
			# log unmerge output separately from merge output.
			self._scheduler = PollScheduler().sched_iface
		# Decide whether phase output runs in the background, honoring the
		# PORTAGE_BACKGROUND_UNMERGE override for subprocess mode.
		if self.settings.get("PORTAGE_BACKGROUND") == "subprocess":
			if self.settings.get("PORTAGE_BACKGROUND_UNMERGE") == "1":
				self.settings["PORTAGE_BACKGROUND"] = "1"
				self.settings.backup_changes("PORTAGE_BACKGROUND")
			self.settings.pop("PORTAGE_BACKGROUND", None)
		elif self.settings.get("PORTAGE_BACKGROUND") == "1":
		self.vartree.dbapi._bump_mtime(self.mycpv)
		showMessage = self._display_merge
		if self.vartree.dbapi._categories is not None:
			self.vartree.dbapi._categories = None
		# When others_in_slot is supplied, the security check has already been
		# done for this slot, so it shouldn't be repeated until the next
		# replacement or unmerge operation.
		if others_in_slot is None:
			slot = self.vartree.dbapi.aux_get(self.mycpv, ["SLOT"])[0]
			slot_matches = self.vartree.dbapi.match(
				"%s:%s" % (portage.cpv_getkey(self.mycpv), slot))
			for cur_cpv in slot_matches:
				if cur_cpv == self.mycpv:
				others_in_slot.append(dblink(self.cat, catsplit(cur_cpv)[1],
					settings=self.settings, vartree=self.vartree,
					treetype="vartree", pipe=self._pipe))
		retval = self._security_check([self] + others_in_slot)
		contents = self.getcontents()
		# Now, don't assume that the name of the ebuild is the same as the
		# name of the dir; the package may have been moved.
		ebuild_phase = "prerm"
		mystuff = os.listdir(self.dbdir)
			if x.endswith(".ebuild"):
				myebuildpath = os.path.join(self.dbdir, self.pkg + ".ebuild")
				if x[:-7] != self.pkg:
					# Clean up after vardbapi.move_ent() breakage in
					# portage versions before 2.1.2
					os.rename(os.path.join(self.dbdir, x), myebuildpath)
					write_atomic(os.path.join(self.dbdir, "PF"), self.pkg+"\n")
		self.settings.setcpv(self.mycpv, mydb=self.vartree.dbapi)
		# Prepare the ebuild environment for the prerm phase.
		doebuild_environment(myebuildpath, "prerm",
			settings=self.settings, db=self.vartree.dbapi)
		except UnsupportedAPIException as e:
			# Sometimes this happens due to corruption of the EAPI file.
			showMessage(_("!!! FAILED prerm: %s\n") % \
				os.path.join(self.dbdir, "EAPI"),
				level=logging.ERROR, noiselevel=-1)
			showMessage(_unicode_decode("%s\n") % (e,),
				level=logging.ERROR, noiselevel=-1)
		self._prune_plib_registry(unmerge=True, needed=needed,
			preserve_paths=preserve_paths)
		builddir_lock = None
		scheduler = self._scheduler
		# Only create builddir_lock if doebuild_environment
		# succeeded, since that's needed to initialize
		builddir_lock = EbuildBuildDir(
			scheduler=scheduler,
			settings=self.settings)
		builddir_lock.lock()
		prepare_build_dirs(settings=self.settings, cleanup=True)
		log_path = self.settings.get("PORTAGE_LOG_FILE")
		# Run pkg_prerm.
		phase = EbuildPhase(background=background,
			phase=ebuild_phase, scheduler=scheduler,
			settings=self.settings)
		retval = phase.wait()
		# XXX: Decide how to handle failures here.
		if retval != os.EX_OK:
			showMessage(_("!!! FAILED prerm: %s\n") % retval,
				level=logging.ERROR, noiselevel=-1)
		# Remove the installed files while holding the config-memory lock.
		conf_mem_file = os.path.join(self._eroot, CONFIG_MEMORY_FILE)
		conf_mem_lock = lockfile(conf_mem_file)
		self._unmerge_pkgfiles(pkgfiles, others_in_slot, conf_mem_file)
		unlockfile(conf_mem_lock)
		self._clear_contents_cache()
		# Run pkg_postrm.
		ebuild_phase = "postrm"
		phase = EbuildPhase(background=background,
			phase=ebuild_phase, scheduler=scheduler,
			settings=self.settings)
		retval = phase.wait()
		# XXX: Decide how to handle failures here.
		if retval != os.EX_OK:
			showMessage(_("!!! FAILED postrm: %s\n") % retval,
				level=logging.ERROR, noiselevel=-1)
		self.vartree.dbapi._bump_mtime(self.mycpv)
		# Build a detailed error report if any removal phase failed.
		if retval != os.EX_OK:
			msg = _("The '%(ebuild_phase)s' "
				"phase of the '%(cpv)s' package "
				"has failed with exit value %(retval)s.") % \
				{"ebuild_phase":ebuild_phase, "cpv":self.mycpv,
			from textwrap import wrap
			msg_lines.extend(wrap(msg, 72))
			msg_lines.append("")
			ebuild_name = os.path.basename(myebuildpath)
			ebuild_dir = os.path.dirname(myebuildpath)
			msg = _("The problem occurred while executing "
				"the ebuild file named '%(ebuild_name)s' "
				"located in the '%(ebuild_dir)s' directory. "
				"If necessary, manually remove "
				"the environment.bz2 file and/or the "
				"ebuild file located in that directory.") % \
				{"ebuild_name":ebuild_name, "ebuild_dir":ebuild_dir}
			msg_lines.extend(wrap(msg, 72))
			msg_lines.append("")
			"of the environment.bz2 file is "
			"preferred since it may allow the "
			"removal phases to execute successfully. "
			"The ebuild will be "
			"sourced and the eclasses "
			"from the current portage tree will be used "
			"when necessary. Removal of "
			"the ebuild file will cause the "
			"pkg_prerm() and pkg_postrm() removal "
			"phases to be skipped entirely.")
			msg_lines.extend(wrap(msg, 72))
		self._eerror(ebuild_phase, msg_lines)
		self._elog_process(phasefilter=("prerm", "postrm"))
		if retval == os.EX_OK and builddir_lock is not None:
			# myebuildpath might be None, so ensure
			# it has a sane value for the clean phase,
			# even though it won't really be sourced.
			myebuildpath = os.path.join(self.dbdir,
				self.pkg + ".ebuild")
			doebuild_environment(myebuildpath, "cleanrm",
				settings=self.settings, db=self.vartree.dbapi)
			phase = EbuildPhase(background=background,
				phase="cleanrm", scheduler=scheduler,
				settings=self.settings)
			retval = phase.wait()
		if builddir_lock is not None:
			builddir_lock.unlock()
		# Clean up or keep the unmerge log depending on features.
		if log_path is not None:
			if not failures and 'unmerge-logs' not in self.settings.features:
				st = os.stat(log_path)
		if log_path is not None and os.path.exists(log_path):
			# Restore this since it gets lost somewhere above and it
			# needs to be set for _display_merge() to be able to log.
			# Note that the log isn't necessarily supposed to exist
			# since if PORT_LOGDIR is unset then it's a temp file
			# so it gets cleaned above.
			self.settings["PORTAGE_LOG_FILE"] = log_path
			self.settings.pop("PORTAGE_LOG_FILE", None)
		# Lock the config memory file to prevent symlink creation
		# in merge_contents from overlapping with env-update.
		conf_mem_file = os.path.join(self._eroot, CONFIG_MEMORY_FILE)
		conf_mem_lock = lockfile(conf_mem_file)
		env_update(target_root=self.settings['ROOT'],
			prev_mtimes=ldpath_mtimes,
			contents=contents, env=self.settings.environ(),
			writemsg_level=self._display_merge)
		unlockfile(conf_mem_lock)
	def _display_merge(self, msg, level=0, noiselevel=0):
		# Route a merge/unmerge message either directly to writemsg_level or
		# through the scheduler's output handler (which can log to a file).
		# NOTE(review): recovered from a partial extraction — the early
		# 'return' statements and an 'else:' appear to be missing; verify.
		if not self._verbose and noiselevel >= 0 and level < logging.WARN:
		if self._scheduler is None:
			writemsg_level(msg, level=level, noiselevel=noiselevel)
			if self.settings.get("PORTAGE_BACKGROUND") != "subprocess":
				log_path = self.settings.get("PORTAGE_LOG_FILE")
			background = self.settings.get("PORTAGE_BACKGROUND") == "1"
			if background and log_path is None:
				# Background with no log file: only warnings+ reach the tty.
				if level >= logging.WARN:
					writemsg_level(msg, level=level, noiselevel=noiselevel)
			self._scheduler.output(msg,
				log_path=log_path, background=background,
				level=level, noiselevel=noiselevel)
	def _unmerge_pkgfiles(self, pkgfiles, others_in_slot, conf_mem_file):
		Unmerges the contents of a package from the liveFS
		Removes the VDB entry for self
		@param pkgfiles: typically self.getcontents()
		@type pkgfiles: Dictionary { filename: [ 'type', '?', 'md5sum' ] }
		@param others_in_slot: all dblink instances in this slot, excluding self
		@type others_in_slot: list
		# NOTE(review): recovered from a partial extraction — numerous lines
		# (try:, else:, continue, mydirs/stale_confmem/unmerge_desc init,
		# sort key, os.rmdir call, bsd_chflags guards) appear to be missing;
		# verify against upstream before relying on this body.
		perf_md5 = perform_md5
		showMessage = self._display_merge
		showMessage(_("No package files given... Grabbing a set.\n"))
		pkgfiles = self.getcontents()
		if others_in_slot is None:
			slot = self.vartree.dbapi.aux_get(self.mycpv, ["SLOT"])[0]
			slot_matches = self.vartree.dbapi.match(
				"%s:%s" % (portage.cpv_getkey(self.mycpv), slot))
			for cur_cpv in slot_matches:
				if cur_cpv == self.mycpv:
				others_in_slot.append(dblink(self.cat, catsplit(cur_cpv)[1],
					settings=self.settings,
					vartree=self.vartree, treetype="vartree", pipe=self._pipe))
		dest_root = self._eroot
		dest_root_len = len(dest_root) - 1
		# Config-protect memory: maps protected paths to recorded md5s.
		cfgfiledict = grabdict(conf_mem_file)
		unmerge_orphans = "unmerge-orphans" in self.settings.features
		calc_prelink = "prelink-checksums" in self.settings.features
		self.updateprotect()
		mykeys = list(pkgfiles)
		#process symlinks second-to-last, directories last.
		# Errors we tolerate while unlinking/rmdir'ing (file already gone,
		# busy, replaced by a different node type, non-empty dir, ...).
		ignored_unlink_errnos = (
			errno.EBUSY, errno.ENOENT,
			errno.ENOTDIR, errno.EISDIR)
		ignored_rmdir_errnos = (
			errno.EEXIST, errno.ENOTEMPTY,
			errno.EBUSY, errno.ENOENT,
			errno.ENOTDIR, errno.EISDIR,
		modprotect = os.path.join(self._eroot, "lib/modules/")
		def unlink(file_name, lstatobj):
			# Remove one filesystem entry, working around BSD file flags.
			if lstatobj.st_flags != 0:
				bsd_chflags.lchflags(file_name, 0)
			parent_name = os.path.dirname(file_name)
			# Use normal stat/chflags for the parent since we want to
			# follow any symlinks to the real parent directory.
			pflags = os.stat(parent_name).st_flags
			bsd_chflags.chflags(parent_name, 0)
			if not stat.S_ISLNK(lstatobj.st_mode):
				# Remove permissions to ensure that any hardlinks to
				# suid/sgid files are rendered harmless.
				os.chmod(file_name, 0)
			os.unlink(file_name)
			except OSError as ose:
				# If the chmod or unlink fails, you are in trouble.
				# With Prefix this can be because the file is owned
				# by someone else (a screwup by root?), on a normal
				# system maybe filesystem corruption. In any case,
				# if we backtrace and die here, we leave the system
				# in a totally undefined state, hence we just bleed
				# like hell and continue to hopefully finish all our
				# administrative and pkg_postinst stuff.
				self._eerror("postrm",
					["Could not chmod or unlink '%s': %s" % \
			if bsd_chflags and pflags != 0:
				# Restore the parent flags we saved before unlinking
				bsd_chflags.chflags(parent_name, pflags)
		def show_unmerge(zing, desc, file_type, file_name):
			# One status line per entry: marker, action, type, path.
			showMessage("%s %s %s %s\n" % \
				(zing, desc.ljust(8), file_type, file_name))
		unmerge_desc["cfgpro"] = _("cfgpro")
		unmerge_desc["replaced"] = _("replaced")
		unmerge_desc["!dir"] = _("!dir")
		unmerge_desc["!empty"] = _("!empty")
		unmerge_desc["!fif"] = _("!fif")
		unmerge_desc["!found"] = _("!found")
		unmerge_desc["!md5"] = _("!md5")
		unmerge_desc["!mtime"] = _("!mtime")
		unmerge_desc["!obj"] = _("!obj")
		unmerge_desc["!sym"] = _("!sym")
		real_root = self.settings['ROOT']
		real_root_len = len(real_root) - 1
		eroot_split_len = len(self.settings["EROOT"].split(os.sep)) - 1
		for i, objkey in enumerate(mykeys):
			obj = normalize_path(objkey)
			# Probe whether this path survives the merge encoding; fall
			# back to the plain fs encoding when it does not.
			_unicode_encode(obj,
				encoding=_encodings['merge'], errors='strict')
			except UnicodeEncodeError:
				# The package appears to have been merged with a
				# different value of sys.getfilesystemencoding(),
				# so fall back to utf_8 if appropriate.
				_unicode_encode(obj,
					encoding=_encodings['fs'], errors='strict')
				except UnicodeEncodeError:
					perf_md5 = portage.checksum.perform_md5
			file_data = pkgfiles[objkey]
			file_type = file_data[0]
			statobj = os.stat(obj)
			lstatobj = os.lstat(obj)
			except (OSError, AttributeError):
			islink = lstatobj is not None and stat.S_ISLNK(lstatobj.st_mode)
			if lstatobj is None:
				show_unmerge("---", unmerge_desc["!found"], file_type, obj)
			# don't use EROOT, CONTENTS entries already contain EPREFIX
			if obj.startswith(real_root):
				relative_path = obj[real_root_len:]
				for dblnk in others_in_slot:
					if dblnk.isowner(relative_path):
				# A new instance of this package claims the file, so
				show_unmerge("---", unmerge_desc["replaced"], file_type, obj)
				elif relative_path in cfgfiledict:
					stale_confmem.append(relative_path)
			# next line includes a tweak to protect modules from being unmerged,
			# but we don't protect modules from being overwritten if they are
			# upgraded. We effectively only want one half of the config protection
			# functionality for /lib/modules. For portage-ng both capabilities
			# should be able to be independently specified.
			# TODO: For rebuilds, re-parent previous modules to the new
			# installed instance (so they are not orphans). For normal
			# uninstall (not rebuild/reinstall), remove the modules along
			# with all other files (leave no orphans).
			if obj.startswith(modprotect):
				show_unmerge("---", unmerge_desc["cfgpro"], file_type, obj)
			# Don't unlink symlinks to directories here since that can
			# remove /lib and /usr/lib symlinks.
			if unmerge_orphans and \
				lstatobj and not stat.S_ISDIR(lstatobj.st_mode) and \
				not (islink and statobj and stat.S_ISDIR(statobj.st_mode)) and \
				not self.isprotected(obj):
				unlink(obj, lstatobj)
				except EnvironmentError as e:
					if e.errno not in ignored_unlink_errnos:
				show_unmerge("<<<", "", file_type, obj)
			# Compare recorded vs. on-disk mtime for non-dir entries.
			lmtime = str(lstatobj[stat.ST_MTIME])
			if (pkgfiles[objkey][0] not in ("dir", "fif", "dev")) and (lmtime != pkgfiles[objkey][1]):
				show_unmerge("---", unmerge_desc["!mtime"], file_type, obj)
			if pkgfiles[objkey][0] == "dir":
				if lstatobj is None or not stat.S_ISDIR(lstatobj.st_mode):
					show_unmerge("---", unmerge_desc["!dir"], file_type, obj)
			elif pkgfiles[objkey][0] == "sym":
				show_unmerge("---", unmerge_desc["!sym"], file_type, obj)
				# Go ahead and unlink symlinks to directories here when
				# they're actually recorded as symlinks in the contents.
				# Normally, symlinks such as /lib -> lib64 are not recorded
				# as symlinks in the contents of a package. If a package
				# installs something into ${D}/lib/, it is recorded in the
				# contents as a directory even if it happens to correspond
				# to a symlink when it's merged to the live filesystem.
				unlink(obj, lstatobj)
				show_unmerge("<<<", "", file_type, obj)
				except (OSError, IOError) as e:
					if e.errno not in ignored_unlink_errnos:
					show_unmerge("!!!", "", file_type, obj)
			elif pkgfiles[objkey][0] == "obj":
				if statobj is None or not stat.S_ISREG(statobj.st_mode):
					show_unmerge("---", unmerge_desc["!obj"], file_type, obj)
				mymd5 = perf_md5(obj, calc_prelink=calc_prelink)
				except FileNotFound as e:
					# the file has disappeared between now and our stat call
					show_unmerge("---", unmerge_desc["!obj"], file_type, obj)
				# string.lower is needed because db entries used to be in upper-case. The
				# string.lower allows for backwards compatibility.
				if mymd5 != pkgfiles[objkey][2].lower():
					show_unmerge("---", unmerge_desc["!md5"], file_type, obj)
				unlink(obj, lstatobj)
				except (OSError, IOError) as e:
					if e.errno not in ignored_unlink_errnos:
				show_unmerge("<<<", "", file_type, obj)
			elif pkgfiles[objkey][0] == "fif":
				if not stat.S_ISFIFO(lstatobj[stat.ST_MODE]):
					show_unmerge("---", unmerge_desc["!fif"], file_type, obj)
				show_unmerge("---", "", file_type, obj)
			elif pkgfiles[objkey][0] == "dev":
				show_unmerge("---", "", file_type, obj)
		# Second pass: remove the (now hopefully empty) directories.
		mydirs = sorted(mydirs)
		lstatobj = os.lstat(obj)
		if lstatobj.st_flags != 0:
			bsd_chflags.lchflags(obj, 0)
		parent_name = os.path.dirname(obj)
		# Use normal stat/chflags for the parent since we want to
		# follow any symlinks to the real parent directory.
		pflags = os.stat(parent_name).st_flags
		bsd_chflags.chflags(parent_name, 0)
		if bsd_chflags and pflags != 0:
			# Restore the parent flags we saved before unlinking
			bsd_chflags.chflags(parent_name, pflags)
		show_unmerge("<<<", "", "dir", obj)
		except EnvironmentError as e:
			if e.errno not in ignored_rmdir_errnos:
			if e.errno != errno.ENOENT:
				show_unmerge("---", unmerge_desc["!empty"], "dir", obj)
		# Remove stale entries from config memory.
		for filename in stale_confmem:
			del cfgfiledict[filename]
		writedict(cfgfiledict, conf_mem_file)
		#remove self from vartree database so that our own virtual gets zapped if we're the last node
		self.vartree.zap(self.mycpv)
	def isowner(self, filename, destroot=None):
		Check if a file belongs to this package. This may
		result in a stat call for the parent directory of
		every installed file, since the inode numbers are
		used to work around the problem of ambiguous paths
		caused by symlinked directories. The results of
		stat calls are cached to optimize multiple calls
		1. True if this package owns the file.
		2. False if this package does not own the file.
		# destroot is deprecated and ignored; warn callers still passing it.
		if destroot is not None and destroot != self._eroot:
			warnings.warn("The second parameter of the " + \
				"portage.dbapi.vartree.dblink.isowner()" + \
				" is now unused. Instead " + \
				"self.settings['EROOT'] will be used.",
				DeprecationWarning, stacklevel=2)
		# _match_contents returns the matching entry or False; coerce to bool.
		return bool(self._match_contents(filename))
	def _match_contents(self, filename, destroot=None):
		The matching contents entry is returned, which is useful
		since the path may differ from the one given by the caller,
		@return: the contents entry corresponding to the given path, or False
		if the file is not owned by this package.
		# NOTE(review): recovered from a partial extraction — several try:,
		# return and loop-header lines appear to be missing; verify upstream.
		filename = _unicode_decode(filename,
			encoding=_encodings['content'], errors='strict')
		# destroot is deprecated and ignored; warn callers still passing it.
		if destroot is not None and destroot != self._eroot:
			warnings.warn("The second parameter of the " + \
				"portage.dbapi.vartree.dblink._match_contents()" + \
				" is now unused. Instead " + \
				"self.settings['ROOT'] will be used.",
				DeprecationWarning, stacklevel=2)
		# don't use EROOT here, image already contains EPREFIX
		destroot = self.settings['ROOT']
		# The given filename argument might have a different encoding than the
		# the filenames contained in the contents, so use separate wrapped os
		# modules for each. The basename is more likely to contain non-ascii
		# characters than the directory path, so use os_filename_arg for all
		# operations involving the basename of the filename arg.
		os_filename_arg = _os_merge
		_unicode_encode(filename,
			encoding=_encodings['merge'], errors='strict')
		except UnicodeEncodeError:
			# The package appears to have been merged with a
			# different value of sys.getfilesystemencoding(),
			# so fall back to utf_8 if appropriate.
			_unicode_encode(filename,
				encoding=_encodings['fs'], errors='strict')
			except UnicodeEncodeError:
				os_filename_arg = portage.os
		# Absolute, normalized candidate path under the live root.
		destfile = normalize_path(
			os_filename_arg.path.join(destroot,
				filename.lstrip(os_filename_arg.path.sep)))
		pkgfiles = self.getcontents()
		# Fast path: exact match against the CONTENTS keys.
		if pkgfiles and destfile in pkgfiles:
		basename = os_filename_arg.path.basename(destfile)
		# Lazily build the set of owned basenames for the shortcut below.
		if self._contents_basenames is None:
				encoding=_encodings['merge'],
			except UnicodeEncodeError:
				# The package appears to have been merged with a
				# different value of sys.getfilesystemencoding(),
				# so fall back to utf_8 if appropriate.
					encoding=_encodings['fs'],
				except UnicodeEncodeError:
			self._contents_basenames = set(
				os.path.basename(x) for x in pkgfiles)
		if basename not in self._contents_basenames:
			# This is a shortcut that, in most cases, allows us to
			# eliminate this package as an owner without the need
			# to examine inode numbers of parent directories.
		# Use stat rather than lstat since we want to follow
		# any symlinks to the real parent directory.
		parent_path = os_filename_arg.path.dirname(destfile)
		parent_stat = os_filename_arg.stat(parent_path)
		except EnvironmentError as e:
			if e.errno != errno.ENOENT:
		# Lazily build the (st_dev, st_ino) -> parent path(s) index so that
		# paths reached through symlinked directories still resolve.
		if self._contents_inodes is None:
				encoding=_encodings['merge'],
			except UnicodeEncodeError:
				# The package appears to have been merged with a
				# different value of sys.getfilesystemencoding(),
				# so fall back to utf_8 if appropriate.
					encoding=_encodings['fs'],
				except UnicodeEncodeError:
			self._contents_inodes = {}
			parent_paths = set()
			p_path = os.path.dirname(x)
			if p_path in parent_paths:
			parent_paths.add(p_path)
			inode_key = (s.st_dev, s.st_ino)
			# Use lists of paths in case multiple
			# paths reference the same inode.
			p_path_list = self._contents_inodes.get(inode_key)
			if p_path_list is None:
				self._contents_inodes[inode_key] = p_path_list
			if p_path not in p_path_list:
				p_path_list.append(p_path)
			# Try each known parent path sharing this inode.
			p_path_list = self._contents_inodes.get(
				(parent_stat.st_dev, parent_stat.st_ino))
				for p_path in p_path_list:
					x = os_filename_arg.path.join(p_path, basename)
	def _linkmap_rebuild(self, **kwargs):
		Rebuild the self._linkmap if it's not broken due to missing
		scanelf binary. Also, return early if preserve-libs is disabled
		and the preserve-libs registry is empty.
		# NOTE(review): the early 'return' and the 'try:' around rebuild()
		# appear to be missing from this extraction — verify upstream.
		if self._linkmap_broken or \
			self.vartree.dbapi._linkmap is None or \
			self.vartree.dbapi._plib_registry is None or \
			("preserve-libs" not in self.settings.features and \
			not self.vartree.dbapi._plib_registry.hasEntries()):
		self.vartree.dbapi._linkmap.rebuild(**kwargs)
		except CommandNotFound as e:
			# scanelf is unavailable: permanently disable preserve-libs
			# for this dblink and report the problem.
			self._linkmap_broken = True
			self._display_merge(_("!!! Disabling preserve-libs " \
				"due to error: Command Not Found: %s\n") % (e,),
				level=logging.ERROR, noiselevel=-1)
# NOTE(review): excerpt with elided interior lines (try:/continue/else
# branches implied by the except/comment structure below) — verify against
# the full file before changing logic.
# Purpose: build a provider/consumer digraph over the old instance's
# installed libraries and return the set of library paths that must be
# preserved because remaining consumers still link against them.
2300 def _find_libs_to_preserve(self, unmerge=False):
2302 Get set of relative paths for libraries to be preserved. When
2303 unmerge is False, file paths to preserve are selected from
2304 self._installed_instance. Otherwise, paths are selected from
# Preconditions: requires a working linkmap, a preserved-libs registry,
# an installed instance (unless unmerging), and FEATURES=preserve-libs.
2307 if self._linkmap_broken or \
2308 self.vartree.dbapi._linkmap is None or \
2309 self.vartree.dbapi._plib_registry is None or \
2310 (not unmerge and self._installed_instance is None) or \
2311 "preserve-libs" not in self.settings.features:
2315 linkmap = self.vartree.dbapi._linkmap
# When unmerging, "old" contents are this instance's own; otherwise
# they come from the previously installed instance being replaced.
2317 installed_instance = self
2319 installed_instance = self._installed_instance
2320 old_contents = installed_instance.getcontents()
2321 root = self.settings['ROOT']
2322 root_len = len(root) - 1
2323 lib_graph = digraph()
# Helper: map a path to its canonical graph node, merging alternative
# paths (e.g. hardlinks/symlinks) that resolve to the same node.
2326 def path_to_node(path):
2327 node = path_node_map.get(path)
2329 node = LinkageMap._LibGraphNode(path, root)
2330 alt_path_node = lib_graph.get(node)
2331 if alt_path_node is not None:
2332 node = alt_path_node
2333 node.alt_paths.add(path)
2334 path_node_map[path] = node
2338 provider_nodes = set()
2339 # Create provider nodes and add them to the graph.
2340 for f_abs in old_contents:
2344 _unicode_encode(f_abs,
2345 encoding=_encodings['merge'], errors='strict')
2346 except UnicodeEncodeError:
2347 # The package appears to have been merged with a
2348 # different value of sys.getfilesystemencoding(),
2349 # so fall back to utf_8 if appropriate.
2351 _unicode_encode(f_abs,
2352 encoding=_encodings['fs'], errors='strict')
2353 except UnicodeEncodeError:
2358 f = f_abs[root_len:]
2359 if not unmerge and self.isowner(f):
2360 # We have an identically named replacement file,
2361 # so we don't try to preserve the old copy.
2364 consumers = linkmap.findConsumers(f)
2369 provider_node = path_to_node(f)
2370 lib_graph.add(provider_node, None)
2371 provider_nodes.add(provider_node)
2372 consumer_map[provider_node] = consumers
2374 # Create consumer nodes and add them to the graph.
2375 # Note that consumers can also be providers.
2376 for provider_node, consumers in consumer_map.items():
2378 consumer_node = path_to_node(c)
2379 if installed_instance.isowner(c) and \
2380 consumer_node not in provider_nodes:
2381 # This is not a provider, so it will be uninstalled.
2383 lib_graph.add(provider_node, consumer_node)
2385 # Locate nodes which should be preserved. They consist of all
2386 # providers that are reachable from consumers that are not
2387 # providers themselves.
2388 preserve_nodes = set()
2389 for consumer_node in lib_graph.root_nodes():
2390 if consumer_node in provider_nodes:
2392 # Preserve all providers that are reachable from this consumer.
# Iterative DFS over child (provider) edges; preserve_nodes doubles
# as the visited set to avoid revisiting shared providers.
2393 node_stack = lib_graph.child_nodes(consumer_node)
2395 provider_node = node_stack.pop()
2396 if provider_node in preserve_nodes:
2398 preserve_nodes.add(provider_node)
2399 node_stack.extend(lib_graph.child_nodes(provider_node))
2401 preserve_paths = set()
2402 for preserve_node in preserve_nodes:
2403 # Make sure that at least one of the paths is not a symlink.
2404 # This prevents symlinks from being erroneously preserved by
2405 # themselves when the old instance installed symlinks that
2406 # the new instance does not install.
2408 for f in preserve_node.alt_paths:
2409 f_abs = os.path.join(root, f.lstrip(os.sep))
2411 if stat.S_ISREG(os.lstat(f_abs).st_mode):
2418 preserve_paths.update(preserve_node.alt_paths)
2420 return preserve_paths
# NOTE(review): excerpt with elided interior lines (early return,
# loop-control statements) — verify against the full file before editing.
# Purpose: graft the CONTENTS entries for preserved library paths (and any
# missing parent directory entries) from the old instance into this
# package's CONTENTS, then rewrite the temporary CONTENTS file atomically.
# Side effect: mutates the passed-in preserve_paths set, removing entries
# that lack a CONTENTS record in the old instance.
2422 def _add_preserve_libs_to_contents(self, preserve_paths):
2424 Preserve libs returned from _find_libs_to_preserve().
2427 if not preserve_paths:
2431 showMessage = self._display_merge
2432 root = self.settings['ROOT']
2434 # Copy contents entries from the old package to the new one.
2435 new_contents = self.getcontents().copy()
2436 old_contents = self._installed_instance.getcontents()
2437 for f in sorted(preserve_paths):
2438 f = _unicode_decode(f,
2439 encoding=_encodings['content'], errors='strict')
2440 f_abs = os.path.join(root, f.lstrip(os.sep))
2441 contents_entry = old_contents.get(f_abs)
2442 if contents_entry is None:
2443 # This will probably never happen, but it might if one of the
2444 # paths returned from findConsumers() refers to one of the libs
2445 # that should be preserved yet the path is not listed in the
2446 # contents. Such a path might belong to some other package, so
2447 # it shouldn't be preserved here.
2448 showMessage(_("!!! File '%s' will not be preserved "
2449 "due to missing contents entry\n") % (f_abs,),
2450 level=logging.ERROR, noiselevel=-1)
2451 preserve_paths.remove(f)
2453 new_contents[f_abs] = contents_entry
2454 obj_type = contents_entry[0]
2455 showMessage(_(">>> needed %s %s\n") % (obj_type, f_abs),
2457 # Add parent directories to contents if necessary.
# Walk up toward ROOT recording "dir" entries; the prev check below
# guards against a pathological dirname fixed point (e.g. at "/").
2458 parent_dir = os.path.dirname(f_abs)
2459 while len(parent_dir) > len(root):
2460 new_contents[parent_dir] = ["dir"]
2462 parent_dir = os.path.dirname(parent_dir)
2463 if prev == parent_dir:
# Atomic replace of the in-progress vdb CONTENTS file.
2465 outfile = atomic_ofstream(os.path.join(self.dbtmpdir, "CONTENTS"))
2466 write_contents(new_contents, root, outfile)
2468 self._clear_contents_cache()
# NOTE(review): excerpt with elided interior lines (early return, continue
# statements, some initializations such as path_node_map / path_cpv_map /
# provider_cache / cpv_lib_map) — verify against the full file.
# Purpose: walk the preserved-libs registry and return a {cpv: [paths]}
# map of preserved libraries that no longer have any live consumers, so
# they can be unlinked by _remove_preserved_libs().
2470 def _find_unused_preserved_libs(self):
2472 Find preserved libraries that don't have any consumers left.
# Requires a working linkmap and a non-empty preserved-libs registry.
2475 if self._linkmap_broken or \
2476 self.vartree.dbapi._linkmap is None or \
2477 self.vartree.dbapi._plib_registry is None or \
2478 not self.vartree.dbapi._plib_registry.hasEntries():
2481 # Since preserved libraries can be consumers of other preserved
2482 # libraries, use a graph to track consumer relationships.
2483 plib_dict = self.vartree.dbapi._plib_registry.getPreservedLibs()
2484 lib_graph = digraph()
2485 preserved_nodes = set()
2486 preserved_paths = set()
2489 root = self.settings['ROOT']
# Helper: canonicalize a path to a graph node, merging alternative
# paths that refer to the same node (same as in _find_libs_to_preserve).
2491 def path_to_node(path):
2492 node = path_node_map.get(path)
2494 node = LinkageMap._LibGraphNode(path, root)
2495 alt_path_node = lib_graph.get(node)
2496 if alt_path_node is not None:
2497 node = alt_path_node
2498 node.alt_paths.add(path)
2499 path_node_map[path] = node
2502 linkmap = self.vartree.dbapi._linkmap
# Seed the graph with every registered preserved lib that still exists
# on disk, plus edges to its surviving consumers.
2503 for cpv, plibs in plib_dict.items():
2505 path_cpv_map[f] = cpv
2506 preserved_node = path_to_node(f)
2507 if not preserved_node.file_exists():
2509 lib_graph.add(preserved_node, None)
2510 preserved_paths.add(f)
2511 preserved_nodes.add(preserved_node)
2512 for c in self.vartree.dbapi._linkmap.findConsumers(f):
2513 consumer_node = path_to_node(c)
2514 if not consumer_node.file_exists():
2516 # Note that consumers may also be providers.
2517 lib_graph.add(preserved_node, consumer_node)
2519 # Eliminate consumers having providers with the same soname as an
2520 # installed library that is not preserved. This eliminates
2521 # libraries that are erroneously preserved due to a move from one
2522 # directory to another.
2524 for preserved_node in preserved_nodes:
2525 soname = linkmap.getSoname(preserved_node)
2526 for consumer_node in lib_graph.parent_nodes(preserved_node):
2527 if consumer_node in preserved_nodes:
# Cache findProviders() results per consumer; it can be queried
# once per (consumer, soname) pair otherwise.
2529 providers = provider_cache.get(consumer_node)
2530 if providers is None:
2531 providers = linkmap.findProviders(consumer_node)
2532 provider_cache[consumer_node] = providers
2533 providers = providers.get(soname)
2534 if providers is None:
2536 for provider in providers:
2537 if provider in preserved_paths:
2539 provider_node = path_to_node(provider)
2540 if not provider_node.file_exists():
2542 if provider_node in preserved_nodes:
2544 # An alternative provider seems to be
2545 # installed, so drop this edge.
2546 lib_graph.remove_edge(preserved_node, consumer_node)
# Repeatedly strip preserved root nodes (no remaining consumers) from
# the graph; everything stripped is collected for removal.
2550 while not lib_graph.empty():
2551 root_nodes = preserved_nodes.intersection(lib_graph.root_nodes())
2554 lib_graph.difference_update(root_nodes)
2556 for node in root_nodes:
2557 unlink_list.update(node.alt_paths)
2558 unlink_list = sorted(unlink_list)
2559 for obj in unlink_list:
2560 cpv = path_cpv_map.get(obj)
2562 # This means that a symlink is in the preserved libs
2563 # registry, but the actual lib it points to is not.
2564 self._display_merge(_("!!! symlink to lib is preserved, "
2565 "but not the lib itself:\n!!! '%s'\n") % (obj,),
2566 level=logging.ERROR, noiselevel=-1)
# Group removable paths by the owning package's cpv.
2568 removed = cpv_lib_map.get(cpv)
2571 cpv_lib_map[cpv] = removed
# NOTE(review): excerpt with elided interior lines (the actual unlink call
# and its try:, the empty-directory rmdir loop body) — verify against the
# full file before editing.
# Purpose: unlink the no-longer-needed preserved library files collected
# by _find_unused_preserved_libs(), clean up any now-empty parent
# directories, and prune stale entries from the preserved-libs registry.
2576 def _remove_preserved_libs(self, cpv_lib_map):
2578 Remove files returned from _find_unused_preserved_libs().
# Flatten the {cpv: [paths]} map into one sorted removal list.
2583 files_to_remove = set()
2584 for files in cpv_lib_map.values():
2585 files_to_remove.update(files)
2586 files_to_remove = sorted(files_to_remove)
2587 showMessage = self._display_merge
2588 root = self.settings['ROOT']
2591 for obj in files_to_remove:
2592 obj = os.path.join(root, obj.lstrip(os.sep))
# Remember parents so empty directories can be removed afterwards.
2593 parent_dirs.add(os.path.dirname(obj))
2594 if os.path.islink(obj):
# A vanished file (ENOENT) is tolerated; other errors propagate.
2600 except OSError as e:
2601 if e.errno != errno.ENOENT:
2605 showMessage(_("<<< !needed %s %s\n") % (obj_type, obj),
2608 # Remove empty parent directories if possible.
2610 x = parent_dirs.pop()
2617 x = os.path.dirname(x)
2621 self.vartree.dbapi._plib_registry.pruneNonExisting()
# NOTE(review): excerpt with elided interior lines (several initializations
# such as plib_paths / plib_cpv_map / collisions, try:/continue/break
# statements, and the isowned loop body) — verify against the full file.
# Purpose: scan this package's to-be-merged contents against the live
# filesystem and installed packages, returning (collisions,
# plib_collisions): ordinary file collisions plus collisions with
# registered preserved libraries (which this package will take over).
2623 def _collision_protect(self, srcroot, destroot, mypkglist, mycontents):
# COLLISION_IGNORE entries are normalized paths to exempt from checks.
2627 collision_ignore = set([normalize_path(myignore) for myignore in \
2628 portage.util.shlex_split(
2629 self.settings.get("COLLISION_IGNORE", ""))])
2631 # For collisions with preserved libraries, the current package
2632 # will assume ownership and the libraries will be unregistered.
2633 if self.vartree.dbapi._plib_registry is None:
2634 # preserve-libs is entirely disabled
2639 plib_dict = self.vartree.dbapi._plib_registry.getPreservedLibs()
2642 for cpv, paths in plib_dict.items():
2643 plib_paths.update(paths)
2645 plib_cpv_map[f] = cpv
# Resolve preserved-lib paths to inodes so hardlinked/alternate names
# are recognized as the same library.
2646 plib_inodes = self._lstat_inode_map(plib_paths)
2648 plib_collisions = {}
2650 showMessage = self._display_merge
# NOTE(review): the destroot parameter is overridden here with
# settings['ROOT'] — the argument value is not used below this point.
2653 destroot = self.settings['ROOT']
2654 showMessage(_(" %s checking %d files for package collisions\n") % \
2655 (colorize("GOOD", "*"), len(mycontents)))
2656 for i, f in enumerate(mycontents):
# Progress indicator every 1000 files.
2657 if i % 1000 == 0 and i != 0:
2658 showMessage(_("%d files checked ...\n") % i)
2660 dest_path = normalize_path(
2661 os.path.join(destroot, f.lstrip(os.path.sep)))
2663 dest_lstat = os.lstat(dest_path)
2664 except EnvironmentError as e:
2665 if e.errno == errno.ENOENT:
2668 elif e.errno == errno.ENOTDIR:
2670 # A non-directory is in a location where this package
2671 # expects to have a directory.
# Walk upward until the offending non-directory ancestor is
# found; that ancestor is treated as the colliding path.
2673 parent_path = dest_path
2674 while len(parent_path) > len(destroot):
2675 parent_path = os.path.dirname(parent_path)
2677 dest_lstat = os.lstat(parent_path)
2679 except EnvironmentError as e:
2680 if e.errno != errno.ENOTDIR:
2684 raise AssertionError(
2685 "unable to find non-directory " + \
2686 "parent for '%s'" % dest_path)
2687 dest_path = parent_path
2688 f = os.path.sep + dest_path[len(destroot):]
# Collision with a preserved lib: record it per owning cpv and
# exclude it from the ordinary collision list (see comment below).
2696 plibs = plib_inodes.get((dest_lstat.st_dev, dest_lstat.st_ino))
2699 cpv = plib_cpv_map[path]
2700 paths = plib_collisions.get(cpv)
2703 plib_collisions[cpv] = paths
2705 # The current package will assume ownership and the
2706 # libraries will be unregistered, so exclude this
2707 # path from the normal collisions.
2711 full_path = os.path.join(destroot, f.lstrip(os.path.sep))
# Check whether any package in mypkglist already owns the file.
2712 for ver in mypkglist:
2716 if not isowned and self.isprotected(full_path):
# Honor COLLISION_IGNORE: exact match or prefix-directory match.
2720 if collision_ignore:
2721 if f in collision_ignore:
2724 for myignore in collision_ignore:
2725 if f.startswith(myignore + os.path.sep):
2729 collisions.append(f)
2730 return collisions, plib_collisions
# NOTE(review): excerpt with elided interior lines (inode_map
# initialization, the try:/lstat call, the else branch, the return) —
# verify against the full file before editing.
# Purpose: lstat each given path (relative to ROOT) and group the paths
# by (st_dev, st_ino) so hardlinks to the same inode are clustered.
2732 def _lstat_inode_map(self, path_iter):
2734 Use lstat to create a map of the form:
2735 {(st_dev, st_ino) : set([path1, path2, ...])}
2736 Multiple paths may reference the same inode due to hardlinks.
2737 All lstat() calls are relative to self.myroot.
2742 root = self.settings['ROOT']
2745 path = os.path.join(root, f.lstrip(os.sep))
# Missing files/dirs are simply skipped; other errors propagate.
2748 except OSError as e:
2749 if e.errno not in (errno.ENOENT, errno.ENOTDIR):
2753 key = (st.st_dev, st.st_ino)
2754 paths = inode_map.get(key)
2757 inode_map[key] = paths
# NOTE(review): excerpt with elided interior lines (initializations of
# file_paths / real_paths / inode_map / msg, the os.lstat call under its
# try:, early returns and continues) — verify against the full file.
# Purpose: scan all files of the given installed instances for suid/sgid
# regular files whose hardlink count exceeds the links accounted for by
# the package itself — a possible privilege-escalation backdoor — and
# report them via a preinst eerror message.
2761 def _security_check(self, installed_instances):
2762 if not installed_instances:
2767 showMessage = self._display_merge
# Union of all CONTENTS paths of the instances being replaced.
2770 for dblnk in installed_instances:
2771 file_paths.update(dblnk.getcontents())
2774 for i, path in enumerate(file_paths):
2778 _unicode_encode(path,
2779 encoding=_encodings['merge'], errors='strict')
2780 except UnicodeEncodeError:
2781 # The package appears to have been merged with a
2782 # different value of sys.getfilesystemencoding(),
2783 # so fall back to utf_8 if appropriate.
2785 _unicode_encode(path,
2786 encoding=_encodings['fs'], errors='strict')
2787 except UnicodeEncodeError:
2794 except OSError as e:
2795 if e.errno not in (errno.ENOENT, errno.ENOTDIR):
# Only regular files can carry meaningful suid/sgid bits here.
2799 if not stat.S_ISREG(s.st_mode):
# Deduplicate via realpath so the same file reached through
# different symlinked paths is only examined once.
2801 path = os.path.realpath(path)
2802 if path in real_paths:
2804 real_paths.add(path)
# Only multi-link suid/sgid files are of interest.
2805 if s.st_nlink > 1 and \
2806 s.st_mode & (stat.S_ISUID | stat.S_ISGID):
2807 k = (s.st_dev, s.st_ino)
2808 inode_map.setdefault(k, []).append((path, s))
2809 suspicious_hardlinks = []
2810 for path_list in inode_map.values():
2811 path, s = path_list[0]
# If the package accounts for every link of the inode, it's fine;
# otherwise some unknown path links to a suid/sgid file.
2812 if len(path_list) == s.st_nlink:
2813 # All hardlinks seem to be owned by this package.
2815 suspicious_hardlinks.append(path_list)
2816 if not suspicious_hardlinks:
2820 msg.append(_("suid/sgid file(s) "
2821 "with suspicious hardlink(s):"))
2823 for path_list in suspicious_hardlinks:
2824 for path, s in path_list:
2825 msg.append("\t%s" % path)
2827 msg.append(_("See the Gentoo Security Handbook "
2828 "guide for advice on how to proceed."))
2830 self._eerror("preinst", msg)
# Emit QA-warning elog messages for the given phase (thin wrapper
# around _elog with funcname "eqawarn").
2834 def _eqawarn(self, phase, lines):
2835 self._elog("eqawarn", phase, lines)
# Emit error elog messages for the given phase (thin wrapper around
# _elog with funcname "eerror").
2837 def _eerror(self, phase, lines):
2838 self._elog("eerror", phase, lines)
# NOTE(review): excerpt with elided interior lines (the loop headers over
# `lines`, the else branch pairing the scheduler path) — verify against
# the full file before editing.
# Purpose: route elog messages (eqawarn/eerror/...) either directly
# through portage.elog.messages when no scheduler is attached, or capture
# them into a StringIO buffer and hand them to the scheduler's output.
2840 def _elog(self, funcname, phase, lines):
2841 func = getattr(portage.elog.messages, funcname)
2842 if self._scheduler is None:
# No scheduler: emit each line immediately.
2844 func(l, phase=phase, key=self.mycpv)
2846 background = self.settings.get("PORTAGE_BACKGROUND") == "1"
# In "subprocess" mode the log file is managed elsewhere, so
# PORTAGE_LOG_FILE is only consulted otherwise.
2848 if self.settings.get("PORTAGE_BACKGROUND") != "subprocess":
2849 log_path = self.settings.get("PORTAGE_LOG_FILE")
2850 out = portage.StringIO()
2852 func(line, phase=phase, key=self.mycpv, out=out)
2853 msg = out.getvalue()
2854 self._scheduler.output(msg,
2855 background=background, log_path=log_path)
# NOTE(review): excerpt with elided interior lines (cpv assignment, the
# funcnames mapping, str_buffer initialization, the inner line loop, the
# else branch for the pipe path) — verify against the full file.
# Purpose: flush accumulated elog messages; when no IPC pipe exists,
# process them in-process via elog_process(), otherwise serialize them
# as whitespace-joined records and write them to the parent through
# self._pipe.
2857 def _elog_process(self, phasefilter=None):
2859 if self._pipe is None:
2860 elog_process(cpv, self.settings, phasefilter=phasefilter)
2862 logdir = os.path.join(self.settings["T"], "logging")
# Merge messages logged by the ebuild with those logged from Python.
2863 ebuild_logentries = collect_ebuild_messages(logdir)
2864 py_logentries = collect_messages(key=cpv).get(cpv, {})
2865 logentries = _merge_logentries(py_logentries, ebuild_logentries)
2874 for phase, messages in logentries.items():
2875 for key, lines in messages:
2876 funcname = funcnames[key]
# basestring check: Python 2 compatibility — a lone string is
# presumably normalized to a list; confirm in the full file.
2877 if isinstance(lines, basestring):
2880 fields = (funcname, phase, cpv, line.rstrip('\n'))
2881 str_buffer.append(' '.join(fields))
2882 str_buffer.append('\n')
# Ship the serialized records to the parent process in one write.
2884 os.write(self._pipe, _unicode_encode(''.join(str_buffer)))
2886 def treewalk(self, srcroot, destroot, inforoot, myebuild, cleanup=0,
2887 mydbapi=None, prev_mtimes=None, counter=None):
2890 This function does the following:
2892 calls self._preserve_libs if FEATURES=preserve-libs
2893 calls self._collision_protect if FEATURES=collision-protect
2894 calls doebuild(mydo=pkg_preinst)
2895 Merges the package to the livefs
2896 unmerges old version (if required)
2897 calls doebuild(mydo=pkg_postinst)
2900 @param srcroot: Typically this is ${D}
2901 @type srcroot: String (Path)
2902 @param destroot: ignored, self.settings['ROOT'] is used instead
2903 @type destroot: String (Path)
2904 @param inforoot: root of the vardb entry ?
2905 @type inforoot: String (Path)
2906 @param myebuild: path to the ebuild that we are processing
2907 @type myebuild: String (Path)
2908 @param mydbapi: dbapi which is handed to doebuild.
2909 @type mydbapi: portdbapi instance
2910 @param prev_mtimes: { Filename:mtime } mapping for env_update
2911 @type prev_mtimes: Dictionary
2917 secondhand is a list of symlinks that have been skipped due to their target
2918 not existing; we will merge these symlinks at a later time.
2923 srcroot = _unicode_decode(srcroot,
2924 encoding=_encodings['content'], errors='strict')
2925 destroot = self.settings['ROOT']
2926 inforoot = _unicode_decode(inforoot,
2927 encoding=_encodings['content'], errors='strict')
2928 myebuild = _unicode_decode(myebuild,
2929 encoding=_encodings['content'], errors='strict')
2931 showMessage = self._display_merge
2932 scheduler = self._scheduler
2934 srcroot = normalize_path(srcroot).rstrip(os.path.sep) + os.path.sep
2936 if not os.path.isdir(srcroot):
2937 showMessage(_("!!! Directory Not Found: D='%s'\n") % srcroot,
2938 level=logging.ERROR, noiselevel=-1)
2942 for var_name in ('CHOST', 'SLOT'):
2943 if var_name == 'CHOST' and self.cat == 'virtual':
2945 os.unlink(os.path.join(inforoot, var_name))
2951 val = codecs.open(_unicode_encode(
2952 os.path.join(inforoot, var_name),
2953 encoding=_encodings['fs'], errors='strict'),
2954 mode='r', encoding=_encodings['repo.content'],
2955 errors='replace').readline().strip()
2956 except EnvironmentError as e:
2957 if e.errno != errno.ENOENT:
2962 if var_name == 'SLOT':
2965 if not slot.strip():
2966 slot = self.settings.get(var_name, '')
2967 if not slot.strip():
2968 showMessage(_("!!! SLOT is undefined\n"),
2969 level=logging.ERROR, noiselevel=-1)
2971 write_atomic(os.path.join(inforoot, var_name), slot + '\n')
2973 if val != self.settings.get(var_name, ''):
2974 self._eqawarn('preinst',
2975 [_("QA Notice: Expected %(var_name)s='%(expected_value)s', got '%(actual_value)s'\n") % \
2976 {"var_name":var_name, "expected_value":self.settings.get(var_name, ''), "actual_value":val}])
2979 self._eerror("preinst", lines)
2981 if not os.path.exists(self.dbcatdir):
2982 ensure_dirs(self.dbcatdir)
2985 for v in self.vartree.dbapi.cp_list(self.mysplit[0]):
2986 otherversions.append(v.split("/")[1])
2988 cp = self.mysplit[0]
2989 slot_atom = "%s:%s" % (cp, slot)
2991 # filter any old-style virtual matches
2992 slot_matches = [cpv for cpv in self.vartree.dbapi.match(slot_atom) \
2993 if cpv_getkey(cpv) == cp]
2995 if self.mycpv not in slot_matches and \
2996 self.vartree.dbapi.cpv_exists(self.mycpv):
2997 # handle multislot or unapplied slotmove
2998 slot_matches.append(self.mycpv)
3001 from portage import config
3002 for cur_cpv in slot_matches:
3003 # Clone the config in case one of these has to be unmerged since
3004 # we need it to have private ${T} etc... for things like elog.
3005 others_in_slot.append(dblink(self.cat, catsplit(cur_cpv)[1],
3006 settings=config(clone=self.settings),
3007 vartree=self.vartree, treetype="vartree",
3008 scheduler=self._scheduler, pipe=self._pipe))
3010 retval = self._security_check(others_in_slot)
3014 self.settings["REPLACING_VERSIONS"] = " ".join(
3015 [portage.versions.cpv_getversion(other.mycpv) for other in others_in_slot] )
3016 self.settings.backup_changes("REPLACING_VERSIONS")
3019 # Used by self.isprotected().
3022 for dblnk in others_in_slot:
3023 cur_counter = self.vartree.dbapi.cpv_counter(dblnk.mycpv)
3024 if cur_counter > max_counter:
3025 max_counter = cur_counter
3027 self._installed_instance = max_dblnk
3029 # We check for unicode encoding issues after src_install. However,
3030 # the check must be repeated here for binary packages (it's
3031 # inexpensive since we call os.walk() here anyway).
3036 unicode_error = False
3040 paths_with_newlines = []
3041 srcroot_len = len(srcroot)
3044 for parent, dirs, files in os.walk(srcroot, onerror=onerror):
3046 parent = _unicode_decode(parent,
3047 encoding=_encodings['merge'], errors='strict')
3048 except UnicodeDecodeError:
3049 new_parent = _unicode_decode(parent,
3050 encoding=_encodings['merge'], errors='replace')
3051 new_parent = _unicode_encode(new_parent,
3052 encoding=_encodings['merge'], errors='backslashreplace')
3053 new_parent = _unicode_decode(new_parent,
3054 encoding=_encodings['merge'], errors='replace')
3055 os.rename(parent, new_parent)
3056 unicode_error = True
3057 unicode_errors.append(new_parent[srcroot_len:])
3062 fname = _unicode_decode(fname,
3063 encoding=_encodings['merge'], errors='strict')
3064 except UnicodeDecodeError:
3065 fpath = portage._os.path.join(
3066 parent.encode(_encodings['merge']), fname)
3067 new_fname = _unicode_decode(fname,
3068 encoding=_encodings['merge'], errors='replace')
3069 new_fname = _unicode_encode(new_fname,
3070 encoding=_encodings['merge'], errors='backslashreplace')
3071 new_fname = _unicode_decode(new_fname,
3072 encoding=_encodings['merge'], errors='replace')
3073 new_fpath = os.path.join(parent, new_fname)
3074 os.rename(fpath, new_fpath)
3075 unicode_error = True
3076 unicode_errors.append(new_fpath[srcroot_len:])
3080 fpath = os.path.join(parent, fname)
3082 relative_path = fpath[srcroot_len:]
3084 if "\n" in relative_path:
3085 paths_with_newlines.append(relative_path)
3087 file_mode = os.lstat(fpath).st_mode
3088 if stat.S_ISREG(file_mode):
3089 myfilelist.append(relative_path)
3090 elif stat.S_ISLNK(file_mode):
3091 # Note: os.walk puts symlinks to directories in the "dirs"
3092 # list and it does not traverse them since that could lead
3093 # to an infinite recursion loop.
3094 mylinklist.append(relative_path)
3099 if not unicode_error:
3103 eerror(portage._merge_unicode_error(unicode_errors))
3105 if paths_with_newlines:
3107 msg.append(_("This package installs one or more files containing a newline (\\n) character:"))
3109 paths_with_newlines.sort()
3110 for f in paths_with_newlines:
3111 msg.append("\t/%s" % (f.replace("\n", "\\n")))
3113 msg.append(_("package %s NOT merged") % self.mycpv)
3118 # If there are no files to merge, and an installed package in the same
3119 # slot has files, it probably means that something went wrong.
3120 if self.settings.get("PORTAGE_PACKAGE_EMPTY_ABORT") == "1" and \
3121 not myfilelist and not mylinklist and others_in_slot:
3122 installed_files = None
3123 for other_dblink in others_in_slot:
3124 installed_files = other_dblink.getcontents()
3125 if not installed_files:
3127 from textwrap import wrap
3131 "new_cpv":self.mycpv,
3132 "old_cpv":other_dblink.mycpv
3134 msg.extend(wrap(_("The '%(new_cpv)s' package will not install "
3135 "any files, but the currently installed '%(old_cpv)s'"
3136 " package has the following files: ") % d, wrap_width))
3138 msg.extend(sorted(installed_files))
3140 msg.append(_("package %s NOT merged") % self.mycpv)
3143 _("Manually run `emerge --unmerge =%s` if you "
3144 "really want to remove the above files. Set "
3145 "PORTAGE_PACKAGE_EMPTY_ABORT=\"0\" in "
3146 "/etc/make.conf if you do not want to "
3147 "abort in cases like this.") % other_dblink.mycpv,
3153 # check for package collisions
3154 blockers = self._blockers
3155 if blockers is None:
3157 collisions, plib_collisions = \
3158 self._collision_protect(srcroot, destroot,
3159 others_in_slot + blockers, myfilelist + mylinklist)
3161 # Make sure the ebuild environment is initialized and that ${T}/elog
3162 # exists for logging of collision-protect eerror messages.
3163 if myebuild is None:
3164 myebuild = os.path.join(inforoot, self.pkg + ".ebuild")
3165 doebuild_environment(myebuild, "preinst",
3166 settings=self.settings, db=mydbapi)
3167 prepare_build_dirs(settings=self.settings, cleanup=cleanup)
3170 collision_protect = "collision-protect" in self.settings.features
3171 protect_owned = "protect-owned" in self.settings.features
3172 msg = _("This package will overwrite one or more files that"
3173 " may belong to other packages (see list below).")
3174 if not (collision_protect or protect_owned):
3175 msg += _(" Add either \"collision-protect\" or"
3176 " \"protect-owned\" to FEATURES in"
3177 " make.conf if you would like the merge to abort"
3178 " in cases like this. See the make.conf man page for"
3179 " more information about these features.")
3180 if self.settings.get("PORTAGE_QUIET") != "1":
3181 msg += _(" You can use a command such as"
3182 " `portageq owners / <filename>` to identify the"
3183 " installed package that owns a file. If portageq"
3184 " reports that only one package owns a file then do NOT"
3185 " file a bug report. A bug report is only useful if it"
3186 " identifies at least two or more packages that are known"
3187 " to install the same file(s)."
3188 " If a collision occurs and you"
3189 " can not explain where the file came from then you"
3190 " should simply ignore the collision since there is not"
3191 " enough information to determine if a real problem"
3192 " exists. Please do NOT file a bug report at"
3193 " http://bugs.gentoo.org unless you report exactly which"
3194 " two packages install the same file(s). Once again,"
3195 " please do NOT file a bug report unless you have"
3196 " completely understood the above message.")
3198 self.settings["EBUILD_PHASE"] = "preinst"
3199 from textwrap import wrap
3201 if collision_protect:
3203 msg.append(_("package %s NOT merged") % self.settings.mycpv)
3205 msg.append(_("Detected file collision(s):"))
3208 for f in collisions:
3209 msg.append("\t%s" % \
3210 os.path.join(destroot, f.lstrip(os.path.sep)))
3215 if collision_protect or protect_owned:
3218 msg.append(_("Searching all installed"
3219 " packages for file collisions..."))
3221 msg.append(_("Press Ctrl-C to Stop"))
3225 if len(collisions) > 20:
3226 # get_owners is slow for large numbers of files, so
3227 # don't look them all up.
3228 collisions = collisions[:20]
3231 owners = self.vartree.dbapi._owners.get_owners(collisions)
3232 self.vartree.dbapi.flush_cache()
3236 for pkg, owned_files in owners.items():
3239 msg.append("%s" % cpv)
3240 for f in sorted(owned_files):
3241 msg.append("\t%s" % os.path.join(destroot,
3242 f.lstrip(os.path.sep)))
3247 eerror([_("None of the installed"
3248 " packages claim the file(s)."), ""])
3250 # The explanation about the collision and how to solve
3251 # it may not be visible via a scrollback buffer, especially
3252 # if the number of file collisions is large. Therefore,
3253 # show a summary at the end.
3254 if collision_protect:
3255 msg = _("Package '%s' NOT merged due to file collisions.") % \
3257 elif protect_owned and owners:
3258 msg = _("Package '%s' NOT merged due to file collisions.") % \
3261 msg = _("Package '%s' merged despite file collisions.") % \
3263 msg += _(" If necessary, refer to your elog "
3264 "messages for the whole content of the above message.")
3265 eerror(wrap(msg, 70))
3267 if collision_protect or (protect_owned and owners):
3270 # The merge process may move files out of the image directory,
3271 # which causes invalidation of the .installed flag.
3273 os.unlink(os.path.join(
3274 os.path.dirname(normalize_path(srcroot)), ".installed"))
3275 except OSError as e:
3276 if e.errno != errno.ENOENT:
3280 self.dbdir = self.dbtmpdir
3282 ensure_dirs(self.dbtmpdir)
3284 # run preinst script
3285 if scheduler is None:
3286 showMessage(_(">>> Merging %(cpv)s to %(destroot)s\n") % {"cpv":self.mycpv, "destroot":destroot})
3287 a = _spawn_phase("preinst", self.settings)
3289 a = scheduler.dblinkEbuildPhase(
3290 self, mydbapi, myebuild, "preinst")
3292 # XXX: Decide how to handle failures here.
3294 showMessage(_("!!! FAILED preinst: ")+str(a)+"\n",
3295 level=logging.ERROR, noiselevel=-1)
3298 # copy "info" files (like SLOT, CFLAGS, etc.) into the database
3299 for x in os.listdir(inforoot):
3300 self.copyfile(inforoot+"/"+x)
3302 # write local package counter for recording
3304 counter = self.vartree.dbapi.counter_tick(mycpv=self.mycpv)
3305 codecs.open(_unicode_encode(os.path.join(self.dbtmpdir, 'COUNTER'),
3306 encoding=_encodings['fs'], errors='strict'),
3307 'w', encoding=_encodings['repo.content'], errors='backslashreplace'
3308 ).write(str(counter))
3310 self.updateprotect()
3312 #if we have a file containing previously-merged config file md5sums, grab it.
3313 conf_mem_file = os.path.join(self._eroot, CONFIG_MEMORY_FILE)
3314 conf_mem_lock = lockfile(conf_mem_file)
3316 cfgfiledict = grabdict(conf_mem_file)
3317 if "NOCONFMEM" in self.settings:
3318 cfgfiledict["IGNORE"]=1
3320 cfgfiledict["IGNORE"]=0
3322 # Always behave like --noconfmem is enabled for downgrades
3323 # so that people who don't know about this option are less
3324 # likely to get confused when doing upgrade/downgrade cycles.
3325 pv_split = catpkgsplit(self.mycpv)[1:]
3326 for other in others_in_slot:
3327 if pkgcmp(pv_split, catpkgsplit(other.mycpv)[1:]) < 0:
3328 cfgfiledict["IGNORE"] = 1
3331 rval = self._merge_contents(srcroot, destroot, cfgfiledict,
3333 if rval != os.EX_OK:
3336 unlockfile(conf_mem_lock)
3338 # These caches are populated during collision-protect and the data
3339 # they contain is now invalid. It's very important to invalidate
3340 # the contents_inodes cache so that FEATURES=unmerge-orphans
3341 # doesn't unmerge anything that belongs to this package that has
3343 for dblnk in others_in_slot:
3344 dblnk._clear_contents_cache()
3345 self._clear_contents_cache()
3347 linkmap = self.vartree.dbapi._linkmap
3348 plib_registry = self.vartree.dbapi._plib_registry
3349 # We initialize preserve_paths to an empty set rather
3350 # than None here because it plays an important role
3351 # in prune_plib_registry logic by serving to indicate
3352 # that we have a replacement for a package that's
3355 preserve_paths = set()
3357 if not (linkmap is None or plib_registry is None):
3358 plib_registry.lock()
3360 plib_registry.load()
3361 needed = os.path.join(inforoot, linkmap._needed_aux_key)
3362 self._linkmap_rebuild(include_file=needed)
3364 # Preserve old libs if they are still in use
3365 # TODO: Handle cases where the previous instance
3366 # has already been uninstalled but it still has some
3367 # preserved libraries in the registry that we may
3368 # want to preserve here.
3369 preserve_paths = self._find_libs_to_preserve()
3371 plib_registry.unlock()
3374 self._add_preserve_libs_to_contents(preserve_paths)
3376 # If portage is reinstalling itself, remove the old
3377 # version now since we want to use the temporary
3378 # PORTAGE_BIN_PATH that will be removed when we return.
3379 reinstall_self = False
3380 if self.myroot == "/" and \
3381 match_from_list(PORTAGE_PACKAGE_ATOM, [self.mycpv]):
3382 reinstall_self = True
3384 if scheduler is None:
3385 def emerge_log(msg):
3388 emerge_log = scheduler.dblinkEmergeLog
3390 autoclean = self.settings.get("AUTOCLEAN", "yes") == "yes"
3393 emerge_log(_(" >>> AUTOCLEAN: %s") % (slot_atom,))
3395 others_in_slot.append(self) # self has just been merged
3396 for dblnk in list(others_in_slot):
3399 if not (autoclean or dblnk.mycpv == self.mycpv or reinstall_self):
3401 showMessage(_(">>> Safely unmerging already-installed instance...\n"))
3402 emerge_log(_(" === Unmerging... (%s)") % (dblnk.mycpv,))
3403 others_in_slot.remove(dblnk) # dblnk will unmerge itself now
3404 dblnk._linkmap_broken = self._linkmap_broken
3405 dblnk.settings["REPLACED_BY_VERSION"] = portage.versions.cpv_getversion(self.mycpv)
3406 dblnk.settings.backup_changes("REPLACED_BY_VERSION")
3407 unmerge_rval = dblnk.unmerge(ldpath_mtimes=prev_mtimes,
3408 others_in_slot=others_in_slot, needed=needed,
3409 preserve_paths=preserve_paths)
3410 dblnk.settings.pop("REPLACED_BY_VERSION", None)
3412 if unmerge_rval == os.EX_OK:
3413 emerge_log(_(" >>> unmerge success: %s") % (dblnk.mycpv,))
3415 emerge_log(_(" !!! unmerge FAILURE: %s") % (dblnk.mycpv,))
3419 # TODO: Check status and abort if necessary.
3423 showMessage(_(">>> Original instance of package unmerged safely.\n"))
3425 if len(others_in_slot) > 1:
3426 showMessage(colorize("WARN", _("WARNING:"))
3427 + _(" AUTOCLEAN is disabled. This can cause serious"
3428 " problems due to overlapping packages.\n"),
3429 level=logging.WARN, noiselevel=-1)
3431 # We hold both directory locks.
3432 self.dbdir = self.dbpkgdir
3436 _movefile(self.dbtmpdir, self.dbpkgdir, mysettings=self.settings)
3440 # Check for file collisions with blocking packages
3441 # and remove any colliding files from their CONTENTS
3442 # since they now belong to this package.
3443 self._clear_contents_cache()
3444 contents = self.getcontents()
3445 destroot_len = len(destroot) - 1
3448 for blocker in blockers:
3449 self.vartree.dbapi.removeFromContents(blocker, iter(contents),
3450 relative_paths=False)
3454 plib_registry = self.vartree.dbapi._plib_registry
3456 plib_registry.lock()
3458 plib_registry.load()
3461 # keep track of the libs we preserved
3462 plib_registry.register(self.mycpv, slot, counter,
3463 sorted(preserve_paths))
3465 # Unregister any preserved libs that this package has overwritten
3466 # and update the contents of the packages that owned them.
3467 plib_dict = plib_registry.getPreservedLibs()
3468 for cpv, paths in plib_collisions.items():
3469 if cpv not in plib_dict:
3471 has_vdb_entry = False
3472 if cpv != self.mycpv:
3473 # If we've replaced another instance with the
3474 # same cpv then the vdb entry no longer belongs
3475 # to it, so we'll have to get the slot and couter
3476 # from plib_registry._data instead.
3478 slot, counter = self.vartree.dbapi.aux_get(
3479 cpv, ["SLOT", "COUNTER"])
3480 has_vdb_entry = True
3484 if not has_vdb_entry:
3485 # It's possible for previously unmerged packages
3486 # to have preserved libs in the registry, so try
3487 # to retrieve the slot and counter from there.
3488 has_registry_entry = False
3489 for plib_cps, (plib_cpv, plib_counter, plib_paths) in \
3490 plib_registry._data.items():
3494 cp, slot = plib_cps.split(":", 1)
3497 counter = plib_counter
3498 has_registry_entry = True
3501 if not has_registry_entry:
3504 remaining = [f for f in plib_dict[cpv] if f not in paths]
3505 plib_registry.register(cpv, slot, counter, remaining)
3507 self.vartree.dbapi.removeFromContents(cpv, paths)
3509 plib_registry.store()
3511 plib_registry.unlock()
3513 self.vartree.dbapi._add(self)
3514 contents = self.getcontents()
3517 self.settings["PORTAGE_UPDATE_ENV"] = \
3518 os.path.join(self.dbpkgdir, "environment.bz2")
3519 self.settings.backup_changes("PORTAGE_UPDATE_ENV")
3521 if scheduler is None:
3522 a = _spawn_phase("postinst", self.settings)
3524 showMessage(_(">>> %s merged.\n") % self.mycpv)
3526 a = scheduler.dblinkEbuildPhase(
3527 self, mydbapi, myebuild, "postinst")
3529 self.settings.pop("PORTAGE_UPDATE_ENV", None)
3532 # It's stupid to bail out here, so keep going regardless of
3533 # phase return code.
3534 showMessage(_("!!! FAILED postinst: ")+str(a)+"\n",
3535 level=logging.ERROR, noiselevel=-1)
3538 for v in otherversions:
3539 if pkgcmp(catpkgsplit(self.pkg)[1:], catpkgsplit(v)[1:]) < 0:
3542 # Lock the config memory file to prevent symlink creation
3543 # in merge_contents from overlapping with env-update.
3544 conf_mem_file = os.path.join(self._eroot, CONFIG_MEMORY_FILE)
3545 conf_mem_lock = lockfile(conf_mem_file)
3547 #update environment settings, library paths. DO NOT change symlinks.
3548 env_update(makelinks=(not downgrade),
3549 target_root=self.settings['ROOT'], prev_mtimes=prev_mtimes,
3550 contents=contents, env=self.settings.environ(),
3551 writemsg_level=self._display_merge)
3553 unlockfile(conf_mem_lock)
3555 # For gcc upgrades, preserved libs have to be removed after the
3556 # the library path has been updated.
3557 self._prune_plib_registry()
# Build a backup pathname for *p* by appending a '.backup.NNNN' suffix,
# where NNNN is a zero-padded counter chosen so the result does not
# already exist yet.
def _new_backup_path(self, p):
    This works for any type path, such as a regular file, symlink,
    or directory. The parent directory is assumed to exist.
    The returned filename is of the form p + '.backup.' + x, where
    x guarantees that the returned path does not exist yet.
    # NOTE(review): the docstring delimiters and the loop that advances
    # 'x' until backup_p is unused are not visible in this excerpt --
    # confirm against the full source.
    backup_p = p + '.backup.' + str(x).rjust(4, '0')
# Walk the image directory (srcroot) and merge it into the live
# filesystem (destroot), recording each merged entry in the temporary
# vdb CONTENTS file and persisting CONFIG_PROTECT "confmem" state.
def _merge_contents(self, srcroot, destroot, cfgfiledict, conf_mem_file):

    # Snapshot confmem so conf_mem_file is only rewritten when the
    # dictionary actually changed (compared at the end of this method).
    cfgfiledict_orig = cfgfiledict.copy()

    # open CONTENTS file (possibly overwriting old one) for recording
    outfile = codecs.open(_unicode_encode(
        os.path.join(self.dbtmpdir, 'CONTENTS'),
        encoding=_encodings['fs'], errors='strict'),
        mode='w', encoding=_encodings['repo.content'],
        errors='backslashreplace')

    # Don't bump mtimes on merge since some application require
    # preservation of timestamps. This means that the unmerge phase must
    # check to see if file belongs to an installed instance in the same

    # set umask to 0 for merging; back up umask, save old one in prevmask (since this is a global change)
    prevmask = os.umask(0)

    # we do a first merge; this will recurse through all files in our srcroot but also build up a
    # "second hand" of symlinks to merge later
    if self.mergeme(srcroot, destroot, outfile, secondhand, "", cfgfiledict, mymtime):

    # now, it's time for dealing our second hand; we'll loop until we can't merge anymore. The rest are
    # broken symlinks. We'll merge them too.
    while len(secondhand) and len(secondhand)!=lastlen:
        # clear the thirdhand. Anything from our second hand that
        # couldn't get merged will be added to thirdhand.
        if self.mergeme(srcroot, destroot, outfile, thirdhand,
            secondhand, cfgfiledict, mymtime):
        lastlen = len(secondhand)

        # our thirdhand now becomes our secondhand. It's ok to throw
        # away secondhand since thirdhand contains all the stuff that
        # couldn't be merged.
        secondhand = thirdhand

    # force merge of remaining symlinks (broken or circular; oh well)
    if self.mergeme(srcroot, destroot, outfile, None,
        secondhand, cfgfiledict, mymtime):

    #if we opened it, close it

    # write out our collection of md5sums
    if cfgfiledict != cfgfiledict_orig:
        cfgfiledict.pop("IGNORE", None)
        ensure_dirs(os.path.dirname(conf_mem_file),
            gid=portage_gid, mode=0o2750, mask=0o2)
        writedict(cfgfiledict, conf_mem_file)
# Merge one directory level of the package image into the live
# filesystem: handles symlinks (with "second hand" deferral), directory
# creation, regular-file CONFIG_PROTECT logic, fifos/device nodes, and
# logs every merged entry to the CONTENTS outfile.  A true return value
# signals failure to the caller.
def mergeme(self, srcroot, destroot, outfile, secondhand, stufftomerge, cfgfiledict, thismtime):
    This function handles actual merging of the package contents to the livefs.
    It also handles config protection.

    @param srcroot: Where are we copying files from (usually ${D})
    @type srcroot: String (Path)
    @param destroot: Typically ${ROOT}
    @type destroot: String (Path)
    @param outfile: File to log operations to
    @type outfile: File Object
    @param secondhand: A set of items to merge in pass two (usually
    or symlinks that point to non-existing files that may get merged later)
    @type secondhand: List
    @param stufftomerge: Either a directory to merge, or a list of items.
    @type stufftomerge: String or List
    @param cfgfiledict: { File:mtime } mapping for config_protected files
    @type cfgfiledict: Dictionary
    @param thismtime: The current time (typically long(time.time())
    @type thismtime: Long
    @rtype: None or Boolean

    showMessage = self._display_merge
    writemsg = self._display_merge

    srcroot = normalize_path(srcroot).rstrip(sep) + sep
    destroot = normalize_path(destroot).rstrip(sep) + sep
    calc_prelink = "prelink-checksums" in self.settings.features

    # this is supposed to merge a list of files. There will be 2 forms of argument passing.
    if isinstance(stufftomerge, basestring):
        #A directory is specified. Figure out protection paths, listdir() it and process it.
        mergelist = os.listdir(join(srcroot, stufftomerge))
        offset = stufftomerge
        # (else branch: a pre-built list of items was passed in)
        mergelist = stufftomerge

    for i, x in enumerate(mergelist):

        mysrc = join(srcroot, offset, x)
        mydest = join(destroot, offset, x)
        # myrealdest is mydest without the $ROOT prefix (makes a difference if ROOT!="/")
        myrealdest = join(sep, offset, x)
        # stat file once, test using S_* macros many times (faster that way)
        mystat = os.lstat(mysrc)
        mymode = mystat[stat.ST_MODE]
        # handy variables; mydest is the target object on the live filesystems;
        # mysrc is the source object in the temporary install dir
            mydstat = os.lstat(mydest)
            mydmode = mydstat.st_mode
        except OSError as e:
            if e.errno != errno.ENOENT:
            #dest file doesn't exist

        if stat.S_ISLNK(mymode):
            # we are merging a symbolic link
            myabsto = abssymlink(mysrc)
            if myabsto.startswith(srcroot):
                myabsto = myabsto[len(srcroot):]
            myabsto = myabsto.lstrip(sep)
            myto = os.readlink(mysrc)
            if self.settings and self.settings["D"]:
                if myto.startswith(self.settings["D"]):
                    myto = myto[len(self.settings["D"]):]
            # myrealto contains the path of the real file to which this symlink points.
            # we can simply test for existence of this file to see if the target has been merged yet
            myrealto = normalize_path(os.path.join(destroot, myabsto))
                if not stat.S_ISLNK(mydmode):
                    if stat.S_ISDIR(mydmode):
                        # directory in the way: we can't merge a symlink over a directory
                        # we won't merge this, continue with next file...

                    if os.path.exists(mysrc) and stat.S_ISDIR(os.stat(mysrc)[stat.ST_MODE]):
                        # Kill file blocking installation of symlink to dir #71787
                    elif self.isprotected(mydest):
                        # Use md5 of the target in ${D} if it exists...
                            newmd5 = perform_md5(join(srcroot, myabsto))
                        except FileNotFound:
                            # Maybe the target is merged already.
                                newmd5 = perform_md5(myrealto)
                            except FileNotFound:
                        mydest = new_protect_filename(mydest, newmd5=newmd5)

            # if secondhand is None it means we're operating in "force" mode and should not create a second hand.
            if (secondhand != None) and (not os.path.exists(myrealto)):
                # either the target directory doesn't exist yet or the target file doesn't exist -- or
                # the target is a broken symlink. We will add this file to our "second hand" and merge
                secondhand.append(mysrc[len(srcroot):])
            # unlinking no longer necessary; "movefile" will overwrite symlinks atomically and correctly
            mymtime = movefile(mysrc, mydest, newmtime=thismtime,
                sstat=mystat, mysettings=self.settings,
                encoding=_encodings['merge'])
                showMessage(">>> %s -> %s\n" % (mydest, myto))
                outfile.write("sym "+myrealdest+" -> "+myto+" "+str(mymtime)+"\n")
                showMessage(_("!!! Failed to move file.\n"),
                    level=logging.ERROR, noiselevel=-1)
                showMessage("!!! %s -> %s\n" % (mydest, myto),
                    level=logging.ERROR, noiselevel=-1)

        elif stat.S_ISDIR(mymode):
            # we are merging a directory
                # destination exists

                    # Save then clear flags on dest.
                    dflags = mydstat.st_flags
                        bsd_chflags.lchflags(mydest, 0)

                if not os.access(mydest, os.W_OK):
                    pkgstuff = pkgsplit(self.pkg)
                    writemsg(_("\n!!! Cannot write to '%s'.\n") % mydest, noiselevel=-1)
                    writemsg(_("!!! Please check permissions and directories for broken symlinks.\n"))
                    writemsg(_("!!! You may start the merge process again by using ebuild:\n"))
                    writemsg("!!! ebuild "+self.settings["PORTDIR"]+"/"+self.cat+"/"+pkgstuff[0]+"/"+self.pkg+".ebuild merge\n")
                    writemsg(_("!!! And finish by running this: env-update\n\n"))

                if stat.S_ISDIR(mydmode) or \
                    (stat.S_ISLNK(mydmode) and os.path.isdir(mydest)):
                    # a symlink to an existing directory will work for us; keep it:
                    showMessage("--- %s/\n" % mydest)
                        bsd_chflags.lchflags(mydest, dflags)
                    # a non-directory and non-symlink-to-directory. Won't work for us. Move out of the way.
                    backup_dest = self._new_backup_path(mydest)
                    msg.append(_("Installation of a directory is blocked by a file:"))
                    msg.append(" '%s'" % mydest)
                    msg.append(_("This file will be renamed to a different name:"))
                    msg.append(" '%s'" % backup_dest)
                    self._eerror("preinst", msg)
                    if movefile(mydest, backup_dest,
                        mysettings=self.settings,
                        encoding=_encodings['merge']) is None:
                    showMessage(_("bak %s %s.backup\n") % (mydest, mydest),
                        level=logging.ERROR, noiselevel=-1)
                    #now create our directory
                    if self.settings.selinux_enabled():
                        _selinux_merge.mkdir(mydest, mysrc)

                    except OSError as e:
                        # Error handling should be equivalent to
                        # portage.util.ensure_dirs() for cases
                        if e.errno in (errno.EEXIST,):
                        elif os.path.isdir(mydest):
                        bsd_chflags.lchflags(mydest, dflags)
                    os.chmod(mydest, mystat[0])
                    os.chown(mydest, mystat[4], mystat[5])
                    showMessage(">>> %s/\n" % mydest)
                #destination doesn't exist
                if self.settings.selinux_enabled():
                    _selinux_merge.mkdir(mydest, mysrc)

                except OSError as e:
                    # Error handling should be equivalent to
                    # portage.util.ensure_dirs() for cases
                    if e.errno in (errno.EEXIST,):
                    elif os.path.isdir(mydest):
                os.chmod(mydest, mystat[0])
                os.chown(mydest, mystat[4], mystat[5])
                showMessage(">>> %s/\n" % mydest)
            outfile.write("dir "+myrealdest+"\n")
            # recurse and merge this directory
            if self.mergeme(srcroot, destroot, outfile, secondhand,
                join(offset, x), cfgfiledict, thismtime):
        elif stat.S_ISREG(mymode):
            # we are merging a regular file
            mymd5 = perform_md5(mysrc, calc_prelink=calc_prelink)
            # calculate config file protection stuff
            mydestdir = os.path.dirname(mydest)

            protected = self.isprotected(mydest)

                # destination file exists
                if stat.S_ISDIR(mydmode):
                    # install of destination is blocked by an existing directory with the same name
                    newdest = self._new_backup_path(mydest)
                    msg.append(_("Installation of a regular file is blocked by a directory:"))
                    msg.append(" '%s'" % mydest)
                    msg.append(_("This file will be merged with a different name:"))
                    msg.append(" '%s'" % newdest)
                    self._eerror("preinst", msg)

                elif stat.S_ISREG(mydmode) or (stat.S_ISLNK(mydmode) and os.path.exists(mydest) and stat.S_ISREG(os.stat(mydest)[stat.ST_MODE])):
                    # install of destination is blocked by an existing regular file,
                    # or by a symlink to an existing regular file;
                    # now, config file management may come into play.
                    # we only need to tweak mydest if cfg file management is in play.
                        # we have a protection path; enable config file management.
                        destmd5 = perform_md5(mydest, calc_prelink=calc_prelink)
                        if mymd5 == destmd5:
                            #file already in place; simply update mtimes of destination
                            if mymd5 == cfgfiledict.get(myrealdest, [None])[0]:
                                """ An identical update has previously been
                                merged. Skip it unless the user has chosen
                                moveme = cfgfiledict["IGNORE"]
                                cfgprot = cfgfiledict["IGNORE"]
                                    mymtime = mystat[stat.ST_MTIME]
                            # Merging a new file, so update confmem.
                            cfgfiledict[myrealdest] = [mymd5]
                        elif destmd5 == cfgfiledict.get(myrealdest, [None])[0]:
                            """A previously remembered update has been
                            accepted, so it is removed from confmem."""
                            del cfgfiledict[myrealdest]

                        mydest = new_protect_filename(mydest, newmd5=mymd5)

            # whether config protection or not, we merge the new file the
            # same way. Unless moveme=0 (blocking directory)
                # Create hardlinks only for source files that already exist
                # as hardlinks (having identical st_dev and st_ino).
                hardlink_key = (mystat.st_dev, mystat.st_ino)

                hardlink_candidates = self._md5_merge_map.get(hardlink_key)
                if hardlink_candidates is None:
                    hardlink_candidates = []
                    self._md5_merge_map[hardlink_key] = hardlink_candidates

                mymtime = movefile(mysrc, mydest, newmtime=thismtime,
                    sstat=mystat, mysettings=self.settings,
                    hardlink_candidates=hardlink_candidates,
                    encoding=_encodings['merge'])

                if hardlink_candidates is not None:
                    hardlink_candidates.append(mydest)

                outfile.write("obj "+myrealdest+" "+mymd5+" "+str(mymtime)+"\n")
            showMessage("%s %s\n" % (zing,mydest))
            # we are merging a fifo or device node
                # destination doesn't exist
                if movefile(mysrc, mydest, newmtime=thismtime,
                    sstat=mystat, mysettings=self.settings,
                    encoding=_encodings['merge']) is not None:

            if stat.S_ISFIFO(mymode):
                outfile.write("fif %s\n" % myrealdest)
                outfile.write("dev %s\n" % myrealdest)
            showMessage(zing + " " + mydest + "\n")
# Instance entry point for merging this package: delegates the heavy
# lifting to treewalk(), then runs success/die hooks, elog processing
# and the "clean" phase, and clears cached state on the way out.
def merge(self, mergeroot, inforoot, myroot=None, myebuild=None, cleanup=0,
    mydbapi=None, prev_mtimes=None, counter=None):
    @param myroot: ignored, self._eroot is used instead

    parallel_install = "parallel-install" in self.settings.features
    if not parallel_install:
    # Bump the vdb mtime so other processes notice the change.
    self.vartree.dbapi._bump_mtime(self.mycpv)
        retval = self.treewalk(mergeroot, myroot, inforoot, myebuild,
            cleanup=cleanup, mydbapi=mydbapi, prev_mtimes=prev_mtimes,

        # If PORTAGE_BUILDDIR doesn't exist, then it probably means
        # fail-clean is enabled, and the success/die hooks have
        # already been called by _emerge.EbuildPhase (via
        # self._scheduler.dblinkEbuildPhase) prior to cleaning.
        if os.path.isdir(self.settings['PORTAGE_BUILDDIR']):

            if retval == os.EX_OK:
                phase = 'success_hooks'

            if self._scheduler is None:
                ebuild_phase = MiscFunctionsProcess(
                    scheduler=PollScheduler().sched_iface,
                    settings=self.settings)
                ebuild_phase.start()
                self._scheduler.dblinkEbuildPhase(
                    self, mydbapi, myebuild, phase)

            self._elog_process()

            if 'noclean' not in self.settings.features and \
                (retval == os.EX_OK or \
                'fail-clean' in self.settings.features):
                if myebuild is None:
                    myebuild = os.path.join(inforoot, self.pkg + ".ebuild")

                doebuild_environment(myebuild, "clean",
                    settings=self.settings, db=mydbapi)
                if self._scheduler is None:
                    _spawn_phase("clean", self.settings)
                    self._scheduler.dblinkEbuildPhase(
                        self, mydbapi, myebuild, "clean")

        self.settings.pop('REPLACING_VERSIONS', None)
        if self.vartree.dbapi._linkmap is None:
            # preserve-libs is entirely disabled
            self.vartree.dbapi._linkmap._clear_cache()
        self.vartree.dbapi._bump_mtime(self.mycpv)
        if not parallel_install:
# Read the named file from this package's vdb directory and return its
# contents with runs of whitespace collapsed to single spaces.
def getstring(self,name):
    "returns contents of a file with whitespace converted to spaces"
    # NOTE(review): lines are elided here (the missing-file early return
    # and the read/split of the opened stream) -- confirm against the
    # full source.
    if not os.path.exists(self.dbdir+"/"+name):
    mydata = codecs.open(
        _unicode_encode(os.path.join(self.dbdir, name),
        encoding=_encodings['fs'], errors='strict'),
        mode='r', encoding=_encodings['repo.content'], errors='replace'
    return " ".join(mydata)
def copyfile(self,fname):
    """Copy the given file into this package's vdb directory.

    The copy keeps the source file's basename and is placed directly
    under self.dbdir.
    """
    destination = self.dbdir + "/" + os.path.basename(fname)
    shutil.copyfile(fname, destination)
# Return the text of the named file from this package's vdb directory.
def getfile(self,fname):
    # NOTE(review): lines are elided here (the missing-file early return
    # and the chained read of the opened stream) -- confirm against the
    # full source.
    if not os.path.exists(self.dbdir+"/"+fname):
    return codecs.open(_unicode_encode(os.path.join(self.dbdir, fname),
        encoding=_encodings['fs'], errors='strict'),
        mode='r', encoding=_encodings['repo.content'], errors='replace'
# Atomically write *data* to the named file in the vdb directory.
def setfile(self,fname,data):
    # Byte payloads and the compressed environment are written in binary
    # mode; everything else is text using the repo.content encoding.
    # NOTE(review): the 'kwargs = {}' initializer and the 'else:' line
    # for the text branch are not visible in this excerpt.
    if fname == 'environment.bz2' or not isinstance(data, basestring):
        kwargs['mode'] = 'wb'
        kwargs['mode'] = 'w'
        kwargs['encoding'] = _encodings['repo.content']
    write_atomic(os.path.join(self.dbdir, fname), data, **kwargs)
# Return the whitespace-separated tokens of the named vdb file as a flat
# list (one entry per token, lines split individually).
def getelements(self,ename):
    if not os.path.exists(self.dbdir+"/"+ename):
    mylines = codecs.open(_unicode_encode(
        os.path.join(self.dbdir, ename),
        encoding=_encodings['fs'], errors='strict'),
        mode='r', encoding=_encodings['repo.content'], errors='replace'
        # x[:-1] drops each line's trailing newline before splitting.
        for y in x[:-1].split():
# Write each element of *mylist* as its own newline-terminated line in
# the named vdb file.
def setelements(self,mylist,ename):
    myelement = codecs.open(_unicode_encode(
        os.path.join(self.dbdir, ename),
        encoding=_encodings['fs'], errors='strict'),
        mode='w', encoding=_encodings['repo.content'],
        errors='backslashreplace')
        # (loop header over mylist is not visible in this excerpt)
        myelement.write(x+"\n")
def isregular(self):
    """Return True if this package has a CATEGORY file in its vdb
    directory.  A dblink can be virtual *and* regular at the same time.
    """
    category_file = os.path.join(self.dbdir, "CATEGORY")
    return os.path.exists(category_file)
# Module-level convenience wrapper: merge a built package image into the
# vdb by delegating the work to a MergeProcess task and waiting for it.
def merge(mycat, mypkg, pkgloc, infloc,
    myroot=None, settings=None, myebuild=None,
    mytree=None, mydbapi=None, vartree=None, prev_mtimes=None, blockers=None,
    @param myroot: ignored, settings['EROOT'] is used instead
    if settings is None:
        raise TypeError("settings argument is required")
    # Fail early when EROOT is not writable by this process.
    if not os.access(settings['EROOT'], os.W_OK):
        writemsg(_("Permission denied: access('%s', W_OK)\n") % settings['EROOT'],
    background = (settings.get('PORTAGE_BACKGROUND') == '1')
    merge_task = MergeProcess(
        dblink=dblink, mycat=mycat, mypkg=mypkg, settings=settings,
        treetype=mytree, vartree=vartree,
        scheduler=(scheduler or PollScheduler().sched_iface),
        background=background, blockers=blockers, pkgloc=pkgloc,
        infloc=infloc, myebuild=myebuild, mydbapi=mydbapi,
        prev_mtimes=prev_mtimes, logfile=settings.get('PORTAGE_LOG_FILE'))

    retcode = merge_task.wait()
# Module-level convenience wrapper: unmerge an installed package through
# a temporary dblink, locking the vdb unless parallel-install is active.
def unmerge(cat, pkg, myroot=None, settings=None,
    mytrimworld=None, vartree=None,
    ldpath_mtimes=None, scheduler=None):
    @param myroot: ignored, settings['EROOT'] is used instead
    @param mytrimworld: ignored
    if settings is None:
        raise TypeError("settings argument is required")
    mylink = dblink(cat, pkg, settings=settings, treetype="vartree",
        vartree=vartree, scheduler=scheduler)
    vartree = mylink.vartree
    parallel_install = "parallel-install" in settings.features
    if not parallel_install:
        # (vdb locking is elided in this excerpt)
            retval = mylink.unmerge(ldpath_mtimes=ldpath_mtimes)
            if retval == os.EX_OK:
        # A None linkmap means preserve-libs support is disabled, so
        # there is no cache to clear in that case.
        if vartree.dbapi._linkmap is None:
            # preserve-libs is entirely disabled
            vartree.dbapi._linkmap._clear_cache()
        if not parallel_install:
# Serialize a CONTENTS mapping to an open file object, one line per
# entry: "obj <path> <md5> <mtime>", "sym <path> -> <target> <mtime>",
# or "<type> <path>" for dir/dev/fif entries.
def write_contents(contents, root, f):
    Write contents to any file like object. The file will be left open.
    root_len = len(root) - 1  # keeps the leading '/' (root presumably ends with a slash -- confirm)
    for filename in sorted(contents):
        entry_data = contents[filename]
        entry_type = entry_data[0]
        relative_filename = filename[root_len:]
        if entry_type == "obj":
            entry_type, mtime, md5sum = entry_data
            line = "%s %s %s %s\n" % \
                (entry_type, relative_filename, md5sum, mtime)
        elif entry_type == "sym":
            entry_type, mtime, link = entry_data
            line = "%s %s -> %s %s\n" % \
                (entry_type, relative_filename, link, mtime)
        else: # dir, dev, fif
            line = "%s %s\n" % (entry_type, relative_filename)
# Add every path recorded in *contents* to the open tarfile *tar*,
# optionally replacing protect()-matched files with placeholder content
# and reporting progress through onProgress(maxval, curval).
def tar_contents(contents, root, tar, protect=None, onProgress=None):
    # NOTE(review): several lines (the encoding probe loop, counters and
    # parts of the stat handling) are elided in this excerpt -- confirm
    # against the full source.
            encoding=_encodings['merge'],
    except UnicodeEncodeError:
        # The package appears to have been merged with a
        # different value of sys.getfilesystemencoding(),
        # so fall back to utf_8 if appropriate.
                encoding=_encodings['fs'],
        except UnicodeEncodeError:

    root = normalize_path(root).rstrip(os.path.sep) + os.path.sep
    maxval = len(contents)
        onProgress(maxval, 0)
    paths = list(contents)
            lst = os.lstat(path)
        except OSError as e:
            if e.errno != errno.ENOENT:
                onProgress(maxval, curval)
        contents_type = contents[path][0]
        if path.startswith(root):
            arcname = path[len(root):]
            raise ValueError("invalid root argument: '%s'" % root)
        if 'dir' == contents_type and \
            not stat.S_ISDIR(lst.st_mode) and \
            os.path.isdir(live_path):
            # Even though this was a directory in the original ${D}, it exists
            # as a symlink to a directory in the live filesystem. It must be
            # recorded as a real directory in the tar file to ensure that tar
            # can properly extract its children.
            live_path = os.path.realpath(live_path)
        tarinfo = tar.gettarinfo(live_path, arcname)

        if stat.S_ISREG(lst.st_mode):
            if protect and protect(path):
                # Create an empty file as a place holder in order to avoid
                # potential collision-protect issues.
                f = tempfile.TemporaryFile()
                f.write(_unicode_encode(
                    "# empty file because --include-config=n " + \
                    "when `quickpkg` was used\n"))
                tarinfo.size = os.fstat(f.fileno()).st_size
                tar.addfile(tarinfo, f)
                f = open(_unicode_encode(path,
                    encoding=object.__getattribute__(os, '_encoding'),
                    errors='strict'), 'rb')
                tar.addfile(tarinfo, f)
            tar.addfile(tarinfo)
            onProgress(maxval, curval)