1 # Copyright 1998-2007 Gentoo Foundation
2 # Distributed under the terms of the GNU General Public License v2
6 "vardbapi", "vartree", "dblink"] + \
7 ["write_contents", "tar_contents"]
9 from portage.checksum import perform_md5
10 from portage.const import CACHE_PATH, CONFIG_MEMORY_FILE, \
11 PORTAGE_PACKAGE_ATOM, PRIVATE_PATH, VDB_PATH
12 from portage.data import portage_gid, portage_uid, secpass
13 from portage.dbapi import dbapi
14 from portage.dep import use_reduce, paren_reduce, isvalidatom, \
15 isjustname, dep_getkey, match_from_list
16 from portage.exception import CommandNotFound, \
17 InvalidData, InvalidPackageName, \
18 FileNotFound, PermissionDenied, UnsupportedAPIException
19 from portage.locks import lockdir, unlockdir
20 from portage.output import bold, red, green
21 from portage.update import fixdbentries
22 from portage.util import apply_secpass_permissions, ConfigProtect, ensure_dirs, \
23 writemsg, writemsg_level, write_atomic, atomic_ofstream, writedict, \
24 grabfile, grabdict, normalize_path, new_protect_filename, getlibpaths
25 from portage.versions import pkgsplit, catpkgsplit, catsplit, best, pkgcmp
27 from portage import listdir, dep_expand, digraph, flatten, key_expand, \
28 doebuild_environment, doebuild, env_update, prepare_build_dirs, \
29 abssymlink, movefile, _movefile, bsd_chflags, cpv_getkey
31 from portage.elog import elog_process
32 from portage.elog.filtering import filter_mergephases, filter_unmergephases
33 from portage.cache.mappings import slot_dict_class
35 import os, re, shutil, stat, errno, copy, subprocess
38 from itertools import izip
41 import cPickle as pickle
45 class vardbapi(dbapi):
47 _excluded_dirs = ["CVS", "lost+found"]
48 _excluded_dirs = [re.escape(x) for x in _excluded_dirs]
49 _excluded_dirs = re.compile(r'^(\..*|-MERGING-.*|' + \
50 "|".join(_excluded_dirs) + r')$')
52 _aux_cache_version = "1"
53 _owners_cache_version = "1"
55 # Number of uncached packages to trigger cache update, since
56 # it's wasteful to update it for every vdb change.
57 _aux_cache_threshold = 5
59 _aux_cache_keys_re = re.compile(r'^NEEDED\..*$')
60 _aux_multi_line_re = re.compile(r'^(CONTENTS|NEEDED\..*)$')
62 def __init__(self, root, categories=None, settings=None, vartree=None):
64 The categories parameter is unused since the dbapi class
65 now has a categories property that is generated from the
70 #cache for category directory mtimes
73 #cache for dependency checks
76 #cache for cp_list results
81 from portage import settings
82 self.settings = settings
84 from portage import db
85 vartree = db[root]["vartree"]
86 self.vartree = vartree
87 self._aux_cache_keys = set(
88 ["CHOST", "COUNTER", "DEPEND", "DESCRIPTION",
89 "EAPI", "HOMEPAGE", "IUSE", "KEYWORDS",
90 "LICENSE", "PDEPEND", "PROPERTIES", "PROVIDE", "RDEPEND",
91 "repository", "RESTRICT" , "SLOT", "USE"])
92 self._aux_cache_obj = None
93 self._aux_cache_filename = os.path.join(self.root,
94 CACHE_PATH.lstrip(os.path.sep), "vdb_metadata.pickle")
95 self._counter_path = os.path.join(root,
96 CACHE_PATH.lstrip(os.path.sep), "counter")
98 self._owners = self._owners_db(self)
100 def getpath(self, mykey, filename=None):
101 rValue = os.path.join(self.root, VDB_PATH, mykey)
103 rValue = os.path.join(rValue, filename)
def cpv_exists(self, mykey):
	"""Return True if the given cpv has an entry on disk (no masking)."""
	pkg_dir = self.getpath(mykey)
	return os.path.exists(pkg_dir)
110 def cpv_counter(self, mycpv):
111 "This method will grab the COUNTER. Returns a counter value."
113 return long(self.aux_get(mycpv, ["COUNTER"])[0])
114 except (KeyError, ValueError):
116 writemsg_level(("portage: COUNTER for %s was corrupted; " + \
117 "resetting to value of 0\n") % (mycpv,),
118 level=logging.ERROR, noiselevel=-1)
121 def _counter_hash(self):
123 from hashlib import md5 as new_hash
125 from md5 import new as new_hash
127 aux_keys = ["COUNTER"]
128 cpv_list = self.cpv_all()
132 counter, = self.aux_get(cpv, aux_keys)
def cpv_inject(self, mycpv):
	"""Inject a real package into the on-disk database.

	Assumes mycpv is valid and does not already exist.
	"""
	pkg_dir = self.getpath(mycpv)
	os.makedirs(pkg_dir)
	new_counter = self.counter_tick(self.root, mycpv=mycpv)
	# Record a per-package COUNTER so that emerge clean does the right thing.
	counter_file = self.getpath(mycpv, filename="COUNTER")
	write_atomic(counter_file, str(new_counter))
145 def isInjected(self, mycpv):
146 if self.cpv_exists(mycpv):
147 if os.path.exists(self.getpath(mycpv, filename="INJECTED")):
149 if not os.path.exists(self.getpath(mycpv, filename="CONTENTS")):
153 def move_ent(self, mylist):
158 for cp in [origcp, newcp]:
159 if not (isvalidatom(cp) and isjustname(cp)):
160 raise InvalidPackageName(cp)
161 origmatches = self.match(origcp, use_cache=0)
165 for mycpv in origmatches:
166 mycpsplit = catpkgsplit(mycpv)
167 mynewcpv = newcp + "-" + mycpsplit[2]
168 mynewcat = newcp.split("/")[0]
169 if mycpsplit[3] != "r0":
170 mynewcpv += "-" + mycpsplit[3]
171 mycpsplit_new = catpkgsplit(mynewcpv)
172 origpath = self.getpath(mycpv)
173 if not os.path.exists(origpath):
176 if not os.path.exists(self.getpath(mynewcat)):
177 #create the directory
178 os.makedirs(self.getpath(mynewcat))
179 newpath = self.getpath(mynewcpv)
180 if os.path.exists(newpath):
181 #dest already exists; keep this puppy where it is.
183 _movefile(origpath, newpath, mysettings=self.settings)
185 # We need to rename the ebuild now.
186 old_pf = catsplit(mycpv)[1]
187 new_pf = catsplit(mynewcpv)[1]
190 os.rename(os.path.join(newpath, old_pf + ".ebuild"),
191 os.path.join(newpath, new_pf + ".ebuild"))
192 except EnvironmentError, e:
193 if e.errno != errno.ENOENT:
196 write_atomic(os.path.join(newpath, "PF"), new_pf+"\n")
197 write_atomic(os.path.join(newpath, "CATEGORY"), mynewcat+"\n")
198 fixdbentries([mylist], newpath)
201 def cp_list(self, mycp, use_cache=1):
202 mysplit=catsplit(mycp)
203 if mysplit[0] == '*':
204 mysplit[0] = mysplit[0][1:]
206 mystat = os.stat(self.getpath(mysplit[0]))[stat.ST_MTIME]
209 if use_cache and mycp in self.cpcache:
210 cpc = self.cpcache[mycp]
213 cat_dir = self.getpath(mysplit[0])
215 dir_list = os.listdir(cat_dir)
216 except EnvironmentError, e:
217 if e.errno == PermissionDenied.errno:
218 raise PermissionDenied(cat_dir)
224 if self._excluded_dirs.match(x) is not None:
228 self.invalidentry(os.path.join(self.getpath(mysplit[0]), x))
231 if ps[0] == mysplit[1]:
232 returnme.append(mysplit[0]+"/"+x)
233 self._cpv_sort_ascending(returnme)
235 self.cpcache[mycp] = [mystat, returnme[:]]
236 elif mycp in self.cpcache:
237 del self.cpcache[mycp]
240 def cpv_all(self, use_cache=1):
242 Set use_cache=0 to bypass the portage.cachedir() cache in cases
243 when the accuracy of mtime staleness checks should not be trusted
244 (generally this is only necessary in critical sections that
245 involve merge or unmerge of packages).
248 basepath = os.path.join(self.root, VDB_PATH) + os.path.sep
251 from portage import listdir
253 def listdir(p, **kwargs):
255 return [x for x in os.listdir(p) \
256 if os.path.isdir(os.path.join(p, x))]
257 except EnvironmentError, e:
258 if e.errno == PermissionDenied.errno:
259 raise PermissionDenied(p)
263 for x in listdir(basepath, EmptyOnError=1, ignorecvs=1, dirsonly=1):
264 if self._excluded_dirs.match(x) is not None:
266 if not self._category_re.match(x):
268 for y in listdir(basepath + x, EmptyOnError=1, dirsonly=1):
269 if self._excluded_dirs.match(y) is not None:
271 subpath = x + "/" + y
272 # -MERGING- should never be a cpv, nor should files.
274 if catpkgsplit(subpath) is None:
275 self.invalidentry(self.getpath(subpath))
278 self.invalidentry(self.getpath(subpath))
280 returnme.append(subpath)
284 def cp_all(self, use_cache=1):
285 mylist = self.cpv_all(use_cache=use_cache)
291 mysplit = catpkgsplit(y)
293 self.invalidentry(self.getpath(y))
296 self.invalidentry(self.getpath(y))
298 d[mysplit[0]+"/"+mysplit[1]] = None
301 def checkblockers(self, origdep):
304 def _clear_cache(self):
305 self.mtdircache.clear()
306 self.matchcache.clear()
308 self._aux_cache_obj = None
def _add(self, pkg_dblink):
	"""Invalidate cached data after the given package is added to the vdb."""
	self._clear_pkg_cache(pkg_dblink)
def _remove(self, pkg_dblink):
	"""Invalidate cached data after the given package is removed from the vdb."""
	self._clear_pkg_cache(pkg_dblink)
def _clear_pkg_cache(self, pkg_dblink):
	"""Actively drop cache entries affected by the given package.

	With <python-2.5, mtimes have 1 second granularity, so mtime
	comparisons alone are not always sufficient to detect stale
	vardbapi caches; the relevant entries are discarded explicitly.
	"""
	category = pkg_dblink.cat
	self.mtdircache.pop(category, None)
	self.matchcache.pop(category, None)
	self.cpcache.pop(pkg_dblink.mysplit[0], None)
	from portage import dircache
	dircache.pop(pkg_dblink.dbcatdir, None)
326 def match(self, origdep, use_cache=1):
327 "caching match function"
329 origdep, mydb=self, use_cache=use_cache, settings=self.settings)
330 mykey = dep_getkey(mydep)
331 mycat = catsplit(mykey)[0]
333 if mycat in self.matchcache:
334 del self.mtdircache[mycat]
335 del self.matchcache[mycat]
336 return list(self._iter_match(mydep,
337 self.cp_list(mydep.cp, use_cache=use_cache)))
339 curmtime = os.stat(self.root+VDB_PATH+"/"+mycat).st_mtime
340 except (IOError, OSError):
343 if mycat not in self.matchcache or \
344 self.mtdircache[mycat] != curmtime:
346 self.mtdircache[mycat] = curmtime
347 self.matchcache[mycat] = {}
348 if mydep not in self.matchcache[mycat]:
349 mymatch = list(self._iter_match(mydep,
350 self.cp_list(mydep.cp, use_cache=use_cache)))
351 self.matchcache[mycat][mydep] = mymatch
352 return self.matchcache[mycat][mydep][:]
def findname(self, mycpv):
	"""Return the path of the installed ebuild file for the given cpv."""
	pf = catsplit(mycpv)[1]
	return self.getpath(str(mycpv), filename=pf + ".ebuild")
357 def flush_cache(self):
358 """If the current user has permission and the internal aux_get cache has
359 been updated, save it to disk and mark it unmodified. This is called
360 by emerge after it has loaded the full vdb for use in dependency
361 calculations. Currently, the cache is only written if the user has
362 superuser privileges (since that's required to obtain a lock), but all
363 users have read access and benefit from faster metadata lookups (as
364 long as at least part of the cache is still valid)."""
365 if self._aux_cache is not None and \
366 len(self._aux_cache["modified"]) >= self._aux_cache_threshold and \
368 self._owners.populate() # index any unindexed contents
369 valid_nodes = set(self.cpv_all())
370 for cpv in self._aux_cache["packages"].keys():
371 if cpv not in valid_nodes:
372 del self._aux_cache["packages"][cpv]
373 del self._aux_cache["modified"]
375 f = atomic_ofstream(self._aux_cache_filename)
376 pickle.dump(self._aux_cache, f, -1)
378 apply_secpass_permissions(
379 self._aux_cache_filename, gid=portage_gid, mode=0644)
380 except (IOError, OSError), e:
382 self._aux_cache["modified"] = set()
def _aux_cache(self):
	"""Lazily initialize and return the aux_get metadata cache object."""
	cache_obj = self._aux_cache_obj
	if cache_obj is None:
		self._aux_cache_init()
		cache_obj = self._aux_cache_obj
	return cache_obj
390 def _aux_cache_init(self):
393 f = open(self._aux_cache_filename)
394 mypickle = pickle.Unpickler(f)
395 mypickle.find_global = None
396 aux_cache = mypickle.load()
399 except (IOError, OSError, EOFError, pickle.UnpicklingError), e:
400 if isinstance(e, pickle.UnpicklingError):
401 writemsg("!!! Error loading '%s': %s\n" % \
402 (self._aux_cache_filename, str(e)), noiselevel=-1)
405 if not aux_cache or \
406 not isinstance(aux_cache, dict) or \
407 aux_cache.get("version") != self._aux_cache_version or \
408 not aux_cache.get("packages"):
409 aux_cache = {"version": self._aux_cache_version}
410 aux_cache["packages"] = {}
412 owners = aux_cache.get("owners")
413 if owners is not None:
414 if not isinstance(owners, dict):
416 elif "version" not in owners:
418 elif owners["version"] != self._owners_cache_version:
420 elif "base_names" not in owners:
422 elif not isinstance(owners["base_names"], dict):
428 "version" : self._owners_cache_version
430 aux_cache["owners"] = owners
432 aux_cache["modified"] = set()
433 self._aux_cache_obj = aux_cache
435 def aux_get(self, mycpv, wants):
436 """This automatically caches selected keys that are frequently needed
437 by emerge for dependency calculations. The cached metadata is
438 considered valid if the mtime of the package directory has not changed
439 since the data was cached. The cache is stored in a pickled dict
440 object with the following format:
442 {version:"1", "packages":{cpv1:(mtime,{k1,v1, k2,v2, ...}), cpv2...}}
444 If an error occurs while loading the cache pickle or the version is
445 unrecognized, the cache will simple be recreated from scratch (it is
446 completely disposable).
448 cache_these_wants = self._aux_cache_keys.intersection(wants)
450 if self._aux_cache_keys_re.match(x) is not None:
451 cache_these_wants.add(x)
453 if not cache_these_wants:
454 return self._aux_get(mycpv, wants)
456 cache_these = set(self._aux_cache_keys)
457 cache_these.update(cache_these_wants)
459 mydir = self.getpath(mycpv)
462 mydir_stat = os.stat(mydir)
464 if e.errno != errno.ENOENT:
466 raise KeyError(mycpv)
467 mydir_mtime = long(mydir_stat.st_mtime)
468 pkg_data = self._aux_cache["packages"].get(mycpv)
469 pull_me = cache_these.union(wants)
470 mydata = {"_mtime_" : mydir_mtime}
472 cache_incomplete = False
475 if pkg_data is not None:
476 if not isinstance(pkg_data, tuple) or len(pkg_data) != 2:
479 cache_mtime, metadata = pkg_data
480 if not isinstance(cache_mtime, (long, int)) or \
481 not isinstance(metadata, dict):
485 cache_mtime, metadata = pkg_data
486 cache_valid = cache_mtime == mydir_mtime
488 mydata.update(metadata)
489 pull_me.difference_update(mydata)
492 # pull any needed data and cache it
493 aux_keys = list(pull_me)
494 for k, v in izip(aux_keys,
495 self._aux_get(mycpv, aux_keys, st=mydir_stat)):
497 if not cache_valid or cache_these.difference(metadata):
499 if cache_valid and metadata:
500 cache_data.update(metadata)
501 for aux_key in cache_these:
502 cache_data[aux_key] = mydata[aux_key]
503 self._aux_cache["packages"][mycpv] = (mydir_mtime, cache_data)
504 self._aux_cache["modified"].add(mycpv)
505 return [mydata[x] for x in wants]
507 def _aux_get(self, mycpv, wants, st=None):
508 mydir = self.getpath(mycpv)
513 if e.errno == errno.ENOENT:
514 raise KeyError(mycpv)
515 elif e.errno == PermissionDenied.errno:
516 raise PermissionDenied(mydir)
519 if not stat.S_ISDIR(st.st_mode):
520 raise KeyError(mycpv)
524 results.append(st.st_mtime)
527 myf = open(os.path.join(mydir, x), "r")
532 # Preserve \n for metadata that is known to
533 # contain multiple lines.
534 if self._aux_multi_line_re.match(x) is None:
535 myd = " ".join(myd.split())
538 if x == "EAPI" and not myd:
544 def aux_update(self, cpv, values):
545 cat, pkg = catsplit(cpv)
546 mylink = dblink(cat, pkg, self.root, self.settings,
547 treetype="vartree", vartree=self.vartree)
548 if not mylink.exists():
550 for k, v in values.iteritems():
555 os.unlink(os.path.join(self.getpath(cpv), k))
556 except EnvironmentError:
def counter_tick(self, myroot, mycpv=None):
	"""Increment the global COUNTER and return the new value."""
	return self.counter_tick_core(myroot, incrementing=1, mycpv=mycpv)
562 def get_counter_tick_core(self, myroot, mycpv=None):
564 Use this method to retrieve the counter instead
565 of having to trust the value of a global counter
566 file that can lead to invalid COUNTER
567 generation. When cache is valid, the package COUNTER
568 files are not read and we rely on the timestamp of
569 the package directory to validate cache. The stat
570 calls should only take a short time, so performance
571 is sufficient without having to rely on a potentially
572 corrupt global counter file.
574 The global counter file located at
575 $CACHE_PATH/counter serves to record the
576 counter of the last installed package and
577 it also corresponds to the total number of
578 installation actions that have occurred in
579 the history of this package database.
581 cp_list = self.cp_list
583 for cp in self.cp_all():
584 for cpv in cp_list(cp):
586 counter = int(self.aux_get(cpv, ["COUNTER"])[0])
587 except (KeyError, OverflowError, ValueError):
589 if counter > max_counter:
590 max_counter = counter
595 cfile = open(self._counter_path, "r")
596 except EnvironmentError, e:
597 new_vdb = not bool(self.cpv_all())
599 writemsg("!!! Unable to read COUNTER file: '%s'\n" % \
600 self._counter_path, noiselevel=-1)
601 writemsg("!!! %s\n" % str(e), noiselevel=-1)
606 counter = long(cfile.readline().strip())
609 except (OverflowError, ValueError), e:
610 writemsg("!!! COUNTER file is corrupt: '%s'\n" % \
611 self._counter_path, noiselevel=-1)
612 writemsg("!!! %s\n" % str(e), noiselevel=-1)
615 # We must ensure that we return a counter
616 # value that is at least as large as the
617 # highest one from the installed packages,
618 # since having a corrupt value that is too low
619 # can trigger incorrect AUTOCLEAN behavior due
620 # to newly installed packages having lower
621 # COUNTERs than the previous version in the
623 if counter > max_counter:
624 max_counter = counter
626 if counter < 0 and not new_vdb:
627 writemsg("!!! Initializing COUNTER to " + \
628 "value of %d\n" % max_counter, noiselevel=-1)
630 return max_counter + 1
632 def counter_tick_core(self, myroot, incrementing=1, mycpv=None):
633 "This method will grab the next COUNTER value and record it back to the global file. Returns new counter value."
634 counter = self.get_counter_tick_core(myroot, mycpv=mycpv) - 1
638 # update new global counter file
639 write_atomic(self._counter_path, str(counter))
def _dblink(self, cpv):
	"""Construct a dblink instance for the given installed cpv."""
	cat, pkg = catsplit(cpv)
	return dblink(cat, pkg, self.root, self.settings,
		vartree=self.vartree, treetype="vartree")
647 def removeFromContents(self, pkg, paths, relative_paths=True):
649 @param pkg: cpv for an installed package
651 @param paths: paths of files to remove from contents
652 @type paths: iterable
654 if not hasattr(pkg, "getcontents"):
655 pkg = self._dblink(pkg)
657 root_len = len(root) - 1
658 new_contents = pkg.getcontents().copy()
661 for filename in paths:
662 filename = normalize_path(filename)
664 relative_filename = filename
666 relative_filename = filename[root_len:]
667 contents_key = pkg._match_contents(relative_filename, root)
669 del new_contents[contents_key]
673 f = atomic_ofstream(os.path.join(pkg.dbdir, "CONTENTS"))
674 write_contents(new_contents, root, f)
676 pkg._clear_contents_cache()
678 class _owners_cache(object):
680 This class maintains an hash table that serves to index package
681 contents by mapping the basename of file to a list of possible
682 packages that own it. This is used to optimize owner lookups
683 by narrowing the search down to a smaller number of packages.
686 from hashlib import md5 as _new_hash
688 from md5 import new as _new_hash
691 _hex_chars = _hash_bits / 4
693 def __init__(self, vardb):
697 root_len = len(self._vardb.root)
698 contents = self._vardb._dblink(cpv).getcontents()
699 pkg_hash = self._hash_pkg(cpv)
701 # Empty path is a code used to represent empty contents.
702 self._add_path("", pkg_hash)
704 self._add_path(x[root_len:], pkg_hash)
705 self._vardb._aux_cache["modified"].add(cpv)
707 def _add_path(self, path, pkg_hash):
709 Empty path is a code that represents empty contents.
712 name = os.path.basename(path.rstrip(os.path.sep))
717 name_hash = self._hash_str(name)
718 base_names = self._vardb._aux_cache["owners"]["base_names"]
719 pkgs = base_names.get(name_hash)
722 base_names[name_hash] = pkgs
723 pkgs[pkg_hash] = None
725 def _hash_str(self, s):
729 h = h[-self._hex_chars:]
733 def _hash_pkg(self, cpv):
734 counter, mtime = self._vardb.aux_get(
735 cpv, ["COUNTER", "_mtime_"])
737 counter = int(counter)
740 return (cpv, counter, mtime)
742 class _owners_db(object):
744 def __init__(self, vardb):
751 owners_cache = vardbapi._owners_cache(self._vardb)
752 cached_hashes = set()
753 base_names = self._vardb._aux_cache["owners"]["base_names"]
755 # Take inventory of all cached package hashes.
756 for name, hash_values in base_names.items():
757 if not isinstance(hash_values, dict):
760 cached_hashes.update(hash_values)
762 # Create sets of valid package hashes and uncached packages.
763 uncached_pkgs = set()
764 hash_pkg = owners_cache._hash_pkg
765 valid_pkg_hashes = set()
766 for cpv in self._vardb.cpv_all():
767 hash_value = hash_pkg(cpv)
768 valid_pkg_hashes.add(hash_value)
769 if hash_value not in cached_hashes:
770 uncached_pkgs.add(cpv)
772 # Cache any missing packages.
773 for cpv in uncached_pkgs:
774 owners_cache.add(cpv)
776 # Delete any stale cache.
777 stale_hashes = cached_hashes.difference(valid_pkg_hashes)
779 for base_name_hash, bucket in base_names.items():
780 for hash_value in stale_hashes.intersection(bucket):
781 del bucket[hash_value]
783 del base_names[base_name_hash]
787 def get_owners(self, path_iter):
789 @return the owners as a dblink -> set(files) mapping.
792 for owner, f in self.iter_owners(path_iter):
793 owned_files = owners.get(owner)
794 if owned_files is None:
796 owners[owner] = owned_files
800 def getFileOwnerMap(self, path_iter):
801 owners = self.get_owners(path_iter)
803 for pkg_dblink, files in owners.iteritems():
805 owner_set = file_owners.get(f)
806 if owner_set is None:
808 file_owners[f] = owner_set
809 owner_set.add(pkg_dblink)
812 def iter_owners(self, path_iter):
814 Iterate over tuples of (dblink, path). In order to avoid
815 consuming too many resources for too much time, resources
816 are only allocated for the duration of a given iter_owners()
817 call. Therefore, to maximize reuse of resources when searching
818 for multiple files, it's best to search for them all in a single
822 owners_cache = self._populate()
826 hash_pkg = owners_cache._hash_pkg
827 hash_str = owners_cache._hash_str
828 base_names = self._vardb._aux_cache["owners"]["base_names"]
833 x = dblink_cache.get(cpv)
835 x = self._vardb._dblink(cpv)
836 dblink_cache[cpv] = x
839 for path in path_iter:
840 name = os.path.basename(path.rstrip(os.path.sep))
844 name_hash = hash_str(name)
845 pkgs = base_names.get(name_hash)
847 for hash_value in pkgs:
848 if not isinstance(hash_value, tuple) or \
849 len(hash_value) != 3:
851 cpv, counter, mtime = hash_value
852 if not isinstance(cpv, basestring):
855 current_hash = hash_pkg(cpv)
859 if current_hash != hash_value:
861 if dblink(cpv).isowner(path, root):
862 yield dblink(cpv), path
864 class vartree(object):
865 "this tree will scan a var/db/pkg database located at root (passed to init)"
866 def __init__(self, root="/", virtual=None, clone=None, categories=None,
869 writemsg("vartree.__init__(): deprecated " + \
870 "use of clone parameter\n", noiselevel=-1)
871 self.root = clone.root[:]
872 self.dbapi = copy.deepcopy(clone.dbapi)
874 from portage import config
875 self.settings = config(clone=clone.settings)
879 from portage import settings
880 self.settings = settings # for key_expand calls
881 if categories is None:
882 categories = settings.categories
883 self.dbapi = vardbapi(self.root, categories=categories,
884 settings=settings, vartree=self)
def getpath(self, mykey, filename=None):
	"""Delegate path construction to the underlying vardbapi."""
	return self.dbapi.getpath(mykey, filename=filename)
890 def zap(self, mycpv):
893 def inject(self, mycpv):
896 def get_provide(self, mycpv):
900 mylines, myuse = self.dbapi.aux_get(mycpv, ["PROVIDE", "USE"])
902 myuse = myuse.split()
903 mylines = flatten(use_reduce(paren_reduce(mylines), uselist=myuse))
904 for myprovide in mylines:
905 mys = catpkgsplit(myprovide)
907 mys = myprovide.split("/")
908 myprovides += [mys[0] + "/" + mys[1]]
910 except SystemExit, e:
913 mydir = os.path.join(self.root, VDB_PATH, mycpv)
914 writemsg("\nParse Error reading PROVIDE and USE in '%s'\n" % mydir,
917 writemsg("Possibly Invalid: '%s'\n" % str(mylines),
919 writemsg("Exception: %s\n\n" % str(e), noiselevel=-1)
922 def get_all_provides(self):
924 for node in self.getallcpv():
925 for mykey in self.get_provide(node):
926 if mykey in myprovides:
927 myprovides[mykey] += [node]
929 myprovides[mykey] = [node]
932 def dep_bestmatch(self, mydep, use_cache=1):
933 "compatibility method -- all matches, not just visible ones"
934 #mymatch=best(match(dep_expand(mydep,self.dbapi),self.dbapi))
935 mymatch = best(self.dbapi.match(
936 dep_expand(mydep, mydb=self.dbapi, settings=self.settings),
937 use_cache=use_cache))
943 def dep_match(self, mydep, use_cache=1):
944 "compatibility method -- we want to see all matches, not just visible ones"
945 #mymatch = match(mydep,self.dbapi)
946 mymatch = self.dbapi.match(mydep, use_cache=use_cache)
def exists_specific(self, cpv):
	"""Return True if the exact cpv is installed."""
	return self.dbapi.cpv_exists(cpv)
956 """temporary function, probably to be renamed --- Gets a list of all
957 category/package-versions installed on the system."""
958 return self.dbapi.cpv_all()
def getallnodes(self):
	"""Return all *unmasked* cat/pkg nodes; masked packages may or may
	not also be available for nodes in this list."""
	return self.dbapi.cp_all()
965 def exists_specific_cat(self, cpv, use_cache=1):
966 cpv = key_expand(cpv, mydb=self.dbapi, use_cache=use_cache,
967 settings=self.settings)
971 mylist = listdir(self.getpath(a[0]), EmptyOnError=1)
975 self.dbapi.invalidentry(self.getpath(a[0], filename=x))
def getebuildpath(self, fullpackage):
	"""Return the path of the stored ebuild for the given cpv."""
	pkg_name = catsplit(fullpackage)[1]
	return self.getpath(fullpackage, filename=pkg_name + ".ebuild")
985 def getnode(self, mykey, use_cache=1):
986 mykey = key_expand(mykey, mydb=self.dbapi, use_cache=use_cache,
987 settings=self.settings)
990 mysplit = catsplit(mykey)
991 mydirlist = listdir(self.getpath(mysplit[0]),EmptyOnError=1)
994 mypsplit = pkgsplit(x)
996 self.dbapi.invalidentry(self.getpath(mysplit[0], filename=x))
998 if mypsplit[0] == mysplit[1]:
999 appendme = [mysplit[0]+"/"+x, [mysplit[0], mypsplit[0], mypsplit[1], mypsplit[2]]]
1000 returnme.append(appendme)
1004 def getslot(self, mycatpkg):
1005 "Get a slot for a catpkg; assume it exists."
1007 return self.dbapi.aux_get(mycatpkg, ["SLOT"])[0]
1011 def hasnode(self, mykey, use_cache):
1012 """Does the particular node (cat/pkg key) exist?"""
1013 mykey = key_expand(mykey, mydb=self.dbapi, use_cache=use_cache,
1014 settings=self.settings)
1015 mysplit = catsplit(mykey)
1016 mydirlist = listdir(self.getpath(mysplit[0]), EmptyOnError=1)
1018 mypsplit = pkgsplit(x)
1020 self.dbapi.invalidentry(self.getpath(mysplit[0], filename=x))
1022 if mypsplit[0] == mysplit[1]:
1029 class dblink(object):
1031 This class provides an interface to the installed package database
1032 At present this is implemented as a text backend in /var/db/pkg.
1036 _normalize_needed = re.compile(r'.*//.*|^[^/]|.+/$|(^|.*/)\.\.?(/.*|$)')
1037 _contents_split_counts = {
1045 # When looping over files for merge/unmerge, temporarily yield to the
1046 # scheduler each time this many files are processed.
1047 _file_merge_yield_interval = 20
1049 def __init__(self, cat, pkg, myroot, mysettings, treetype=None,
1050 vartree=None, blockers=None, scheduler=None):
1052 Creates a DBlink object for a given CPV.
1053 The given CPV may not be present in the database already.
1055 @param cat: Category
1057 @param pkg: Package (PV)
1059 @param myroot: Typically ${ROOT}
1060 @type myroot: String (Path)
1061 @param mysettings: Typically portage.config
1062 @type mysettings: An instance of portage.config
1063 @param treetype: one of ['porttree','bintree','vartree']
1064 @type treetype: String
1065 @param vartree: an instance of vartree corresponding to myroot.
1066 @type vartree: vartree
1071 self.mycpv = self.cat + "/" + self.pkg
1072 self.mysplit = list(catpkgsplit(self.mycpv)[1:])
1073 self.mysplit[0] = "%s/%s" % (self.cat, self.mysplit[0])
1074 self.treetype = treetype
1076 from portage import db
1077 vartree = db[myroot]["vartree"]
1078 self.vartree = vartree
1079 self._blockers = blockers
1080 self._scheduler = scheduler
1082 self.dbroot = normalize_path(os.path.join(myroot, VDB_PATH))
1083 self.dbcatdir = self.dbroot+"/"+cat
1084 self.dbpkgdir = self.dbcatdir+"/"+pkg
1085 self.dbtmpdir = self.dbcatdir+"/-MERGING-"+pkg
1086 self.dbdir = self.dbpkgdir
1088 self._lock_vdb = None
1090 self.settings = mysettings
1091 self._verbose = self.settings.get("PORTAGE_VERBOSE") == "1"
1094 protect_obj = ConfigProtect(myroot,
1095 shlex.split(mysettings.get("CONFIG_PROTECT", "")),
1096 shlex.split(mysettings.get("CONFIG_PROTECT_MASK", "")))
1097 self.updateprotect = protect_obj.updateprotect
1098 self.isprotected = protect_obj.isprotected
1099 self._installed_instance = None
1100 self.contentscache = None
1101 self._contents_inodes = None
1102 self._contents_basenames = None
1103 self._md5_merge_map = {}
1107 raise AssertionError("Lock already held.")
1108 # At least the parent needs to exist for the lock file.
1109 ensure_dirs(self.dbroot)
1110 self._lock_vdb = lockdir(self.dbroot)
1114 unlockdir(self._lock_vdb)
1115 self._lock_vdb = None
1118 "return path to location of db information (for >>> informational display)"
1122 "does the db entry exist? boolean."
1123 return os.path.exists(self.dbdir)
1127 Remove this entry from the database
1129 if not os.path.exists(self.dbdir):
1132 # Check validity of self.dbdir before attempting to remove it.
1133 if not self.dbdir.startswith(self.dbroot):
1134 writemsg("portage.dblink.delete(): invalid dbdir: %s\n" % \
1135 self.dbdir, noiselevel=-1)
1138 shutil.rmtree(self.dbdir)
1139 self.vartree.dbapi._remove(self)
1141 def clearcontents(self):
1143 For a given db entry (self), erase the CONTENTS values.
1145 if os.path.exists(self.dbdir+"/CONTENTS"):
1146 os.unlink(self.dbdir+"/CONTENTS")
def _clear_contents_cache(self):
	"""Drop all cached CONTENTS data so it is re-read on next access."""
	self.contentscache = None
	self._contents_inodes = None
	self._contents_basenames = None
1153 def getcontents(self):
1155 Get the installed files of a given package (aka what that package installed)
1157 contents_file = os.path.join(self.dbdir, "CONTENTS")
1158 if self.contentscache is not None:
1159 return self.contentscache
1162 myc = open(contents_file,"r")
1163 except EnvironmentError, e:
1164 if e.errno != errno.ENOENT:
1167 self.contentscache = pkgfiles
1169 mylines = myc.readlines()
1172 normalize_needed = self._normalize_needed
1173 contents_split_counts = self._contents_split_counts
1174 myroot = self.myroot
1175 if myroot == os.path.sep:
1179 for pos, line in enumerate(mylines):
1180 if null_byte in line:
1181 # Null bytes are a common indication of corruption.
1182 errors.append((pos + 1, "Null byte found in CONTENTS entry"))
1184 line = line.rstrip("\n")
1185 # Split on " " so that even file paths that
1186 # end with spaces can be handled.
1187 mydat = line.split(" ")
1188 entry_type = mydat[0] # empty string if line is empty
1189 correct_split_count = contents_split_counts.get(entry_type)
1190 if correct_split_count and len(mydat) > correct_split_count:
1191 # Apparently file paths contain spaces, so reassemble
1192 # the split have the correct_split_count.
1193 newsplit = [entry_type]
1194 spaces_total = len(mydat) - correct_split_count
1195 if entry_type == "sym":
1197 splitter = mydat.index("->", 2, len(mydat) - 2)
1199 errors.append((pos + 1, "Unrecognized CONTENTS entry"))
1201 spaces_in_path = splitter - 2
1202 spaces_in_target = spaces_total - spaces_in_path
1203 newsplit.append(" ".join(mydat[1:splitter]))
1204 newsplit.append("->")
1205 target_end = splitter + spaces_in_target + 2
1206 newsplit.append(" ".join(mydat[splitter + 1:target_end]))
1207 newsplit.extend(mydat[target_end:])
1209 path_end = spaces_total + 2
1210 newsplit.append(" ".join(mydat[1:path_end]))
1211 newsplit.extend(mydat[path_end:])
1214 # we do this so we can remove from non-root filesystems
1215 # (use the ROOT var to allow maintenance on other partitions)
1217 if normalize_needed.match(mydat[1]):
1218 mydat[1] = normalize_path(mydat[1])
1219 if not mydat[1].startswith(os.path.sep):
1220 mydat[1] = os.path.sep + mydat[1]
1222 mydat[1] = os.path.join(myroot, mydat[1].lstrip(os.path.sep))
1223 if mydat[0] == "obj":
1224 #format: type, mtime, md5sum
1225 pkgfiles[mydat[1]] = [mydat[0], mydat[3], mydat[2]]
1226 elif mydat[0] == "dir":
1228 pkgfiles[mydat[1]] = [mydat[0]]
1229 elif mydat[0] == "sym":
1230 #format: type, mtime, dest
1231 pkgfiles[mydat[1]] = [mydat[0], mydat[4], mydat[3]]
1232 elif mydat[0] == "dev":
1234 pkgfiles[mydat[1]] = [mydat[0]]
1235 elif mydat[0]=="fif":
1237 pkgfiles[mydat[1]] = [mydat[0]]
1239 errors.append((pos + 1, "Unrecognized CONTENTS entry"))
1240 except (KeyError, IndexError):
1241 errors.append((pos + 1, "Unrecognized CONTENTS entry"))
1243 writemsg("!!! Parse error in '%s'\n" % contents_file, noiselevel=-1)
1244 for pos, e in errors:
1245 writemsg("!!! line %d: %s\n" % (pos, e), noiselevel=-1)
1246 self.contentscache = pkgfiles
# NOTE(review): this dump has many physical lines elided (the embedded
# original line numbers are non-contiguous), so several statements below
# lack their visible `try:`/`else:`/`return` partners — confirm against
# the full file before editing.
#
# Unmerge this installed package from the live filesystem: run
# pkg_prerm, remove the package's files via _unmerge_pkgfiles(), run
# pkg_postrm and cleanrm, flush elog messages, and finally trigger
# env_update() so ldconfig/env data is refreshed.  The caller must hold
# the vdb lock (lockdb()/unlockdb()) around this call.
1249 def unmerge(self, pkgfiles=None, trimworld=1, cleanup=1,
1250 ldpath_mtimes=None, others_in_slot=None):
1253 Unmerges a given package (CPV)
1258 @param pkgfiles: files to unmerge (generally self.getcontents() )
1259 @type pkgfiles: Dictionary
1260 @param trimworld: Remove CPV from world file if True, not if False
1261 @type trimworld: Boolean
1262 @param cleanup: cleanup to pass to doebuild (see doebuild)
1263 @type cleanup: Boolean
1264 @param ldpath_mtimes: mtimes to pass to env_update (see env_update)
1265 @type ldpath_mtimes: Dictionary
1266 @param others_in_slot: all dblink instances in this slot, excluding self
1267 @type others_in_slot: list
1270 1. os.EX_OK if everything went well.
1271 2. return code of the failed phase (for prerm, postrm, cleanrm)
1274 The caller must ensure that lockdb() and unlockdb() are called
1275 before and after this method.
1277 showMessage = self._display_merge
# Invalidate the vardbapi category cache; this unmerge may remove the
# last package of a category.
1278 if self.vartree.dbapi._categories is not None:
1279 self.vartree.dbapi._categories = None
1280 # When others_in_slot is supplied, the security check has already been
1281 # done for this slot, so it shouldn't be repeated until the next
1282 # replacement or unmerge operation.
1283 if others_in_slot is None:
# Build the list of other installed instances in the same SLOT; the
# current cpv itself is excluded (see the `if cur_cpv == self.mycpv`
# guard — its continue/skip line is elided in this dump).
1284 slot = self.vartree.dbapi.aux_get(self.mycpv, ["SLOT"])[0]
1285 slot_matches = self.vartree.dbapi.match(
1286 "%s:%s" % (dep_getkey(self.mycpv), slot))
1288 for cur_cpv in slot_matches:
1289 if cur_cpv == self.mycpv:
1291 others_in_slot.append(dblink(self.cat, catsplit(cur_cpv)[1],
1292 self.vartree.root, self.settings, vartree=self.vartree,
1293 treetype="vartree"))
# Check for suspicious suid/sgid hardlinks before touching anything.
1295 retval = self._security_check([self] + others_in_slot)
1299 contents = self.getcontents()
1300 # Now, don't assume that the name of the ebuild is the same as the
1301 # name of the dir; the package may have been moved.
1303 ebuild_phase = "prerm"
1305 mystuff = os.listdir(self.dbdir)
1307 if x.endswith(".ebuild"):
1308 myebuildpath = os.path.join(self.dbdir, self.pkg + ".ebuild")
1309 if x[:-7] != self.pkg:
1310 # Clean up after vardbapi.move_ent() breakage in
1311 # portage versions before 2.1.2
1312 os.rename(os.path.join(self.dbdir, x), myebuildpath)
1313 write_atomic(os.path.join(self.dbdir, "PF"), self.pkg+"\n")
1316 self.settings.setcpv(self.mycpv, mydb=self.vartree.dbapi)
1319 doebuild_environment(myebuildpath, "prerm", self.myroot,
1320 self.settings, 0, 0, self.vartree.dbapi)
1321 except UnsupportedAPIException, e:
1322 # Sometimes this happens due to corruption of the EAPI file.
1323 writemsg("!!! FAILED prerm: %s\n" % \
1324 os.path.join(self.dbdir, "EAPI"), noiselevel=-1)
1325 writemsg("%s\n" % str(e), noiselevel=-1)
# Make sure the category build dir exists with portage ownership so
# PORTAGE_BUILDDIR can be locked below.
1328 catdir = os.path.dirname(self.settings["PORTAGE_BUILDDIR"])
1329 ensure_dirs(os.path.dirname(catdir), uid=portage_uid,
1330 gid=portage_gid, mode=070, mask=0)
1332 builddir_lock = None
1334 scheduler = self._scheduler
# Lock the category dir only long enough to create and lock the
# package's own build dir, then release it (1345).
1338 catdir_lock = lockdir(catdir)
1340 uid=portage_uid, gid=portage_gid,
1342 builddir_lock = lockdir(
1343 self.settings["PORTAGE_BUILDDIR"])
1345 unlockdir(catdir_lock)
1349 prepare_build_dirs(self.myroot, self.settings, 1)
1350 log_path = self.settings.get("PORTAGE_LOG_FILE")
# Run pkg_prerm, either directly through doebuild() or via the
# attached scheduler when one is present.
1352 if scheduler is None:
1353 retval = doebuild(myebuildpath, ebuild_phase, self.myroot,
1354 self.settings, cleanup=cleanup, use_cache=0,
1355 mydbapi=self.vartree.dbapi, tree=self.treetype,
1356 vartree=self.vartree)
1358 retval = scheduler.dblinkEbuildPhase(
1359 self, self.vartree.dbapi, myebuildpath, ebuild_phase)
1361 # XXX: Decide how to handle failures here.
1362 if retval != os.EX_OK:
1363 writemsg("!!! FAILED prerm: %s\n" % retval, noiselevel=-1)
# Remove the package's files from the live filesystem; other
# instances in this slot keep any files they still own.
1365 self._unmerge_pkgfiles(pkgfiles, others_in_slot)
1366 self._clear_contents_cache()
# Run pkg_postrm after the files are gone.
1369 ebuild_phase = "postrm"
1370 if scheduler is None:
1371 retval = doebuild(myebuildpath, ebuild_phase, self.myroot,
1372 self.settings, use_cache=0, tree=self.treetype,
1373 mydbapi=self.vartree.dbapi, vartree=self.vartree)
1375 retval = scheduler.dblinkEbuildPhase(
1376 self, self.vartree.dbapi, myebuildpath, ebuild_phase)
1378 # XXX: Decide how to handle failures here.
1379 if retval != os.EX_OK:
1380 writemsg("!!! FAILED postrm: %s\n" % retval, noiselevel=-1)
# On any phase failure, build a detailed eerror message explaining
# how to recover (remove environment.bz2 and/or the saved ebuild).
1386 if retval != os.EX_OK:
1388 msg = ("The '%s' " % ebuild_phase) + \
1389 ("phase of the '%s' package " % self.mycpv) + \
1390 ("has failed with exit value %s." % retval)
1391 from textwrap import wrap
1392 msg_lines.extend(wrap(msg, 72))
1393 msg_lines.append("")
1395 ebuild_name = os.path.basename(myebuildpath)
1396 ebuild_dir = os.path.dirname(myebuildpath)
1397 msg = "The problem occurred while executing " + \
1398 ("the ebuild file named '%s' " % ebuild_name) + \
1399 ("located in the '%s' directory. " \
1401 "If necessary, manually remove " + \
1402 "the environment.bz2 file and/or the " + \
1403 "ebuild file located in that directory."
1404 msg_lines.extend(wrap(msg, 72))
1405 msg_lines.append("")
1407 msg = "Removal " + \
1408 "of the environment.bz2 file is " + \
1409 "preferred since it may allow the " + \
1410 "removal phases to execute successfully. " + \
1411 "The ebuild will be " + \
1412 "sourced and the eclasses " + \
1413 "from the current portage tree will be used " + \
1414 "when necessary. Removal of " + \
1415 "the ebuild file will cause the " + \
1416 "pkg_prerm() and pkg_postrm() removal " + \
1417 "phases to be skipped entirely."
1418 msg_lines.extend(wrap(msg, 72))
1420 self._eerror(ebuild_phase, msg_lines)
1422 # process logs created during pre/postrm
1423 elog_process(self.mycpv, self.settings, phasefilter=filter_unmergephases)
# On success, run the cleanrm phase to clean the build directory.
1424 if retval == os.EX_OK:
1425 if scheduler is None:
1426 doebuild(myebuildpath, "cleanrm", self.myroot,
1427 self.settings, tree="vartree",
1428 mydbapi=self.vartree.dbapi,
1429 vartree=self.vartree)
1431 scheduler.dblinkEbuildPhase(
1432 self, self.vartree.dbapi,
1433 myebuildpath, "cleanrm")
1435 unlockdir(builddir_lock)
1437 if myebuildpath and not catdir_lock:
1438 # Lock catdir for removal if empty.
1439 catdir_lock = lockdir(catdir)
# Removing the (possibly non-empty) category dir is best-effort;
# these errnos are expected and ignored.
1445 if e.errno not in (errno.ENOENT,
1446 errno.ENOTEMPTY, errno.EEXIST):
1449 unlockdir(catdir_lock)
1451 if log_path is not None and os.path.exists(log_path):
1452 # Restore this since it gets lost somewhere above and it
1453 # needs to be set for _display_merge() to be able to log.
1454 # Note that the log isn't necessarily supposed to exist
1455 # since if PORT_LOGDIR is unset then it's a temp file
1456 # so it gets cleaned above.
1457 self.settings["PORTAGE_LOG_FILE"] = log_path
1459 self.settings.pop("PORTAGE_LOG_FILE", None)
# Refresh ldconfig caches and env.d data now that files are removed.
1461 env_update(target_root=self.myroot, prev_mtimes=ldpath_mtimes,
1462 contents=contents, env=self.settings.environ(),
1463 writemsg_level=self._display_merge)
# Filter and route a single merge/unmerge log message.
# Messages below logging.WARN with a non-negative noiselevel are
# suppressed unless self._verbose is set.  When a scheduler is attached
# the message is forwarded to its dblinkDisplayMerge() hook; otherwise
# it goes straight to writemsg_level().
# NOTE(review): embedded original line numbers 1468 and 1472 are elided
# in this dump — presumably an early `return` and an `else:`; confirm
# against the full file.
1466 def _display_merge(self, msg, level=0, noiselevel=0):
1467 if not self._verbose and noiselevel >= 0 and level < logging.WARN:
1469 if self._scheduler is not None:
1470 self._scheduler.dblinkDisplayMerge(self, msg,
1471 level=level, noiselevel=noiselevel)
1473 writemsg_level(msg, level=level, noiselevel=noiselevel)
# NOTE(review): many physical lines are elided in this dump (embedded
# original numbering is non-contiguous): several `try:`/`else:`/
# `continue` partners of the visible statements are missing — confirm
# against the full file before editing.
#
# Remove this package's files from the live filesystem, honoring
# CONFIG_PROTECT, /lib/modules protection, ownership by other instances
# in the same SLOT, mtime/md5 verification for "obj" entries, and the
# FEATURES=unmerge-orphans behavior.  Finishes by pruning stale config-
# memory entries and zapping this cpv from the vartree database.
1475 def _unmerge_pkgfiles(self, pkgfiles, others_in_slot):
1478 Unmerges the contents of a package from the liveFS
1479 Removes the VDB entry for self
1481 @param pkgfiles: typically self.getcontents()
1482 @type pkgfiles: Dictionary { filename: [ 'type', '?', 'md5sum' ] }
1483 @param others_in_slot: all dblink instances in this slot, excluding self
1484 @type others_in_slot: list
1488 showMessage = self._display_merge
1489 scheduler = self._scheduler
# Fall back to the recorded CONTENTS when no file list was supplied.
1492 showMessage("No package files given... Grabbing a set.\n")
1493 pkgfiles = self.getcontents()
1495 if others_in_slot is None:
# Collect the other installed instances of this SLOT; files they
# still own must not be removed (checked via isowner() below).
1497 slot = self.vartree.dbapi.aux_get(self.mycpv, ["SLOT"])[0]
1498 slot_matches = self.vartree.dbapi.match(
1499 "%s:%s" % (dep_getkey(self.mycpv), slot))
1500 for cur_cpv in slot_matches:
1501 if cur_cpv == self.mycpv:
1503 others_in_slot.append(dblink(self.cat, catsplit(cur_cpv)[1],
1504 self.vartree.root, self.settings,
1505 vartree=self.vartree, treetype="vartree"))
1507 dest_root = normalize_path(self.vartree.root).rstrip(os.path.sep) + \
1509 dest_root_len = len(dest_root) - 1
# Config-file "memory" of previously merged config md5sums; stale
# entries for files we unmerge are pruned at the end.
1511 conf_mem_file = os.path.join(dest_root, CONFIG_MEMORY_FILE)
1512 cfgfiledict = grabdict(conf_mem_file)
1515 unmerge_orphans = "unmerge-orphans" in self.settings.features
1518 self.updateprotect()
1519 mykeys = pkgfiles.keys()
1523 #process symlinks second-to-last, directories last.
# Errnos that are expected during unlink/rmdir of possibly-shared or
# already-missing paths; they are silently ignored below.
1525 ignored_unlink_errnos = (
1526 errno.EBUSY, errno.ENOENT,
1527 errno.ENOTDIR, errno.EISDIR)
1528 ignored_rmdir_errnos = (
1529 errno.EEXIST, errno.ENOTEMPTY,
1530 errno.EBUSY, errno.ENOENT,
1531 errno.ENOTDIR, errno.EISDIR)
1532 modprotect = os.path.join(self.vartree.root, "lib/modules/")
# Local helper: unlink one path, temporarily clearing BSD file flags
# on the file and its parent directory when bsd_chflags is available.
1534 def unlink(file_name, lstatobj):
1536 if lstatobj.st_flags != 0:
1537 bsd_chflags.lchflags(file_name, 0)
1538 parent_name = os.path.dirname(file_name)
1539 # Use normal stat/chflags for the parent since we want to
1540 # follow any symlinks to the real parent directory.
1541 pflags = os.stat(parent_name).st_flags
1543 bsd_chflags.chflags(parent_name, 0)
1545 if not stat.S_ISLNK(lstatobj.st_mode):
1546 # Remove permissions to ensure that any hardlinks to
1547 # suid/sgid files are rendered harmless.
1548 os.chmod(file_name, 0)
1549 os.unlink(file_name)
1551 if bsd_chflags and pflags != 0:
1552 # Restore the parent flags we saved before unlinking
1553 bsd_chflags.chflags(parent_name, pflags)
# Local helper: one formatted progress line per contents entry.
1555 def show_unmerge(zing, desc, file_type, file_name):
1556 showMessage("%s %s %s %s\n" % \
1557 (zing, desc.ljust(8), file_type, file_name))
1558 for i, objkey in enumerate(mykeys):
# Yield periodically to the scheduler so the UI stays responsive.
1560 if scheduler is not None and \
1561 0 == i % self._file_merge_yield_interval:
1562 scheduler.scheduleYield()
1564 obj = normalize_path(objkey)
1565 file_data = pkgfiles[objkey]
1566 file_type = file_data[0]
# stat follows symlinks, lstat does not; both are consulted so
# symlinks-to-directories can be treated specially.
1569 statobj = os.stat(obj)
1574 lstatobj = os.lstat(obj)
1575 except (OSError, AttributeError):
1577 islink = lstatobj is not None and stat.S_ISLNK(lstatobj.st_mode)
1578 if lstatobj is None:
1579 show_unmerge("---", "!found", file_type, obj)
1581 if obj.startswith(dest_root):
1582 relative_path = obj[dest_root_len:]
1584 for dblnk in others_in_slot:
1585 if dblnk.isowner(relative_path, dest_root):
1589 # A new instance of this package claims the file, so
1591 show_unmerge("---", "replaced", file_type, obj)
1593 elif relative_path in cfgfiledict:
1594 stale_confmem.append(relative_path)
1595 # next line includes a tweak to protect modules from being unmerged,
1596 # but we don't protect modules from being overwritten if they are
1597 # upgraded. We effectively only want one half of the config protection
1598 # functionality for /lib/modules. For portage-ng both capabilities
1599 # should be able to be independently specified.
1600 if obj.startswith(modprotect):
1601 show_unmerge("---", "cfgpro", file_type, obj)
1604 # Don't unlink symlinks to directories here since that can
1605 # remove /lib and /usr/lib symlinks.
# FEATURES=unmerge-orphans: remove non-directory, unprotected
# entries regardless of the mtime/md5 checks further below.
1606 if unmerge_orphans and \
1607 lstatobj and not stat.S_ISDIR(lstatobj.st_mode) and \
1608 not (islink and statobj and stat.S_ISDIR(statobj.st_mode)) and \
1609 not self.isprotected(obj):
1611 unlink(obj, lstatobj)
1612 except EnvironmentError, e:
1613 if e.errno not in ignored_unlink_errnos:
1616 show_unmerge("<<<", "", file_type, obj)
# mtime check: skip entries modified since merge (except dir/
# fif/dev types, which carry no meaningful mtime here).
1619 lmtime = str(lstatobj[stat.ST_MTIME])
1620 if (pkgfiles[objkey][0] not in ("dir", "fif", "dev")) and (lmtime != pkgfiles[objkey][1]):
1621 show_unmerge("---", "!mtime", file_type, obj)
# Per-type removal: dir entries are handled later (directories
# are removed last), sym/obj/fif/dev are handled here.
1624 if pkgfiles[objkey][0] == "dir":
1625 if statobj is None or not stat.S_ISDIR(statobj.st_mode):
1626 show_unmerge("---", "!dir", file_type, obj)
1629 elif pkgfiles[objkey][0] == "sym":
1631 show_unmerge("---", "!sym", file_type, obj)
1633 # Go ahead and unlink symlinks to directories here when
1634 # they're actually recorded as symlinks in the contents.
1635 # Normally, symlinks such as /lib -> lib64 are not recorded
1636 # as symlinks in the contents of a package. If a package
1637 # installs something into ${D}/lib/, it is recorded in the
1638 # contents as a directory even if it happens to correspond
1639 # to a symlink when it's merged to the live filesystem.
1641 unlink(obj, lstatobj)
1642 show_unmerge("<<<", "", file_type, obj)
1643 except (OSError, IOError),e:
1644 if e.errno not in ignored_unlink_errnos:
1647 show_unmerge("!!!", "", file_type, obj)
1648 elif pkgfiles[objkey][0] == "obj":
1649 if statobj is None or not stat.S_ISREG(statobj.st_mode):
1650 show_unmerge("---", "!obj", file_type, obj)
# md5 check: only remove regular files whose current checksum
# matches the recorded one.
1654 mymd5 = perform_md5(obj, calc_prelink=1)
1655 except FileNotFound, e:
1656 # the file has disappeared between now and our stat call
1657 show_unmerge("---", "!obj", file_type, obj)
1660 # string.lower is needed because db entries used to be in upper-case. The
1661 # string.lower allows for backwards compatibility.
1662 if mymd5 != pkgfiles[objkey][2].lower():
1663 show_unmerge("---", "!md5", file_type, obj)
1666 unlink(obj, lstatobj)
1667 except (OSError, IOError), e:
1668 if e.errno not in ignored_unlink_errnos:
1671 show_unmerge("<<<", "", file_type, obj)
1672 elif pkgfiles[objkey][0] == "fif":
1673 if not stat.S_ISFIFO(lstatobj[stat.ST_MODE]):
1674 show_unmerge("---", "!fif", file_type, obj)
1676 show_unmerge("---", "", file_type, obj)
# Device nodes are never removed, only reported.
1677 elif pkgfiles[objkey][0] == "dev":
1678 show_unmerge("---", "", file_type, obj)
# Directory removal pass (rmdir of now-empty dirs), again with BSD
# file-flag handling around the operation.
1686 lstatobj = os.lstat(obj)
1687 if lstatobj.st_flags != 0:
1688 bsd_chflags.lchflags(obj, 0)
1689 parent_name = os.path.dirname(obj)
1690 # Use normal stat/chflags for the parent since we want to
1691 # follow any symlinks to the real parent directory.
1692 pflags = os.stat(parent_name).st_flags
1694 bsd_chflags.chflags(parent_name, 0)
1698 if bsd_chflags and pflags != 0:
1699 # Restore the parent flags we saved before unlinking
1700 bsd_chflags.chflags(parent_name, pflags)
1701 show_unmerge("<<<", "", "dir", obj)
1702 except EnvironmentError, e:
1703 if e.errno not in ignored_rmdir_errnos:
1705 if e.errno != errno.ENOENT:
1706 show_unmerge("---", "!empty", "dir", obj)
1709 # Remove stale entries from config memory.
1711 for filename in stale_confmem:
1712 del cfgfiledict[filename]
1713 writedict(cfgfiledict, conf_mem_file)
1715 #remove self from vartree database so that our own virtual gets zapped if we're the last node
1716 self.vartree.zap(self.mycpv)
# Boolean wrapper around _match_contents(): True iff this package's
# CONTENTS claims the given file under destroot.
# NOTE(review): several docstring lines (original 1719, 1726-1733) are
# elided in this dump.
1718 def isowner(self, filename, destroot):
1720 Check if a file belongs to this package. This may
1721 result in a stat call for the parent directory of
1722 every installed file, since the inode numbers are
1723 used to work around the problem of ambiguous paths
1724 caused by symlinked directories. The results of
1725 stat calls are cached to optimize multiple calls
1734 1. True if this package owns the file.
1735 2. False if this package does not own the file.
1737 return bool(self._match_contents(filename, destroot))
1739 def _match_contents(self, filename, destroot):
1741 The matching contents entry is returned, which is useful
1742 since the path may differ from the one given by the caller,
1746 @return: the contents entry corresponding to the given path, or False
1747 if the file is not owned by this package.
1750 destfile = normalize_path(
1751 os.path.join(destroot, filename.lstrip(os.path.sep)))
1753 pkgfiles = self.getcontents()
1754 if pkgfiles and destfile in pkgfiles:
1757 basename = os.path.basename(destfile)
1758 if self._contents_basenames is None:
1759 self._contents_basenames = set(
1760 os.path.basename(x) for x in pkgfiles)
1761 if basename not in self._contents_basenames:
1762 # This is a shortcut that, in most cases, allows us to
1763 # eliminate this package as an owner without the need
1764 # to examine inode numbers of parent directories.
1767 # Use stat rather than lstat since we want to follow
1768 # any symlinks to the real parent directory.
1769 parent_path = os.path.dirname(destfile)
1771 parent_stat = os.stat(parent_path)
1772 except EnvironmentError, e:
1773 if e.errno != errno.ENOENT:
1777 if self._contents_inodes is None:
1778 self._contents_inodes = {}
1779 parent_paths = set()
1781 p_path = os.path.dirname(x)
1782 if p_path in parent_paths:
1784 parent_paths.add(p_path)
1790 inode_key = (s.st_dev, s.st_ino)
1791 # Use lists of paths in case multiple
1792 # paths reference the same inode.
1793 p_path_list = self._contents_inodes.get(inode_key)
1794 if p_path_list is None:
1796 self._contents_inodes[inode_key] = p_path_list
1797 if p_path not in p_path_list:
1798 p_path_list.append(p_path)
1799 p_path_list = self._contents_inodes.get(
1800 (parent_stat.st_dev, parent_stat.st_ino))
1802 for p_path in p_path_list:
1803 x = os.path.join(p_path, basename)
# NOTE(review): several physical lines are elided in this dump
# (embedded original numbering is non-contiguous) — confirm against the
# full file before editing.
#
# Scan this package's to-be-merged contents for files that already
# exist on the live filesystem but are not owned by any package in
# mypkglist.  Paths matching COLLISION_IGNORE (exact path or prefix)
# are exempt.  Collisions are accumulated in `collisions` (its init
# line is elided here).
1809 def _collision_protect(self, srcroot, destroot, mypkglist, mycontents):
# COLLISION_IGNORE is a whitespace/quote-delimited list of paths.
1810 collision_ignore = set([normalize_path(myignore) for myignore in \
1811 shlex.split(self.settings.get("COLLISION_IGNORE", ""))])
1813 showMessage = self._display_merge
1814 scheduler = self._scheduler
1817 destroot = normalize_path(destroot).rstrip(os.path.sep) + \
1819 showMessage("%s checking %d files for package collisions\n" % \
1820 (green("*"), len(mycontents)))
1821 for i, f in enumerate(mycontents):
# Progress output every 1000 files.
1822 if i % 1000 == 0 and i != 0:
1823 showMessage("%d files checked ...\n" % i)
1825 if scheduler is not None and \
1826 0 == i % self._file_merge_yield_interval:
1827 scheduler.scheduleYield()
1829 dest_path = normalize_path(
1830 os.path.join(destroot, f.lstrip(os.path.sep)))
1832 dest_lstat = os.lstat(dest_path)
1833 except EnvironmentError, e:
# ENOENT: nothing at the destination, so no collision.
1834 if e.errno == errno.ENOENT:
1837 elif e.errno == errno.ENOTDIR:
1839 # A non-directory is in a location where this package
1840 # expects to have a directory.
# Walk up until the existing non-directory ancestor is found;
# that ancestor is treated as the colliding path.
1842 parent_path = dest_path
1843 while len(parent_path) > len(destroot):
1844 parent_path = os.path.dirname(parent_path)
1846 dest_lstat = os.lstat(parent_path)
1848 except EnvironmentError, e:
1849 if e.errno != errno.ENOTDIR:
1853 raise AssertionError(
1854 "unable to find non-directory " + \
1855 "parent for '%s'" % dest_path)
1856 dest_path = parent_path
1857 f = os.path.sep + dest_path[len(destroot):]
1866 full_path = os.path.join(destroot, f.lstrip(os.path.sep))
# A file owned by any package in mypkglist (this package's other
# instances, blockers being replaced, ...) is not a collision.
1867 for ver in mypkglist:
1868 if ver.isowner(f, destroot):
1871 if not isowned and self.isprotected(full_path):
# COLLISION_IGNORE: exact match or directory-prefix match
# exempts the path.
1875 if collision_ignore:
1876 if f in collision_ignore:
1879 for myignore in collision_ignore:
1880 if f.startswith(myignore + os.path.sep):
1884 collisions.append(f)
# NOTE(review): the loop header, root setup, try/except lines and the
# return statement are elided in this dump (embedded numbering is
# non-contiguous) — confirm against the full file.
#
# Build {(st_dev, st_ino): set of paths} for the given relative paths,
# lstat'ed under self.myroot; ENOENT/ENOTDIR are tolerated (path simply
# not mapped), other errors propagate.
1887 def _lstat_inode_map(self, path_iter):
1889 Use lstat to create a map of the form:
1890 {(st_dev, st_ino) : set([path1, path2, ...])}
1891 Multiple paths may reference the same inode due to hardlinks.
1892 All lstat() calls are relative to self.myroot.
1897 path = os.path.join(root, f.lstrip(os.sep))
1901 if e.errno not in (errno.ENOENT, errno.ENOTDIR):
1905 key = (st.st_dev, st.st_ino)
1906 paths = inode_map.get(key)
1909 inode_map[key] = paths
# NOTE(review): several lines are elided in this dump (the stat call,
# loop variable init, `continue` lines and return statements) — confirm
# against the full file before editing.
#
# Scan all files recorded by the given installed instances for
# suid/sgid regular files with more hardlinks than the package
# accounts for, and report them via eerror in the "preinst" phase.
# Symlink-duplicated paths are deduplicated through os.path.realpath.
1913 def _security_check(self, installed_instances):
1914 if not installed_instances:
1917 showMessage = self._display_merge
1918 scheduler = self._scheduler
# Union of CONTENTS paths across all instances in this slot.
1921 for dblnk in installed_instances:
1922 file_paths.update(dblnk.getcontents())
1925 for i, path in enumerate(file_paths):
1927 if scheduler is not None and \
1928 0 == i % self._file_merge_yield_interval:
1929 scheduler.scheduleYield()
# Missing paths (ENOENT/ENOTDIR) are skipped silently.
1934 if e.errno not in (errno.ENOENT, errno.ENOTDIR):
1938 if not stat.S_ISREG(s.st_mode):
# Deduplicate paths that resolve to the same real file.
1940 path = os.path.realpath(path)
1941 if path in real_paths:
1943 real_paths.add(path)
# Only multi-link suid/sgid files are of interest.
1944 if s.st_nlink > 1 and \
1945 s.st_mode & (stat.S_ISUID | stat.S_ISGID):
1946 k = (s.st_dev, s.st_ino)
1947 inode_map.setdefault(k, []).append((path, s))
1948 suspicious_hardlinks = []
1949 for path_list in inode_map.itervalues():
1950 path, s = path_list[0]
1951 if len(path_list) == s.st_nlink:
1952 # All hardlinks seem to be owned by this package.
1954 suspicious_hardlinks.append(path_list)
1955 if not suspicious_hardlinks:
1959 msg.append("suid/sgid file(s) " + \
1960 "with suspicious hardlink(s):")
1962 for path_list in suspicious_hardlinks:
1963 for path, s in path_list:
1964 msg.append("\t%s" % path)
1966 msg.append("See the Gentoo Security Handbook " + \
1967 "guide for advice on how to proceed.")
1969 self._eerror("preinst", msg)
# Emit a list of error lines through the elog eerror channel for the
# given phase: line-by-line directly when no scheduler is attached,
# otherwise batched through the scheduler's dblinkElog() hook.
# NOTE(review): original lines 1976 (loop header over `lines`) and 1978
# (`else:`) are elided in this dump — confirm against the full file.
1973 def _eerror(self, phase, lines):
1974 from portage.elog.messages import eerror as _eerror
1975 if self._scheduler is None:
1977 _eerror(l, phase=phase, key=self.settings.mycpv)
1979 self._scheduler.dblinkElog(self,
1980 phase, _eerror, lines)
1982 def treewalk(self, srcroot, destroot, inforoot, myebuild, cleanup=0,
1983 mydbapi=None, prev_mtimes=None):
1986 This function does the following:
1988 calls self._preserve_libs if FEATURES=preserve-libs
1989 calls self._collision_protect if FEATURES=collision-protect
1990 calls doebuild(mydo=pkg_preinst)
1991 Merges the package to the livefs
1992 unmerges old version (if required)
1993 calls doebuild(mydo=pkg_postinst)
1997 @param srcroot: Typically this is ${D}
1998 @type srcroot: String (Path)
1999 @param destroot: Path to merge to (usually ${ROOT})
2000 @type destroot: String (Path)
2001 @param inforoot: root of the vardb entry ?
2002 @type inforoot: String (Path)
2003 @param myebuild: path to the ebuild that we are processing
2004 @type myebuild: String (Path)
2005 @param mydbapi: dbapi which is handed to doebuild.
2006 @type mydbapi: portdbapi instance
2007 @param prev_mtimes: { Filename:mtime } mapping for env_update
2008 @type prev_mtimes: Dictionary
2014 secondhand is a list of symlinks that have been skipped due to their target
2015 not existing; we will merge these symlinks at a later time.
2018 showMessage = self._display_merge
2019 scheduler = self._scheduler
2021 srcroot = normalize_path(srcroot).rstrip(os.path.sep) + os.path.sep
2022 destroot = normalize_path(destroot).rstrip(os.path.sep) + os.path.sep
2024 if not os.path.isdir(srcroot):
2025 showMessage("!!! Directory Not Found: D='%s'\n" % srcroot,
2026 level=logging.ERROR, noiselevel=-1)
2029 inforoot_slot_file = os.path.join(inforoot, "SLOT")
2032 f = open(inforoot_slot_file)
2034 slot = f.read().strip()
2037 except EnvironmentError, e:
2038 if e.errno != errno.ENOENT:
2046 self._eerror("preinst", lines)
2048 if slot != self.settings["SLOT"]:
2049 showMessage("!!! WARNING: Expected SLOT='%s', got '%s'\n" % \
2050 (self.settings["SLOT"], slot), level=logging.WARN)
2052 if not os.path.exists(self.dbcatdir):
2053 os.makedirs(self.dbcatdir)
2056 for v in self.vartree.dbapi.cp_list(self.mysplit[0]):
2057 otherversions.append(v.split("/")[1])
2059 # filter any old-style virtual matches
2060 slot_matches = [cpv for cpv in self.vartree.dbapi.match(
2061 "%s:%s" % (cpv_getkey(self.mycpv), slot)) \
2062 if cpv_getkey(cpv) == cpv_getkey(self.mycpv)]
2064 if self.mycpv not in slot_matches and \
2065 self.vartree.dbapi.cpv_exists(self.mycpv):
2066 # handle multislot or unapplied slotmove
2067 slot_matches.append(self.mycpv)
2070 from portage import config
2071 for cur_cpv in slot_matches:
2072 # Clone the config in case one of these has to be unmerged since
2073 # we need it to have private ${T} etc... for things like elog.
2074 others_in_slot.append(dblink(self.cat, catsplit(cur_cpv)[1],
2075 self.vartree.root, config(clone=self.settings),
2076 vartree=self.vartree, treetype="vartree",
2077 scheduler=self._scheduler))
2079 retval = self._security_check(others_in_slot)
2084 # Used by self.isprotected().
2087 for dblnk in others_in_slot:
2088 cur_counter = self.vartree.dbapi.cpv_counter(dblnk.mycpv)
2089 if cur_counter > max_counter:
2090 max_counter = cur_counter
2092 self._installed_instance = max_dblnk
2096 paths_with_newlines = []
2097 srcroot_len = len(srcroot)
2100 for parent, dirs, files in os.walk(srcroot, onerror=onerror):
2102 file_path = os.path.join(parent, f)
2103 relative_path = file_path[srcroot_len:]
2105 if "\n" in relative_path:
2106 paths_with_newlines.append(relative_path)
2108 file_mode = os.lstat(file_path).st_mode
2109 if stat.S_ISREG(file_mode):
2110 myfilelist.append(relative_path)
2111 elif stat.S_ISLNK(file_mode):
2112 # Note: os.walk puts symlinks to directories in the "dirs"
2113 # list and it does not traverse them since that could lead
2114 # to an infinite recursion loop.
2115 mylinklist.append(relative_path)
2117 if paths_with_newlines:
2119 msg.append("This package installs one or more files containing")
2120 msg.append("a newline (\\n) character:")
2122 paths_with_newlines.sort()
2123 for f in paths_with_newlines:
2124 msg.append("\t/%s" % (f.replace("\n", "\\n")))
2126 msg.append("package %s NOT merged" % self.mycpv)
2131 # If there are no files to merge, and an installed package in the same
2132 # slot has files, it probably means that something went wrong.
2133 if self.settings.get("PORTAGE_PACKAGE_EMPTY_ABORT") == "1" and \
2134 not myfilelist and not mylinklist and others_in_slot:
2135 installed_files = None
2136 for other_dblink in others_in_slot:
2137 installed_files = other_dblink.getcontents()
2138 if not installed_files:
2140 from textwrap import wrap
2147 msg.extend(wrap(("The '%s' package will not install " + \
2148 "any files, but the currently installed '%s'" + \
2149 " package has the following files: ") % d, wrap_width))
2151 msg.extend(sorted(installed_files))
2153 msg.append("package %s NOT merged" % self.mycpv)
2156 ("Manually run `emerge --unmerge =%s` " % \
2157 other_dblink.mycpv) + "if you really want to " + \
2158 "remove the above files. Set " + \
2159 "PORTAGE_PACKAGE_EMPTY_ABORT=\"0\" in " + \
2160 "/etc/make.conf if you do not want to " + \
2161 "abort in cases like this.",
2167 # check for package collisions
2169 if self._blockers is not None:
2170 # This is only supposed to be called when
2171 # the vdb is locked, like it is here.
2172 blockers = self._blockers()
2173 if blockers is None:
2176 self._collision_protect(srcroot, destroot,
2177 others_in_slot + blockers, myfilelist + mylinklist)
2179 # Make sure the ebuild environment is initialized and that ${T}/elog
2180 # exists for logging of collision-protect eerror messages.
2181 if myebuild is None:
2182 myebuild = os.path.join(inforoot, self.pkg + ".ebuild")
2183 doebuild_environment(myebuild, "preinst", destroot,
2184 self.settings, 0, 0, mydbapi)
2185 prepare_build_dirs(destroot, self.settings, cleanup)
2188 collision_protect = "collision-protect" in self.settings.features
2189 protect_owned = "protect-owned" in self.settings.features
2190 msg = "This package will overwrite one or more files that" + \
2191 " may belong to other packages (see list below)."
2192 if not (collision_protect or protect_owned):
2193 msg += " Add either \"collision-protect\" or" + \
2194 " \"protect-owned\" to FEATURES in" + \
2195 " make.conf if you would like the merge to abort" + \
2196 " in cases like this. See the make.conf man page for" + \
2197 " more information about these features."
2198 if self.settings.get("PORTAGE_QUIET") != "1":
2199 msg += " You can use a command such as" + \
2200 " `portageq owners / <filename>` to identify the" + \
2201 " installed package that owns a file. If portageq" + \
2202 " reports that only one package owns a file then do NOT" + \
2203 " file a bug report. A bug report is only useful if it" + \
2204 " identifies at least two or more packages that are known" + \
2205 " to install the same file(s)." + \
2206 " If a collision occurs and you" + \
2207 " can not explain where the file came from then you" + \
2208 " should simply ignore the collision since there is not" + \
2209 " enough information to determine if a real problem" + \
2210 " exists. Please do NOT file a bug report at" + \
2211 " http://bugs.gentoo.org unless you report exactly which" + \
2212 " two packages install the same file(s). Once again," + \
2213 " please do NOT file a bug report unless you have" + \
2214 " completely understood the above message."
2216 self.settings["EBUILD_PHASE"] = "preinst"
2217 from textwrap import wrap
2219 if collision_protect:
2221 msg.append("package %s NOT merged" % self.settings.mycpv)
2223 msg.append("Detected file collision(s):")
2226 for f in collisions:
2227 msg.append("\t%s" % \
2228 os.path.join(destroot, f.lstrip(os.path.sep)))
2234 msg.append("Searching all installed" + \
2235 " packages for file collisions...")
2237 msg.append("Press Ctrl-C to Stop")
2241 owners = self.vartree.dbapi._owners.get_owners(collisions)
2242 self.vartree.dbapi.flush_cache()
2244 for pkg, owned_files in owners.iteritems():
2247 msg.append("%s" % cpv)
2248 for f in sorted(owned_files):
2249 msg.append("\t%s" % os.path.join(destroot,
2250 f.lstrip(os.path.sep)))
2255 eerror(["None of the installed" + \
2256 " packages claim the file(s).", ""])
2258 # The explanation about the collision and how to solve
2259 # it may not be visible via a scrollback buffer, especially
2260 # if the number of file collisions is large. Therefore,
2261 # show a summary at the end.
2262 if collision_protect:
2263 msg = "Package '%s' NOT merged due to file collisions." % \
2265 elif protect_owned and owners:
2266 msg = "Package '%s' NOT merged due to file collisions." % \
2269 msg = "Package '%s' merged despite file collisions." % \
2271 msg += " If necessary, refer to your elog " + \
2272 "messages for the whole content of the above message."
2273 eerror(wrap(msg, 70))
2275 if collision_protect or (protect_owned and owners):
2278 # The merge process may move files out of the image directory,
2279 # which causes invalidation of the .installed flag.
2281 os.unlink(os.path.join(
2282 os.path.dirname(normalize_path(srcroot)), ".installed"))
2284 if e.errno != errno.ENOENT:
2288 self.dbdir = self.dbtmpdir
2290 ensure_dirs(self.dbtmpdir)
2292 # run preinst script
2293 if scheduler is None:
2294 showMessage(">>> Merging %s to %s\n" % (self.mycpv, destroot))
2295 a = doebuild(myebuild, "preinst", destroot, self.settings,
2296 use_cache=0, tree=self.treetype, mydbapi=mydbapi,
2297 vartree=self.vartree)
2299 a = scheduler.dblinkEbuildPhase(
2300 self, mydbapi, myebuild, "preinst")
2302 # XXX: Decide how to handle failures here.
2304 showMessage("!!! FAILED preinst: "+str(a)+"\n",
2305 level=logging.ERROR, noiselevel=-1)
2308 # copy "info" files (like SLOT, CFLAGS, etc.) into the database
2309 for x in os.listdir(inforoot):
2310 self.copyfile(inforoot+"/"+x)
2312 # write local package counter for recording
2313 counter = self.vartree.dbapi.counter_tick(self.myroot, mycpv=self.mycpv)
2314 lcfile = open(os.path.join(self.dbtmpdir, "COUNTER"),"w")
2315 lcfile.write(str(counter))
2318 # open CONTENTS file (possibly overwriting old one) for recording
2319 outfile = open(os.path.join(self.dbtmpdir, "CONTENTS"),"w")
2321 self.updateprotect()
2323 #if we have a file containing previously-merged config file md5sums, grab it.
2324 conf_mem_file = os.path.join(destroot, CONFIG_MEMORY_FILE)
2325 cfgfiledict = grabdict(conf_mem_file)
2326 cfgfiledict_orig = cfgfiledict.copy()
2327 if "NOCONFMEM" in self.settings:
2328 cfgfiledict["IGNORE"]=1
2330 cfgfiledict["IGNORE"]=0
2332 # Always behave like --noconfmem is enabled for downgrades
2333 # so that people who don't know about this option are less
2334 # likely to get confused when doing upgrade/downgrade cycles.
2335 pv_split = catpkgsplit(self.mycpv)[1:]
2336 for other in others_in_slot:
2337 if pkgcmp(pv_split, catpkgsplit(other.mycpv)[1:]) < 0:
2338 cfgfiledict["IGNORE"] = 1
2341 # Don't bump mtimes on merge since some application require
2342 # preservation of timestamps. This means that the unmerge phase must
2343 # check to see if file belongs to an installed instance in the same
2347 # set umask to 0 for merging; back up umask, save old one in prevmask (since this is a global change)
2348 prevmask = os.umask(0)
2351 # we do a first merge; this will recurse through all files in our srcroot but also build up a
2352 # "second hand" of symlinks to merge later
2353 if self.mergeme(srcroot, destroot, outfile, secondhand, "", cfgfiledict, mymtime):
2356 # now, it's time for dealing our second hand; we'll loop until we can't merge anymore. The rest are
2357 # broken symlinks. We'll merge them too.
2359 while len(secondhand) and len(secondhand)!=lastlen:
2360 # clear the thirdhand. Anything from our second hand that
2361 # couldn't get merged will be added to thirdhand.
2364 if self.mergeme(srcroot, destroot, outfile, thirdhand,
2365 secondhand, cfgfiledict, mymtime):
2369 lastlen = len(secondhand)
2371 # our thirdhand now becomes our secondhand. It's ok to throw
2372 # away secondhand since thirdhand contains all the stuff that
2373 # couldn't be merged.
2374 secondhand = thirdhand
2377 # force merge of remaining symlinks (broken or circular; oh well)
2378 if self.mergeme(srcroot, destroot, outfile, None,
2379 secondhand, cfgfiledict, mymtime):
2381 self._md5_merge_map.clear()
2386 #if we opened it, close it
2390 # write out our collection of md5sums
2391 cfgfiledict.pop("IGNORE", None)
2392 if cfgfiledict != cfgfiledict_orig:
2393 ensure_dirs(os.path.dirname(conf_mem_file),
2394 gid=portage_gid, mode=02750, mask=02)
2395 writedict(cfgfiledict, conf_mem_file)
2397 # These caches are populated during collision-protect and the data
2398 # they contain is now invalid. It's very important to invalidate
2399 # the contents_inodes cache so that FEATURES=unmerge-orphans
2400 # doesn't unmerge anything that belongs to this package that has
2402 for dblnk in others_in_slot:
2403 dblnk._clear_contents_cache()
2404 self._clear_contents_cache()
2406 # If portage is reinstalling itself, remove the old
2407 # version now since we want to use the temporary
2408 # PORTAGE_BIN_PATH that will be removed when we return.
2409 reinstall_self = False
2410 if self.myroot == "/" and \
2411 match_from_list(PORTAGE_PACKAGE_ATOM, [self.mycpv]):
2412 reinstall_self = True
2414 autoclean = self.settings.get("AUTOCLEAN", "yes") == "yes"
2415 others_in_slot.append(self) # self has just been merged
2416 for dblnk in list(others_in_slot):
2419 if not (autoclean or dblnk.mycpv == self.mycpv or reinstall_self):
2421 showMessage(">>> Safely unmerging already-installed instance...\n")
2422 others_in_slot.remove(dblnk) # dblnk will unmerge itself now
2423 dblnk.unmerge(trimworld=0, ldpath_mtimes=prev_mtimes,
2424 others_in_slot=others_in_slot)
2425 # TODO: Check status and abort if necessary.
2427 showMessage(">>> Original instance of package unmerged safely.\n")
2429 if len(others_in_slot) > 1:
2430 from portage.output import colorize
2431 showMessage(colorize("WARN", "WARNING:")
2432 + " AUTOCLEAN is disabled. This can cause serious"
2433 + " problems due to overlapping packages.\n",
2434 level=logging.WARN, noiselevel=-1)
2436 # We hold both directory locks.
2437 self.dbdir = self.dbpkgdir
2439 _movefile(self.dbtmpdir, self.dbpkgdir, mysettings=self.settings)
2441 # Check for file collisions with blocking packages
2442 # and remove any colliding files from their CONTENTS
2443 # since they now belong to this package.
2444 self._clear_contents_cache()
2445 contents = self.getcontents()
2446 destroot_len = len(destroot) - 1
2447 for blocker in blockers:
2448 self.vartree.dbapi.removeFromContents(blocker, iter(contents),
2449 relative_paths=False)
2451 self.vartree.dbapi._add(self)
2452 contents = self.getcontents()
2455 self.settings["PORTAGE_UPDATE_ENV"] = \
2456 os.path.join(self.dbpkgdir, "environment.bz2")
2457 self.settings.backup_changes("PORTAGE_UPDATE_ENV")
2459 if scheduler is None:
2460 a = doebuild(myebuild, "postinst", destroot, self.settings,
2461 use_cache=0, tree=self.treetype, mydbapi=mydbapi,
2462 vartree=self.vartree)
2464 showMessage(">>> %s %s\n" % (self.mycpv, "merged."))
2466 a = scheduler.dblinkEbuildPhase(
2467 self, mydbapi, myebuild, "postinst")
2469 self.settings.pop("PORTAGE_UPDATE_ENV", None)
2471 # XXX: Decide how to handle failures here.
2473 showMessage("!!! FAILED postinst: "+str(a)+"\n",
2474 level=logging.ERROR, noiselevel=-1)
2478 for v in otherversions:
2479 if pkgcmp(catpkgsplit(self.pkg)[1:], catpkgsplit(v)[1:]) < 0:
2482 #update environment settings, library paths. DO NOT change symlinks.
2483 env_update(makelinks=(not downgrade),
2484 target_root=self.settings["ROOT"], prev_mtimes=prev_mtimes,
2485 contents=contents, env=self.settings.environ(),
2486 writemsg_level=self._display_merge)
# NOTE(review): the embedded source line numbers in this chunk are not
# contiguous, so several control-flow lines (try/except/else/return/
# continue) are elided from view.  Comments below describe only what the
# visible statements demonstrate.
2490 def mergeme(self, srcroot, destroot, outfile, secondhand, stufftomerge, cfgfiledict, thismtime):
2493 This function handles actual merging of the package contents to the livefs.
2494 It also handles config protection.
2496 @param srcroot: Where are we copying files from (usually ${D})
2497 @type srcroot: String (Path)
2498 @param destroot: Typically ${ROOT}
2499 @type destroot: String (Path)
2500 @param outfile: File to log operations to
2501 @type outfile: File Object
2502 @param secondhand: A set of items to merge in pass two (usually
2503 or symlinks that point to non-existing files that may get merged later)
2504 @type secondhand: List
2505 @param stufftomerge: Either a diretory to merge, or a list of items.
2506 @type stufftomerge: String or List
2507 @param cfgfiledict: { File:mtime } mapping for config_protected files
2508 @type cfgfiledict: Dictionary
2509 @param thismtime: The current time (typically long(time.time())
2510 @type thismtime: Long
2511 @rtype: None or Boolean
# Both message helpers are bound to the same _display_merge method so all
# output is routed through the merge display machinery.
2518 showMessage = self._display_merge
2519 writemsg = self._display_merge
2520 scheduler = self._scheduler
# Normalize both roots to exactly one trailing separator so the prefix
# arithmetic below (e.g. mysrc[len(srcroot):]) is consistent.
2522 from os.path import sep, join
2523 srcroot = normalize_path(srcroot).rstrip(sep) + sep
2524 destroot = normalize_path(destroot).rstrip(sep) + sep
2526 # this is supposed to merge a list of files. There will be 2 forms of argument passing.
2527 if isinstance(stufftomerge, basestring):
2528 #A directory is specified. Figure out protection paths, listdir() it and process it.
2529 mergelist = os.listdir(join(srcroot, stufftomerge))
2530 offset = stufftomerge
# NOTE(review): the else branch header for the list form is elided here;
# in the list form stufftomerge is used as the mergelist directly.
2532 mergelist = stufftomerge
# Main per-entry loop over everything found in this directory level.
2535 for i, x in enumerate(mergelist):
# Periodically yield to the scheduler so other tasks can run during
# large merges.
2537 if scheduler is not None and \
2538 0 == i % self._file_merge_yield_interval:
2539 scheduler.scheduleYield()
2541 mysrc = join(srcroot, offset, x)
2542 mydest = join(destroot, offset, x)
2543 # myrealdest is mydest without the $ROOT prefix (makes a difference if ROOT!="/")
2544 myrealdest = join(sep, offset, x)
2545 # stat file once, test using S_* macros many times (faster that way)
# NOTE(review): the try/except wrapper around this lstat is elided; the
# corruption messages below presumably belong to its except clause.
2547 mystat = os.lstat(mysrc)
2550 writemsg(red("!!! ERROR: There appears to be ")+bold("FILE SYSTEM CORRUPTION.")+red(" A file that is listed\n"))
2551 writemsg(red("!!! as existing is not capable of being stat'd. If you are using an\n"))
2552 writemsg(red("!!! experimental kernel, please boot into a stable one, force an fsck,\n"))
2553 writemsg(red("!!! and ensure your filesystem is in a sane state. ")+bold("'shutdown -Fr now'\n"))
2554 writemsg(red("!!! File: ")+str(mysrc)+"\n", noiselevel=-1)
2555 writemsg(red("!!! Error: ")+str(e)+"\n", noiselevel=-1)
2558 mymode = mystat[stat.ST_MODE]
2559 # handy variables; mydest is the target object on the live filesystems;
2560 # mysrc is the source object in the temporary install dir
# NOTE(review): try/except around this lstat of the destination is
# elided; ENOENT (dest missing) is tolerated below, anything else is not.
2562 mydstat = os.lstat(mydest)
2563 mydmode = mydstat.st_mode
2565 if e.errno != errno.ENOENT:
2568 #dest file doesn't exist
# ---- symlink branch ------------------------------------------------
2572 if stat.S_ISLNK(mymode):
2573 # we are merging a symbolic link
2574 myabsto = abssymlink(mysrc)
2575 if myabsto.startswith(srcroot):
2576 myabsto = myabsto[len(srcroot):]
2577 myabsto = myabsto.lstrip(sep)
2578 myto = os.readlink(mysrc)
# Strip the ${D} prefix from the raw link target as well, so links
# recorded in CONTENTS refer to the live filesystem layout.
2579 if self.settings and self.settings["D"]:
2580 if myto.startswith(self.settings["D"]):
2581 myto = myto[len(self.settings["D"]):]
2582 # myrealto contains the path of the real file to which this symlink points.
2583 # we can simply test for existence of this file to see if the target has been merged yet
2584 myrealto = normalize_path(os.path.join(destroot, myabsto))
2587 if not stat.S_ISLNK(mydmode):
2588 if stat.S_ISDIR(mydmode):
2589 # directory in the way: we can't merge a symlink over a directory
2590 # we won't merge this, continue with next file...
2593 if os.path.exists(mysrc) and stat.S_ISDIR(os.stat(mysrc)[stat.ST_MODE]):
2594 # Kill file blocking installation of symlink to dir #71787
2596 elif self.isprotected(mydest):
2597 # Use md5 of the target in ${D} if it exists...
# NOTE(review): try/except FileNotFound wrappers here are elided.
2599 newmd5 = perform_md5(join(srcroot, myabsto))
2600 except FileNotFound:
2601 # Maybe the target is merged already.
2603 newmd5 = perform_md5(myrealto)
2604 except FileNotFound:
2606 mydest = new_protect_filename(mydest, newmd5=newmd5)
2608 # if secondhand is None it means we're operating in "force" mode and should not create a second hand.
2609 if (secondhand != None) and (not os.path.exists(myrealto)):
2610 # either the target directory doesn't exist yet or the target file doesn't exist -- or
2611 # the target is a broken symlink. We will add this file to our "second hand" and merge
2613 secondhand.append(mysrc[len(srcroot):])
2615 # unlinking no longer necessary; "movefile" will overwrite symlinks atomically and correctly
2616 mymtime = movefile(mysrc, mydest, newmtime=thismtime, sstat=mystat, mysettings=self.settings)
# A non-None mymtime means the move succeeded; record the sym entry.
2618 showMessage(">>> %s -> %s\n" % (mydest, myto))
2619 outfile.write("sym "+myrealdest+" -> "+myto+" "+str(mymtime)+"\n")
2621 showMessage("!!! Failed to move file.\n",
2622 level=logging.ERROR, noiselevel=-1)
2623 showMessage("!!! %s -> %s\n" % (mydest, myto),
2624 level=logging.ERROR, noiselevel=-1)
# ---- directory branch ----------------------------------------------
2626 elif stat.S_ISDIR(mymode):
2627 # we are merging a directory
2629 # destination exists
2632 # Save then clear flags on dest.
# BSD chflags support: flags must be cleared before we can modify the
# destination, and are restored afterwards (presumably guarded by a
# bsd_chflags check on an elided line).
2633 dflags = mydstat.st_flags
2635 bsd_chflags.lchflags(mydest, 0)
2637 if not os.access(mydest, os.W_OK):
2638 pkgstuff = pkgsplit(self.pkg)
2639 writemsg("\n!!! Cannot write to '"+mydest+"'.\n", noiselevel=-1)
2640 writemsg("!!! Please check permissions and directories for broken symlinks.\n")
2641 writemsg("!!! You may start the merge process again by using ebuild:\n")
2642 writemsg("!!! ebuild "+self.settings["PORTDIR"]+"/"+self.cat+"/"+pkgstuff[0]+"/"+self.pkg+".ebuild merge\n")
2643 writemsg("!!! And finish by running this: env-update\n\n")
2646 if stat.S_ISDIR(mydmode) or \
2647 (stat.S_ISLNK(mydmode) and os.path.isdir(mydest)):
2648 # a symlink to an existing directory will work for us; keep it:
2649 showMessage("--- %s/\n" % mydest)
2651 bsd_chflags.lchflags(mydest, dflags)
2653 # a non-directory and non-symlink-to-directory. Won't work for us. Move out of the way.
2654 if movefile(mydest, mydest+".backup", mysettings=self.settings) is None:
2656 showMessage("bak %s %s.backup\n" % (mydest, mydest),
2657 level=logging.ERROR, noiselevel=-1)
2658 #now create our directory
2659 if self.settings.selinux_enabled():
2661 sid = selinux.get_sid(mysrc)
2662 selinux.secure_mkdir(mydest,sid)
# Mirror mode/owner/group of the source dir onto the new destination.
2666 bsd_chflags.lchflags(mydest, dflags)
2667 os.chmod(mydest, mystat[0])
2668 os.chown(mydest, mystat[4], mystat[5])
2669 showMessage(">>> %s/\n" % mydest)
2671 #destination doesn't exist
2672 if self.settings.selinux_enabled():
2674 sid = selinux.get_sid(mysrc)
2675 selinux.secure_mkdir(mydest, sid)
2678 os.chmod(mydest, mystat[0])
2679 os.chown(mydest, mystat[4], mystat[5])
2680 showMessage(">>> %s/\n" % mydest)
2681 outfile.write("dir "+myrealdest+"\n")
2682 # recurse and merge this directory
# A truthy return from the recursive call propagates failure upward
# (the return statement itself is elided from this view).
2683 if self.mergeme(srcroot, destroot, outfile, secondhand,
2684 join(offset, x), cfgfiledict, thismtime):
# ---- regular file branch -------------------------------------------
2686 elif stat.S_ISREG(mymode):
2687 # we are merging a regular file
2688 mymd5 = perform_md5(mysrc, calc_prelink=1)
2689 # calculate config file protection stuff
2690 mydestdir = os.path.dirname(mydest)
2695 # destination file exists
2696 if stat.S_ISDIR(mydmode):
2697 # install of destination is blocked by an existing directory with the same name
2699 showMessage("!!! %s\n" % mydest,
2700 level=logging.ERROR, noiselevel=-1)
2701 elif stat.S_ISREG(mydmode) or (stat.S_ISLNK(mydmode) and os.path.exists(mydest) and stat.S_ISREG(os.stat(mydest)[stat.ST_MODE])):
2703 # install of destination is blocked by an existing regular file,
2704 # or by a symlink to an existing regular file;
2705 # now, config file management may come into play.
2706 # we only need to tweak mydest if cfg file management is in play.
2707 if self.isprotected(mydest):
2708 # we have a protection path; enable config file management.
2709 destmd5 = perform_md5(mydest, calc_prelink=1)
2710 if mymd5 == destmd5:
2711 #file already in place; simply update mtimes of destination
2714 if mymd5 == cfgfiledict.get(myrealdest, [None])[0]:
2715 """ An identical update has previously been
2716 merged. Skip it unless the user has chosen
2718 moveme = cfgfiledict["IGNORE"]
2719 cfgprot = cfgfiledict["IGNORE"]
2722 mymtime = long(mystat.st_mtime)
2727 # Merging a new file, so update confmem.
2728 cfgfiledict[myrealdest] = [mymd5]
2729 elif destmd5 == cfgfiledict.get(myrealdest, [None])[0]:
2730 """A previously remembered update has been
2731 accepted, so it is removed from confmem."""
2732 del cfgfiledict[myrealdest]
# When protection is active, write to a ._cfg#### name instead of
# clobbering the protected destination.
2734 mydest = new_protect_filename(mydest, newmd5=mymd5)
2736 # whether config protection or not, we merge the new file the
2737 # same way. Unless moveme=0 (blocking directory)
# Files identical in (md5, size, mode, uid, gid) share a candidate
# list so movefile can presumably merge them as hardlinks.
2739 hardlink_key = (mymd5, mystat.st_size,
2740 mystat.st_mode, mystat.st_uid, mystat.st_gid)
2741 hardlink_candidates = self._md5_merge_map.get(hardlink_key)
2742 if hardlink_candidates is None:
2743 hardlink_candidates = []
2744 self._md5_merge_map[hardlink_key] = hardlink_candidates
2745 mymtime = movefile(mysrc, mydest, newmtime=thismtime,
2746 sstat=mystat, mysettings=self.settings,
2747 hardlink_candidates=hardlink_candidates)
2750 hardlink_candidates.append(mydest)
2754 outfile.write("obj "+myrealdest+" "+mymd5+" "+str(mymtime)+"\n")
2755 showMessage("%s %s\n" % (zing,mydest))
# ---- fifo / device-node branch -------------------------------------
2757 # we are merging a fifo or device node
2760 # destination doesn't exist
2761 if movefile(mysrc, mydest, newmtime=thismtime, sstat=mystat, mysettings=self.settings) != None:
2765 if stat.S_ISFIFO(mymode):
2766 outfile.write("fif %s\n" % myrealdest)
2768 outfile.write("dev %s\n" % myrealdest)
2769 showMessage(zing + " " + mydest + "\n")
# NOTE(review): embedded line numbers are non-contiguous here; the
# docstring quotes and a try/finally (if any) are elided from view.
2771 def merge(self, mergeroot, inforoot, myroot, myebuild=None, cleanup=0,
2772 mydbapi=None, prev_mtimes=None):
2774 If portage is reinstalling itself, create temporary
2775 copies of PORTAGE_BIN_PATH and PORTAGE_PYM_PATH in order
2776 to avoid relying on the new versions which may be
2777 incompatible. Register an atexit hook to clean up the
2778 temporary directories. Pre-load elog modules here since
2779 we won't be able to later if they get unmerged (happens
2780 when namespace changes).
# Invalidate the dbapi's cached category list; it may be stale after
# this merge.
2782 if self.vartree.dbapi._categories is not None:
2783 self.vartree.dbapi._categories = None
# Detect portage reinstalling itself: ROOT is "/", our cpv matches the
# portage package atom, and this exact cpv is not yet in the vdb.
2784 if self.myroot == "/" and \
2785 match_from_list(PORTAGE_PACKAGE_ATOM, [self.mycpv]) and \
2786 not self.vartree.dbapi.cpv_exists(self.mycpv):
2787 settings = self.settings
2788 base_path_orig = os.path.dirname(settings["PORTAGE_BIN_PATH"])
2789 from tempfile import mkdtemp
2791 # Make the temp directory inside PORTAGE_TMPDIR since, unlike
2792 # /tmp, it can't be mounted with the "noexec" option.
2793 base_path_tmp = mkdtemp("", "._portage_reinstall_.",
2794 settings["PORTAGE_TMPDIR"])
2795 from portage.process import atexit_register
2796 atexit_register(shutil.rmtree, base_path_tmp)
# Point PORTAGE_BIN_PATH / PORTAGE_PYM_PATH at private copies so the
# running portage keeps working even after the old version is removed.
2798 for subdir in "bin", "pym":
2799 var_name = "PORTAGE_%s_PATH" % subdir.upper()
2800 var_orig = settings[var_name]
2801 var_new = os.path.join(base_path_tmp, subdir)
2802 settings[var_name] = var_new
2803 settings.backup_changes(var_name)
2804 shutil.copytree(var_orig, var_new, symlinks=True)
# NOTE(review): dir_perms is not defined in this view — presumably a
# constant bound on an elided line; confirm against the full source.
2805 os.chmod(var_new, dir_perms)
2806 os.chmod(base_path_tmp, dir_perms)
2807 # This serves so pre-load the modules.
2808 elog_process(self.mycpv, self.settings,
2809 phasefilter=filter_mergephases)
# Delegate the actual work to _merge().
2811 return self._merge(mergeroot, inforoot,
2812 myroot, myebuild=myebuild, cleanup=cleanup,
2813 mydbapi=mydbapi, prev_mtimes=prev_mtimes)
# Worker behind merge(): run treewalk, flush elog messages, then run the
# "clean" phase unless FEATURES=noclean.  NOTE(review): the lock
# acquire/release and the final "return retval" are elided from view.
2815 def _merge(self, mergeroot, inforoot, myroot, myebuild=None, cleanup=0,
2816 mydbapi=None, prev_mtimes=None):
2820 retval = self.treewalk(mergeroot, myroot, inforoot, myebuild,
2821 cleanup=cleanup, mydbapi=mydbapi, prev_mtimes=prev_mtimes)
2823 # Process ebuild logfiles
2824 elog_process(self.mycpv, self.settings, phasefilter=filter_mergephases)
2825 if retval == os.EX_OK and "noclean" not in self.settings.features:
2826 if myebuild is None:
2827 myebuild = os.path.join(inforoot, self.pkg + ".ebuild")
# Run the clean phase directly, or hand it to the scheduler if one
# is attached.
2829 if self._scheduler is None:
2830 doebuild(myebuild, "clean", myroot,
2831 self.settings, tree=self.treetype,
2832 mydbapi=mydbapi, vartree=self.vartree)
2834 self._scheduler.dblinkEbuildPhase(
2835 self, mydbapi, myebuild, "clean")
2840 def getstring(self,name):
2841 "returns contents of a file with whitespace converted to spaces"
# NOTE(review): the early return for a missing file and the close() call
# are elided from this view (non-contiguous line numbers).
2842 if not os.path.exists(self.dbdir+"/"+name):
2844 myfile = open(self.dbdir+"/"+name,"r")
# split() collapses all whitespace runs; join re-assembles with single
# spaces.
2845 mydata = myfile.read().split()
2847 return " ".join(mydata)
def copyfile(self, fname):
	"""Copy *fname* into this package's vdb directory, keeping its basename."""
	basename = os.path.basename(fname)
	shutil.copyfile(fname, self.dbdir + "/" + basename)
2852 def getfile(self,fname):
# Return the raw contents of a file from the vdb directory.
# NOTE(review): the early return for a missing file, the close() call and
# the final "return mydata" are elided from this view.
2853 if not os.path.exists(self.dbdir+"/"+fname):
2855 myfile = open(self.dbdir+"/"+fname,"r")
2856 mydata = myfile.read()
def setfile(self, fname, data):
	"""Atomically replace *fname* inside the vdb directory with *data*."""
	target = os.path.join(self.dbdir, fname)
	write_atomic(target, data)
2863 def getelements(self,ename):
# Split a vdb file into a flat list of whitespace-separated tokens,
# processed line by line.  NOTE(review): the early return, the outer
# "for x in mylines:" loop, the accumulator append and the final return
# are elided from this view.
2864 if not os.path.exists(self.dbdir+"/"+ename):
2866 myelement = open(self.dbdir+"/"+ename,"r")
2867 mylines = myelement.readlines()
# x[:-1] strips the trailing newline before tokenizing each line.
2870 for y in x[:-1].split():
2875 def setelements(self,mylist,ename):
# Write each element of mylist to the vdb file, one per line.
# NOTE(review): the "for x in mylist:" header and the close() call are
# elided from this view.
2876 myelement = open(self.dbdir+"/"+ename,"w")
2878 myelement.write(x+"\n")
def isregular(self):
	"""Return True when this package has a CATEGORY file in its vdb
	directory (a dblink can be virtual *and* regular)."""
	category_file = os.path.join(self.dbdir, "CATEGORY")
	return os.path.exists(category_file)
# Serialize a CONTENTS mapping {abs_path: entry_data_tuple} to the file
# object f.  NOTE(review): the docstring quotes and the final
# "f.write(line)" call are elided from this view.
2885 def write_contents(contents, root, f):
2887 Write contents to any file like object. The file will be left open.
# root_len = len(root) - 1 keeps the leading os.sep when stripping the
# root prefix from each absolute path.
2889 root_len = len(root) - 1
2890 for filename in sorted(contents):
2891 entry_data = contents[filename]
2892 entry_type = entry_data[0]
2893 relative_filename = filename[root_len:]
# "obj" entries carry (type, mtime, md5); note the serialized order is
# "obj <path> <md5> <mtime>", swapping md5/mtime relative to the tuple.
2894 if entry_type == "obj":
2895 entry_type, mtime, md5sum = entry_data
2896 line = "%s %s %s %s\n" % \
2897 (entry_type, relative_filename, md5sum, mtime)
# "sym" entries carry (type, mtime, link target).
2898 elif entry_type == "sym":
2899 entry_type, mtime, link = entry_data
2900 line = "%s %s -> %s %s\n" % \
2901 (entry_type, relative_filename, link, mtime)
2902 else: # dir, dev, fif
2903 line = "%s %s\n" % (entry_type, relative_filename)
# Add the files described by a CONTENTS mapping to an open tarfile.
# NOTE(review): embedded line numbers are non-contiguous; the curval
# bookkeeping, the loop header over paths, the live_path assignment, the
# file open for regular entries and the trailing else branches are all
# elided from this view.
2906 def tar_contents(contents, root, tar, protect=None, onProgress=None):
2907 from portage.util import normalize_path
2909 root = normalize_path(root).rstrip(os.path.sep) + os.path.sep
2911 maxval = len(contents)
2914 onProgress(maxval, 0)
2915 paths = contents.keys()
# lstat each recorded path; ENOENT (file since removed) is tolerated,
# other errors presumably re-raise on an elided line.
2920 lst = os.lstat(path)
2922 if e.errno != errno.ENOENT:
2926 onProgress(maxval, curval)
2928 contents_type = contents[path][0]
# The archive member name is the path relative to root; a path outside
# root means the caller passed an inconsistent root.
2929 if path.startswith(root):
2930 arcname = path[len(root):]
2932 raise ValueError("invalid root argument: '%s'" % root)
2934 if 'dir' == contents_type and \
2935 not stat.S_ISDIR(lst.st_mode) and \
2936 os.path.isdir(live_path):
2937 # Even though this was a directory in the original ${D}, it exists
2938 # as a symlink to a directory in the live filesystem. It must be
2939 # recorded as a real directory in the tar file to ensure that tar
2940 # can properly extract it's children.
2941 live_path = os.path.realpath(live_path)
2942 tarinfo = tar.gettarinfo(live_path, arcname)
2943 # store numbers instead of real names like tar's --numeric-owner
# id_strings is presumably a dict defined on an elided line; it caches
# the uid/gid -> string conversions.
2944 tarinfo.uname = id_strings.setdefault(tarinfo.uid, str(tarinfo.uid))
2945 tarinfo.gname = id_strings.setdefault(tarinfo.gid, str(tarinfo.gid))
2947 if stat.S_ISREG(lst.st_mode):
2948 # break hardlinks due to bug #185305
2949 tarinfo.type = tarfile.REGTYPE
2950 if protect and protect(path):
2951 # Create an empty file as a place holder in order to avoid
2952 # potential collision-protect issues.
2954 tar.addfile(tarinfo)
# Regular unprotected file: stream its contents (f is opened on an
# elided line).
2958 tar.addfile(tarinfo, f)
# Non-regular entries (dirs, symlinks, device nodes) are added from
# metadata alone.
2962 tar.addfile(tarinfo)
2964 onProgress(maxval, curval)