1 # Copyright 1998-2007 Gentoo Foundation
2 # Distributed under the terms of the GNU General Public License v2
5 from portage.checksum import perform_md5
6 from portage.const import CACHE_PATH, CONFIG_MEMORY_FILE, PORTAGE_BIN_PATH, \
8 from portage.data import portage_gid, portage_uid, secpass
9 from portage.dbapi import dbapi
10 from portage.dep import dep_getslot, use_reduce, paren_reduce, isvalidatom, \
11 isjustname, dep_getkey, match_from_list
12 from portage.exception import InvalidAtom, InvalidData, InvalidPackageName, \
13 FileNotFound, PermissionDenied, UnsupportedAPIException
14 from portage.locks import lockdir, unlockdir
15 from portage.output import bold, red, green
16 from portage.update import fixdbentries
17 from portage.util import apply_secpass_permissions, ConfigProtect, ensure_dirs, \
18 writemsg, writemsg_stdout, write_atomic, atomic_ofstream, writedict, \
19 grabfile, grabdict, normalize_path, new_protect_filename, getlibpaths
20 from portage.versions import pkgsplit, catpkgsplit, catsplit, best, pkgcmp
22 from portage import listdir, dep_expand, flatten, key_expand, \
23 doebuild_environment, doebuild, env_update, prepare_build_dirs, \
24 abssymlink, movefile, _movefile, bsd_chflags, cpv_getkey
26 from portage.elog import elog_process
27 from portage.elog.messages import ewarn
28 from portage.elog.filtering import filter_mergephases, filter_unmergephases
30 import os, re, sys, stat, errno, commands, copy, time, subprocess
31 from itertools import izip
36 import pickle as cPickle
38 class PreservedLibsRegistry(object):
# NOTE(review): this excerpt is elided -- try/except scaffolding, the load()
# def line, and several returns from the original file are not visible here.
# The class persists a mapping {cp:slot -> (cpv, counter, paths)} of shared
# libraries preserved across upgrades, pickled to a single registry file.
39 	""" This class handles the tracking of preserved library objects """
40 	def __init__(self, filename, autocommit=True):
41 		""" @param filename: absolute path for saving the preserved libs records
42 		@type filename: String
43 		@param autocommit: determines if the file is written after every update
44 		@type autocommit: Boolean
46 		self._filename = filename
47 		self._autocommit = autocommit
51 		""" Reload the registry data from file """
# Text-mode unpickle of the registry file.  A missing file (ENOENT) is
# tolerated -- presumably the registry starts empty in that case (the elided
# branch body is not visible); EACCES is mapped to portage's PermissionDenied.
53 			self._data = cPickle.load(open(self._filename, "r"))
55 			if e.errno == errno.ENOENT:
57 			elif e.errno == PermissionDenied.errno:
58 				raise PermissionDenied(self._filename)
63 		""" Store the registry data to file. No need to call this if autocommit
# atomic_ofstream writes to a temp file and renames, so a crash mid-dump
# cannot corrupt the existing registry.
66 		f = atomic_ofstream(self._filename)
67 		cPickle.dump(self._data, f)
70 	def register(self, cpv, slot, counter, paths):
71 		""" Register new objects in the registry. If there is a record with the
72 		same packagename (internally derived from cpv) and slot it is
73 		overwritten with the new data.
74 		@param cpv: package instance that owns the objects
75 		@type cpv: CPV (as String)
76 		@param slot: the value of SLOT of the given package instance
78 		@param counter: vdb counter value for the package instace
79 		@type counter: Integer
80 		@param paths: absolute paths of objects that got preserved during an update
# cp is "category/package" without version; the registry key (cps, built on
# an elided line, presumably cp+":"+slot) is per-slot.
83 		cp = "/".join(catpkgsplit(cpv)[:2])
# An empty path list for the same cpv/counter removes the record rather than
# storing an empty entry (the del branch is elided from this view).
85 		if len(paths) == 0 and self._data.has_key(cps) \
86 			and self._data[cps][0] == cpv and int(self._data[cps][1]) == int(counter):
89 			self._data[cps] = (cpv, counter, paths)
93 	def unregister(self, cpv, slot, counter):
94 		""" Remove a previous registration of preserved objects for the given package.
95 		@param cpv: package instance whose records should be removed
96 		@type cpv: CPV (as String)
97 		@param slot: the value of SLOT of the given package instance
# Implemented as a register() with an empty path list (see register()).
100 		self.register(cpv, slot, counter, [])
102 	def pruneNonExisting(self):
103 		""" Remove all records for objects that no longer exist on the filesystem. """
# Iterates over a snapshot of keys (.keys()) because entries whose path list
# becomes empty are presumably deleted in an elided else-branch.
104 		for cps in self._data.keys():
105 			cpv, counter, paths = self._data[cps]
106 			paths = [f for f in paths if os.path.exists(f)]
108 				self._data[cps] = (cpv, counter, paths)
114 	def hasEntries(self):
115 		""" Check if this registry contains any records. """
116 		return len(self._data) > 0
118 	def getPreservedLibs(self):
119 		""" Return a mapping of packages->preserved objects.
120 		@returns mapping of package instances to preserved objects
121 		@rtype Dict cpv->list-of-paths
# Builds rValue (initialized on an elided line) keyed by the stored cpv.
124 		for cps in self._data:
125 			rValue[self._data[cps][0]] = self._data[cps][2]
128 class LinkageMap(object):
# Maps the dynamic-linking relationships of installed ELF objects:
# _libs[soname][arch] holds "providers"/"consumers" lists of object paths,
# _obj_properties[obj] holds (arch, needed, runpath, soname).
# NOTE(review): several lines (loop headers, returns, the _libs init) are
# elided from this excerpt.
129 	def __init__(self, vardbapi):
130 		self._dbapi = vardbapi
132 		self._obj_properties = {}
# Default library search path from the system (ld.so) configuration.
133 		self._defpath = getlibpaths()
135 	def rebuild(self, include_file=None):
# Collect one NEEDED.ELF.2 line per installed object across the whole vdb.
139 		for cpv in self._dbapi.cpv_all():
140 			lines += self._dbapi.aux_get(cpv, ["NEEDED.ELF.2"])[0].split('\n')
141 		# Cache NEEDED.* files avoid doing excessive IO for every rebuild.
142 		self._dbapi.flush_cache()
145 			lines += grabfile(include_file)
147 		# have to call scanelf for preserved libs here as they aren't
148 		# registered in NEEDED.ELF.2 files
149 		if self._dbapi.plib_registry and self._dbapi.plib_registry.getPreservedLibs():
# scanelf (pax-utils) emits arch;file;soname;rpath;needed per object; the
# leading 3 chars stripped below are presumably its per-line type prefix.
150 			args = ["/usr/bin/scanelf", "-yqF", "%a;%F;%S;%r;%n"]
151 			for items in self._dbapi.plib_registry.getPreservedLibs().values():
153 			proc = subprocess.Popen(args, stdout=subprocess.PIPE)
154 			output = [l[3:] for l in proc.communicate()[0].split("\n")]
# Parse each ";"-separated record: arch;path;soname;runpath;needed.
160 			fields = l.strip("\n").split(";")
162 				print "Error", fields
163 				# insufficient field length
166 			obj = os.path.realpath(fields[1])
# Expand $ORIGIN in DT_RUNPATH relative to the object's own directory.
168 			path = fields[3].replace("${ORIGIN}", os.path.dirname(obj)).replace("$ORIGIN", os.path.dirname(obj)).split(":")
169 			needed = fields[4].split(",")
171 				libs.setdefault(soname, {arch: {"providers": [], "consumers": []}})
172 				libs[soname].setdefault(arch, {"providers": [], "consumers": []})
173 				libs[soname][arch]["providers"].append(obj)
175 				libs.setdefault(x, {arch: {"providers": [], "consumers": []}})
176 				libs[x].setdefault(arch, {"providers": [], "consumers": []})
177 				libs[x][arch]["consumers"].append(obj)
178 			obj_properties[obj] = (arch, needed, path, soname)
181 		self._obj_properties = obj_properties
183 	def listLibraryObjects(self):
# Flat list of every provider object across all sonames/arches.
187 		for soname in self._libs:
188 			for arch in self._libs[soname]:
189 				rValue.extend(self._libs[soname][arch]["providers"])
192 	def findProviders(self, obj):
# Returns (per elided lines, presumably a dict keyed by needed soname) the
# objects that satisfy each DT_NEEDED entry of obj, restricted to obj's arch
# and its effective search path (runpath + default ld paths).
196 		if obj not in self._obj_properties:
197 			obj = os.path.realpath(obj)
198 			if obj not in self._obj_properties:
199 				raise KeyError("%s not in object list" % obj)
200 		arch, needed, path, soname = self._obj_properties[obj]
# NOTE(review): extend() mutates the cached runpath list stored in
# _obj_properties -- repeated calls would keep appending _defpath; the elided
# lines may compensate, but this looks worth verifying upstream.
201 		path.extend(self._defpath)
202 		path = [os.path.realpath(x) for x in path]
205 			if x not in self._libs or arch not in self._libs[x]:
207 			for y in self._libs[x][arch]["providers"]:
# A NEEDED entry that is an absolute path matches its provider directly;
# otherwise match providers living in a directory on the search path.
208 				if x[0] == os.sep and os.path.realpath(x) == os.path.realpath(y):
210 				elif os.path.realpath(os.path.dirname(y)) in path:
214 	def findConsumers(self, obj):
# Inverse of findProviders: every object whose NEEDED resolves to obj.
217 		if obj not in self._obj_properties:
218 			obj = os.path.realpath(obj)
219 			if obj not in self._obj_properties:
220 				raise KeyError("%s not in object list" % obj)
222 		for soname in self._libs:
223 			for arch in self._libs[soname]:
224 				if obj in self._libs[soname][arch]["providers"]:
225 					for x in self._libs[soname][arch]["consumers"]:
226 						path = self._obj_properties[x][2]
227 						path = [os.path.realpath(y) for y in path+self._defpath]
228 						if soname[0] == os.sep and os.path.realpath(soname) == os.path.realpath(obj):
230 						elif os.path.realpath(os.path.dirname(obj)) in path:
234 class vardbapi(dbapi):
# dbapi implementation backed by the installed-package database under
# $ROOT/var/db/pkg (VDB_PATH): one directory per installed cpv containing
# flat metadata files (COUNTER, CONTENTS, SLOT, ...).
# NOTE(review): this excerpt is elided -- many try/except headers, returns
# and loop bodies from the original file are not visible.
236 	_excluded_dirs = ["CVS", "lost+found"]
237 	_excluded_dirs = [re.escape(x) for x in _excluded_dirs]
# Final value is a compiled regex matching dirs to skip: dotfiles, partial
# "-MERGING-" entries, and the literal names above.
238 	_excluded_dirs = re.compile(r'^(\..*|-MERGING-.*|' + \
239 		"|".join(_excluded_dirs) + r')$')
# NEEDED.* keys are cached like the fixed key set; CONTENTS/NEEDED.* keep
# their embedded newlines in _aux_get (see _aux_multi_line_re use below).
241 	_aux_cache_keys_re = re.compile(r'^NEEDED\..*$')
242 	_aux_multi_line_re = re.compile(r'^(CONTENTS|NEEDED\..*)$')
244 	def __init__(self, root, categories=None, settings=None, vartree=None):
246 		The categories parameter is unused since the dbapi class
247 		now has a categories property that is generated from the
252 		#cache for category directory mtimes
255 		#cache for dependency checks
258 		#cache for cp_list results
# Fall back to the global portage settings / vartree when not supplied.
263 			from portage import settings
264 			self.settings = settings
266 			from portage import db
267 			vartree = db[root]["vartree"]
268 		self.vartree = vartree
# Metadata keys persisted in the on-disk aux_get cache.
269 		self._aux_cache_keys = set(
270 			["CHOST", "COUNTER", "DEPEND", "DESCRIPTION",
271 			"EAPI", "HOMEPAGE", "IUSE", "KEYWORDS",
272 			"LICENSE", "PDEPEND", "PROVIDE", "RDEPEND",
273 			"repository", "RESTRICT" , "SLOT", "USE"])
274 		self._aux_cache = None
275 		self._aux_cache_version = "1"
276 		self._aux_cache_filename = os.path.join(self.root,
277 			CACHE_PATH.lstrip(os.path.sep), "vdb_metadata.pickle")
278 		self._counter_path = os.path.join(root,
279 			CACHE_PATH.lstrip(os.path.sep), "counter")
282 			self.plib_registry = PreservedLibsRegistry(
283 				os.path.join(self.root, PRIVATE_PATH, "preserved_libs_registry"))
284 		except PermissionDenied:
285 			# apparently this user isn't allowed to access PRIVATE_PATH
286 			self.plib_registry = None
288 		self.linkmap = LinkageMap(self)
290 	def getpath(self, mykey, filename=None):
# Path of a vdb entry (cpv or category), optionally a file inside it.
291 		rValue = os.path.join(self.root, VDB_PATH, mykey)
293 			rValue = os.path.join(rValue, filename)
296 	def cpv_exists(self, mykey):
297 		"Tells us whether an actual ebuild exists on disk (no masking)"
298 		return os.path.exists(self.getpath(mykey))
300 	def cpv_counter(self, mycpv):
301 		"This method will grab the COUNTER. Returns a counter value."
# Fast path: COUNTER via aux_get; the elided lines fall back to reading and
# repairing the per-package COUNTER file directly.
303 			return long(self.aux_get(mycpv, ["COUNTER"])[0])
304 		except (KeyError, ValueError):
306 		cdir = self.getpath(mycpv)
307 		cpath = self.getpath(mycpv, filename="COUNTER")
309 		# We write our new counter value to a new file that gets moved into
310 		# place to avoid filesystem corruption on XFS (unexpected reboot.)
312 		if os.path.exists(cpath):
313 			cfile = open(cpath, "r")
315 				counter = long(cfile.readline())
317 				print "portage: COUNTER for", mycpv, "was corrupted; resetting to value of 0"
321 		elif os.path.exists(cdir):
322 			mys = pkgsplit(mycpv)
323 			myl = self.match(mys[0], use_cache=0)
327 					# Only one package... Counter doesn't matter.
328 					write_atomic(cpath, "1")
330 				except SystemExit, e:
333 					writemsg("!!! COUNTER file is missing for "+str(mycpv)+" in /var/db.\n",
335 					writemsg("!!! Please run %s/fix-db.py or\n" % PORTAGE_BIN_PATH,
337 					writemsg("!!! unmerge this exact version.\n", noiselevel=-1)
338 					writemsg("!!! %s\n" % e, noiselevel=-1)
341 			writemsg("!!! COUNTER file is missing for "+str(mycpv)+" in /var/db.\n",
343 			writemsg("!!! Please run %s/fix-db.py or\n" % PORTAGE_BIN_PATH,
345 			writemsg("!!! remerge the package.\n", noiselevel=-1)
350 			# update new global counter file
351 			write_atomic(cpath, str(counter))
354 	def cpv_inject(self, mycpv):
355 		"injects a real package into our on-disk database; assumes mycpv is valid and doesn't already exist"
356 		os.makedirs(self.getpath(mycpv))
357 		counter = self.counter_tick(self.root, mycpv=mycpv)
358 		# write local package counter so that emerge clean does the right thing
359 		write_atomic(self.getpath(mycpv, filename="COUNTER"), str(counter))
361 	def isInjected(self, mycpv):
# An entry is "injected" if it has an INJECTED marker file or lacks a
# CONTENTS file (return statements are elided from this view).
362 		if self.cpv_exists(mycpv):
363 			if os.path.exists(self.getpath(mycpv, filename="INJECTED")):
365 			if not os.path.exists(self.getpath(mycpv, filename="CONTENTS")):
369 	def move_ent(self, mylist):
# Handle a package move (origcp -> newcp, unpacked from mylist on elided
# lines): rename each installed version's vdb directory and fix metadata.
374 		for cp in [origcp, newcp]:
375 			if not (isvalidatom(cp) and isjustname(cp)):
376 				raise InvalidPackageName(cp)
377 		origmatches = self.match(origcp, use_cache=0)
381 		for mycpv in origmatches:
382 			mycpsplit = catpkgsplit(mycpv)
383 			mynewcpv = newcp + "-" + mycpsplit[2]
384 			mynewcat = newcp.split("/")[0]
# Preserve the revision suffix unless it is the implicit -r0.
385 			if mycpsplit[3] != "r0":
386 				mynewcpv += "-" + mycpsplit[3]
387 			mycpsplit_new = catpkgsplit(mynewcpv)
388 			origpath = self.getpath(mycpv)
389 			if not os.path.exists(origpath):
392 			if not os.path.exists(self.getpath(mynewcat)):
393 				#create the directory
394 				os.makedirs(self.getpath(mynewcat))
395 			newpath = self.getpath(mynewcpv)
396 			if os.path.exists(newpath):
397 				#dest already exists; keep this puppy where it is.
399 			_movefile(origpath, newpath, mysettings=self.settings)
401 			# We need to rename the ebuild now.
402 			old_pf = catsplit(mycpv)[1]
403 			new_pf = catsplit(mynewcpv)[1]
406 				os.rename(os.path.join(newpath, old_pf + ".ebuild"),
407 					os.path.join(newpath, new_pf + ".ebuild"))
408 			except EnvironmentError, e:
# A missing ebuild inside the entry is tolerated; anything else propagates.
409 				if e.errno != errno.ENOENT:
412 			write_atomic(os.path.join(newpath, "PF"), new_pf+"\n")
413 			write_atomic(os.path.join(newpath, "CATEGORY"), mynewcat+"\n")
414 			fixdbentries([mylist], newpath)
417 	def cp_list(self, mycp, use_cache=1):
# All installed cpvs for one category/package, mtime-validated via cpcache.
418 		mysplit=catsplit(mycp)
419 		if mysplit[0] == '*':
420 			mysplit[0] = mysplit[0][1:]
422 			mystat = os.stat(self.getpath(mysplit[0]))[stat.ST_MTIME]
425 		if use_cache and self.cpcache.has_key(mycp):
426 			cpc = self.cpcache[mycp]
429 		cat_dir = self.getpath(mysplit[0])
431 			dir_list = os.listdir(cat_dir)
432 		except EnvironmentError, e:
433 			from portage.exception import PermissionDenied
434 			if e.errno == PermissionDenied.errno:
435 				raise PermissionDenied(cat_dir)
441 			if self._excluded_dirs.match(x) is not None:
445 				self.invalidentry(os.path.join(self.getpath(mysplit[0]), x))
448 			if ps[0] == mysplit[1]:
449 				returnme.append(mysplit[0]+"/"+x)
450 		self._cpv_sort_ascending(returnme)
# Cache a copy so later mutation of the returned list can't poison the cache.
452 			self.cpcache[mycp] = [mystat, returnme[:]]
453 		elif self.cpcache.has_key(mycp):
454 			del self.cpcache[mycp]
457 	def cpv_all(self, use_cache=1):
# Walk every category dir under the vdb and collect "cat/pv" entries.
459 		basepath = os.path.join(self.root, VDB_PATH) + os.path.sep
460 		for x in listdir(basepath, EmptyOnError=1, ignorecvs=1, dirsonly=1):
461 			if self._excluded_dirs.match(x) is not None:
463 			if not self._category_re.match(x):
465 			for y in listdir(basepath + x, EmptyOnError=1, dirsonly=1):
466 				if self._excluded_dirs.match(y) is not None:
468 				subpath = x + "/" + y
469 				# -MERGING- should never be a cpv, nor should files.
471 					if catpkgsplit(subpath) is None:
472 						self.invalidentry(os.path.join(self.root, subpath))
474 				except portage.exception.InvalidData:
475 					self.invalidentry(os.path.join(self.root, subpath))
477 				returnme.append(subpath)
480 	def cp_all(self, use_cache=1):
# Distinct "cat/pn" keys derived from cpv_all(), via dict d as a set.
481 		mylist = self.cpv_all(use_cache=use_cache)
487 				mysplit = catpkgsplit(y)
488 			except portage.exception.InvalidData:
489 				self.invalidentry(self.getpath(y))
492 				self.invalidentry(self.getpath(y))
494 			d[mysplit[0]+"/"+mysplit[1]] = None
497 	def checkblockers(self, origdep):
500 	def match(self, origdep, use_cache=1):
501 		"caching match function"
# mydep comes from dep_expand (call header elided); results are cached per
# category, invalidated when the category directory mtime changes.
503 			origdep, mydb=self, use_cache=use_cache, settings=self.settings)
504 		mykey = dep_getkey(mydep)
505 		mycat = catsplit(mykey)[0]
# Uncached path: drop any stale cache and match directly.
507 			if self.matchcache.has_key(mycat):
508 				del self.mtdircache[mycat]
509 				del self.matchcache[mycat]
510 			mymatch = match_from_list(mydep,
511 				self.cp_list(mykey, use_cache=use_cache))
512 			myslot = dep_getslot(mydep)
513 			if myslot is not None:
514 				mymatch = [cpv for cpv in mymatch \
515 					if self.aux_get(cpv, ["SLOT"])[0] == myslot]
518 			curmtime = os.stat(self.root+VDB_PATH+"/"+mycat)[stat.ST_MTIME]
519 		except (IOError, OSError):
522 		if not self.matchcache.has_key(mycat) or not self.mtdircache[mycat]==curmtime:
524 			self.mtdircache[mycat] = curmtime
525 			self.matchcache[mycat] = {}
526 		if not self.matchcache[mycat].has_key(mydep):
527 			mymatch = match_from_list(mydep, self.cp_list(mykey, use_cache=use_cache))
528 			myslot = dep_getslot(mydep)
529 			if myslot is not None:
530 				mymatch = [cpv for cpv in mymatch \
531 					if self.aux_get(cpv, ["SLOT"])[0] == myslot]
532 			self.matchcache[mycat][mydep] = mymatch
# Return a copy so callers can't mutate the cached list.
533 		return self.matchcache[mycat][mydep][:]
535 	def findname(self, mycpv):
# Path of the ebuild preserved inside the installed package's vdb entry.
536 		return self.getpath(str(mycpv), filename=catsplit(mycpv)[1]+".ebuild")
538 	def flush_cache(self):
539 		"""If the current user has permission and the internal aux_get cache has
540 		been updated, save it to disk and mark it unmodified. This is called
541 		by emerge after it has loaded the full vdb for use in dependency
542 		calculations. Currently, the cache is only written if the user has
543 		superuser privileges (since that's required to obtain a lock), but all
544 		users have read access and benefit from faster metadata lookups (as
545 		long as at least part of the cache is still valid)."""
546 		if self._aux_cache is not None and \
547 			self._aux_cache["modified"] and \
# Drop cache entries for packages no longer installed before persisting.
549 			valid_nodes = set(self.cpv_all())
550 			for cpv in self._aux_cache["packages"].keys():
551 				if cpv not in valid_nodes:
552 					del self._aux_cache["packages"][cpv]
# "modified" is an in-memory flag only; it is removed before pickling and
# restored below.
553 			del self._aux_cache["modified"]
555 				f = atomic_ofstream(self._aux_cache_filename)
556 				cPickle.dump(self._aux_cache, f, -1)
558 				apply_secpass_permissions(
559 					self._aux_cache_filename, gid=portage_gid, mode=0644)
560 			except (IOError, OSError), e:
562 			self._aux_cache["modified"] = False
564 	def aux_get(self, mycpv, wants):
565 		"""This automatically caches selected keys that are frequently needed
566 		by emerge for dependency calculations. The cached metadata is
567 		considered valid if the mtime of the package directory has not changed
568 		since the data was cached. The cache is stored in a pickled dict
569 		object with the following format:
571 		{version:"1", "packages":{cpv1:(mtime,{k1,v1, k2,v2, ...}), cpv2...}}
573 		If an error occurs while loading the cache pickle or the version is
574 		unrecognized, the cache will simple be recreated from scratch (it is
575 		completely disposable).
577 		cache_these_wants = self._aux_cache_keys.intersection(wants)
579 			if self._aux_cache_keys_re.match(x) is not None:
580 				cache_these_wants.add(x)
# Nothing cacheable requested -> bypass the cache entirely.
582 		if not cache_these_wants:
583 			return self._aux_get(mycpv, wants)
585 		cache_these = set(self._aux_cache_keys)
586 		cache_these.update(cache_these_wants)
# Lazy one-time load of the pickled on-disk cache.
588 		if self._aux_cache is None:
590 				f = open(self._aux_cache_filename)
591 				mypickle = cPickle.Unpickler(f)
# Disable global lookups for safer unpickling of this disposable cache.
592 				mypickle.find_global = None
593 				self._aux_cache = mypickle.load()
596 			except (IOError, OSError, EOFError, cPickle.UnpicklingError):
# Any shape/version mismatch -> start from an empty cache.
598 			if not self._aux_cache or \
599 				not isinstance(self._aux_cache, dict) or \
600 				self._aux_cache.get("version") != self._aux_cache_version or \
601 				not self._aux_cache.get("packages"):
602 				self._aux_cache = {"version": self._aux_cache_version}
603 				self._aux_cache["packages"] = {}
604 			self._aux_cache["modified"] = False
605 		mydir = self.getpath(mycpv)
608 			mydir_stat = os.stat(mydir)
610 			if e.errno != errno.ENOENT:
612 			raise KeyError(mycpv)
# Directory mtime (truncated to whole seconds) validates the cache entry.
613 		mydir_mtime = long(mydir_stat.st_mtime)
614 		pkg_data = self._aux_cache["packages"].get(mycpv)
617 		cache_incomplete = False
619 			cache_mtime, metadata = pkg_data
620 			cache_valid = cache_mtime == mydir_mtime
622 			cache_incomplete = cache_these.difference(metadata)
624 				# Allow self._aux_cache_keys to change without a cache version
625 				# bump and efficiently recycle partial cache whenever possible.
627 				pull_me = cache_incomplete.union(wants)
629 				pull_me = set(wants).difference(cache_these)
630 			mydata.update(metadata)
632 			pull_me = cache_these
635 			# pull any needed data and cache it
636 			aux_keys = list(pull_me)
637 			for k, v in izip(aux_keys, self._aux_get(mycpv, aux_keys)):
639 			if not cache_valid or cache_incomplete:
642 					cache_data.update(metadata)
643 				for aux_key in cache_these:
644 					cache_data[aux_key] = mydata[aux_key]
645 				self._aux_cache["packages"][mycpv] = (mydir_mtime, cache_data)
646 				self._aux_cache["modified"] = True
647 		return [mydata[x] for x in wants]
649 	def _aux_get(self, mycpv, wants):
# Uncached read: one flat file per metadata key inside the vdb entry.
650 		mydir = self.getpath(mycpv)
652 			if not stat.S_ISDIR(os.stat(mydir).st_mode):
653 				raise KeyError(mycpv)
655 			if e.errno == errno.ENOENT:
656 				raise KeyError(mycpv)
662 				myf = open(os.path.join(mydir, x), "r")
667 				# Preserve \n for metadata that is known to
668 				# contain multiple lines.
669 				if self._aux_multi_line_re.match(x) is None:
670 					myd = " ".join(myd.split())
# Empty EAPI presumably maps to the implicit default (branch body elided).
673 			if x == "EAPI" and not myd:
679 	def aux_update(self, cpv, values):
# Write each key's value into the vdb entry; empty/None values unlink the
# file (the write branch is elided from this view).
680 		cat, pkg = catsplit(cpv)
681 		mylink = dblink(cat, pkg, self.root, self.settings,
682 		treetype="vartree", vartree=self.vartree)
683 		if not mylink.exists():
685 		for k, v in values.iteritems():
690 					os.unlink(os.path.join(self.getpath(cpv), k))
691 				except EnvironmentError:
694 	def counter_tick(self, myroot, mycpv=None):
695 		return self.counter_tick_core(myroot, incrementing=1, mycpv=mycpv)
697 	def get_counter_tick_core(self, myroot, mycpv=None):
699 		Use this method to retrieve the counter instead
700 		of having to trust the value of a global counter
701 		file that can lead to invalid COUNTER
702 		generation. When cache is valid, the package COUNTER
703 		files are not read and we rely on the timestamp of
704 		the package directory to validate cache. The stat
705 		calls should only take a short time, so performance
706 		is sufficient without having to rely on a potentially
707 		corrupt global counter file.
709 		The global counter file located at
710 		$CACHE_PATH/counter serves to record the
711 		counter of the last installed package and
712 		it also corresponds to the total number of
713 		installation actions that have occurred in
714 		the history of this package database.
# Scan every installed package's COUNTER to find the true maximum.
716 		cp_list = self.cp_list
718 		for cp in self.cp_all():
719 			for cpv in cp_list(cp):
721 					counter = int(self.aux_get(cpv, ["COUNTER"])[0])
722 				except (KeyError, OverflowError, ValueError):
724 				if counter > max_counter:
725 					max_counter = counter
729 			cfile = open(self._counter_path, "r")
730 		except EnvironmentError, e:
731 			writemsg("!!! Unable to read COUNTER file: '%s'\n" % \
732 				self._counter_path, noiselevel=-1)
733 			writemsg("!!! %s\n" % str(e), noiselevel=-1)
738 				counter = long(cfile.readline().strip())
741 			except (OverflowError, ValueError), e:
742 				writemsg("!!! COUNTER file is corrupt: '%s'\n" % \
743 					self._counter_path, noiselevel=-1)
744 				writemsg("!!! %s\n" % str(e), noiselevel=-1)
747 			# We must ensure that we return a counter
748 			# value that is at least as large as the
749 			# highest one from the installed packages,
750 			# since having a corrupt value that is too low
751 			# can trigger incorrect AUTOCLEAN behavior due
752 			# to newly installed packages having lower
753 			# COUNTERs than the previous version in the
755 			if counter > max_counter:
756 				max_counter = counter
759 			writemsg("!!! Initializing COUNTER to " + \
760 				"value of %d\n" % max_counter, noiselevel=-1)
762 		return max_counter + 1
764 	def counter_tick_core(self, myroot, incrementing=1, mycpv=None):
765 		"This method will grab the next COUNTER value and record it back to the global file. Returns new counter value."
766 		counter = self.get_counter_tick_core(myroot, mycpv=mycpv) - 1
770 			# update new global counter file
771 			write_atomic(self._counter_path, str(counter))
774 class vartree(object):
775 	"this tree will scan a var/db/pkg database located at root (passed to init)"
# Thin tree wrapper around vardbapi; most methods delegate to self.dbapi.
# NOTE(review): several statements (returns, loop/if headers) are elided
# from this excerpt.
776 	def __init__(self, root="/", virtual=None, clone=None, categories=None,
# The clone path is deprecated (warned below) and deep-copies the source
# tree's dbapi/settings instead of constructing fresh ones.
779 			writemsg("vartree.__init__(): deprecated " + \
780 				"use of clone parameter\n", noiselevel=-1)
781 			self.root = clone.root[:]
782 			self.dbapi = copy.deepcopy(clone.dbapi)
784 			from portage import config
785 			self.settings = config(clone=clone.settings)
789 				from portage import settings
790 				self.settings = settings # for key_expand calls
791 			if categories is None:
792 				categories = settings.categories
793 			self.dbapi = vardbapi(self.root, categories=categories,
794 				settings=settings, vartree=self)
797 	def getpath(self, mykey, filename=None):
798 		return self.dbapi.getpath(mykey, filename=filename)
800 	def zap(self, mycpv):
803 	def inject(self, mycpv):
806 	def get_provide(self, mycpv):
# Expand PROVIDE through USE conditionals and return cat/pn provide keys;
# parse failures are reported but tolerated (error path below).
810 			mylines, myuse = self.dbapi.aux_get(mycpv, ["PROVIDE", "USE"])
812 				myuse = myuse.split()
813 				mylines = flatten(use_reduce(paren_reduce(mylines), uselist=myuse))
814 				for myprovide in mylines:
815 					mys = catpkgsplit(myprovide)
817 						mys = myprovide.split("/")
818 					myprovides += [mys[0] + "/" + mys[1]]
820 		except SystemExit, e:
823 			mydir = os.path.join(self.root, VDB_PATH, mycpv)
824 			writemsg("\nParse Error reading PROVIDE and USE in '%s'\n" % mydir,
827 				writemsg("Possibly Invalid: '%s'\n" % str(mylines),
829 			writemsg("Exception: %s\n\n" % str(e), noiselevel=-1)
832 	def get_all_provides(self):
# Invert get_provide() over every installed cpv: provide-key -> [cpv, ...].
834 		for node in self.getallcpv():
835 			for mykey in self.get_provide(node):
836 				if myprovides.has_key(mykey):
837 					myprovides[mykey] += [node]
839 					myprovides[mykey] = [node]
842 	def dep_bestmatch(self, mydep, use_cache=1):
843 		"compatibility method -- all matches, not just visible ones"
844 		#mymatch=best(match(dep_expand(mydep,self.dbapi),self.dbapi))
845 		mymatch = best(self.dbapi.match(
846 			dep_expand(mydep, mydb=self.dbapi, settings=self.settings),
847 			use_cache=use_cache))
853 	def dep_match(self, mydep, use_cache=1):
854 		"compatibility method -- we want to see all matches, not just visible ones"
855 		#mymatch = match(mydep,self.dbapi)
856 		mymatch = self.dbapi.match(mydep, use_cache=use_cache)
862 	def exists_specific(self, cpv):
863 		return self.dbapi.cpv_exists(cpv)
866 		"""temporary function, probably to be renamed --- Gets a list of all
867 		category/package-versions installed on the system."""
868 		return self.dbapi.cpv_all()
870 	def getallnodes(self):
871 		"""new behavior: these are all *unmasked* nodes. There may or may not be available
872 		masked package for nodes in this nodes list."""
873 		return self.dbapi.cp_all()
875 	def exists_specific_cat(self, cpv, use_cache=1):
876 		cpv = key_expand(cpv, mydb=self.dbapi, use_cache=use_cache,
877 			settings=self.settings)
881 		mylist = listdir(self.getpath(a[0]), EmptyOnError=1)
885 				self.dbapi.invalidentry(self.getpath(a[0], filename=x))
891 	def getebuildpath(self, fullpackage):
892 		cat, package = catsplit(fullpackage)
893 		return self.getpath(fullpackage, filename=package+".ebuild")
895 	def getnode(self, mykey, use_cache=1):
# List [cpv, [cat, pn, ver, rev]] pairs for every installed version of a
# cat/pn key; bad entries are reported via invalidentry.
896 		mykey = key_expand(mykey, mydb=self.dbapi, use_cache=use_cache,
897 			settings=self.settings)
900 		mysplit = catsplit(mykey)
901 		mydirlist = listdir(self.getpath(mysplit[0]),EmptyOnError=1)
904 			mypsplit = pkgsplit(x)
906 				self.dbapi.invalidentry(self.getpath(mysplit[0], filename=x))
908 			if mypsplit[0] == mysplit[1]:
909 				appendme = [mysplit[0]+"/"+x, [mysplit[0], mypsplit[0], mypsplit[1], mypsplit[2]]]
910 				returnme.append(appendme)
914 	def getslot(self, mycatpkg):
915 		"Get a slot for a catpkg; assume it exists."
917 			return self.dbapi.aux_get(mycatpkg, ["SLOT"])[0]
921 	def hasnode(self, mykey, use_cache):
922 		"""Does the particular node (cat/pkg key) exist?"""
923 		mykey = key_expand(mykey, mydb=self.dbapi, use_cache=use_cache,
924 			settings=self.settings)
925 		mysplit = catsplit(mykey)
926 		mydirlist = listdir(self.getpath(mysplit[0]), EmptyOnError=1)
928 			mypsplit = pkgsplit(x)
930 				self.dbapi.invalidentry(self.getpath(mysplit[0], filename=x))
932 			if mypsplit[0] == mysplit[1]:
939 class dblink(object):
941 This class provides an interface to the installed package database
942 At present this is implemented as a text backend in /var/db/pkg.
946 _normalize_needed = re.compile(r'.*//.*|^[^/]|.+/$|(^|.*/)\.\.?(/.*|$)')
947 _contents_split_counts = {
955 	def __init__(self, cat, pkg, myroot, mysettings, treetype=None,
956 		vartree=None, blockers=None):
958 		Creates a DBlink object for a given CPV.
959 		The given CPV may not be present in the database already.
963 		@param pkg: Package (PV)
965 		@param myroot: Typically ${ROOT}
966 		@type myroot: String (Path)
967 		@param mysettings: Typically portage.config
968 		@type mysettings: An instance of portage.config
969 		@param treetype: one of ['porttree','bintree','vartree']
970 		@type treetype: String
971 		@param vartree: an instance of vartree corresponding to myroot.
972 		@type vartree: vartree
# self.cat / self.pkg are assigned on elided lines before this point.
977 		self.mycpv = self.cat + "/" + self.pkg
978 		self.mysplit = list(catpkgsplit(self.mycpv)[1:])
979 		self.mysplit[0] = "%s/%s" % (self.cat, self.mysplit[0])
980 		self.treetype = treetype
# Default to the global vartree for this root when none was supplied.
982 			from portage import db
983 			vartree = db[myroot]["vartree"]
984 		self.vartree = vartree
985 		self._blockers = blockers
# Canonical vdb locations; dbtmpdir is the transient -MERGING- staging dir
# used while an entry is being written.
987 		self.dbroot = normalize_path(os.path.join(myroot, VDB_PATH))
988 		self.dbcatdir = self.dbroot+"/"+cat
989 		self.dbpkgdir = self.dbcatdir+"/"+pkg
990 		self.dbtmpdir = self.dbcatdir+"/-MERGING-"+pkg
991 		self.dbdir = self.dbpkgdir
993 		self._lock_vdb = None
995 		self.settings = mysettings
# NOTE(review): comparing a config object to the int 1 -- presumably a
# legacy sentinel for "use global settings"; the branch body is elided.
996 		if self.settings == 1:
# CONFIG_PROTECT(_MASK) handling is delegated to a ConfigProtect helper.
1000 		protect_obj = ConfigProtect(myroot,
1001 			mysettings.get("CONFIG_PROTECT","").split(),
1002 			mysettings.get("CONFIG_PROTECT_MASK","").split())
1003 		self.updateprotect = protect_obj.updateprotect
1004 		self.isprotected = protect_obj.isprotected
1005 		self._installed_instance = None
1006 		self.contentscache = None
1007 		self._contents_inodes = None
1008 		self._contents_basenames = None
1012 raise AssertionError("Lock already held.")
1013 # At least the parent needs to exist for the lock file.
1014 ensure_dirs(self.dbroot)
1015 self._lock_vdb = lockdir(self.dbroot)
1019 unlockdir(self._lock_vdb)
1020 self._lock_vdb = None
1023 "return path to location of db information (for >>> informational display)"
1027 "does the db entry exist? boolean."
1028 return os.path.exists(self.dbdir)
1032 Remove this entry from the database
1034 if not os.path.exists(self.dbdir):
1037 for x in os.listdir(self.dbdir):
1038 os.unlink(self.dbdir+"/"+x)
1039 os.rmdir(self.dbdir)
1041 print "!!! Unable to remove db entry for this package."
1042 print "!!! It is possible that a directory is in this one. Portage will still"
1043 print "!!! register this package as installed as long as this directory exists."
1044 print "!!! You may delete this directory with 'rm -Rf "+self.dbdir+"'"
1049 # Due to mtime granularity, mtime checks do not always properly
1050 # invalidate vardbapi caches.
1051 self.vartree.dbapi.mtdircache.pop(self.cat, None)
1052 self.vartree.dbapi.matchcache.pop(self.cat, None)
1053 self.vartree.dbapi.cpcache.pop(self.mysplit[0], None)
1055 	def clearcontents(self):
1057 		For a given db entry (self), erase the CONTENTS values.
# Deletes only the CONTENTS file; the in-memory contentscache is not
# invalidated here (see _clear_contents_cache).
1059 		if os.path.exists(self.dbdir+"/CONTENTS"):
1060 			os.unlink(self.dbdir+"/CONTENTS")
1062 	def _clear_contents_cache(self):
# Drop all memoized CONTENTS-derived state so the next getcontents() /
# lookup re-reads from disk.
1063 		self.contentscache = None
1064 		self._contents_inodes = None
1065 		self._contents_basenames = None
1067 	def getcontents(self):
1069 		Get the installed files of a given package (aka what that package installed)
# Parses the CONTENTS file into pkgfiles: {abs_path: [type, ...]} and
# memoizes the result in self.contentscache.  NOTE(review): excerpt is
# elided -- pkgfiles init, several returns and continue statements are
# not visible here.
1071 		contents_file = os.path.join(self.dbdir, "CONTENTS")
1072 		if self.contentscache is not None:
1073 			return self.contentscache
1076 			myc = open(contents_file,"r")
1077 		except EnvironmentError, e:
# Missing CONTENTS is treated as an empty file list; other errors propagate.
1078 			if e.errno != errno.ENOENT:
1081 			self.contentscache = pkgfiles
1083 		mylines = myc.readlines()
# Hoist attribute lookups used in the parse loop.
1086 		normalize_needed = self._normalize_needed
1087 		contents_split_counts = self._contents_split_counts
1088 		myroot = self.myroot
1089 		if myroot == os.path.sep:
1093 		for pos, line in enumerate(mylines):
1094 			if null_byte in line:
1095 				# Null bytes are a common indication of corruption.
1096 				errors.append((pos + 1, "Null byte found in CONTENTS entry"))
1098 			line = line.rstrip("\n")
1099 			# Split on " " so that even file paths that
1100 			# end with spaces can be handled.
1101 			mydat = line.split(" ")
1102 			entry_type = mydat[0] # empty string if line is empty
1103 			correct_split_count = contents_split_counts.get(entry_type)
1104 			if correct_split_count and len(mydat) > correct_split_count:
1105 				# Apparently file paths contain spaces, so reassemble
1106 				# the split have the correct_split_count.
1107 				newsplit = [entry_type]
1108 				spaces_total = len(mydat) - correct_split_count
1109 				if entry_type == "sym":
# Locate the "->" separator between the sym's path and its target; failure
# to find it marks the line as unrecognized.
1111 						splitter = mydat.index("->", 2, len(mydat) - 2)
1113 						errors.append((pos + 1, "Unrecognized CONTENTS entry"))
1115 					spaces_in_path = splitter - 2
1116 					spaces_in_target = spaces_total - spaces_in_path
1117 					newsplit.append(" ".join(mydat[1:splitter]))
1118 					newsplit.append("->")
1119 					target_end = splitter + spaces_in_target + 2
1120 					newsplit.append(" ".join(mydat[splitter + 1:target_end]))
1121 					newsplit.extend(mydat[target_end:])
1123 					path_end = spaces_total + 2
1124 					newsplit.append(" ".join(mydat[1:path_end]))
1125 					newsplit.extend(mydat[path_end:])
1128 			# we do this so we can remove from non-root filesystems
1129 			# (use the ROOT var to allow maintenance on other partitions)
1131 				if normalize_needed.match(mydat[1]):
1132 					mydat[1] = normalize_path(mydat[1])
1133 					if not mydat[1].startswith(os.path.sep):
1134 						mydat[1] = os.path.sep + mydat[1]
# Re-anchor the recorded path under the active ROOT.
1136 					mydat[1] = os.path.join(myroot, mydat[1].lstrip(os.path.sep))
1137 				if mydat[0] == "obj":
1138 					#format: type, mtime, md5sum
1139 					pkgfiles[mydat[1]] = [mydat[0], mydat[3], mydat[2]]
1140 				elif mydat[0] == "dir":
1142 					pkgfiles[mydat[1]] = [mydat[0]]
1143 				elif mydat[0] == "sym":
1144 					#format: type, mtime, dest
1145 					pkgfiles[mydat[1]] = [mydat[0], mydat[4], mydat[3]]
1146 				elif mydat[0] == "dev":
1148 					pkgfiles[mydat[1]] = [mydat[0]]
1149 				elif mydat[0]=="fif":
1151 					pkgfiles[mydat[1]] = [mydat[0]]
1153 					errors.append((pos + 1, "Unrecognized CONTENTS entry"))
1154 			except (KeyError, IndexError):
1155 				errors.append((pos + 1, "Unrecognized CONTENTS entry"))
# Report all accumulated parse errors, then cache and (on an elided line)
# return pkgfiles.
1157 			writemsg("!!! Parse error in '%s'\n" % contents_file, noiselevel=-1)
1158 			for pos, e in errors:
1159 				writemsg("!!! line %d: %s\n" % (pos, e), noiselevel=-1)
1160 		self.contentscache = pkgfiles
	def unmerge(self, pkgfiles=None, trimworld=1, cleanup=1,
		ldpath_mtimes=None, others_in_slot=None):
		"""
		Unmerges a given package (CPV).

		Runs the pkg_prerm phase, removes the package's files from the
		live filesystem (_unmerge_pkgfiles), updates the preserved-libs
		registry, runs pkg_postrm and cleanrm, and finishes with
		env_update.

		@param pkgfiles: files to unmerge (generally self.getcontents() )
		@type pkgfiles: Dictionary
		@param trimworld: Remove CPV from world file if True, not if False
		@type trimworld: Boolean
		@param cleanup: cleanup to pass to doebuild (see doebuild)
		@type cleanup: Boolean
		@param ldpath_mtimes: mtimes to pass to env_update (see env_update)
		@type ldpath_mtimes: Dictionary
		@param others_in_slot: all dblink instances in this slot, excluding self
		@type others_in_slot: list
		@rtype: Integer
		@returns:
		1. os.EX_OK if everything went well.
		2. return code of the failed phase (for prerm, postrm, cleanrm)

		The caller must ensure that lockdb() and unlockdb() are called
		before and after this method.
		"""
		# NOTE(review): several lines of this method were elided in the
		# reviewed excerpt; minimal scaffolding was restored to keep the
		# method coherent — verify against VCS history.

		# Invalidate the category cache since this unmerge may remove the
		# last package in a category.
		if self.vartree.dbapi._categories is not None:
			self.vartree.dbapi._categories = None
		# When others_in_slot is supplied, the security check has already been
		# done for this slot, so it shouldn't be repeated until the next
		# replacement or unmerge operation.
		if others_in_slot is None:
			slot = self.vartree.dbapi.aux_get(self.mycpv, ["SLOT"])[0]
			slot_matches = self.vartree.dbapi.match(
				"%s:%s" % (dep_getkey(self.mycpv), slot))
			others_in_slot = []
			for cur_cpv in slot_matches:
				if cur_cpv == self.mycpv:
					continue
				others_in_slot.append(dblink(self.cat, catsplit(cur_cpv)[1],
					self.vartree.root, self.settings, vartree=self.vartree))
			retval = self._security_check([self] + others_in_slot)
			if retval:
				return retval

		contents = self.getcontents()
		# Now, don't assume that the name of the ebuild is the same as the
		# name of the dir; the package may have been moved.
		myebuildpath = None
		ebuild_phase = "prerm"
		mystuff = listdir(self.dbdir, EmptyOnError=1)
		for x in mystuff:
			if x.endswith(".ebuild"):
				myebuildpath = os.path.join(self.dbdir, self.pkg + ".ebuild")
				if x[:-7] != self.pkg:
					# Clean up after vardbapi.move_ent() breakage in
					# portage versions before 2.1.2
					os.rename(os.path.join(self.dbdir, x), myebuildpath)
					write_atomic(os.path.join(self.dbdir, "PF"), self.pkg+"\n")
				break

		self.settings.setcpv(self.mycpv, mydb=self.vartree.dbapi)
		if myebuildpath:
			try:
				doebuild_environment(myebuildpath, "prerm", self.myroot,
					self.settings, 0, 0, self.vartree.dbapi)
			except UnsupportedAPIException, e:
				# Sometimes this happens due to corruption of the EAPI file.
				writemsg("!!! FAILED prerm: %s\n" % \
					os.path.join(self.dbdir, "EAPI"), noiselevel=-1)
				writemsg("%s\n" % str(e), noiselevel=-1)
				# Fall back to skipping the ebuild phases entirely.
				myebuildpath = None
			catdir = os.path.dirname(self.settings["PORTAGE_BUILDDIR"])
			ensure_dirs(os.path.dirname(catdir),
				uid=portage_uid, gid=portage_gid, mode=070, mask=0)
		builddir_lock = None
		catdir_lock = None
		try:
			if myebuildpath:
				catdir_lock = lockdir(catdir)
				ensure_dirs(catdir,
					uid=portage_uid, gid=portage_gid,
					mode=070, mask=0)
				builddir_lock = lockdir(
					self.settings["PORTAGE_BUILDDIR"])
				try:
					unlockdir(catdir_lock)
				finally:
					catdir_lock = None
				# Eventually, we'd like to pass in the saved ebuild env here...
				retval = doebuild(myebuildpath, "prerm", self.myroot,
					self.settings, cleanup=cleanup, use_cache=0,
					mydbapi=self.vartree.dbapi, tree="vartree",
					vartree=self.vartree)
				# XXX: Decide how to handle failures here.
				if retval != os.EX_OK:
					writemsg("!!! FAILED prerm: %s\n" % retval, noiselevel=-1)
					return retval

			self._unmerge_pkgfiles(pkgfiles, others_in_slot)

			# Remove the registration of preserved libs for this pkg instance
			plib_registry = self.vartree.dbapi.plib_registry
			plib_registry.unregister(self.mycpv, self.settings["SLOT"],
				self.vartree.dbapi.cpv_counter(self.mycpv))

			if myebuildpath:
				ebuild_phase = "postrm"
				retval = doebuild(myebuildpath, "postrm", self.myroot,
					self.settings, use_cache=0, tree="vartree",
					mydbapi=self.vartree.dbapi, vartree=self.vartree)

				# XXX: Decide how to handle failures here.
				if retval != os.EX_OK:
					writemsg("!!! FAILED postrm: %s\n" % retval, noiselevel=-1)
					return retval

			# regenerate reverse NEEDED map
			self.vartree.dbapi.linkmap.rebuild()

			# remove preserved libraries that don't have any consumers left
			# FIXME: this code is quite ugly and can likely be optimized in several ways
			plib_dict = plib_registry.getPreservedLibs()
			for cpv in plib_dict:
				plib_dict[cpv].sort()
				# for the loop below to work correctly, we need all
				# symlinks to come before the actual files, such that
				# the recorded symlinks (sonames) will be resolved into
				# their real target before the object is found not to be
				# in the reverse NEEDED map
				def symlink_compare(x, y):
					if os.path.islink(x):
						if os.path.islink(y):
							return 0
						else:
							return -1
					elif os.path.islink(y):
						return 1
					else:
						return 0

				plib_dict[cpv].sort(symlink_compare)
				unlink_list = []
				for f in plib_dict[cpv]:
					if not os.path.exists(f):
						continue
					consumers = self.vartree.dbapi.linkmap.findConsumers(f)
					if not consumers:
						# No remaining consumers: safe to remove.
						unlink_list.append(f)
					else:
						# Only keep the lib if some consumer is outside
						# the contents of this (now unmerged) package.
						keep = False
						for c in consumers:
							if c not in self.getcontents():
								keep = True
								break
						if not keep:
							unlink_list.append(f)
				for obj in unlink_list:
					try:
						if os.path.islink(obj):
							obj_type = "sym"
						else:
							obj_type = "obj"
						os.unlink(obj)
						writemsg_stdout("<<< !needed %s %s\n" % (obj_type, obj))
					except OSError, e:
						if e.errno == errno.ENOENT:
							# Already gone; nothing to do.
							pass
						else:
							raise
			plib_registry.pruneNonExisting()

		finally:
			if builddir_lock:
				try:
					if myebuildpath:
						if retval != os.EX_OK:
							# Report the failed phase via elog so the user
							# gets actionable recovery instructions.
							msg_lines = []
							msg = ("The '%s' " % ebuild_phase) + \
							("phase of the '%s' package " % self.mycpv) + \
							("has failed with exit value %s." % retval)
							from textwrap import wrap
							msg_lines.extend(wrap(msg, 72))
							msg_lines.append("")

							ebuild_name = os.path.basename(myebuildpath)
							ebuild_dir = os.path.dirname(myebuildpath)
							msg = "The problem occurred while executing " + \
							("the ebuild file named '%s' " % ebuild_name) + \
							("located in the '%s' directory. " \
							% ebuild_dir) + \
							"If necessary, manually remove " + \
							"the environment.bz2 file and/or the " + \
							"ebuild file located in that directory."
							msg_lines.extend(wrap(msg, 72))
							msg_lines.append("")

							msg = "Removal " + \
							"of the environment.bz2 file is " + \
							"preferred since it may allow the " + \
							"removal phases to execute successfully. " + \
							"The ebuild will be " + \
							"sourced and the eclasses " + \
							"from the current portage tree will be used " + \
							"when necessary. Removal of " + \
							"the ebuild file will cause the " + \
							"pkg_prerm() and pkg_postrm() removal " + \
							"phases to be skipped entirely."
							msg_lines.extend(wrap(msg, 72))

							from portage.elog.messages import eerror
							for l in msg_lines:
								eerror(l, phase=ebuild_phase, key=self.mycpv)

						# process logs created during pre/postrm
						elog_process(self.mycpv, self.settings, phasefilter=filter_unmergephases)
						if retval == os.EX_OK:
							doebuild(myebuildpath, "cleanrm", self.myroot,
								self.settings, tree="vartree",
								mydbapi=self.vartree.dbapi,
								vartree=self.vartree)
				finally:
					unlockdir(builddir_lock)
			try:
				if myebuildpath and not catdir_lock:
					# Lock catdir for removal if empty.
					catdir_lock = lockdir(catdir)
			finally:
				if catdir_lock:
					try:
						os.rmdir(catdir)
					except OSError, e:
						# Ignore "not empty" / races; anything else is real.
						if e.errno not in (errno.ENOENT,
							errno.ENOTEMPTY, errno.EEXIST):
							raise
						del e
					unlockdir(catdir_lock)
		env_update(target_root=self.myroot, prev_mtimes=ldpath_mtimes,
			contents=contents, env=self.settings.environ())
		return os.EX_OK
	def _unmerge_pkgfiles(self, pkgfiles, others_in_slot):
		"""
		Unmerges the contents of a package from the liveFS
		Removes the VDB entry for self

		@param pkgfiles: typically self.getcontents()
		@type pkgfiles: Dictionary { filename: [ 'type', '?', 'md5sum' ] }
		@param others_in_slot: all dblink instances in this slot, excluding self
		@type others_in_slot: list
		@rtype: None
		"""
		# NOTE(review): several lines of this method were elided in the
		# reviewed excerpt; minimal scaffolding was restored to keep the
		# method coherent — verify against VCS history.

		if not pkgfiles:
			writemsg_stdout("No package files given... Grabbing a set.\n")
			pkgfiles = self.getcontents()

		if others_in_slot is None:
			others_in_slot = []
			slot = self.vartree.dbapi.aux_get(self.mycpv, ["SLOT"])[0]
			slot_matches = self.vartree.dbapi.match(
				"%s:%s" % (dep_getkey(self.mycpv), slot))
			for cur_cpv in slot_matches:
				if cur_cpv == self.mycpv:
					continue
				others_in_slot.append(dblink(self.cat, catsplit(cur_cpv)[1],
					self.vartree.root, self.settings,
					vartree=self.vartree))
		dest_root = normalize_path(self.vartree.root).rstrip(os.path.sep) + \
			os.path.sep
		dest_root_len = len(dest_root) - 1

		# Config-protect "memory": md5sums of previously-merged config files.
		conf_mem_file = os.path.join(dest_root, CONFIG_MEMORY_FILE)
		cfgfiledict = grabdict(conf_mem_file)
		stale_confmem = []

		unmerge_orphans = "unmerge-orphans" in self.settings.features

		if pkgfiles:
			self.updateprotect()
			mykeys = pkgfiles.keys()
			# Reverse-sorted so deeper paths are handled before their parents.
			mykeys.sort()
			mykeys.reverse()

			#process symlinks second-to-last, directories last.
			mydirs = []
			ignored_unlink_errnos = (
				errno.EBUSY, errno.ENOENT,
				errno.ENOTDIR, errno.EISDIR)
			ignored_rmdir_errnos = (
				errno.EEXIST, errno.ENOTEMPTY,
				errno.EBUSY, errno.ENOENT,
				errno.ENOTDIR, errno.EISDIR)
			modprotect = os.path.join(self.vartree.root, "lib/modules/")

			def unlink(file_name, lstatobj):
				# Remove one file, temporarily clearing BSD file flags on it
				# and its parent directory when necessary.
				if bsd_chflags:
					if lstatobj.st_flags != 0:
						bsd_chflags.lchflags(file_name, 0)
					parent_name = os.path.dirname(file_name)
					# Use normal stat/chflags for the parent since we want to
					# follow any symlinks to the real parent directory.
					pflags = os.stat(parent_name).st_flags
					if pflags != 0:
						bsd_chflags.chflags(parent_name, 0)
				try:
					if not stat.S_ISLNK(lstatobj.st_mode):
						# Remove permissions to ensure that any hardlinks to
						# suid/sgid files are rendered harmless.
						os.chmod(file_name, 0)
					os.unlink(file_name)
				finally:
					if bsd_chflags and pflags != 0:
						# Restore the parent flags we saved before unlinking
						bsd_chflags.chflags(parent_name, pflags)

			def show_unmerge(zing, desc, file_type, file_name):
				# One line of user-visible unmerge progress output.
				writemsg_stdout("%s %s %s %s\n" % \
					(zing, desc.ljust(8), file_type, file_name))
			for objkey in mykeys:
				obj = normalize_path(objkey)
				file_data = pkgfiles[objkey]
				file_type = file_data[0]
				statobj = None
				try:
					statobj = os.stat(obj)
				except OSError:
					pass
				lstatobj = None
				try:
					lstatobj = os.lstat(obj)
				except (OSError, AttributeError):
					pass
				islink = lstatobj is not None and stat.S_ISLNK(lstatobj.st_mode)
				if lstatobj is None:
					show_unmerge("---", "!found", file_type, obj)
					continue
				if obj.startswith(dest_root):
					relative_path = obj[dest_root_len:]
					if not others_in_slot and \
						relative_path in cfgfiledict:
						# No replacement instance: config memory is stale.
						stale_confmem.append(relative_path)
					is_owned = False
					for dblnk in others_in_slot:
						if dblnk.isowner(relative_path, dest_root):
							is_owned = True
							break
					if is_owned:
						# A new instance of this package claims the file, so
						# don't unmerge it.
						show_unmerge("---", "replaced", file_type, obj)
						continue
				# next line includes a tweak to protect modules from being unmerged,
				# but we don't protect modules from being overwritten if they are
				# upgraded. We effectively only want one half of the config protection
				# functionality for /lib/modules. For portage-ng both capabilities
				# should be able to be independently specified.
				if obj.startswith(modprotect):
					show_unmerge("---", "cfgpro", file_type, obj)
					continue

				# Don't unlink symlinks to directories here since that can
				# remove /lib and /usr/lib symlinks.
				if unmerge_orphans and \
					lstatobj and not stat.S_ISDIR(lstatobj.st_mode) and \
					not (islink and statobj and stat.S_ISDIR(statobj.st_mode)) and \
					not self.isprotected(obj):
					try:
						unlink(obj, lstatobj)
					except EnvironmentError, e:
						if e.errno not in ignored_unlink_errnos:
							raise
						del e
					show_unmerge("<<<", "", file_type, obj)
					continue

				lmtime = str(lstatobj[stat.ST_MTIME])
				# Skip files modified since merge (dir/fifo/dev carry no mtime).
				if (pkgfiles[objkey][0] not in ("dir", "fif", "dev")) and (lmtime != pkgfiles[objkey][1]):
					show_unmerge("---", "!mtime", file_type, obj)
					continue

				if pkgfiles[objkey][0] == "dir":
					if statobj is None or not stat.S_ISDIR(statobj.st_mode):
						show_unmerge("---", "!dir", file_type, obj)
						continue
					# Directories are removed last, after all files.
					mydirs.append(obj)
				elif pkgfiles[objkey][0] == "sym":
					if not islink:
						show_unmerge("---", "!sym", file_type, obj)
						continue
					# Go ahead and unlink symlinks to directories here when
					# they're actually recorded as symlinks in the contents.
					# Normally, symlinks such as /lib -> lib64 are not recorded
					# as symlinks in the contents of a package.  If a package
					# installs something into ${D}/lib/, it is recorded in the
					# contents as a directory even if it happens to correspond
					# to a symlink when it's merged to the live filesystem.
					try:
						unlink(obj, lstatobj)
						show_unmerge("<<<", "", file_type, obj)
					except (OSError, IOError),e:
						if e.errno not in ignored_unlink_errnos:
							raise
						del e
						show_unmerge("!!!", "", file_type, obj)
				elif pkgfiles[objkey][0] == "obj":
					if statobj is None or not stat.S_ISREG(statobj.st_mode):
						show_unmerge("---", "!obj", file_type, obj)
						continue
					mymd5 = None
					try:
						mymd5 = perform_md5(obj, calc_prelink=1)
					except FileNotFound, e:
						# the file has disappeared between now and our stat call
						show_unmerge("---", "!obj", file_type, obj)
						continue

					# string.lower is needed because db entries used to be in upper-case.  The
					# string.lower allows for backwards compatibility.
					if mymd5 != pkgfiles[objkey][2].lower():
						show_unmerge("---", "!md5", file_type, obj)
						continue
					try:
						unlink(obj, lstatobj)
					except (OSError, IOError), e:
						if e.errno not in ignored_unlink_errnos:
							raise
						del e
					show_unmerge("<<<", "", file_type, obj)
				elif pkgfiles[objkey][0] == "fif":
					if not stat.S_ISFIFO(lstatobj[stat.ST_MODE]):
						show_unmerge("---", "!fif", file_type, obj)
						continue
					show_unmerge("---", "", file_type, obj)
				elif pkgfiles[objkey][0] == "dev":
					# Device nodes are never removed automatically.
					show_unmerge("---", "", file_type, obj)

			# Remove directories deepest-first; non-empty ones are skipped.
			mydirs.sort()
			mydirs.reverse()

			for obj in mydirs:
				try:
					if bsd_chflags:
						lstatobj = os.lstat(obj)
						if lstatobj.st_flags != 0:
							bsd_chflags.lchflags(obj, 0)
						parent_name = os.path.dirname(obj)
						# Use normal stat/chflags for the parent since we want to
						# follow any symlinks to the real parent directory.
						pflags = os.stat(parent_name).st_flags
						if pflags != 0:
							bsd_chflags.chflags(parent_name, 0)
					try:
						os.rmdir(obj)
					finally:
						if bsd_chflags and pflags != 0:
							# Restore the parent flags we saved before unlinking
							bsd_chflags.chflags(parent_name, pflags)
					show_unmerge("<<<", "", "dir", obj)
				except EnvironmentError, e:
					if e.errno not in ignored_rmdir_errnos:
						raise
					if e.errno != errno.ENOENT:
						show_unmerge("---", "!empty", "dir", obj)
					del e

		# Remove stale entries from config memory.
		if stale_confmem:
			for filename in stale_confmem:
				del cfgfiledict[filename]
			writedict(cfgfiledict, conf_mem_file)

		#remove self from vartree database so that our own virtual gets zapped if we're the last node
		self.vartree.zap(self.mycpv)
	def isowner(self,filename, destroot):
		"""
		Check if a file belongs to this package. This may
		result in a stat call for the parent directory of
		every installed file, since the inode numbers are
		used to work around the problem of ambiguous paths
		caused by symlinked directories. The results of
		stat calls are cached to optimize multiple calls
		to this method.

		@param filename: path of the file, relative to destroot
		@type filename: String
		@param destroot: root prefix that CONTENTS paths are joined to
		@type destroot: String (Path)
		@rtype: Boolean
		@returns:
		1. True if this package owns the file.
		2. False if this package does not own the file.
		"""
		# NOTE(review): a few lines of this method were elided in the
		# reviewed excerpt; minimal scaffolding was restored — verify
		# against VCS history.
		destfile = normalize_path(
			os.path.join(destroot, filename.lstrip(os.path.sep)))

		pkgfiles = self.getcontents()
		if pkgfiles and destfile in pkgfiles:
			# Exact path match in CONTENTS.
			return True
		if pkgfiles:
			basename = os.path.basename(destfile)
			if self._contents_basenames is None:
				# Lazily build and cache the set of installed basenames.
				self._contents_basenames = set(
					os.path.basename(x) for x in pkgfiles)
			if basename not in self._contents_basenames:
				# This is a shortcut that, in most cases, allows us to
				# eliminate this package as an owner without the need
				# to examine inode numbers of parent directories.
				return False

			# Use stat rather than lstat since we want to follow
			# any symlinks to the real parent directory.
			parent_path = os.path.dirname(destfile)
			try:
				parent_stat = os.stat(parent_path)
			except EnvironmentError, e:
				if e.errno != errno.ENOENT:
					raise
				del e
				return False
			if self._contents_inodes is None:
				# Lazily build and cache a (st_dev, st_ino) -> [parent dirs]
				# map so symlinked directory paths resolve to the same key.
				self._contents_inodes = {}
				parent_paths = set()
				for x in pkgfiles:
					p_path = os.path.dirname(x)
					if p_path in parent_paths:
						continue
					parent_paths.add(p_path)
					try:
						s = os.stat(p_path)
					except OSError:
						pass
					else:
						inode_key = (s.st_dev, s.st_ino)
						# Use lists of paths in case multiple
						# paths reference the same inode.
						p_path_list = self._contents_inodes.get(inode_key)
						if p_path_list is None:
							p_path_list = []
							self._contents_inodes[inode_key] = p_path_list
						if p_path not in p_path_list:
							p_path_list.append(p_path)
			p_path_list = self._contents_inodes.get(
				(parent_stat.st_dev, parent_stat.st_ino))
			if p_path_list:
				for p_path in p_path_list:
					x = os.path.join(p_path, basename)
					if x in pkgfiles:
						return True

		return False
	def _preserve_libs(self, srcroot, destroot, mycontents, counter, inforoot):
		"""
		Copy libraries that the old package instance provided, that are
		still needed by other installed packages, and that the new image
		no longer ships, from destroot into the new image (srcroot), and
		register them in the preserved-libs registry.

		@param srcroot: image directory of the new instance (${D})
		@param destroot: live filesystem root (${ROOT})
		@param mycontents: paths (relative to srcroot) the new image installs
		@param counter: vdb counter value for the new instance
		@param inforoot: vdb entry dir providing NEEDED.ELF.2
		"""
		# NOTE(review): a few lines of this method were elided in the
		# reviewed excerpt; minimal scaffolding was restored — verify
		# against VCS history.

		# read global reverse NEEDED map
		linkmap = self.vartree.dbapi.linkmap
		linkmap.rebuild(include_file=os.path.join(inforoot, "NEEDED.ELF.2"))
		liblist = linkmap.listLibraryObjects()

		# get list of libraries from old package instance
		old_contents = self._installed_instance.getcontents().keys()
		old_libs = set(old_contents).intersection(liblist)

		# get list of libraries from new package instance
		mylibs = set([os.path.join(os.sep, x) for x in mycontents]).intersection(liblist)

		# check which libs are present in the old, but not the new package instance
		candidates = old_libs.difference(mylibs)

		for x in old_contents:
			# Also consider old symlinks whose target is a candidate lib.
			if os.path.islink(x) and os.path.realpath(x) in candidates and x not in mycontents:
				candidates.add(x)

		# ignore any libs that are only internally used by the package
		def has_external_consumers(lib, contents, otherlibs):
			consumers = linkmap.findConsumers(lib)
			contents_without_libs = [x for x in contents if x not in otherlibs]

			# just used by objects that will be autocleaned
			if len(consumers.difference(contents_without_libs)) == 0:
				return False
			# used by objects that are referenced as well, need to check those
			# recursively to break any reference cycles
			elif len(consumers.difference(contents)) == 0:
				otherlibs = set(otherlibs)
				for ol in otherlibs.intersection(consumers):
					if has_external_consumers(ol, contents, otherlibs.difference([lib])):
						return True
				return False
			# used by external objects directly
			else:
				return True

		for lib in list(candidates):
			if not has_external_consumers(lib, old_contents, candidates):
				candidates.remove(lib)
				continue
			# only preserve the lib if there is no other copy to use for each consumer
			keep = False
			for c in linkmap.findConsumers(lib):
				localkeep = True
				providers = linkmap.findProviders(c)

				for soname in providers:
					if lib in providers[soname]:
						# Another provider outside the candidate set (or one
						# shipped by the new image) makes preservation moot.
						for p in providers[soname]:
							if p not in candidates or os.path.exists(os.path.join(srcroot, p.lstrip(os.sep))):
								localkeep = False
								break
						break
				if localkeep:
					keep = True
			if not keep:
				candidates.remove(lib)
				continue

		del mylibs, mycontents, old_contents, liblist

		# inject files that should be preserved into our image dir
		import shutil
		missing_paths = []
		# NOTE(review): candidates is extended while being iterated below
		# (to follow symlink chains) — looks fragile; confirm the iteration
		# semantics against the original source.
		for x in candidates:
			# skip existing files so the 'new' libs aren't overwritten
			if os.path.exists(os.path.join(srcroot, x.lstrip(os.sep))):
				missing_paths.append(x)
				continue
			print "injecting %s into %s" % (x, srcroot)
			if not os.path.exists(os.path.join(destroot, x.lstrip(os.sep))):
				print "%s does not exist so can't be preserved" % x
				missing_paths.append(x)
				continue
			mydir = os.path.join(srcroot, os.path.dirname(x).lstrip(os.sep))
			if not os.path.exists(mydir):
				os.makedirs(mydir)

			# resolve symlinks and extend preserve list
			# NOTE: we're extending the list in the loop to emulate recursion to
			#       also get indirect symlinks
			if os.path.islink(x):
				linktarget = os.readlink(x)
				os.symlink(linktarget, os.path.join(srcroot, x.lstrip(os.sep)))
				if linktarget[0] != os.sep:
					linktarget = os.path.join(os.path.dirname(x), linktarget)
				candidates.add(linktarget)
			else:
				shutil.copy2(os.path.join(destroot, x.lstrip(os.sep)),
					os.path.join(srcroot, x.lstrip(os.sep)))

		preserve_paths = [x for x in candidates if x not in missing_paths]

		del missing_paths, candidates

		# keep track of the libs we preserved
		self.vartree.dbapi.plib_registry.register(self.mycpv, self.settings["SLOT"], counter, preserve_paths)

		del preserve_paths
	def _collision_protect(self, srcroot, destroot, mypkglist, mycontents):
		"""
		Check every path the new image would install against the live
		filesystem and return the paths that already exist but are not
		owned by this package, any package in mypkglist, or covered by
		CONFIG_PROTECT / COLLISION_IGNORE.

		@param srcroot: image directory of the new instance (${D})
		@param destroot: live filesystem root (${ROOT})
		@param mypkglist: other dblink instances whose ownership excuses a file
		@param mycontents: paths (relative to srcroot) the new image installs
		@rtype: List
		@returns: colliding paths (absolute, relative to destroot)
		"""
		# NOTE(review): a few lines of this method were elided in the
		# reviewed excerpt; minimal scaffolding was restored — verify
		# against VCS history.
		collision_ignore = set([normalize_path(myignore) for myignore in \
			self.settings.get("COLLISION_IGNORE", "").split()])

		collisions = []
		destroot = normalize_path(destroot).rstrip(os.path.sep) + \
			os.path.sep
		writemsg_stdout("%s checking %d files for package collisions\n" % \
			(green("*"), len(mycontents)))
		i = 0
		for f in mycontents:
			i += 1
			# Periodic progress output for large packages.
			if i % 1000 == 0:
				writemsg_stdout("%d files checked ...\n" % i)
			dest_path = normalize_path(
				os.path.join(destroot, f.lstrip(os.path.sep)))
			try:
				dest_lstat = os.lstat(dest_path)
			except EnvironmentError, e:
				if e.errno == errno.ENOENT:
					del e
					continue
				elif e.errno == errno.ENOTDIR:
					del e
					# A non-directory is in a location where this package
					# expects to have a directory.
					dest_lstat = None
					parent_path = dest_path
					# Walk up until we find the offending non-directory.
					while len(parent_path) > len(destroot):
						parent_path = os.path.dirname(parent_path)
						try:
							dest_lstat = os.lstat(parent_path)
							break
						except EnvironmentError, e:
							if e.errno != errno.ENOTDIR:
								raise
							del e
					if not dest_lstat:
						raise AssertionError(
							"unable to find non-directory " + \
							"parent for '%s'" % dest_path)
					dest_path = parent_path
					f = os.path.sep + dest_path[len(destroot):]
					if f in collisions:
						continue
				else:
					raise
			if f[0] != "/":
				f = "/" + f
			isowned = False
			for ver in [self] + mypkglist:
				if (ver.isowner(f, destroot) or ver.isprotected(f)):
					isowned = True
					break
			if not isowned:
				stopmerge = True
				if collision_ignore:
					if f in collision_ignore:
						stopmerge = False
					else:
						# COLLISION_IGNORE entries may name whole directories.
						for myignore in collision_ignore:
							if f.startswith(myignore + os.path.sep):
								stopmerge = False
								break
				if stopmerge:
					collisions.append(f)
		return collisions
	def _security_check(self, installed_instances):
		"""
		Scan the contents of the given installed instances for suid/sgid
		regular files that have more hardlinks than this package accounts
		for, and warn about them (a hardlink to a suid binary can keep a
		vulnerable version alive after upgrade).

		@param installed_instances: dblink instances to scan
		@type installed_instances: list
		@rtype: Integer
		@returns: 0 if nothing suspicious was found, 1 otherwise
		"""
		# NOTE(review): a few lines of this method were elided in the
		# reviewed excerpt; minimal scaffolding was restored — verify
		# against VCS history.
		if not installed_instances:
			return 0
		file_paths = set()
		for dblnk in installed_instances:
			file_paths.update(dblnk.getcontents())
		inode_map = {}
		real_paths = set()
		for path in file_paths:
			try:
				s = os.lstat(path)
			except OSError, e:
				if e.errno not in (errno.ENOENT, errno.ENOTDIR):
					raise
				del e
				continue
			if not stat.S_ISREG(s.st_mode):
				# Only regular files can carry suid/sgid payloads.
				continue
			path = os.path.realpath(path)
			if path in real_paths:
				continue
			real_paths.add(path)
			if s.st_nlink > 1 and \
				s.st_mode & (stat.S_ISUID | stat.S_ISGID):
				k = (s.st_dev, s.st_ino)
				inode_map.setdefault(k, []).append((path, s))
		suspicious_hardlinks = []
		for path_list in inode_map.itervalues():
			path, s = path_list[0]
			if len(path_list) == s.st_nlink:
				# All hardlinks seem to be owned by this package.
				continue
			suspicious_hardlinks.append(path_list)
		if not suspicious_hardlinks:
			return 0
		from portage.output import colorize
		prefix = colorize("SECURITY_WARN", "*") + " WARNING: "
		writemsg(prefix + "suid/sgid file(s) " + \
			"with suspicious hardlink(s):\n", noiselevel=-1)
		for path_list in suspicious_hardlinks:
			for path, s in path_list:
				writemsg(prefix + " '%s'\n" % path, noiselevel=-1)
		writemsg(prefix + "See the Gentoo Security Handbook " + \
			"guide for advice on how to proceed.\n", noiselevel=-1)
		return 1
1938 def treewalk(self, srcroot, destroot, inforoot, myebuild, cleanup=0,
1939 mydbapi=None, prev_mtimes=None):
1942 This function does the following:
1944 calls self._preserve_libs if FEATURES=preserve-libs
1945 calls self._collision_protect if FEATURES=collision-protect
1946 calls doebuild(mydo=pkg_preinst)
1947 Merges the package to the livefs
1948 unmerges old version (if required)
1949 calls doebuild(mydo=pkg_postinst)
1953 @param srcroot: Typically this is ${D}
1954 @type srcroot: String (Path)
1955 @param destroot: Path to merge to (usually ${ROOT})
1956 @type destroot: String (Path)
1957 @param inforoot: root of the vardb entry ?
1958 @type inforoot: String (Path)
1959 @param myebuild: path to the ebuild that we are processing
1960 @type myebuild: String (Path)
1961 @param mydbapi: dbapi which is handed to doebuild.
1962 @type mydbapi: portdbapi instance
1963 @param prev_mtimes: { Filename:mtime } mapping for env_update
1964 @type prev_mtimes: Dictionary
1970 secondhand is a list of symlinks that have been skipped due to their target
1971 not existing; we will merge these symlinks at a later time.
1974 srcroot = normalize_path(srcroot).rstrip(os.path.sep) + os.path.sep
1975 destroot = normalize_path(destroot).rstrip(os.path.sep) + os.path.sep
1977 if not os.path.isdir(srcroot):
1978 writemsg("!!! Directory Not Found: D='%s'\n" % srcroot,
1982 inforoot_slot_file = os.path.join(inforoot, "SLOT")
1985 f = open(inforoot_slot_file)
1987 slot = f.read().strip()
1990 except EnvironmentError, e:
1991 if e.errno != errno.ENOENT:
1998 from portage.elog.messages import eerror as _eerror
2001 _eerror(l, phase="preinst", key=self.settings.mycpv)
2003 if slot != self.settings["SLOT"]:
2004 writemsg("!!! WARNING: Expected SLOT='%s', got '%s'\n" % \
2005 (self.settings["SLOT"], slot))
2007 if not os.path.exists(self.dbcatdir):
2008 os.makedirs(self.dbcatdir)
2011 for v in self.vartree.dbapi.cp_list(self.mysplit[0]):
2012 otherversions.append(v.split("/")[1])
2014 # filter any old-style virtual matches
2015 slot_matches = [cpv for cpv in self.vartree.dbapi.match(
2016 "%s:%s" % (cpv_getkey(self.mycpv), slot)) \
2017 if cpv_getkey(cpv) == cpv_getkey(self.mycpv)]
2019 if self.mycpv not in slot_matches and \
2020 self.vartree.dbapi.cpv_exists(self.mycpv):
2021 # handle multislot or unapplied slotmove
2022 slot_matches.append(self.mycpv)
2025 from portage import config
2026 for cur_cpv in slot_matches:
2027 # Clone the config in case one of these has to be unmerged since
2028 # we need it to have private ${T} etc... for things like elog.
2029 others_in_slot.append(dblink(self.cat, catsplit(cur_cpv)[1],
2030 self.vartree.root, config(clone=self.settings),
2031 vartree=self.vartree))
2032 retval = self._security_check(others_in_slot)
2037 # Used by self.isprotected().
2040 for dblnk in others_in_slot:
2041 cur_counter = self.vartree.dbapi.cpv_counter(dblnk.mycpv)
2042 if cur_counter > max_counter:
2043 max_counter = cur_counter
2045 self._installed_instance = max_dblnk
2047 # get current counter value (counter_tick also takes care of incrementing it)
2048 # XXX Need to make this destroot, but it needs to be initialized first. XXX
2049 # XXX bis: leads to some invalidentry() call through cp_all().
2050 # Note: The counter is generated here but written later because preserve_libs
2051 # needs the counter value but has to be before dbtmpdir is made (which
2052 # has to be before the counter is written) - genone
2053 counter = self.vartree.dbapi.counter_tick(self.myroot, mycpv=self.mycpv)
2055 # Save this for unregistering preserved-libs if the merge fails.
2056 self.settings["COUNTER"] = str(counter)
2057 self.settings.backup_changes("COUNTER")
2063 for parent, dirs, files in os.walk(srcroot, onerror=onerror):
2065 file_path = os.path.join(parent, f)
2066 file_mode = os.lstat(file_path).st_mode
2067 if stat.S_ISREG(file_mode):
2068 myfilelist.append(file_path[len(srcroot):])
2069 elif stat.S_ISLNK(file_mode):
2070 # Note: os.walk puts symlinks to directories in the "dirs"
2071 # list and it does not traverse them since that could lead
2072 # to an infinite recursion loop.
2073 mylinklist.append(file_path[len(srcroot):])
2075 # If there are no files to merge, and an installed package in the same
2076 # slot has files, it probably means that something went wrong.
2077 if self.settings.get("PORTAGE_PACKAGE_EMPTY_ABORT") == "1" and \
2078 not myfilelist and not mylinklist and others_in_slot:
2079 installed_files = None
2080 for other_dblink in others_in_slot:
2081 installed_files = other_dblink.getcontents()
2082 if not installed_files:
2084 from textwrap import wrap
2091 msg.extend(wrap(("The '%s' package will not install " + \
2092 "any files, but the currently installed '%s'" + \
2093 " package has the following files: ") % d, wrap_width))
2095 msg.extend(sorted(installed_files))
2097 msg.append("package %s NOT merged" % self.mycpv)
2100 ("Manually run `emerge --unmerge =%s` " % \
2101 other_dblink.mycpv) + "if you really want to " + \
2102 "remove the above files. Set " + \
2103 "PORTAGE_PACKAGE_EMPTY_ABORT=\"0\" in " + \
2104 "/etc/make.conf if you do not want to " + \
2105 "abort in cases like this.",
2111 # Preserve old libs if they are still in use
2112 if slot_matches and "preserve-libs" in self.settings.features:
2113 self._preserve_libs(srcroot, destroot, myfilelist+mylinklist, counter, inforoot)
2115 # check for package collisions
2117 if self._blockers is not None:
2118 # This is only supposed to be called when
2119 # the vdb is locked, like it is here.
2120 blockers = self._blockers()
2121 if blockers is None:
2123 collisions = self._collision_protect(srcroot, destroot,
2124 others_in_slot + blockers, myfilelist + mylinklist)
2126 # Make sure the ebuild environment is initialized and that ${T}/elog
2127 # exists for logging of collision-protect eerror messages.
2128 if myebuild is None:
2129 myebuild = os.path.join(inforoot, self.pkg + ".ebuild")
2130 doebuild_environment(myebuild, "preinst", destroot,
2131 self.settings, 0, 0, mydbapi)
2132 prepare_build_dirs(destroot, self.settings, cleanup)
2135 collision_protect = "collision-protect" in self.settings.features
2136 msg = "This package will overwrite one or more files that" + \
2137 " may belong to other packages (see list below)."
2138 if not collision_protect:
2139 msg += " Add \"collision-protect\" to FEATURES in" + \
2140 " make.conf if you would like the merge to abort" + \
2141 " in cases like this."
2142 if self.settings.get("PORTAGE_QUIET") != "1":
2143 msg += " You can use a command such as" + \
2144 " `portageq owners / <filename>` to identify the" + \
2145 " installed package that owns a file. If portageq" + \
2146 " reports that only one package owns a file then do NOT" + \
2147 " file a bug report. A bug report is only useful if it" + \
2148 " identifies at least two or more packages that are known" + \
2149 " to install the same file(s)." + \
2150 " If a collision occurs and you" + \
2151 " can not explain where the file came from then you" + \
2152 " should simply ignore the collision since there is not" + \
2153 " enough information to determine if a real problem" + \
2154 " exists. Please do NOT file a bug report at" + \
2155 " http://bugs.gentoo.org unless you report exactly which" + \
2156 " two packages install the same file(s). Once again," + \
2157 " please do NOT file a bug report unless you have" + \
2158 " completely understood the above message."
2160 self.settings["EBUILD_PHASE"] = "preinst"
2161 from textwrap import wrap
2163 if collision_protect:
2165 msg.append("package %s NOT merged" % self.settings.mycpv)
2167 msg.append("Detected file collision(s):")
2170 for f in collisions:
2171 msg.append("\t%s" % \
2172 os.path.join(destroot, f.lstrip(os.path.sep)))
2176 if collision_protect:
2179 msg.append("Searching all installed" + \
2180 " packages for file collisions...")
2182 msg.append("Press Ctrl-C to Stop")
2187 for cpv in self.vartree.dbapi.cpv_all():
2188 cat, pkg = catsplit(cpv)
2189 mylink = dblink(cat, pkg, destroot, self.settings,
2190 vartree=self.vartree)
2192 for f in collisions:
2193 if mylink.isowner(f, destroot):
2194 mycollisions.append(f)
2198 msg.append("%s" % cpv)
2199 for f in mycollisions:
2200 msg.append("\t%s" % os.path.join(destroot,
2201 f.lstrip(os.path.sep)))
2204 eerror(["None of the installed" + \
2205 " packages claim the file(s)."])
2208 writemsg_stdout(">>> Merging %s to %s\n" % (self.mycpv, destroot))
2210 # The merge process may move files out of the image directory,
2211 # which causes invalidation of the .installed flag.
2213 os.unlink(os.path.join(
2214 os.path.dirname(normalize_path(srcroot)), ".installed"))
2216 if e.errno != errno.ENOENT:
2220 self.dbdir = self.dbtmpdir
2222 ensure_dirs(self.dbtmpdir)
2224 # run preinst script
2225 a = doebuild(myebuild, "preinst", destroot, self.settings,
2226 use_cache=0, tree=self.treetype, mydbapi=mydbapi,
2227 vartree=self.vartree)
2229 # XXX: Decide how to handle failures here.
2231 writemsg("!!! FAILED preinst: "+str(a)+"\n", noiselevel=-1)
2234 # copy "info" files (like SLOT, CFLAGS, etc.) into the database
2235 for x in listdir(inforoot):
2236 self.copyfile(inforoot+"/"+x)
2238 # write local package counter for recording
2239 lcfile = open(os.path.join(self.dbtmpdir, "COUNTER"),"w")
2240 lcfile.write(str(counter))
2243 # open CONTENTS file (possibly overwriting old one) for recording
2244 outfile = open(os.path.join(self.dbtmpdir, "CONTENTS"),"w")
2246 self.updateprotect()
2248 #if we have a file containing previously-merged config file md5sums, grab it.
2249 conf_mem_file = os.path.join(destroot, CONFIG_MEMORY_FILE)
2250 cfgfiledict = grabdict(conf_mem_file)
2251 if self.settings.has_key("NOCONFMEM"):
2252 cfgfiledict["IGNORE"]=1
2254 cfgfiledict["IGNORE"]=0
2256 # Always behave like --noconfmem is enabled for downgrades
2257 # so that people who don't know about this option are less
2258 # likely to get confused when doing upgrade/downgrade cycles.
2259 pv_split = catpkgsplit(self.mycpv)[1:]
2260 for other in others_in_slot:
2261 if pkgcmp(pv_split, catpkgsplit(other.mycpv)[1:]) < 0:
2262 cfgfiledict["IGNORE"] = 1
2265 # Don't bump mtimes on merge since some application require
2266 # preservation of timestamps. This means that the unmerge phase must
2267 # check to see if file belongs to an installed instance in the same
2271 # set umask to 0 for merging; back up umask, save old one in prevmask (since this is a global change)
2272 prevmask = os.umask(0)
2275 # we do a first merge; this will recurse through all files in our srcroot but also build up a
2276 # "second hand" of symlinks to merge later
2277 if self.mergeme(srcroot, destroot, outfile, secondhand, "", cfgfiledict, mymtime):
2280 # now, it's time for dealing our second hand; we'll loop until we can't merge anymore. The rest are
2281 # broken symlinks. We'll merge them too.
2283 while len(secondhand) and len(secondhand)!=lastlen:
2284 # clear the thirdhand. Anything from our second hand that
2285 # couldn't get merged will be added to thirdhand.
2288 self.mergeme(srcroot, destroot, outfile, thirdhand, secondhand, cfgfiledict, mymtime)
2291 lastlen = len(secondhand)
2293 # our thirdhand now becomes our secondhand. It's ok to throw
2294 # away secondhand since thirdhand contains all the stuff that
2295 # couldn't be merged.
2296 secondhand = thirdhand
2299 # force merge of remaining symlinks (broken or circular; oh well)
2300 self.mergeme(srcroot, destroot, outfile, None, secondhand, cfgfiledict, mymtime)
2305 #if we opened it, close it
2309 # These caches are populated during collision-protect and the data
2310 # they contain is now invalid. It's very important to invalidate
2311 # the contents_inodes cache so that FEATURES=unmerge-orphans
2312 # doesn't unmerge anything that belongs to this package that has
2314 others_in_slot.append(self) # self has just been merged
2315 for dblnk in others_in_slot:
2316 dblnk.contentscache = None
2317 dblnk._contents_inodes = None
2318 dblnk._contents_basenames = None
2320 # If portage is reinstalling itself, remove the old
2321 # version now since we want to use the temporary
2322 # PORTAGE_BIN_PATH that will be removed when we return.
2323 reinstall_self = False
2324 if self.myroot == "/" and \
2325 "sys-apps" == self.cat and \
2326 "portage" == pkgsplit(self.pkg)[0]:
2327 reinstall_self = True
2329 autoclean = self.settings.get("AUTOCLEAN", "yes") == "yes"
2330 for dblnk in list(others_in_slot):
2333 if not (autoclean or dblnk.mycpv == self.mycpv or reinstall_self):
2335 writemsg_stdout(">>> Safely unmerging already-installed instance...\n")
2336 others_in_slot.remove(dblnk) # dblnk will unmerge itself now
2337 dblnk.unmerge(trimworld=0, ldpath_mtimes=prev_mtimes,
2338 others_in_slot=others_in_slot)
2339 # TODO: Check status and abort if necessary.
2341 writemsg_stdout(">>> Original instance of package unmerged safely.\n")
2343 if len(others_in_slot) > 1:
2344 from portage.output import colorize
2345 writemsg_stdout(colorize("WARN", "WARNING:")
2346 + " AUTOCLEAN is disabled. This can cause serious"
2347 + " problems due to overlapping packages.\n")
2349 # We hold both directory locks.
2350 self.dbdir = self.dbpkgdir
2352 _movefile(self.dbtmpdir, self.dbpkgdir, mysettings=self.settings)
2354 # Check for file collisions with blocking packages
2355 # and remove any colliding files from their CONTENTS
2356 # since they now belong to this package.
2357 self._clear_contents_cache()
2358 contents = self.getcontents()
2359 destroot_len = len(destroot) - 1
2360 for blocker in blockers:
2361 blocker_contents = blocker.getcontents()
2363 for filename in blocker_contents:
2364 relative_filename = filename[destroot_len:]
2365 if self.isowner(relative_filename, destroot):
2366 collisions.append(filename)
2369 for filename in collisions:
2370 del blocker_contents[filename]
2371 f = atomic_ofstream(os.path.join(blocker.dbdir, "CONTENTS"))
2372 for filename in sorted(blocker_contents):
2373 entry_data = blocker_contents[filename]
2374 entry_type = entry_data[0]
2375 relative_filename = filename[destroot_len:]
2376 if entry_type == "obj":
2377 entry_type, mtime, md5sum = entry_data
2378 line = "%s %s %s %s\n" % \
2379 (entry_type, relative_filename, md5sum, mtime)
2380 elif entry_type == "sym":
2381 entry_type, mtime, link = entry_data
2382 line = "%s %s -> %s %s\n" % \
2383 (entry_type, relative_filename, link, mtime)
2384 else: # dir, dev, fif
2385 line = "%s %s\n" % (entry_type, relative_filename)
2389 # Due to mtime granularity, mtime checks do not always properly
2390 # invalidate vardbapi caches.
2391 self.vartree.dbapi.mtdircache.pop(self.cat, None)
2392 self.vartree.dbapi.matchcache.pop(self.cat, None)
2393 self.vartree.dbapi.cpcache.pop(self.mysplit[0], None)
2394 contents = self.getcontents()
2396 #write out our collection of md5sums
2397 if cfgfiledict.has_key("IGNORE"):
2398 del cfgfiledict["IGNORE"]
2400 my_private_path = os.path.join(destroot, PRIVATE_PATH)
2401 ensure_dirs(my_private_path, gid=portage_gid, mode=02750, mask=02)
2403 writedict(cfgfiledict, conf_mem_file)
2406 # regenerate reverse NEEDED map
2407 self.vartree.dbapi.linkmap.rebuild()
2410 self.settings["PORTAGE_UPDATE_ENV"] = \
2411 os.path.join(self.dbpkgdir, "environment.bz2")
2412 self.settings.backup_changes("PORTAGE_UPDATE_ENV")
2413 a = doebuild(myebuild, "postinst", destroot, self.settings, use_cache=0,
2414 tree=self.treetype, mydbapi=mydbapi, vartree=self.vartree)
2415 self.settings.pop("PORTAGE_UPDATE_ENV", None)
2417 # XXX: Decide how to handle failures here.
2419 writemsg("!!! FAILED postinst: "+str(a)+"\n", noiselevel=-1)
2423 for v in otherversions:
2424 if pkgcmp(catpkgsplit(self.pkg)[1:], catpkgsplit(v)[1:]) < 0:
2427 #update environment settings, library paths. DO NOT change symlinks.
2428 env_update(makelinks=(not downgrade),
2429 target_root=self.settings["ROOT"], prev_mtimes=prev_mtimes,
2430 contents=contents, env=self.settings.environ())
2432 writemsg_stdout(">>> %s %s\n" % (self.mycpv,"merged."))
	def mergeme(self, srcroot, destroot, outfile, secondhand, stufftomerge, cfgfiledict, thismtime):
		"""
		This function handles actual merging of the package contents to the livefs.
		It also handles config protection.

		@param srcroot: Where are we copying files from (usually ${D})
		@type srcroot: String (Path)
		@param destroot: Typically ${ROOT}
		@type destroot: String (Path)
		@param outfile: File to log operations to
		@type outfile: File Object
		@param secondhand: A set of items to merge in pass two (usually
		or symlinks that point to non-existing files that may get merged later)
		@type secondhand: List
		@param stufftomerge: Either a directory to merge, or a list of items.
		@type stufftomerge: String or List
		@param cfgfiledict: { File:mtime } mapping for config_protected files
		@type cfgfiledict: Dictionary
		@param thismtime: The current time (typically long(time.time())
		@type thismtime: Long
		@rtype: None or Boolean
		"""
		# NOTE(review): this excerpt is missing scattered physical lines
		# (try:/else:/for headers, error exits).  "(gap: ...)" comments below
		# mark where the visible code depends on lines that are not shown;
		# confirm against the complete file before relying on control flow.
		from os.path import sep, join
		# Normalize both roots so each carries exactly one trailing separator.
		srcroot = normalize_path(srcroot).rstrip(sep) + sep
		destroot = normalize_path(destroot).rstrip(sep) + sep

		# this is supposed to merge a list of files.  There will be 2 forms of argument passing.
		if isinstance(stufftomerge, basestring):
			#A directory is specified.  Figure out protection paths, listdir() it and process it.
			mergelist = os.listdir(join(srcroot, stufftomerge))
			offset = stufftomerge
		# (gap: else-branch header for the list form of stufftomerge)
			mergelist = stufftomerge
		# (gap: offset initialization and "for x in mergelist:" loop header)
			mysrc = join(srcroot, offset, x)
			mydest = join(destroot, offset, x)
			# myrealdest is mydest without the $ROOT prefix (makes a difference if ROOT!="/")
			myrealdest = join(sep, offset, x)
			# stat file once, test using S_* macros many times (faster that way)
			# (gap: "try:" header)
			mystat = os.lstat(mysrc)
			# (gap: OSError handler header)
			writemsg(red("!!! ERROR: There appears to be ")+bold("FILE SYSTEM CORRUPTION.")+red(" A file that is listed\n"))
			writemsg(red("!!! as existing is not capable of being stat'd. If you are using an\n"))
			writemsg(red("!!! experimental kernel, please boot into a stable one, force an fsck,\n"))
			writemsg(red("!!! and ensure your filesystem is in a sane state. ")+bold("'shutdown -Fr now'\n"))
			writemsg(red("!!! File: ")+str(mysrc)+"\n", noiselevel=-1)
			writemsg(red("!!! Error: ")+str(e)+"\n", noiselevel=-1)
			# (gap: hard exit after the corruption report)
			except Exception, e:
				# (gap: leading writemsg line)
				writemsg(red("!!! ERROR: An unknown error has occurred during the merge process.\n"))
				writemsg(red("!!! A stat call returned the following error for the following file:"))
				writemsg( "!!! Please ensure that your filesystem is intact, otherwise report\n")
				writemsg( "!!! this as a portage bug at bugs.gentoo.org. Append 'emerge info'.\n")
				writemsg( "!!! File: "+str(mysrc)+"\n", noiselevel=-1)
				writemsg( "!!! Error: "+str(e)+"\n", noiselevel=-1)
				# (gap: hard exit)

			mymode = mystat[stat.ST_MODE]
			# handy variables; mydest is the target object on the live filesystems;
			# mysrc is the source object in the temporary install dir
			# (gap: "try:" header)
			mydstat = os.lstat(mydest)
			mydmode = mydstat.st_mode
			# (gap: OSError handler header)
			if e.errno != errno.ENOENT:
				# (gap: re-raise)
			#dest file doesn't exist
			# (gap: mydstat/mydmode presumably reset to None here -- confirm)

			if stat.S_ISLNK(mymode):
				# we are merging a symbolic link
				myabsto = abssymlink(mysrc)
				if myabsto.startswith(srcroot):
					myabsto = myabsto[len(srcroot):]
				myabsto = myabsto.lstrip(sep)
				myto = os.readlink(mysrc)
				# Strip a leading ${D} from the link target if present.
				if self.settings and self.settings["D"]:
					if myto.startswith(self.settings["D"]):
						myto = myto[len(self.settings["D"]):]
				# myrealto contains the path of the real file to which this symlink points.
				# we can simply test for existence of this file to see if the target has been merged yet
				myrealto = normalize_path(os.path.join(destroot, myabsto))
				# (gap: "destination exists" guard on mydmode)
					if not stat.S_ISLNK(mydmode):
						if stat.S_ISDIR(mydmode):
							# directory in the way: we can't merge a symlink over a directory
							# we won't merge this, continue with next file...
							# (gap: continue)

						if os.path.exists(mysrc) and stat.S_ISDIR(os.stat(mysrc)[stat.ST_MODE]):
							# Kill file blocking installation of symlink to dir #71787
							# (gap: statement removing the blocking file)
						elif self.isprotected(mydest):
							# Use md5 of the target in ${D} if it exists...
							# (gap: "try:" header)
							newmd5 = perform_md5(join(srcroot, myabsto))
							except FileNotFound:
								# Maybe the target is merged already.
								# (gap: inner "try:" header)
								newmd5 = perform_md5(myrealto)
								except FileNotFound:
									# (gap: newmd5 fallback assignment)
							mydest = new_protect_filename(mydest, newmd5=newmd5)

				# if secondhand is None it means we're operating in "force" mode and should not create a second hand.
				if (secondhand != None) and (not os.path.exists(myrealto)):
					# either the target directory doesn't exist yet or the target file doesn't exist -- or
					# the target is a broken symlink.  We will add this file to our "second hand" and merge
					# (gap: remainder of this comment)
					secondhand.append(mysrc[len(srcroot):])
					# (gap: continue)
				# unlinking no longer necessary; "movefile" will overwrite symlinks atomically and correctly
				mymtime = movefile(mysrc, mydest, newmtime=thismtime, sstat=mystat, mysettings=self.settings)
				# (gap: success guard on mymtime)
					writemsg_stdout(">>> %s -> %s\n" % (mydest, myto))
					outfile.write("sym "+myrealdest+" -> "+myto+" "+str(mymtime)+"\n")
				# (gap: else branch for the failure case)
					print "!!! Failed to move file."
					print "!!!", mydest, "->", myto
					# (gap: hard exit)
			elif stat.S_ISDIR(mymode):
				# we are merging a directory
				# (gap: "destination exists" guard on mydmode)
					# destination exists
					# (gap: bsd_chflags availability guard)
						# Save then clear flags on dest.
						dflags = mydstat.st_flags
						# (gap: nonzero-flags guard)
							bsd_chflags.lchflags(mydest, 0)

					if not os.access(mydest, os.W_OK):
						pkgstuff = pkgsplit(self.pkg)
						writemsg("\n!!! Cannot write to '"+mydest+"'.\n", noiselevel=-1)
						writemsg("!!! Please check permissions and directories for broken symlinks.\n")
						writemsg("!!! You may start the merge process again by using ebuild:\n")
						writemsg("!!! ebuild "+self.settings["PORTDIR"]+"/"+self.cat+"/"+pkgstuff[0]+"/"+self.pkg+".ebuild merge\n")
						writemsg("!!! And finish by running this: env-update\n\n")
						# (gap: error return)

					if stat.S_ISLNK(mydmode) or stat.S_ISDIR(mydmode):
						# a symlink to an existing directory will work for us; keep it:
						writemsg_stdout("--- %s/\n" % mydest)
						# (gap: bsd_chflags guard restoring dflags)
							bsd_chflags.lchflags(mydest, dflags)
					# (gap: else branch)
						# a non-directory and non-symlink-to-directory. Won't work for us. Move out of the way.
						if movefile(mydest, mydest+".backup", mysettings=self.settings) is None:
							# (gap: hard exit)
						print "bak", mydest, mydest+".backup"
						#now create our directory
						if self.settings.selinux_enabled():
							# (gap: selinux import)
							sid = selinux.get_sid(mysrc)
							selinux.secure_mkdir(mydest,sid)
						# (gap: else branch creating the directory without selinux)
						# (gap: bsd_chflags guard restoring dflags)
							bsd_chflags.lchflags(mydest, dflags)
						os.chmod(mydest, mystat[0])
						os.chown(mydest, mystat[4], mystat[5])
						writemsg_stdout(">>> %s/\n" % mydest)
				# (gap: else branch)
					#destination doesn't exist
					if self.settings.selinux_enabled():
						# (gap: selinux import)
						sid = selinux.get_sid(mysrc)
						selinux.secure_mkdir(mydest, sid)
					# (gap: else branch creating the directory without selinux)
					os.chmod(mydest, mystat[0])
					os.chown(mydest, mystat[4], mystat[5])
					writemsg_stdout(">>> %s/\n" % mydest)
				outfile.write("dir "+myrealdest+"\n")
				# recurse and merge this directory
				if self.mergeme(srcroot, destroot, outfile, secondhand,
					join(offset, x), cfgfiledict, thismtime):
					# (gap: error return)
			elif stat.S_ISREG(mymode):
				# we are merging a regular file
				mymd5 = perform_md5(mysrc, calc_prelink=1)
				# calculate config file protection stuff
				mydestdir = os.path.dirname(mydest)
				# (gap: moveme/zing/mymtime initialization and mydmode guard)
					# destination file exists
					if stat.S_ISDIR(mydmode):
						# install of destination is blocked by an existing directory with the same name
						# (gap: disable the move)
						writemsg_stdout("!!! %s\n" % mydest)
					elif stat.S_ISREG(mydmode) or (stat.S_ISLNK(mydmode) and os.path.exists(mydest) and stat.S_ISREG(os.stat(mydest)[stat.ST_MODE])):
						# install of destination is blocked by an existing regular file,
						# or by a symlink to an existing regular file;
						# now, config file management may come into play.
						# we only need to tweak mydest if cfg file management is in play.
						if self.isprotected(mydest):
							# we have a protection path; enable config file management.
							destmd5 = perform_md5(mydest, calc_prelink=1)
							if mymd5 == destmd5:
								#file already in place; simply update mtimes of destination
								# (gap: handling for the identical-file case, plus else header)
								if mymd5 == cfgfiledict.get(myrealdest, [None])[0]:
									""" An identical update has previously been
									merged. Skip it unless the user has chosen
									"""
									# (gap: remainder of the original note, e.g. --noconfmem)
									moveme = cfgfiledict["IGNORE"]
									cfgprot = cfgfiledict["IGNORE"]
									# (gap: "if not moveme:" guard)
									mymtime = long(mystat.st_mtime)
								# (gap: else branch enabling the move)
								# (gap: "if moveme:" guard)
									# Merging a new file, so update confmem.
									cfgfiledict[myrealdest] = [mymd5]
								elif destmd5 == cfgfiledict.get(myrealdest, [None])[0]:
									"""A previously remembered update has been
									accepted, so it is removed from confmem."""
									del cfgfiledict[myrealdest]
							# (gap: cfgprot guard)
							mydest = new_protect_filename(mydest, newmd5=mymd5)

				# whether config protection or not, we merge the new file the
				# same way. Unless moveme=0 (blocking directory)
				# (gap: "if moveme:" guard)
				mymtime = movefile(mysrc, mydest, newmtime=thismtime, sstat=mystat, mysettings=self.settings)
				# (gap: failure exit and zing update)
				outfile.write("obj "+myrealdest+" "+mymd5+" "+str(mymtime)+"\n")
				writemsg_stdout("%s %s\n" % (zing,mydest))
			# (gap: else branch)
				# we are merging a fifo or device node
				# (gap: zing initialization and "destination doesn't exist" guard)
					# destination doesn't exist
					if movefile(mysrc, mydest, newmtime=thismtime, sstat=mystat, mysettings=self.settings) != None:
						# (gap: success marker assignment)
					if stat.S_ISFIFO(mymode):
						outfile.write("fif %s\n" % myrealdest)
					# (gap: else branch)
						outfile.write("dev %s\n" % myrealdest)
					writemsg_stdout(zing + " " + mydest + "\n")
	def merge(self, mergeroot, inforoot, myroot, myebuild=None, cleanup=0,
		mydbapi=None, prev_mtimes=None):
		"""
		If portage is reinstalling itself, create temporary
		copies of PORTAGE_BIN_PATH and PORTAGE_PYM_PATH in order
		to avoid relying on the new versions which may be
		incompatible. Register an atexit hook to clean up the
		temporary directories. Pre-load elog modules here since
		we won't be able to later if they get unmerged (happens
		when namespace changes).
		"""
		# Drop the cached category list; this merge may add a new category.
		if self.vartree.dbapi._categories is not None:
			self.vartree.dbapi._categories = None
		# Detect "portage reinstalling itself": live ROOT, category
		# sys-apps, package name portage.
		if self.myroot == "/" and \
			"sys-apps" == self.cat and \
			"portage" == pkgsplit(self.pkg)[0]:
			settings = self.settings
			base_path_orig = os.path.dirname(settings["PORTAGE_BIN_PATH"])
			from tempfile import mkdtemp
			# Make the temp directory inside PORTAGE_TMPDIR since, unlike
			# /tmp, it can't be mounted with the "noexec" option.
			base_path_tmp = mkdtemp("", "._portage_reinstall_.",
				settings["PORTAGE_TMPDIR"])
			from portage.process import atexit_register
			atexit_register(shutil.rmtree, base_path_tmp)
			# (gap: dir_perms definition is not visible in this excerpt,
			# but it is used by the os.chmod calls below -- confirm)
			for subdir in "bin", "pym":
				var_name = "PORTAGE_%s_PATH" % subdir.upper()
				var_orig = settings[var_name]
				var_new = os.path.join(base_path_tmp, subdir)
				# Point the settings at the temporary copy and persist the
				# override for child ebuild processes.
				settings[var_name] = var_new
				settings.backup_changes(var_name)
				shutil.copytree(var_orig, var_new, symlinks=True)
				os.chmod(var_new, dir_perms)
			os.chmod(base_path_tmp, dir_perms)
			# This serves to pre-load the elog modules.
			elog_process(self.mycpv, self.settings,
				phasefilter=filter_mergephases)
		return self._merge(mergeroot, inforoot,
			myroot, myebuild=myebuild, cleanup=cleanup,
			mydbapi=mydbapi, prev_mtimes=prev_mtimes)
	def _merge(self, mergeroot, inforoot, myroot, myebuild=None, cleanup=0,
		mydbapi=None, prev_mtimes=None):
		# Inner merge worker: runs treewalk, rolls back preserved-lib
		# registrations on failure, flushes elog, and cleans the build dir.
		# (gap: retval initialization / db lock acquisition appear to be
		# missing from this excerpt)
		retval = self.treewalk(mergeroot, myroot, inforoot, myebuild,
			cleanup=cleanup, mydbapi=mydbapi, prev_mtimes=prev_mtimes)
		# undo registrations of preserved libraries, bug #210501
		if retval != os.EX_OK:
			self.vartree.dbapi.plib_registry.unregister(self.mycpv, self.settings["SLOT"], self.settings["COUNTER"])
		# Process ebuild logfiles
		elog_process(self.mycpv, self.settings, phasefilter=filter_mergephases)
		# On success, run the ebuild "clean" phase unless FEATURES=noclean.
		if retval == os.EX_OK and "noclean" not in self.settings.features:
			if myebuild is None:
				myebuild = os.path.join(inforoot, self.pkg + ".ebuild")
			doebuild(myebuild, "clean", myroot, self.settings,
				tree=self.treetype, mydbapi=mydbapi, vartree=self.vartree)
		# (gap: lock release and "return retval" appear to be missing from
		# this excerpt)
2766 def getstring(self,name):
2767 "returns contents of a file with whitespace converted to spaces"
2768 if not os.path.exists(self.dbdir+"/"+name):
2770 myfile = open(self.dbdir+"/"+name,"r")
2771 mydata = myfile.read().split()
2773 return " ".join(mydata)
2775 def copyfile(self,fname):
2777 shutil.copyfile(fname,self.dbdir+"/"+os.path.basename(fname))
2779 def getfile(self,fname):
2780 if not os.path.exists(self.dbdir+"/"+fname):
2782 myfile = open(self.dbdir+"/"+fname,"r")
2783 mydata = myfile.read()
2787 def setfile(self,fname,data):
2788 write_atomic(os.path.join(self.dbdir, fname), data)
2790 def getelements(self,ename):
2791 if not os.path.exists(self.dbdir+"/"+ename):
2793 myelement = open(self.dbdir+"/"+ename,"r")
2794 mylines = myelement.readlines()
2797 for y in x[:-1].split():
2802 def setelements(self,mylist,ename):
2803 myelement = open(self.dbdir+"/"+ename,"w")
2805 myelement.write(x+"\n")
2808 def isregular(self):
2809 "Is this a regular package (does it have a CATEGORY file? A dblink can be virtual *and* regular)"
2810 return os.path.exists(os.path.join(self.dbdir, "CATEGORY"))
def tar_contents(contents, root, tar, protect=None, onProgress=None):
	"""Add every path recorded in a CONTENTS mapping to an open tarfile,
	reporting progress through the optional onProgress(maxval, curval)
	callback and emptying files matched by the optional protect predicate.

	NOTE(review): several physical lines (the per-path loop header, the
	try/except around lstat, progress-callback guards, the id_strings
	initialization and live_path setup) are missing from this excerpt;
	"(gap: ...)" comments mark the spots -- confirm against the full file.
	"""
	from portage.util import normalize_path
	# root gets exactly one trailing separator for prefix stripping below.
	root = normalize_path(root).rstrip(os.path.sep) + os.path.sep
	maxval = len(contents)
	# (gap: curval initialization and onProgress guard)
	onProgress(maxval, 0)
	paths = contents.keys()
	# (gap: sort and "for path in paths:" loop header with try:)
	lst = os.lstat(path)
	# (gap: OSError handler header)
	if e.errno != errno.ENOENT:
		# (gap: re-raise; then progress update and continue for vanished paths)
	onProgress(maxval, curval)
	contents_type = contents[path][0]
	if path.startswith(root):
		arcname = path[len(root):]
	# (gap: else branch)
	raise ValueError("invalid root argument: '%s'" % root)
	# (gap: live_path assignment)
	if 'dir' == contents_type and \
		not stat.S_ISDIR(lst.st_mode) and \
		os.path.isdir(live_path):
		# Even though this was a directory in the original ${D}, it exists
		# as a symlink to a directory in the live filesystem. It must be
		# recorded as a real directory in the tar file to ensure that tar
		# can properly extract it's children.
		live_path = os.path.realpath(live_path)
	tarinfo = tar.gettarinfo(live_path, arcname)
	# store numbers instead of real names like tar's --numeric-owner
	tarinfo.uname = id_strings.setdefault(tarinfo.uid, str(tarinfo.uid))
	tarinfo.gname = id_strings.setdefault(tarinfo.gid, str(tarinfo.gid))
	if stat.S_ISREG(lst.st_mode):
		# break hardlinks due to bug #185305
		tarinfo.type = tarfile.REGTYPE
		if protect and protect(path):
			# Create an empty file as a place holder in order to avoid
			# potential collision-protect issues.
			# (gap: tarinfo.size reset)
			tar.addfile(tarinfo)
			# (gap: else branch opening the file and adding its data)
			tar.addfile(tarinfo, f)
	# (gap: else branch for non-regular entries)
		tar.addfile(tarinfo)
	# (gap: onProgress guard)
	onProgress(maxval, curval)