* To avoid excessive IO on each call to LinkageMap.rebuild(), cache
pym/portage/dbapi/vartree.py
1 # Copyright 1998-2007 Gentoo Foundation
2 # Distributed under the terms of the GNU General Public License v2
3 # $Id$
4
5 from portage.checksum import perform_md5
6 from portage.const import CACHE_PATH, CONFIG_MEMORY_FILE, PORTAGE_BIN_PATH, \
7         PRIVATE_PATH, VDB_PATH
8 from portage.data import portage_gid, portage_uid, secpass
9 from portage.dbapi import dbapi
10 from portage.dep import dep_getslot, use_reduce, paren_reduce, isvalidatom, \
11         isjustname, dep_getkey, match_from_list
12 from portage.exception import InvalidAtom, InvalidData, InvalidPackageName, \
13         FileNotFound, PermissionDenied, UnsupportedAPIException
14 from portage.locks import lockdir, unlockdir
15 from portage.output import bold, red, green
16 from portage.update import fixdbentries
17 from portage.util import apply_secpass_permissions, ConfigProtect, ensure_dirs, \
18         writemsg, writemsg_stdout, write_atomic, atomic_ofstream, writedict, \
19         grabfile, grabdict, normalize_path, new_protect_filename, getlibpaths
20 from portage.versions import pkgsplit, catpkgsplit, catsplit, best, pkgcmp
21
22 from portage import listdir, dep_expand, flatten, key_expand, \
23         doebuild_environment, doebuild, env_update, prepare_build_dirs, \
24         abssymlink, movefile, _movefile, bsd_chflags, cpv_getkey
25
26 from portage.elog import elog_process
27 from portage.elog.messages import ewarn
28 from portage.elog.filtering import filter_mergephases, filter_unmergephases
29
30 import os, re, sys, stat, errno, commands, copy, time, subprocess
31 from itertools import izip
32
33 try:
34         import cPickle
35 except ImportError:
36         import pickle as cPickle
37
38 class PreservedLibsRegistry(object):
39         """ This class handles the tracking of preserved library objects """
40         def __init__(self, filename, autocommit=True):
41                 """ @param filename: absolute path for saving the preserved libs records
42                     @type filename: String
43                     @param autocommit: determines if the file is written after every update
44                     @type autocommit: Boolean
45                 """
46                 self._filename = filename
47                 self._autocommit = autocommit
48                 self.load()
49         
50         def load(self):
51                 """ Reload the registry data from file """
52                 try:
53                         self._data = cPickle.load(open(self._filename, "r"))
54                 except IOError, e:
55                         if e.errno == errno.ENOENT:
56                                 self._data = {}
57                         elif e.errno == PermissionDenied.errno:
58                                 raise PermissionDenied(self._filename)
59                         else:
60                                 raise e
61                 
62         def store(self):
63                 """ Store the registry data to file. No need to call this if autocommit
64                     was enabled.
65                 """
66                 f = atomic_ofstream(self._filename)
67                 cPickle.dump(self._data, f)
68                 f.close()
69
70         def register(self, cpv, slot, counter, paths):
71                 """ Register new objects in the registry. If there is a record with the
72                         same packagename (internally derived from cpv) and slot, it is
73                         overwritten with the new data.
74                         @param cpv: package instance that owns the objects
75                         @type cpv: CPV (as String)
76                         @param slot: the value of SLOT of the given package instance
77                         @type slot: String
78                         @param counter: vdb counter value for the package instance
79                         @type counter: Integer
80                         @param paths: absolute paths of objects that got preserved during an update
81                         @type paths: List
82                 """
83                 cp = "/".join(catpkgsplit(cpv)[:2])
84                 cps = cp+":"+slot
85                 if len(paths) == 0 and self._data.has_key(cps) \
86                                 and self._data[cps][0] == cpv and int(self._data[cps][1]) == int(counter):
87                         del self._data[cps]
88                 elif len(paths) > 0:
89                         self._data[cps] = (cpv, counter, paths)
90                 if self._autocommit:
91                         self.store()
92         
93         def unregister(self, cpv, slot, counter):
94                 """ Remove a previous registration of preserved objects for the given package.
95                         @param cpv: package instance whose records should be removed
96                         @type cpv: CPV (as String)
97                         @param slot: the value of SLOT of the given package instance
98                         @type slot: String
99                 """
100                 self.register(cpv, slot, counter, [])
101         
102         def pruneNonExisting(self):
103                 """ Remove all records for objects that no longer exist on the filesystem. """
104                 for cps in self._data.keys():
105                         cpv, counter, paths = self._data[cps]
106                         paths = [f for f in paths if os.path.exists(f)]
107                         if len(paths) > 0:
108                                 self._data[cps] = (cpv, counter, paths)
109                         else:
110                                 del self._data[cps]
111                 if self._autocommit:
112                         self.store()
113         
114         def hasEntries(self):
115                 """ Check if this registry contains any records. """
116                 return len(self._data) > 0
117         
118         def getPreservedLibs(self):
119                 """ Return a mapping of packages->preserved objects.
120                         @returns mapping of package instances to preserved objects
121                         @rtype Dict cpv->list-of-paths
122                 """
123                 rValue = {}
124                 for cps in self._data:
125                         rValue[self._data[cps][0]] = self._data[cps][2]
126                 return rValue
127
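# Illustrative PreservedLibsRegistry usage (a sketch; the registry path and
# package data below are made up for demonstration):
#
#     registry = PreservedLibsRegistry("/tmp/preserved_libs_registry",
#             autocommit=False)
#     registry.register("x11-libs/libfoo-1.2", "0", 1234,
#             ["/usr/lib/libfoo.so.1"])
#     registry.pruneNonExisting()         # drop records whose files are gone
#     if registry.hasEntries():
#             registry.store()            # required because autocommit=False
#     preserved = registry.getPreservedLibs()  # {cpv: [paths]}
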
128 class LinkageMap(object):
129         def __init__(self, vardbapi):
130                 self._dbapi = vardbapi
131                 self._libs = {}
132                 self._obj_properties = {}
133                 self._defpath = getlibpaths()
134         
135         def rebuild(self, include_file=None):
136                 libs = {}
137                 obj_properties = {}
138                 lines = []
139                 for cpv in self._dbapi.cpv_all():
140                         lines += self._dbapi.aux_get(cpv, ["NEEDED.ELF.2"])[0].split('\n')
141                 # Cache NEEDED.* files to avoid doing excessive IO for every rebuild.
142                 self._dbapi.flush_cache()
143                 
144                 if include_file:
145                         lines += grabfile(include_file)
146                 
147                 # have to call scanelf for preserved libs here as they aren't 
148                 # registered in NEEDED.ELF.2 files
149                 if self._dbapi.plib_registry and self._dbapi.plib_registry.getPreservedLibs():
150                         args = ["/usr/bin/scanelf", "-yqF", "%a;%F;%S;%r;%n"]
151                         for items in self._dbapi.plib_registry.getPreservedLibs().values():
152                                 args += items
153                         proc = subprocess.Popen(args, stdout=subprocess.PIPE)
154                         output = [l[3:] for l in proc.communicate()[0].split("\n")]
155                         lines += output
156
157                 for l in lines:
158                         if l.strip() == "":
159                                 continue
160                         fields = l.strip("\n").split(";")
161                         if len(fields) < 5:
162                                 print "Error", fields
163                                 # insufficient field length
164                                 continue
165                         arch = fields[0]
166                         obj = os.path.realpath(fields[1])
167                         soname = fields[2]
168                         path = fields[3].replace("${ORIGIN}", os.path.dirname(obj)).replace("$ORIGIN", os.path.dirname(obj)).split(":")
169                         needed = fields[4].split(",")
170                         if soname:
171                                 libs.setdefault(soname, {arch: {"providers": [], "consumers": []}})
172                                 libs[soname].setdefault(arch, {"providers": [], "consumers": []})
173                                 libs[soname][arch]["providers"].append(obj)
174                         for x in needed:
175                                 libs.setdefault(x, {arch: {"providers": [], "consumers": []}})
176                                 libs[x].setdefault(arch, {"providers": [], "consumers": []})
177                                 libs[x][arch]["consumers"].append(obj)
178                         obj_properties[obj] = (arch, needed, path, soname)
179                 
180                 self._libs = libs
181                 self._obj_properties = obj_properties
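                # The mappings built above have this shape (a sketch; values
                # are illustrative):
                #
                #   self._libs = {soname: {arch: {"providers": [obj, ...],
                #                                 "consumers": [obj, ...]}}, ...}
                #   self._obj_properties = {obj: (arch, needed, path, soname), ...}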
182
183         def listLibraryObjects(self):
184                 rValue = []
185                 if not self._libs:
186                         self.rebuild()
187                 for soname in self._libs:
188                         for arch in self._libs[soname]:
189                                 rValue.extend(self._libs[soname][arch]["providers"])
190                 return rValue
191         
192         def findProviders(self, obj):
193                 if not self._libs:
194                         self.rebuild()
195                 rValue = {}
196                 if obj not in self._obj_properties:
197                         obj = os.path.realpath(obj)
198                         if obj not in self._obj_properties:
199                                 raise KeyError("%s not in object list" % obj)
200                 arch, needed, path, soname = self._obj_properties[obj]
201                 path.extend(self._defpath)
202                 path = [os.path.realpath(x) for x in path]
203                 for x in needed:
204                         rValue[x] = set()
205                         if x not in self._libs or arch not in self._libs[x]:
206                                 continue
207                         for y in self._libs[x][arch]["providers"]:
208                                 if x[0] == os.sep and os.path.realpath(x) == os.path.realpath(y):
209                                         rValue[x].add(y)
210                                 elif os.path.realpath(os.path.dirname(y)) in path:
211                                         rValue[x].add(y)
212                 return rValue
213         
214         def findConsumers(self, obj):
215                 if not self._libs:
216                         self.rebuild()
217                 if obj not in self._obj_properties:
218                         obj = os.path.realpath(obj)
219                         if obj not in self._obj_properties:
220                                 raise KeyError("%s not in object list" % obj)
221                 rValue = set()
222                 for soname in self._libs:
223                         for arch in self._libs[soname]:
224                                 if obj in self._libs[soname][arch]["providers"]:
225                                         for x in self._libs[soname][arch]["consumers"]:
226                                                 path = self._obj_properties[x][2]
227                                                 path = [os.path.realpath(y) for y in path+self._defpath]
228                                                 if soname[0] == os.sep and os.path.realpath(soname) == os.path.realpath(obj):
229                                                         rValue.add(x)
230                                                 elif os.path.realpath(os.path.dirname(obj)) in path:
231                                                         rValue.add(x)
232                 return rValue
233                                         
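# Illustrative LinkageMap usage (a sketch; "vardb" stands for any vardbapi
# instance and the object paths are hypothetical):
#
#     linkmap = LinkageMap(vardb)
#     linkmap.rebuild()
#     providers = linkmap.findProviders("/usr/bin/foo")          # {soname: set(paths)}
#     consumers = linkmap.findConsumers("/usr/lib/libfoo.so.1")  # set(paths)
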
234 class vardbapi(dbapi):
235
236         _excluded_dirs = ["CVS", "lost+found"]
237         _excluded_dirs = [re.escape(x) for x in _excluded_dirs]
238         _excluded_dirs = re.compile(r'^(\..*|-MERGING-.*|' + \
239                 "|".join(_excluded_dirs) + r')$')
240
241         _aux_cache_keys_re = re.compile(r'^NEEDED\..*$')
242         _aux_multi_line_re = re.compile(r'^(CONTENTS|NEEDED\..*)$')
243
244         def __init__(self, root, categories=None, settings=None, vartree=None):
245                 """
246                 The categories parameter is unused since the dbapi class
247                 now has a categories property that is generated from the
248                 available packages.
249                 """
250                 self.root = root[:]
251
252                 #cache for category directory mtimes
253                 self.mtdircache = {}
254
255                 #cache for dependency checks
256                 self.matchcache = {}
257
258                 #cache for cp_list results
259                 self.cpcache = {}
260
261                 self.blockers = None
262                 if settings is None:
263                         from portage import settings
264                 self.settings = settings
265                 if vartree is None:
266                         from portage import db
267                         vartree = db[root]["vartree"]
268                 self.vartree = vartree
269                 self._aux_cache_keys = set(
270                         ["CHOST", "COUNTER", "DEPEND", "DESCRIPTION",
271                         "EAPI", "HOMEPAGE", "IUSE", "KEYWORDS",
272                         "LICENSE", "PDEPEND", "PROVIDE", "RDEPEND",
273                         "repository", "RESTRICT" , "SLOT", "USE"])
274                 self._aux_cache = None
275                 self._aux_cache_version = "1"
276                 self._aux_cache_filename = os.path.join(self.root,
277                         CACHE_PATH.lstrip(os.path.sep), "vdb_metadata.pickle")
278                 self._counter_path = os.path.join(root,
279                         CACHE_PATH.lstrip(os.path.sep), "counter")
280
281                 try:
282                         self.plib_registry = PreservedLibsRegistry(
283                                 os.path.join(self.root, PRIVATE_PATH, "preserved_libs_registry"))
284                 except PermissionDenied:
285                         # apparently this user isn't allowed to access PRIVATE_PATH
286                         self.plib_registry = None
287
288                 self.linkmap = LinkageMap(self)
289
290         def getpath(self, mykey, filename=None):
291                 rValue = os.path.join(self.root, VDB_PATH, mykey)
292                 if filename != None:
293                         rValue = os.path.join(rValue, filename)
294                 return rValue
295
296         def cpv_exists(self, mykey):
297                 "Tells us whether an actual ebuild exists on disk (no masking)"
298                 return os.path.exists(self.getpath(mykey))
299
300         def cpv_counter(self, mycpv):
301                 "This method will grab the COUNTER. Returns a counter value."
302                 try:
303                         return long(self.aux_get(mycpv, ["COUNTER"])[0])
304                 except (KeyError, ValueError):
305                         pass
306                 cdir = self.getpath(mycpv)
307                 cpath = self.getpath(mycpv, filename="COUNTER")
308
309                 # We write our new counter value to a new file that gets moved into
310                 # place to avoid filesystem corruption on XFS (unexpected reboot).
311                 corrupted = 0
312                 if os.path.exists(cpath):
313                         cfile = open(cpath, "r")
314                         try:
315                                 counter = long(cfile.readline())
316                         except ValueError:
317                                 print "portage: COUNTER for", mycpv, "was corrupted; resetting to value of 0"
318                                 counter = long(0)
319                                 corrupted = 1
320                         cfile.close()
321                 elif os.path.exists(cdir):
322                         mys = pkgsplit(mycpv)
323                         myl = self.match(mys[0], use_cache=0)
324                         print mys, myl
325                         if len(myl) == 1:
326                                 try:
327                                         # Only one package... Counter doesn't matter.
328                                         write_atomic(cpath, "1")
329                                         counter = 1
330                                 except SystemExit, e:
331                                         raise
332                                 except Exception, e:
333                                         writemsg("!!! COUNTER file is missing for "+str(mycpv)+" in /var/db.\n",
334                                                 noiselevel=-1)
335                                         writemsg("!!! Please run %s/fix-db.py or\n" % PORTAGE_BIN_PATH,
336                                                 noiselevel=-1)
337                                         writemsg("!!! unmerge this exact version.\n", noiselevel=-1)
338                                         writemsg("!!! %s\n" % e, noiselevel=-1)
339                                         sys.exit(1)
340                         else:
341                                 writemsg("!!! COUNTER file is missing for "+str(mycpv)+" in /var/db.\n",
342                                         noiselevel=-1)
343                                 writemsg("!!! Please run %s/fix-db.py or\n" % PORTAGE_BIN_PATH,
344                                         noiselevel=-1)
345                                 writemsg("!!! remerge the package.\n", noiselevel=-1)
346                                 sys.exit(1)
347                 else:
348                         counter = long(0)
349                 if corrupted:
350                         # update new global counter file
351                         write_atomic(cpath, str(counter))
352                 return counter
353
354         def cpv_inject(self, mycpv):
355                 "injects a real package into our on-disk database; assumes mycpv is valid and doesn't already exist"
356                 os.makedirs(self.getpath(mycpv))
357                 counter = self.counter_tick(self.root, mycpv=mycpv)
358                 # write local package counter so that emerge clean does the right thing
359                 write_atomic(self.getpath(mycpv, filename="COUNTER"), str(counter))
360
361         def isInjected(self, mycpv):
362                 if self.cpv_exists(mycpv):
363                         if os.path.exists(self.getpath(mycpv, filename="INJECTED")):
364                                 return True
365                         if not os.path.exists(self.getpath(mycpv, filename="CONTENTS")):
366                                 return True
367                 return False
368
369         def move_ent(self, mylist):
370                 origcp = mylist[1]
371                 newcp = mylist[2]
372
373                 # sanity check
374                 for cp in [origcp, newcp]:
375                         if not (isvalidatom(cp) and isjustname(cp)):
376                                 raise InvalidPackageName(cp)
377                 origmatches = self.match(origcp, use_cache=0)
378                 moves = 0
379                 if not origmatches:
380                         return moves
381                 for mycpv in origmatches:
382                         mycpsplit = catpkgsplit(mycpv)
383                         mynewcpv = newcp + "-" + mycpsplit[2]
384                         mynewcat = newcp.split("/")[0]
385                         if mycpsplit[3] != "r0":
386                                 mynewcpv += "-" + mycpsplit[3]
387                         mycpsplit_new = catpkgsplit(mynewcpv)
388                         origpath = self.getpath(mycpv)
389                         if not os.path.exists(origpath):
390                                 continue
391                         moves += 1
392                         if not os.path.exists(self.getpath(mynewcat)):
393                                 #create the directory
394                                 os.makedirs(self.getpath(mynewcat))
395                         newpath = self.getpath(mynewcpv)
396                         if os.path.exists(newpath):
397                                 #dest already exists; keep this puppy where it is.
398                                 continue
399                         _movefile(origpath, newpath, mysettings=self.settings)
400
401                         # We need to rename the ebuild now.
402                         old_pf = catsplit(mycpv)[1]
403                         new_pf = catsplit(mynewcpv)[1]
404                         if new_pf != old_pf:
405                                 try:
406                                         os.rename(os.path.join(newpath, old_pf + ".ebuild"),
407                                                 os.path.join(newpath, new_pf + ".ebuild"))
408                                 except EnvironmentError, e:
409                                         if e.errno != errno.ENOENT:
410                                                 raise
411                                         del e
412                         write_atomic(os.path.join(newpath, "PF"), new_pf+"\n")
413                         write_atomic(os.path.join(newpath, "CATEGORY"), mynewcat+"\n")
414                         fixdbentries([mylist], newpath)
415                 return moves
416
417         def cp_list(self, mycp, use_cache=1):
418                 mysplit=catsplit(mycp)
419                 if mysplit[0] == '*':
420                         mysplit[0] = mysplit[0][1:]
421                 try:
422                         mystat = os.stat(self.getpath(mysplit[0]))[stat.ST_MTIME]
423                 except OSError:
424                         mystat = 0
425                 if use_cache and self.cpcache.has_key(mycp):
426                         cpc = self.cpcache[mycp]
427                         if cpc[0] == mystat:
428                                 return cpc[1][:]
429                 cat_dir = self.getpath(mysplit[0])
430                 try:
431                         dir_list = os.listdir(cat_dir)
432                 except EnvironmentError, e:
433                         from portage.exception import PermissionDenied
434                         if e.errno == PermissionDenied.errno:
435                                 raise PermissionDenied(cat_dir)
436                         del e
437                         dir_list = []
438
439                 returnme = []
440                 for x in dir_list:
441                         if self._excluded_dirs.match(x) is not None:
442                                 continue
443                         ps = pkgsplit(x)
444                         if not ps:
445                                 self.invalidentry(os.path.join(self.getpath(mysplit[0]), x))
446                                 continue
447                         if len(mysplit) > 1:
448                                 if ps[0] == mysplit[1]:
449                                         returnme.append(mysplit[0]+"/"+x)
450                 self._cpv_sort_ascending(returnme)
451                 if use_cache:
452                         self.cpcache[mycp] = [mystat, returnme[:]]
453                 elif self.cpcache.has_key(mycp):
454                         del self.cpcache[mycp]
455                 return returnme
456
457         def cpv_all(self, use_cache=1):
458                 returnme = []
459                 basepath = os.path.join(self.root, VDB_PATH) + os.path.sep
460                 for x in listdir(basepath, EmptyOnError=1, ignorecvs=1, dirsonly=1):
461                         if self._excluded_dirs.match(x) is not None:
462                                 continue
463                         if not self._category_re.match(x):
464                                 continue
465                         for y in listdir(basepath + x, EmptyOnError=1, dirsonly=1):
466                                 if self._excluded_dirs.match(y) is not None:
467                                         continue
468                                 subpath = x + "/" + y
469                                 # -MERGING- should never be a cpv, nor should files.
470                                 try:
471                                         if catpkgsplit(subpath) is None:
472                                                 self.invalidentry(os.path.join(self.root, subpath))
473                                                 continue
474                                 except InvalidData:
475                                         self.invalidentry(os.path.join(self.root, subpath))
476                                         continue
477                                 returnme.append(subpath)
478                 return returnme
479
480         def cp_all(self, use_cache=1):
481                 mylist = self.cpv_all(use_cache=use_cache)
482                 d={}
483                 for y in mylist:
484                         if y[0] == '*':
485                                 y = y[1:]
486                         try:
487                                 mysplit = catpkgsplit(y)
488                         except InvalidData:
489                                 self.invalidentry(self.getpath(y))
490                                 continue
491                         if not mysplit:
492                                 self.invalidentry(self.getpath(y))
493                                 continue
494                         d[mysplit[0]+"/"+mysplit[1]] = None
495                 return d.keys()
496
497         def checkblockers(self, origdep):
498                 pass
499
500         def match(self, origdep, use_cache=1):
501                 "caching match function"
502                 mydep = dep_expand(
503                         origdep, mydb=self, use_cache=use_cache, settings=self.settings)
504                 mykey = dep_getkey(mydep)
505                 mycat = catsplit(mykey)[0]
506                 if not use_cache:
507                         if self.matchcache.has_key(mycat):
508                                 del self.mtdircache[mycat]
509                                 del self.matchcache[mycat]
510                         mymatch = match_from_list(mydep,
511                                 self.cp_list(mykey, use_cache=use_cache))
512                         myslot = dep_getslot(mydep)
513                         if myslot is not None:
514                                 mymatch = [cpv for cpv in mymatch \
515                                         if self.aux_get(cpv, ["SLOT"])[0] == myslot]
516                         return mymatch
517                 try:
518                         curmtime = os.stat(self.root+VDB_PATH+"/"+mycat)[stat.ST_MTIME]
519                 except (IOError, OSError):
520                         curmtime=0
521
522                 if not self.matchcache.has_key(mycat) or not self.mtdircache[mycat]==curmtime:
523                         # clear cache entry
524                         self.mtdircache[mycat] = curmtime
525                         self.matchcache[mycat] = {}
526                 if not self.matchcache[mycat].has_key(mydep):
527                         mymatch = match_from_list(mydep, self.cp_list(mykey, use_cache=use_cache))
528                         myslot = dep_getslot(mydep)
529                         if myslot is not None:
530                                 mymatch = [cpv for cpv in mymatch \
531                                         if self.aux_get(cpv, ["SLOT"])[0] == myslot]
532                         self.matchcache[mycat][mydep] = mymatch
533                 return self.matchcache[mycat][mydep][:]
534
535         def findname(self, mycpv):
536                 return self.getpath(str(mycpv), filename=catsplit(mycpv)[1]+".ebuild")
537
538         def flush_cache(self):
539                 """If the current user has permission and the internal aux_get cache has
540                 been updated, save it to disk and mark it unmodified.  This is called
541                 by emerge after it has loaded the full vdb for use in dependency
542                 calculations.  Currently, the cache is only written if the user has
543                 superuser privileges (since that's required to obtain a lock), but all
544                 users have read access and benefit from faster metadata lookups (as
545                 long as at least part of the cache is still valid)."""
546                 if self._aux_cache is not None and \
547                         self._aux_cache["modified"] and \
548                         secpass >= 2:
549                         valid_nodes = set(self.cpv_all())
550                         for cpv in self._aux_cache["packages"].keys():
551                                 if cpv not in valid_nodes:
552                                         del self._aux_cache["packages"][cpv]
553                         del self._aux_cache["modified"]
554                         try:
555                                 f = atomic_ofstream(self._aux_cache_filename)
556                                 cPickle.dump(self._aux_cache, f, -1)
557                                 f.close()
558                                 apply_secpass_permissions(
559                                         self._aux_cache_filename, gid=portage_gid, mode=0644)
560                         except (IOError, OSError), e:
561                                 pass
562                         self._aux_cache["modified"] = False
563
564         def aux_get(self, mycpv, wants):
565                 """This automatically caches selected keys that are frequently needed
566                 by emerge for dependency calculations.  The cached metadata is
567                 considered valid if the mtime of the package directory has not changed
568                 since the data was cached.  The cache is stored in a pickled dict
569                 object with the following format:
570
571                 {"version": "1", "packages": {cpv1: (mtime, {k1: v1, k2: v2, ...}), cpv2: ...}}
572
573                 If an error occurs while loading the cache pickle or the version is
574                 unrecognized, the cache will simply be recreated from scratch (it is
575                 completely disposable).
576                 """
577                 cache_these_wants = self._aux_cache_keys.intersection(wants)
578                 for x in wants:
579                         if self._aux_cache_keys_re.match(x) is not None:
580                                 cache_these_wants.add(x)
581
582                 if not cache_these_wants:
583                         return self._aux_get(mycpv, wants)
584
585                 cache_these = set(self._aux_cache_keys)
586                 cache_these.update(cache_these_wants)
587
588                 if self._aux_cache is None:
589                         try:
590                                 f = open(self._aux_cache_filename)
591                                 mypickle = cPickle.Unpickler(f)
592                                 mypickle.find_global = None
593                                 self._aux_cache = mypickle.load()
594                                 f.close()
595                                 del f
596                         except (IOError, OSError, EOFError, cPickle.UnpicklingError):
597                                 pass
598                         if not self._aux_cache or \
599                                 not isinstance(self._aux_cache, dict) or \
600                                 self._aux_cache.get("version") != self._aux_cache_version or \
601                                 not self._aux_cache.get("packages"):
602                                 self._aux_cache = {"version": self._aux_cache_version}
603                                 self._aux_cache["packages"] = {}
604                         self._aux_cache["modified"] = False
605                 mydir = self.getpath(mycpv)
606                 mydir_stat = None
607                 try:
608                         mydir_stat = os.stat(mydir)
609                 except OSError, e:
610                         if e.errno != errno.ENOENT:
611                                 raise
612                         raise KeyError(mycpv)
613                 mydir_mtime = long(mydir_stat.st_mtime)
614                 pkg_data = self._aux_cache["packages"].get(mycpv)
615                 mydata = {}
616                 cache_valid = False
617                 cache_incomplete = False
618                 if pkg_data:
619                         cache_mtime, metadata = pkg_data
620                         cache_valid = cache_mtime == mydir_mtime
621                 if cache_valid:
622                         cache_incomplete = cache_these.difference(metadata)
623                         if cache_incomplete:
624                                 # Allow self._aux_cache_keys to change without a cache version
625                                 # bump and efficiently recycle partial cache whenever possible.
626                                 cache_valid = False
627                                 pull_me = cache_incomplete.union(wants)
628                         else:
629                                 pull_me = set(wants).difference(cache_these)
630                         mydata.update(metadata)
631                 else:
632                         pull_me = cache_these
633
634                 if pull_me:
635                         # pull any needed data and cache it
636                         aux_keys = list(pull_me)
637                         for k, v in izip(aux_keys, self._aux_get(mycpv, aux_keys)):
638                                 mydata[k] = v
639                         if not cache_valid or cache_incomplete:
640                                 cache_data = {}
641                                 if cache_incomplete:
642                                         cache_data.update(metadata)
643                                 for aux_key in cache_these:
644                                         cache_data[aux_key] = mydata[aux_key]
645                                 self._aux_cache["packages"][mycpv] = (mydir_mtime, cache_data)
646                                 self._aux_cache["modified"] = True
647                 return [mydata[x] for x in wants]
648
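        # Illustrative aux_get() call (a sketch; the cpv is hypothetical):
        #
        #     slot, counter = self.aux_get("app-misc/foo-1.0", ["SLOT", "COUNTER"])
        #
        # Values come back in the same order as the requested keys; cacheable
        # keys are served from vdb_metadata.pickle as long as the package
        # directory mtime still matches the cached value.
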
649         def _aux_get(self, mycpv, wants):
650                 mydir = self.getpath(mycpv)
651                 try:
652                         if not stat.S_ISDIR(os.stat(mydir).st_mode):
653                                 raise KeyError(mycpv)
654                 except OSError, e:
655                         if e.errno == errno.ENOENT:
656                                 raise KeyError(mycpv)
657                         del e
658                         raise
659                 results = []
660                 for x in wants:
661                         try:
662                                 myf = open(os.path.join(mydir, x), "r")
663                                 try:
664                                         myd = myf.read()
665                                 finally:
666                                         myf.close()
667                                 # Preserve \n for metadata that is known to
668                                 # contain multiple lines.
669                                 if self._aux_multi_line_re.match(x) is None:
670                                         myd = " ".join(myd.split())
671                         except IOError:
672                                 myd = ""
673                         if x == "EAPI" and not myd:
674                                 results.append("0")
675                         else:
676                                 results.append(myd)
677                 return results
678
679         def aux_update(self, cpv, values):
680                 cat, pkg = catsplit(cpv)
681                 mylink = dblink(cat, pkg, self.root, self.settings,
682                 treetype="vartree", vartree=self.vartree)
683                 if not mylink.exists():
684                         raise KeyError(cpv)
685                 for k, v in values.iteritems():
686                         if v:
687                                 mylink.setfile(k, v)
688                         else:
689                                 try:
690                                         os.unlink(os.path.join(self.getpath(cpv), k))
691                                 except EnvironmentError:
692                                         pass
693
694         def counter_tick(self, myroot, mycpv=None):
695                 return self.counter_tick_core(myroot, incrementing=1, mycpv=mycpv)
696
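        # Illustrative counter usage (a sketch; the cpv is hypothetical):
        #
        #     counter = self.counter_tick(self.root, mycpv="app-misc/foo-1.0")
        #
        # counter_tick() returns one more than the highest COUNTER found among
        # the installed packages and the global counter file, and records the
        # new value back to $CACHE_PATH/counter.
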
697         def get_counter_tick_core(self, myroot, mycpv=None):
698                 """
699                 Use this method to retrieve the counter instead
700                 of having to trust the value of a global counter
701                 file that can lead to invalid COUNTER
702                 generation. When cache is valid, the package COUNTER
703                 files are not read and we rely on the timestamp of
704                 the package directory to validate cache. The stat
705                 calls should only take a short time, so performance
706                 is sufficient without having to rely on a potentially
707                 corrupt global counter file.
708
709                 The global counter file located at
710                 $CACHE_PATH/counter serves to record the
711                 counter of the last installed package and
712                 it also corresponds to the total number of
713                 installation actions that have occurred in
714                 the history of this package database.
715                 """
716                 cp_list = self.cp_list
717                 max_counter = 0
718                 for cp in self.cp_all():
719                         for cpv in cp_list(cp):
720                                 try:
721                                         counter = int(self.aux_get(cpv, ["COUNTER"])[0])
722                                 except (KeyError, OverflowError, ValueError):
723                                         continue
724                                 if counter > max_counter:
725                                         max_counter = counter
726
727                 counter = -1
728                 try:
729                         cfile = open(self._counter_path, "r")
730                 except EnvironmentError, e:
731                         writemsg("!!! Unable to read COUNTER file: '%s'\n" % \
732                                 self._counter_path, noiselevel=-1)
733                         writemsg("!!! %s\n" % str(e), noiselevel=-1)
734                         del e
735                 else:
736                         try:
737                                 try:
738                                         counter = long(cfile.readline().strip())
739                                 finally:
740                                         cfile.close()
741                         except (OverflowError, ValueError), e:
742                                 writemsg("!!! COUNTER file is corrupt: '%s'\n" % \
743                                         self._counter_path, noiselevel=-1)
744                                 writemsg("!!! %s\n" % str(e), noiselevel=-1)
745                                 del e
746
747                 # We must ensure that we return a counter
748                 # value that is at least as large as the
749                 # highest one from the installed packages,
750                 # since having a corrupt value that is too low
751                 # can trigger incorrect AUTOCLEAN behavior due
752                 # to newly installed packages having lower
753                 # COUNTERs than the previous version in the
754                 # same slot.
755                 if counter > max_counter:
756                         max_counter = counter
757
758                 if counter < 0:
759                         writemsg("!!! Initializing COUNTER to " + \
760                                 "value of %d\n" % max_counter, noiselevel=-1)
761
762                 return max_counter + 1
763
764         def counter_tick_core(self, myroot, incrementing=1, mycpv=None):
765                 "This method will grab the next COUNTER value and record it back to the global file.  Returns new counter value."
766                 counter = self.get_counter_tick_core(myroot, mycpv=mycpv) - 1
767                 if incrementing:
768                         #increment counter
769                         counter += 1
770                         # update new global counter file
771                         write_atomic(self._counter_path, str(counter))
772                 return counter
773
774 class vartree(object):
775         "this tree will scan a var/db/pkg database located at root (passed to init)"
776         def __init__(self, root="/", virtual=None, clone=None, categories=None,
777                 settings=None):
778                 if clone:
779                         writemsg("vartree.__init__(): deprecated " + \
780                                 "use of clone parameter\n", noiselevel=-1)
781                         self.root = clone.root[:]
782                         self.dbapi = copy.deepcopy(clone.dbapi)
783                         self.populated = 1
784                         from portage import config
785                         self.settings = config(clone=clone.settings)
786                 else:
787                         self.root = root[:]
788                         if settings is None:
789                                 from portage import settings
790                         self.settings = settings # for key_expand calls
791                         if categories is None:
792                                 categories = settings.categories
793                         self.dbapi = vardbapi(self.root, categories=categories,
794                                 settings=settings, vartree=self)
795                         self.populated = 1
796
797         def getpath(self, mykey, filename=None):
798                 return self.dbapi.getpath(mykey, filename=filename)
799
800         def zap(self, mycpv):
801                 return
802
803         def inject(self, mycpv):
804                 return
805
806         def get_provide(self, mycpv):
807                 myprovides = []
808                 mylines = None
809                 try:
810                         mylines, myuse = self.dbapi.aux_get(mycpv, ["PROVIDE", "USE"])
811                         if mylines:
812                                 myuse = myuse.split()
813                                 mylines = flatten(use_reduce(paren_reduce(mylines), uselist=myuse))
814                                 for myprovide in mylines:
815                                         mys = catpkgsplit(myprovide)
816                                         if not mys:
817                                                 mys = myprovide.split("/")
818                                         myprovides += [mys[0] + "/" + mys[1]]
819                         return myprovides
820                 except SystemExit, e:
821                         raise
822                 except Exception, e:
823                         mydir = os.path.join(self.root, VDB_PATH, mycpv)
824                         writemsg("\nParse Error reading PROVIDE and USE in '%s'\n" % mydir,
825                                 noiselevel=-1)
826                         if mylines:
827                                 writemsg("Possibly Invalid: '%s'\n" % str(mylines),
828                                         noiselevel=-1)
829                         writemsg("Exception: %s\n\n" % str(e), noiselevel=-1)
830                         return []
831
832         def get_all_provides(self):
833                 myprovides = {}
834                 for node in self.getallcpv():
835                         for mykey in self.get_provide(node):
836                                 if myprovides.has_key(mykey):
837                                         myprovides[mykey] += [node]
838                                 else:
839                                         myprovides[mykey] = [node]
840                 return myprovides
841
842         def dep_bestmatch(self, mydep, use_cache=1):
843                 "compatibility method -- all matches, not just visible ones"
844                 #mymatch=best(match(dep_expand(mydep,self.dbapi),self.dbapi))
845                 mymatch = best(self.dbapi.match(
846                         dep_expand(mydep, mydb=self.dbapi, settings=self.settings),
847                         use_cache=use_cache))
848                 if mymatch is None:
849                         return ""
850                 else:
851                         return mymatch
852
853         def dep_match(self, mydep, use_cache=1):
854                 "compatibility method -- we want to see all matches, not just visible ones"
855                 #mymatch = match(mydep,self.dbapi)
856                 mymatch = self.dbapi.match(mydep, use_cache=use_cache)
857                 if mymatch is None:
858                         return []
859                 else:
860                         return mymatch
861
862         def exists_specific(self, cpv):
863                 return self.dbapi.cpv_exists(cpv)
864
865         def getallcpv(self):
866                 """temporary function, probably to be renamed --- Gets a list of all
867                 category/package-versions installed on the system."""
868                 return self.dbapi.cpv_all()
869
870         def getallnodes(self):
871                 """new behavior: these are all *unmasked* nodes.  There may or may not be
872                 masked packages available for the nodes in this list."""
873                 return self.dbapi.cp_all()
874
875         def exists_specific_cat(self, cpv, use_cache=1):
876                 cpv = key_expand(cpv, mydb=self.dbapi, use_cache=use_cache,
877                         settings=self.settings)
878                 a = catpkgsplit(cpv)
879                 if not a:
880                         return 0
881                 mylist = listdir(self.getpath(a[0]), EmptyOnError=1)
882                 for x in mylist:
883                         b = pkgsplit(x)
884                         if not b:
885                                 self.dbapi.invalidentry(self.getpath(a[0], filename=x))
886                                 continue
887                         if a[1] == b[0]:
888                                 return 1
889                 return 0
890
891         def getebuildpath(self, fullpackage):
892                 cat, package = catsplit(fullpackage)
893                 return self.getpath(fullpackage, filename=package+".ebuild")
894
895         def getnode(self, mykey, use_cache=1):
896                 mykey = key_expand(mykey, mydb=self.dbapi, use_cache=use_cache,
897                         settings=self.settings)
898                 if not mykey:
899                         return []
900                 mysplit = catsplit(mykey)
901                 mydirlist = listdir(self.getpath(mysplit[0]),EmptyOnError=1)
902                 returnme = []
903                 for x in mydirlist:
904                         mypsplit = pkgsplit(x)
905                         if not mypsplit:
906                                 self.dbapi.invalidentry(self.getpath(mysplit[0], filename=x))
907                                 continue
908                         if mypsplit[0] == mysplit[1]:
909                                 appendme = [mysplit[0]+"/"+x, [mysplit[0], mypsplit[0], mypsplit[1], mypsplit[2]]]
910                                 returnme.append(appendme)
911                 return returnme
912
913
914         def getslot(self, mycatpkg):
915                 "Get a slot for a catpkg; assume it exists."
916                 try:
917                         return self.dbapi.aux_get(mycatpkg, ["SLOT"])[0]
918                 except KeyError:
919                         return ""
920
921         def hasnode(self, mykey, use_cache):
922                 """Does the particular node (cat/pkg key) exist?"""
923                 mykey = key_expand(mykey, mydb=self.dbapi, use_cache=use_cache,
924                         settings=self.settings)
925                 mysplit = catsplit(mykey)
926                 mydirlist = listdir(self.getpath(mysplit[0]), EmptyOnError=1)
927                 for x in mydirlist:
928                         mypsplit = pkgsplit(x)
929                         if not mypsplit:
930                                 self.dbapi.invalidentry(self.getpath(mysplit[0], filename=x))
931                                 continue
932                         if mypsplit[0] == mysplit[1]:
933                                 return 1
934                 return 0
935
936         def populate(self):
937                 self.populated=1
938
939 class dblink(object):
940         """
941         This class provides an interface to the installed package database
942         At present this is implemented as a text backend in /var/db/pkg.
943         """
944
945         import re
946         _normalize_needed = re.compile(r'.*//.*|^[^/]|.+/$|(^|.*/)\.\.?(/.*|$)')
947         _contents_split_counts = {
948                 "dev": 2,
949                 "dir": 2,
950                 "fif": 2,
951                 "obj": 4,
952                 "sym": 5
953         }
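
        # The split counts above correspond to these CONTENTS entry layouts
        # (a sketch of the expected formats; md5 and mtime are plain fields):
        #
        #     dev <path>
        #     dir <path>
        #     fif <path>
        #     obj <path> <md5> <mtime>
        #     sym <path> -> <target> <mtime>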
954
955         def __init__(self, cat, pkg, myroot, mysettings, treetype=None,
956                 vartree=None, blockers=None):
957                 """
958                 Creates a DBlink object for a given CPV.
959                 The given CPV may not be present in the database already.
960                 
961                 @param cat: Category
962                 @type cat: String
963                 @param pkg: Package (PV)
964                 @type pkg: String
965                 @param myroot: Typically ${ROOT}
966                 @type myroot: String (Path)
967                 @param mysettings: Typically portage.config
968                 @type mysettings: An instance of portage.config
969                 @param treetype: one of ['porttree','bintree','vartree']
970                 @type treetype: String
971                 @param vartree: an instance of vartree corresponding to myroot.
972                 @type vartree: vartree
973                 """
974                 
975                 self.cat = cat
976                 self.pkg = pkg
977                 self.mycpv = self.cat + "/" + self.pkg
978                 self.mysplit = list(catpkgsplit(self.mycpv)[1:])
979                 self.mysplit[0] = "%s/%s" % (self.cat, self.mysplit[0])
980                 self.treetype = treetype
981                 if vartree is None:
982                         from portage import db
983                         vartree = db[myroot]["vartree"]
984                 self.vartree = vartree
985                 self._blockers = blockers
986
987                 self.dbroot = normalize_path(os.path.join(myroot, VDB_PATH))
988                 self.dbcatdir = self.dbroot+"/"+cat
989                 self.dbpkgdir = self.dbcatdir+"/"+pkg
990                 self.dbtmpdir = self.dbcatdir+"/-MERGING-"+pkg
991                 self.dbdir = self.dbpkgdir
992
993                 self._lock_vdb = None
994
995                 self.settings = mysettings
996                 if self.settings == 1:
997                         raise ValueError
998
999                 self.myroot=myroot
1000                 protect_obj = ConfigProtect(myroot,
1001                         mysettings.get("CONFIG_PROTECT","").split(),
1002                         mysettings.get("CONFIG_PROTECT_MASK","").split())
1003                 self.updateprotect = protect_obj.updateprotect
1004                 self.isprotected = protect_obj.isprotected
1005                 self._installed_instance = None
1006                 self.contentscache = None
1007                 self._contents_inodes = None
1008                 self._contents_basenames = None
1009
1010         def lockdb(self):
1011                 if self._lock_vdb:
1012                         raise AssertionError("Lock already held.")
1013                 # At least the parent needs to exist for the lock file.
1014                 ensure_dirs(self.dbroot)
1015                 self._lock_vdb = lockdir(self.dbroot)
1016
1017         def unlockdb(self):
1018                 if self._lock_vdb:
1019                         unlockdir(self._lock_vdb)
1020                         self._lock_vdb = None
1021
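        # Typical lockdb()/unlockdb() pairing (a sketch): hold the vdb lock
        # only around the critical section and always release it.
        #
        #     self.lockdb()
        #     try:
        #             ...  # modify the vdb entry
        #     finally:
        #             self.unlockdb()
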
1022         def getpath(self):
1023                 "return path to location of db information (for >>> informational display)"
1024                 return self.dbdir
1025
1026         def exists(self):
1027                 "does the db entry exist?  boolean."
1028                 return os.path.exists(self.dbdir)
1029
1030         def delete(self):
1031                 """
1032                 Remove this entry from the database
1033                 """
1034                 if not os.path.exists(self.dbdir):
1035                         return
1036                 try:
1037                         for x in os.listdir(self.dbdir):
1038                                 os.unlink(self.dbdir+"/"+x)
1039                         os.rmdir(self.dbdir)
1040                 except OSError, e:
1041                         print "!!! Unable to remove db entry for this package."
1042                         print "!!! It is possible that a directory is in this one. Portage will still"
1043                         print "!!! register this package as installed as long as this directory exists."
1044                         print "!!! You may delete this directory with 'rm -Rf "+self.dbdir+"'"
1045                         print "!!! "+str(e)
1046                         print
1047                         sys.exit(1)
1048
1049                 # Due to mtime granularity, mtime checks do not always properly
1050                 # invalidate vardbapi caches.
1051                 self.vartree.dbapi.mtdircache.pop(self.cat, None)
1052                 self.vartree.dbapi.matchcache.pop(self.cat, None)
1053                 self.vartree.dbapi.cpcache.pop(self.mysplit[0], None)
1054
1055         def clearcontents(self):
1056                 """
1057                 For a given db entry (self), erase the CONTENTS values.
1058                 """
1059                 if os.path.exists(self.dbdir+"/CONTENTS"):
1060                         os.unlink(self.dbdir+"/CONTENTS")
1061
1062         def _clear_contents_cache(self):
1063                 self.contentscache = None
1064                 self._contents_inodes = None
1065                 self._contents_basenames = None
1066
1067         def getcontents(self):
1068                 """
1069                 Get the installed files of a given package (aka what that package installed)
1070                 """
1071                 contents_file = os.path.join(self.dbdir, "CONTENTS")
1072                 if self.contentscache is not None:
1073                         return self.contentscache
1074                 pkgfiles = {}
1075                 try:
1076                         myc = open(contents_file,"r")
1077                 except EnvironmentError, e:
1078                         if e.errno != errno.ENOENT:
1079                                 raise
1080                         del e
1081                         self.contentscache = pkgfiles
1082                         return pkgfiles
1083                 mylines = myc.readlines()
1084                 myc.close()
1085                 null_byte = "\0"
1086                 normalize_needed = self._normalize_needed
1087                 contents_split_counts = self._contents_split_counts
1088                 myroot = self.myroot
1089                 if myroot == os.path.sep:
1090                         myroot = None
1091                 pos = 0
1092                 errors = []
1093                 for pos, line in enumerate(mylines):
1094                         if null_byte in line:
1095                                 # Null bytes are a common indication of corruption.
1096                                 errors.append((pos + 1, "Null byte found in CONTENTS entry"))
1097                                 continue
1098                         line = line.rstrip("\n")
1099                         # Split on " " so that even file paths that
1100                         # end with spaces can be handled.
1101                         mydat = line.split(" ")
1102                         entry_type = mydat[0] # empty string if line is empty
1103                         correct_split_count = contents_split_counts.get(entry_type)
1104                         if correct_split_count and len(mydat) > correct_split_count:
1105                                 # Apparently file paths contain spaces, so reassemble
1106                         # the split so that it has the correct_split_count.
1107                                 newsplit = [entry_type]
1108                                 spaces_total = len(mydat) - correct_split_count
1109                                 if entry_type == "sym":
1110                                         try:
1111                                                 splitter = mydat.index("->", 2, len(mydat) - 2)
1112                                         except ValueError:
1113                                                 errors.append((pos + 1, "Unrecognized CONTENTS entry"))
1114                                                 continue
1115                                         spaces_in_path = splitter - 2
1116                                         spaces_in_target = spaces_total - spaces_in_path
1117                                         newsplit.append(" ".join(mydat[1:splitter]))
1118                                         newsplit.append("->")
1119                                         target_end = splitter + spaces_in_target + 2
1120                                         newsplit.append(" ".join(mydat[splitter + 1:target_end]))
1121                                         newsplit.extend(mydat[target_end:])
1122                                 else:
1123                                         path_end = spaces_total + 2
1124                                         newsplit.append(" ".join(mydat[1:path_end]))
1125                                         newsplit.extend(mydat[path_end:])
1126                                 mydat = newsplit
1127
1128                         # we do this so we can remove from non-root filesystems
1129                         # (use the ROOT var to allow maintenance on other partitions)
1130                         try:
1131                                 if normalize_needed.match(mydat[1]):
1132                                         mydat[1] = normalize_path(mydat[1])
1133                                         if not mydat[1].startswith(os.path.sep):
1134                                                 mydat[1] = os.path.sep + mydat[1]
1135                                 if myroot:
1136                                         mydat[1] = os.path.join(myroot, mydat[1].lstrip(os.path.sep))
1137                                 if mydat[0] == "obj":
1138                                         #format: type, mtime, md5sum
1139                                         pkgfiles[mydat[1]] = [mydat[0], mydat[3], mydat[2]]
1140                                 elif mydat[0] == "dir":
1141                                         #format: type
1142                                         pkgfiles[mydat[1]] = [mydat[0]]
1143                                 elif mydat[0] == "sym":
1144                                         #format: type, mtime, dest
1145                                         pkgfiles[mydat[1]] = [mydat[0], mydat[4], mydat[3]]
1146                                 elif mydat[0] == "dev":
1147                                         #format: type
1148                                         pkgfiles[mydat[1]] = [mydat[0]]
1149                                 elif mydat[0]=="fif":
1150                                         #format: type
1151                                         pkgfiles[mydat[1]] = [mydat[0]]
1152                                 else:
1153                                         errors.append((pos + 1, "Unrecognized CONTENTS entry"))
1154                         except (KeyError, IndexError):
1155                                 errors.append((pos + 1, "Unrecognized CONTENTS entry"))
1156                 if errors:
1157                         writemsg("!!! Parse error in '%s'\n" % contents_file, noiselevel=-1)
1158                         for pos, e in errors:
1159                                 writemsg("!!!   line %d: %s\n" % (pos, e), noiselevel=-1)
1160                 self.contentscache = pkgfiles
1161                 return pkgfiles
1162
1163         def unmerge(self, pkgfiles=None, trimworld=1, cleanup=1,
1164                 ldpath_mtimes=None, others_in_slot=None):
1165                 """
1166                 Calls prerm
1167                 Unmerges a given package (CPV)
1168                 Calls postrm
1169                 Calls cleanrm
1170                 Calls env_update
1171                 
1172                 @param pkgfiles: files to unmerge (generally self.getcontents() )
1173                 @type pkgfiles: Dictionary
1174                 @param trimworld: Remove CPV from world file if True, not if False
1175                 @type trimworld: Boolean
1176                 @param cleanup: cleanup to pass to doebuild (see doebuild)
1177                 @type cleanup: Boolean
1178                 @param ldpath_mtimes: mtimes to pass to env_update (see env_update)
1179                 @type ldpath_mtimes: Dictionary
1180                 @param others_in_slot: all dblink instances in this slot, excluding self
1181                 @type others_in_slot: list
1182                 @rtype: Integer
1183                 @returns:
1184                 1. os.EX_OK if everything went well.
1185                 2. return code of the failed phase (for prerm, postrm, cleanrm)
1186                 
1187                 Notes:
1188                 The caller must ensure that lockdb() and unlockdb() are called
1189                 before and after this method.
1190                 """
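                     # A minimal caller sketch (variable names here are hypothetical);
                     # as the Notes above say, lockdb()/unlockdb() must wrap the call:
                     #
                     #   mylink = dblink(cat, pkg, myroot, settings, vartree=vartree)
                     #   mylink.lockdb()
                     #   try:
                     #       retval = mylink.unmerge(ldpath_mtimes=ldpath_mtimes)
                     #   finally:
                     #       mylink.unlockdb()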
1191                 if self.vartree.dbapi._categories is not None:
1192                         self.vartree.dbapi._categories = None
1193                 # When others_in_slot is supplied, the security check has already been
1194                 # done for this slot, so it shouldn't be repeated until the next
1195                 # replacement or unmerge operation.
1196                 if others_in_slot is None:
1197                         slot = self.vartree.dbapi.aux_get(self.mycpv, ["SLOT"])[0]
1198                         slot_matches = self.vartree.dbapi.match(
1199                                 "%s:%s" % (dep_getkey(self.mycpv), slot))
1200                         others_in_slot = []
1201                         for cur_cpv in slot_matches:
1202                                 if cur_cpv == self.mycpv:
1203                                         continue
1204                                 others_in_slot.append(dblink(self.cat, catsplit(cur_cpv)[1],
1205                                         self.vartree.root, self.settings, vartree=self.vartree))
1206                         retval = self._security_check([self] + others_in_slot)
1207                         if retval:
1208                                 return retval
1209
1210                 contents = self.getcontents()
1211                 # Now, don't assume that the name of the ebuild is the same as the
1212                 # name of the dir; the package may have been moved.
1213                 myebuildpath = None
1214                 ebuild_phase = "prerm"
1215                 mystuff = listdir(self.dbdir, EmptyOnError=1)
1216                 for x in mystuff:
1217                         if x.endswith(".ebuild"):
1218                                 myebuildpath = os.path.join(self.dbdir, self.pkg + ".ebuild")
1219                                 if x[:-7] != self.pkg:
1220                                         # Clean up after vardbapi.move_ent() breakage in
1221                                         # portage versions before 2.1.2
1222                                         os.rename(os.path.join(self.dbdir, x), myebuildpath)
1223                                         write_atomic(os.path.join(self.dbdir, "PF"), self.pkg+"\n")
1224                                 break
1225
1226                 self.settings.setcpv(self.mycpv, mydb=self.vartree.dbapi)
1227                 if myebuildpath:
1228                         try:
1229                                 doebuild_environment(myebuildpath, "prerm", self.myroot,
1230                                         self.settings, 0, 0, self.vartree.dbapi)
1231                         except UnsupportedAPIException, e:
1232                                 # Sometimes this happens due to corruption of the EAPI file.
1233                                 writemsg("!!! FAILED prerm: %s\n" % \
1234                                         os.path.join(self.dbdir, "EAPI"), noiselevel=-1)
1235                                 writemsg("%s\n" % str(e), noiselevel=-1)
1236                                 return 1
1237                         catdir = os.path.dirname(self.settings["PORTAGE_BUILDDIR"])
1238                         ensure_dirs(os.path.dirname(catdir),
1239                                 uid=portage_uid, gid=portage_gid, mode=070, mask=0)
1240                 builddir_lock = None
1241                 catdir_lock = None
1242                 retval = -1
1243                 try:
1244                         if myebuildpath:
1245                                 catdir_lock = lockdir(catdir)
1246                                 ensure_dirs(catdir,
1247                                         uid=portage_uid, gid=portage_gid,
1248                                         mode=070, mask=0)
1249                                 builddir_lock = lockdir(
1250                                         self.settings["PORTAGE_BUILDDIR"])
1251                                 try:
1252                                         unlockdir(catdir_lock)
1253                                 finally:
1254                                         catdir_lock = None
1255                                 # Eventually, we'd like to pass in the saved ebuild env here...
1256                                 retval = doebuild(myebuildpath, "prerm", self.myroot,
1257                                         self.settings, cleanup=cleanup, use_cache=0,
1258                                         mydbapi=self.vartree.dbapi, tree="vartree",
1259                                         vartree=self.vartree)
1260                                 # XXX: Decide how to handle failures here.
1261                                 if retval != os.EX_OK:
1262                                         writemsg("!!! FAILED prerm: %s\n" % retval, noiselevel=-1)
1263                                         return retval
1264
1265                         self._unmerge_pkgfiles(pkgfiles, others_in_slot)
1266                         
1267                         # Remove the registration of preserved libs for this pkg instance
1268                         plib_registry = self.vartree.dbapi.plib_registry
1269                         plib_registry.unregister(self.mycpv, self.settings["SLOT"],
1270                                 self.vartree.dbapi.cpv_counter(self.mycpv))
1271
1272                         if myebuildpath:
1273                                 ebuild_phase = "postrm"
1274                                 retval = doebuild(myebuildpath, "postrm", self.myroot,
1275                                          self.settings, use_cache=0, tree="vartree",
1276                                          mydbapi=self.vartree.dbapi, vartree=self.vartree)
1277
1278                                 # XXX: Decide how to handle failures here.
1279                                 if retval != os.EX_OK:
1280                                         writemsg("!!! FAILED postrm: %s\n" % retval, noiselevel=-1)
1281                                         return retval
1282
1283                         # regenerate reverse NEEDED map
1284                         self.vartree.dbapi.linkmap.rebuild()
1285
1286                         # remove preserved libraries that don't have any consumers left
1287                         # FIXME: this code is quite ugly and can likely be optimized in several ways
1288                         plib_dict = plib_registry.getPreservedLibs()
1289                         for cpv in plib_dict:
1290                                 plib_dict[cpv].sort()
1291                                 # for the loop below to work correctly, we need all
1292                                 # symlinks to come before the actual files, such that
1293                                 # the recorded symlinks (sonames) will be resolved into
1294                                 # their real target before the object is found not to be
1295                                 # in the reverse NEEDED map
1296                                 def symlink_compare(x, y):
1297                                         if os.path.islink(x):
1298                                                 if os.path.islink(y):
1299                                                         return 0
1300                                                 else:
1301                                                         return -1
1302                                         elif os.path.islink(y):
1303                                                 return 1
1304                                         else:
1305                                                 return 0
1306
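                                     # e.g. (hypothetical paths) ["/usr/lib/libfoo.so.1",
                                     # "/usr/lib/libfoo.so"] sorts so that the symlink
                                     # "/usr/lib/libfoo.so" comes first when it points at
                                     # libfoo.so.1.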
1307                                 plib_dict[cpv].sort(symlink_compare)
1308                                 for f in plib_dict[cpv]:
1309                                         if not os.path.exists(f):
1310                                                 continue
1311                                         unlink_list = []
1312                                         consumers = self.vartree.dbapi.linkmap.findConsumers(f)
1313                                         if not consumers:
1314                                                 unlink_list.append(f)
1315                                         else:
1316                                                 keep=False
1317                                                 for c in consumers:
1318                                                         if c not in self.getcontents():
1319                                                                 keep=True
1320                                                                 break
1321                                                 if not keep:
1322                                                         unlink_list.append(f)
1323                                         for obj in unlink_list:
1324                                                 try:
1325                                                         if os.path.islink(obj):
1326                                                                 obj_type = "sym"
1327                                                         else:
1328                                                                 obj_type = "obj"
1329                                                         os.unlink(obj)
1330                                                         writemsg_stdout("<<< !needed   %s %s\n" % (obj_type, obj))
1331                                                 except OSError, e:
1332                                                         if e.errno == errno.ENOENT:
1333                                                                 pass
1334                                                         else:
1335                                                                 raise e
1336                         plib_registry.pruneNonExisting()
1337                                                 
1338                 finally:
1339                         if builddir_lock:
1340                                 try:
1341                                         if myebuildpath:
1342                                                 if retval != os.EX_OK:
1343                                                         msg_lines = []
1344                                                         msg = ("The '%s' " % ebuild_phase) + \
1345                                                         ("phase of the '%s' package " % self.mycpv) + \
1346                                                         ("has failed with exit value %s." % retval)
1347                                                         from textwrap import wrap
1348                                                         msg_lines.extend(wrap(msg, 72))
1349                                                         msg_lines.append("")
1350
1351                                                         ebuild_name = os.path.basename(myebuildpath)
1352                                                         ebuild_dir = os.path.dirname(myebuildpath)
1353                                                         msg = "The problem occurred while executing " + \
1354                                                         ("the ebuild file named '%s' " % ebuild_name) + \
1355                                                         ("located in the '%s' directory. " \
1356                                                         % ebuild_dir) + \
1357                                                         "If necessary, manually remove " + \
1358                                                         "the environment.bz2 file and/or the " + \
1359                                                         "ebuild file located in that directory."
1360                                                         msg_lines.extend(wrap(msg, 72))
1361                                                         msg_lines.append("")
1362
1363                                                         msg = "Removal " + \
1364                                                         "of the environment.bz2 file is " + \
1365                                                         "preferred since it may allow the " + \
1366                                                         "removal phases to execute successfully. " + \
1367                                                         "The ebuild will be " + \
1368                                                         "sourced and the eclasses " + \
1369                                                         "from the current portage tree will be used " + \
1370                                                         "when necessary. Removal of " + \
1371                                                         "the ebuild file will cause the " + \
1372                                                         "pkg_prerm() and pkg_postrm() removal " + \
1373                                                         "phases to be skipped entirely."
1374                                                         msg_lines.extend(wrap(msg, 72))
1375                                                         from portage.elog.messages import eerror
1376                                                         for l in msg_lines:
1377                                                                 eerror(l, phase=ebuild_phase, key=self.mycpv)
1378
1379                                                 # process logs created during pre/postrm
1380                                                 elog_process(self.mycpv, self.settings, phasefilter=filter_unmergephases)
1381                                                 if retval == os.EX_OK:
1382                                                         doebuild(myebuildpath, "cleanrm", self.myroot,
1383                                                                 self.settings, tree="vartree",
1384                                                                 mydbapi=self.vartree.dbapi,
1385                                                                 vartree=self.vartree)
1386                                 finally:
1387                                         unlockdir(builddir_lock)
1388                         try:
1389                                 if myebuildpath and not catdir_lock:
1390                                         # Lock catdir for removal if empty.
1391                                         catdir_lock = lockdir(catdir)
1392                         finally:
1393                                 if catdir_lock:
1394                                         try:
1395                                                 os.rmdir(catdir)
1396                                         except OSError, e:
1397                                                 if e.errno not in (errno.ENOENT,
1398                                                         errno.ENOTEMPTY, errno.EEXIST):
1399                                                         raise
1400                                                 del e
1401                                         unlockdir(catdir_lock)
1402                 env_update(target_root=self.myroot, prev_mtimes=ldpath_mtimes,
1403                         contents=contents, env=self.settings.environ())
1404                 return os.EX_OK
1405
1406         def _unmerge_pkgfiles(self, pkgfiles, others_in_slot):
1407                 """
1408                 
1409                 Unmerges the contents of a package from the liveFS
1410                 Removes the VDB entry for self
1411                 
1412                 @param pkgfiles: typically self.getcontents()
1413                 @type pkgfiles: Dictionary { filename: [ 'type', 'mtime', 'md5sum' ] }
1414                 @param others_in_slot: all dblink instances in this slot, excluding self
1415                 @type others_in_slot: list
1416                 @rtype: None
1417                 """
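                     # Example of the pkgfiles mapping as produced by getcontents()
                     # (paths and values are hypothetical):
                     #
                     #   {"/usr/share/foo":     ["dir"],
                     #    "/usr/bin/foo":       ["obj", "1201600000", "d41d8cd98f00b204e9800998ecf8427e"],
                     #    "/usr/lib/libfoo.so": ["sym", "1201600000", "libfoo.so.1"]}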
1418
1419                 if not pkgfiles:
1420                         writemsg_stdout("No package files given... Grabbing a set.\n")
1421                         pkgfiles = self.getcontents()
1422
1423                 if others_in_slot is None:
1424                         others_in_slot = []
1425                         slot = self.vartree.dbapi.aux_get(self.mycpv, ["SLOT"])[0]
1426                         slot_matches = self.vartree.dbapi.match(
1427                                 "%s:%s" % (dep_getkey(self.mycpv), slot))
1428                         for cur_cpv in slot_matches:
1429                                 if cur_cpv == self.mycpv:
1430                                         continue
1431                                 others_in_slot.append(dblink(self.cat, catsplit(cur_cpv)[1],
1432                                         self.vartree.root, self.settings,
1433                                         vartree=self.vartree))
1434                 dest_root = normalize_path(self.vartree.root).rstrip(os.path.sep) + \
1435                         os.path.sep
1436                 dest_root_len = len(dest_root) - 1
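                     # dest_root always ends with a slash and dest_root_len excludes
                     # that trailing slash, so obj[dest_root_len:] below yields a path
                     # with a leading "/" that can be compared against CONTENTS-style
                     # paths.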
1437
1438                 conf_mem_file = os.path.join(dest_root, CONFIG_MEMORY_FILE)
1439                 cfgfiledict = grabdict(conf_mem_file)
1440                 stale_confmem = []
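                     # cfgfiledict (CONFIG_MEMORY_FILE) records config-protected files
                     # handled at merge time.  When no other instance remains in this
                     # slot, entries for this package's files are collected in
                     # stale_confmem and pruned from the file at the end of this method.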
1441
1442                 unmerge_orphans = "unmerge-orphans" in self.settings.features
1443
1444                 if pkgfiles:
1445                         self.updateprotect()
1446                         mykeys = pkgfiles.keys()
1447                         mykeys.sort()
1448                         mykeys.reverse()
1449
1450                         #process symlinks second-to-last, directories last.
1451                         mydirs = []
1452                         ignored_unlink_errnos = (
1453                                 errno.EBUSY, errno.ENOENT,
1454                                 errno.ENOTDIR, errno.EISDIR)
1455                         ignored_rmdir_errnos = (
1456                                 errno.EEXIST, errno.ENOTEMPTY,
1457                                 errno.EBUSY, errno.ENOENT,
1458                                 errno.ENOTDIR, errno.EISDIR)
1459                         modprotect = os.path.join(self.vartree.root, "lib/modules/")
1460
1461                         def unlink(file_name, lstatobj):
1462                                 if bsd_chflags:
1463                                         if lstatobj.st_flags != 0:
1464                                                 bsd_chflags.lchflags(file_name, 0)
1465                                         parent_name = os.path.dirname(file_name)
1466                                         # Use normal stat/chflags for the parent since we want to
1467                                         # follow any symlinks to the real parent directory.
1468                                         pflags = os.stat(parent_name).st_flags
1469                                         if pflags != 0:
1470                                                 bsd_chflags.chflags(parent_name, 0)
1471                                 try:
1472                                         if not stat.S_ISLNK(lstatobj.st_mode):
1473                                                 # Remove permissions to ensure that any hardlinks to
1474                                                 # suid/sgid files are rendered harmless.
1475                                                 os.chmod(file_name, 0)
1476                                         os.unlink(file_name)
1477                                 finally:
1478                                         if bsd_chflags and pflags != 0:
1479                                                 # Restore the parent flags we saved before unlinking
1480                                                 bsd_chflags.chflags(parent_name, pflags)
1481
1482                         def show_unmerge(zing, desc, file_type, file_name):
1483                                 writemsg_stdout("%s %s %s %s\n" % \
1484                                         (zing, desc.ljust(8), file_type, file_name))
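                             # show_unmerge output looks roughly like this (hypothetical
                             # path):
                             #   <<<          obj /usr/bin/foo
                             #   --- !mtime   obj /usr/bin/foo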
1485                         for objkey in mykeys:
1486                                 obj = normalize_path(objkey)
1487                                 file_data = pkgfiles[objkey]
1488                                 file_type = file_data[0]
1489                                 statobj = None
1490                                 try:
1491                                         statobj = os.stat(obj)
1492                                 except OSError:
1493                                         pass
1494                                 lstatobj = None
1495                                 try:
1496                                         lstatobj = os.lstat(obj)
1497                                 except (OSError, AttributeError):
1498                                         pass
1499                                 islink = lstatobj is not None and stat.S_ISLNK(lstatobj.st_mode)
1500                                 if lstatobj is None:
1501                                         show_unmerge("---", "!found", file_type, obj)
1502                                         continue
1503                                 if obj.startswith(dest_root):
1504                                         relative_path = obj[dest_root_len:]
1505                                         if not others_in_slot and \
1506                                                 relative_path in cfgfiledict:
1507                                                 stale_confmem.append(relative_path)
1508                                         is_owned = False
1509                                         for dblnk in others_in_slot:
1510                                                 if dblnk.isowner(relative_path, dest_root):
1511                                                         is_owned = True
1512                                                         break
1513                                         if is_owned:
1514                                                 # A new instance of this package claims the file, so
1515                                                 # don't unmerge it.
1516                                                 show_unmerge("---", "replaced", file_type, obj)
1517                                                 continue
1518                                 # next line includes a tweak to protect modules from being unmerged,
1519                                 # but we don't protect modules from being overwritten if they are
1520                                 # upgraded. We effectively only want one half of the config protection
1521                                 # functionality for /lib/modules. For portage-ng both capabilities
1522                                 # should be able to be independently specified.
1523                                 if obj.startswith(modprotect):
1524                                         show_unmerge("---", "cfgpro", file_type, obj)
1525                                         continue
1526
1527                                 # Don't unlink symlinks to directories here since that can
1528                                 # remove /lib and /usr/lib symlinks.
1529                                 if unmerge_orphans and \
1530                                         lstatobj and not stat.S_ISDIR(lstatobj.st_mode) and \
1531                                         not (islink and statobj and stat.S_ISDIR(statobj.st_mode)) and \
1532                                         not self.isprotected(obj):
1533                                         try:
1534                                                 unlink(obj, lstatobj)
1535                                         except EnvironmentError, e:
1536                                                 if e.errno not in ignored_unlink_errnos:
1537                                                         raise
1538                                                 del e
1539                                         show_unmerge("<<<", "", file_type, obj)
1540                                         continue
1541
1542                                 lmtime = str(lstatobj[stat.ST_MTIME])
1543                                 if (pkgfiles[objkey][0] not in ("dir", "fif", "dev")) and (lmtime != pkgfiles[objkey][1]):
1544                                         show_unmerge("---", "!mtime", file_type, obj)
1545                                         continue
1546
1547                                 if pkgfiles[objkey][0] == "dir":
1548                                         if statobj is None or not stat.S_ISDIR(statobj.st_mode):
1549                                                 show_unmerge("---", "!dir", file_type, obj)
1550                                                 continue
1551                                         mydirs.append(obj)
1552                                 elif pkgfiles[objkey][0] == "sym":
1553                                         if not islink:
1554                                                 show_unmerge("---", "!sym", file_type, obj)
1555                                                 continue
1556                                         # Go ahead and unlink symlinks to directories here when
1557                                         # they're actually recorded as symlinks in the contents.
1558                                         # Normally, symlinks such as /lib -> lib64 are not recorded
1559                                         # as symlinks in the contents of a package.  If a package
1560                                         # installs something into ${D}/lib/, it is recorded in the
1561                                         # contents as a directory even if it happens to correspond
1562                                         # to a symlink when it's merged to the live filesystem.
1563                                         try:
1564                                                 unlink(obj, lstatobj)
1565                                                 show_unmerge("<<<", "", file_type, obj)
1566                                         except (OSError, IOError),e:
1567                                                 if e.errno not in ignored_unlink_errnos:
1568                                                         raise
1569                                                 del e
1570                                                 show_unmerge("!!!", "", file_type, obj)
1571                                 elif pkgfiles[objkey][0] == "obj":
1572                                         if statobj is None or not stat.S_ISREG(statobj.st_mode):
1573                                                 show_unmerge("---", "!obj", file_type, obj)
1574                                                 continue
1575                                         mymd5 = None
1576                                         try:
1577                                                 mymd5 = perform_md5(obj, calc_prelink=1)
1578                                         except FileNotFound, e:
1579                                                 # the file has disappeared between now and our stat call
1580                                                 show_unmerge("---", "!obj", file_type, obj)
1581                                                 continue
1582
1583                                         # The recorded md5 is lower-cased because old db entries used
1584                                         # upper-case digests; lowering preserves backwards compatibility.
1585                                         if mymd5 != pkgfiles[objkey][2].lower():
1586                                                 show_unmerge("---", "!md5", file_type, obj)
1587                                                 continue
1588                                         try:
1589                                                 unlink(obj, lstatobj)
1590                                         except (OSError, IOError), e:
1591                                                 if e.errno not in ignored_unlink_errnos:
1592                                                         raise
1593                                                 del e
1594                                         show_unmerge("<<<", "", file_type, obj)
1595                                 elif pkgfiles[objkey][0] == "fif":
1596                                         if not stat.S_ISFIFO(lstatobj[stat.ST_MODE]):
1597                                                 show_unmerge("---", "!fif", file_type, obj)
1598                                                 continue
1599                                         show_unmerge("---", "", file_type, obj)
1600                                 elif pkgfiles[objkey][0] == "dev":
1601                                         show_unmerge("---", "", file_type, obj)
1602
1603                         mydirs.sort()
1604                         mydirs.reverse()
1605
1606                         for obj in mydirs:
1607                                 try:
1608                                         if bsd_chflags:
1609                                                 lstatobj = os.lstat(obj)
1610                                                 if lstatobj.st_flags != 0:
1611                                                         bsd_chflags.lchflags(obj, 0)
1612                                                 parent_name = os.path.dirname(obj)
1613                                                 # Use normal stat/chflags for the parent since we want to
1614                                                 # follow any symlinks to the real parent directory.
1615                                                 pflags = os.stat(parent_name).st_flags
1616                                                 if pflags != 0:
1617                                                         bsd_chflags.chflags(parent_name, 0)
1618                                         try:
1619                                                 os.rmdir(obj)
1620                                         finally:
1621                                                 if bsd_chflags and pflags != 0:
1622                                                         # Restore the parent flags we saved before unlinking
1623                                                         bsd_chflags.chflags(parent_name, pflags)
1624                                         show_unmerge("<<<", "", "dir", obj)
1625                                 except EnvironmentError, e:
1626                                         if e.errno not in ignored_rmdir_errnos:
1627                                                 raise
1628                                         if e.errno != errno.ENOENT:
1629                                                 show_unmerge("---", "!empty", "dir", obj)
1630                                         del e
1631
1632                 # Remove stale entries from config memory.
1633                 if stale_confmem:
1634                         for filename in stale_confmem:
1635                                 del cfgfiledict[filename]
1636                         writedict(cfgfiledict, conf_mem_file)
1637
1638                 #remove self from vartree database so that our own virtual gets zapped if we're the last node
1639                 self.vartree.zap(self.mycpv)
1640
1641         def isowner(self,filename, destroot):
1642                 """ 
1643                 Check if a file belongs to this package. This may
1644                 result in a stat call for the parent directory of
1645                 every installed file, since the inode numbers are
1646                 used to work around the problem of ambiguous paths
1647                 caused by symlinked directories. The results of
1648                 stat calls are cached to optimize multiple calls
1649                 to this method.
1650
1651                 @param filename: path of the file to check, relative to destroot
1652                 @type filename: String (Path)
1653                 @param destroot: root directory (usually ${ROOT}) the path is joined to
1654                 @type destroot: String (Path)
1655                 @rtype: Boolean
1656                 @returns:
1657                 1. True if this package owns the file.
1658                 2. False if this package does not own the file.
1659                 """
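                     # Hypothetical usage (the path and root are made up):
                     #
                     #   if dblnk.isowner("/usr/bin/foo", "/"):
                     #       ...  # "/usr/bin/foo" is listed in dblnk's CONTENTS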
1660                 destfile = normalize_path(
1661                         os.path.join(destroot, filename.lstrip(os.path.sep)))
1662
1663                 pkgfiles = self.getcontents()
1664                 if pkgfiles and destfile in pkgfiles:
1665                         return True
1666                 if pkgfiles:
1667                         basename = os.path.basename(destfile)
1668                         if self._contents_basenames is None:
1669                                 self._contents_basenames = set(
1670                                         os.path.basename(x) for x in pkgfiles)
1671                         if basename not in self._contents_basenames:
1672                                 # This is a shortcut that, in most cases, allows us to
1673                                 # eliminate this package as an owner without the need
1674                                 # to examine inode numbers of parent directories.
1675                                 return False
1676
1677                         # Use stat rather than lstat since we want to follow
1678                         # any symlinks to the real parent directory.
1679                         parent_path = os.path.dirname(destfile)
1680                         try:
1681                                 parent_stat = os.stat(parent_path)
1682                         except EnvironmentError, e:
1683                                 if e.errno != errno.ENOENT:
1684                                         raise
1685                                 del e
1686                                 return False
1687                         if self._contents_inodes is None:
1688                                 self._contents_inodes = {}
1689                                 parent_paths = set()
1690                                 for x in pkgfiles:
1691                                         p_path = os.path.dirname(x)
1692                                         if p_path in parent_paths:
1693                                                 continue
1694                                         parent_paths.add(p_path)
1695                                         try:
1696                                                 s = os.stat(p_path)
1697                                         except OSError:
1698                                                 pass
1699                                         else:
1700                                                 inode_key = (s.st_dev, s.st_ino)
1701                                                 # Use lists of paths in case multiple
1702                                                 # paths reference the same inode.
1703                                                 p_path_list = self._contents_inodes.get(inode_key)
1704                                                 if p_path_list is None:
1705                                                         p_path_list = []
1706                                                         self._contents_inodes[inode_key] = p_path_list
1707                                                 if p_path not in p_path_list:
1708                                                         p_path_list.append(p_path)
1709                         p_path_list = self._contents_inodes.get(
1710                                 (parent_stat.st_dev, parent_stat.st_ino))
1711                         if p_path_list:
1712                                 for p_path in p_path_list:
1713                                         x = os.path.join(p_path, basename)
1714                                         if x in pkgfiles:
1715                                                 return True
1716
1717                 return False
1718
1719         def _preserve_libs(self, srcroot, destroot, mycontents, counter, inforoot):
1720                 # read global reverse NEEDED map
1721                 linkmap = self.vartree.dbapi.linkmap
1722                 linkmap.rebuild(include_file=os.path.join(inforoot, "NEEDED.ELF.2"))
1723                 liblist = linkmap.listLibraryObjects()
1724
1725                 # get list of libraries from old package instance
1726                 old_contents = self._installed_instance.getcontents().keys()
1727                 old_libs = set(old_contents).intersection(liblist)
1728
1729                 # get list of libraries from new package instance
1730                 mylibs = set([os.path.join(os.sep, x) for x in mycontents]).intersection(liblist)
1731                 
1732                 # check which libs are present in the old, but not the new package instance
1733                 candidates = old_libs.difference(mylibs)
1734                 
1735                 for x in old_contents:
1736                         if os.path.islink(x) and os.path.realpath(x) in candidates and x not in mycontents:
1737                                 candidates.add(x)
1738
1739                 # ignore any libs that are only internally used by the package
1740                 def has_external_consumers(lib, contents, otherlibs):
1741                         consumers = linkmap.findConsumers(lib)
1742                         contents_without_libs = [x for x in contents if x not in otherlibs]
1743                         
1744                         # just used by objects that will be autocleaned
1745                         if len(consumers.difference(contents_without_libs)) == 0:
1746                                 return False
1747                         # used by objects that are referenced as well, need to check those 
1748                         # recursively to break any reference cycles
1749                         elif len(consumers.difference(contents)) == 0:
1750                                 otherlibs = set(otherlibs)
1751                                 for ol in otherlibs.intersection(consumers):
1752                                         if has_external_consumers(ol, contents, otherlibs.difference([lib])):
1753                                                 return True
1754                                 return False
1755                         # used by external objects directly
1756                         else:
1757                                 return True
1758
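                     # Filter the candidates: drop libs that no longer have any
                     # consumers outside this package, then drop those whose remaining
                     # consumers can all be satisfied by another provider (a lib that
                     # is not itself a candidate, or one shipped in the new image
                     # under srcroot).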
1759                 for lib in list(candidates):
1760                         if not has_external_consumers(lib, old_contents, candidates):
1761                                 candidates.remove(lib)
1762                                 continue
1763                         # only preserve the lib if at least one consumer has no other usable copy
1764                         keep = False
1765                         for c in linkmap.findConsumers(lib):
1766                                 localkeep = True
1767                                 providers = linkmap.findProviders(c)
1768                                 
1769                                 for soname in providers:
1770                                         if lib in providers[soname]:
1771                                                 for p in providers[soname]:
1772                                                         if p not in candidates or os.path.exists(os.path.join(srcroot, p.lstrip(os.sep))):
1773                                                                 localkeep = False
1774                                                                 break
1775                                                 break
1776                                 if localkeep:
1777                                         keep = True
1778                         if not keep:
1779                                 candidates.remove(lib)
1780                                 continue
1781                 
1782                 del mylibs, mycontents, old_contents, liblist
1783                 
1784                 # inject files that should be preserved into our image dir
1785                 import shutil
1786                 missing_paths = []
1787                 for x in candidates:
1788                         # skip files already present in the new image so the 'new' libs aren't overwritten or needlessly preserved
1789                         if os.path.exists(os.path.join(srcroot, x.lstrip(os.sep))):
1790                                 missing_paths.append(x)
1791                                 continue
1792                         print "injecting %s into %s" % (x, srcroot)
1793                         if not os.path.exists(os.path.join(destroot, x.lstrip(os.sep))):
1794                                 print "%s does not exist so can't be preserved" % x
1795                                 missing_paths.append(x)
1796                                 continue
1797                         mydir = os.path.join(srcroot, os.path.dirname(x).lstrip(os.sep))
1798                         if not os.path.exists(mydir):
1799                                 os.makedirs(mydir)
1800
1801                         # resolve symlinks and extend preserve list
1802                         # NOTE: we're extending the list in the loop to emulate recursion to
1803                         #       also get indirect symlinks
1804                         if os.path.islink(x):
1805                                 linktarget = os.readlink(x)
1806                                 os.symlink(linktarget, os.path.join(srcroot, x.lstrip(os.sep)))
1807                                 if linktarget[0] != os.sep:
1808                                         linktarget = os.path.join(os.path.dirname(x), linktarget)
1809                                 candidates.add(linktarget)
1810                         else:
1811                                 shutil.copy2(os.path.join(destroot, x.lstrip(os.sep)),
1812                                         os.path.join(srcroot, x.lstrip(os.sep)))
1813
1814                 preserve_paths = [x for x in candidates if x not in missing_paths]
1815
1816                 del missing_paths, candidates
1817
1818                 # keep track of the libs we preserved
1819                 self.vartree.dbapi.plib_registry.register(self.mycpv, self.settings["SLOT"], counter, preserve_paths)
1820
1821                 del preserve_paths
1822         
1823         def _collision_protect(self, srcroot, destroot, mypkglist, mycontents):
1824                         collision_ignore = set([normalize_path(myignore) for myignore in \
1825                                 self.settings.get("COLLISION_IGNORE", "").split()])
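                             # COLLISION_IGNORE is a whitespace-separated list of paths,
                             # e.g. a hypothetical make.conf entry:
                             #   COLLISION_IGNORE="/lib/modules /opt/foo"
                             # Files at or below any of these paths are not reported as
                             # collisions below.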
1826
1827                         stopmerge = False
1828                         i=0
1829                         collisions = []
1830                         destroot = normalize_path(destroot).rstrip(os.path.sep) + \
1831                                 os.path.sep
1832                         writemsg_stdout("%s checking %d files for package collisions\n" % \
1833                                 (green("*"), len(mycontents)))
1834                         for f in mycontents:
1835                                 i = i + 1
1836                                 if i % 1000 == 0:
1837                                         writemsg_stdout("%d files checked ...\n" % i)
1838                                 dest_path = normalize_path(
1839                                         os.path.join(destroot, f.lstrip(os.path.sep)))
1840                                 try:
1841                                         dest_lstat = os.lstat(dest_path)
1842                                 except EnvironmentError, e:
1843                                         if e.errno == errno.ENOENT:
1844                                                 del e
1845                                                 continue
1846                                         elif e.errno == errno.ENOTDIR:
1847                                                 del e
1848                                                 # A non-directory is in a location where this package
1849                                                 # expects to have a directory.
1850                                                 dest_lstat = None
1851                                                 parent_path = dest_path
1852                                                 while len(parent_path) > len(destroot):
1853                                                         parent_path = os.path.dirname(parent_path)
1854                                                         try:
1855                                                                 dest_lstat = os.lstat(parent_path)
1856                                                                 break
1857                                                         except EnvironmentError, e:
1858                                                                 if e.errno != errno.ENOTDIR:
1859                                                                         raise
1860                                                                 del e
1861                                                 if not dest_lstat:
1862                                                         raise AssertionError(
1863                                                                 "unable to find non-directory " + \
1864                                                                 "parent for '%s'" % dest_path)
1865                                                 dest_path = parent_path
1866                                                 f = os.path.sep + dest_path[len(destroot):]
1867                                                 if f in collisions:
1868                                                         continue
1869                                         else:
1870                                                 raise
1871                                 if f[0] != "/":
1872                                         f="/"+f
1873                                 isowned = False
1874                                 for ver in [self] + mypkglist:
1875                                         if (ver.isowner(f, destroot) or ver.isprotected(f)):
1876                                                 isowned = True
1877                                                 break
1878                                 if not isowned:
1879                                         stopmerge = True
1880                                         if collision_ignore:
1881                                                 if f in collision_ignore:
1882                                                         stopmerge = False
1883                                                 else:
1884                                                         for myignore in collision_ignore:
1885                                                                 if f.startswith(myignore + os.path.sep):
1886                                                                         stopmerge = False
1887                                                                         break
1888                                         if stopmerge:
1889                                                 collisions.append(f)
1890                         return collisions
1891
1892         def _security_check(self, installed_instances):
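                     """
                     Check the files owned by the given dblink instances for setuid or
                     setgid regular files whose hardlink count exceeds the number of
                     links accounted for by those packages.  Suspicious hardlinks are
                     reported and 1 is returned so that the calling unmerge or
                     replacement can bail out; otherwise 0 is returned.
                     """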
1893                 if not installed_instances:
1894                         return 0
1895                 file_paths = set()
1896                 for dblnk in installed_instances:
1897                         file_paths.update(dblnk.getcontents())
1898                 inode_map = {}
1899                 real_paths = set()
1900                 for path in file_paths:
1901                         try:
1902                                 s = os.lstat(path)
1903                         except OSError, e:
1904                                 if e.errno not in (errno.ENOENT, errno.ENOTDIR):
1905                                         raise
1906                                 del e
1907                                 continue
1908                         if not stat.S_ISREG(s.st_mode):
1909                                 continue
1910                         path = os.path.realpath(path)
1911                         if path in real_paths:
1912                                 continue
1913                         real_paths.add(path)
1914                         if s.st_nlink > 1 and \
1915                                 s.st_mode & (stat.S_ISUID | stat.S_ISGID):
1916                                 k = (s.st_dev, s.st_ino)
1917                                 inode_map.setdefault(k, []).append((path, s))
1918                 suspicious_hardlinks = []
1919                 for path_list in inode_map.itervalues():
1920                         path, s = path_list[0]
1921                         if len(path_list) == s.st_nlink:
1922                                 # All hardlinks seem to be owned by this package.
1923                                 continue
1924                         suspicious_hardlinks.append(path_list)
1925                 if not suspicious_hardlinks:
1926                         return 0
1927                 from portage.output import colorize
1928                 prefix = colorize("SECURITY_WARN", "*") + " WARNING: "
1929                 writemsg(prefix + "suid/sgid file(s) " + \
1930                         "with suspicious hardlink(s):\n", noiselevel=-1)
1931                 for path_list in suspicious_hardlinks:
1932                         for path, s in path_list:
1933                                 writemsg(prefix + "  '%s'\n" % path, noiselevel=-1)
1934                 writemsg(prefix + "See the Gentoo Security Handbook " + \
1935                         "guide for advice on how to proceed.\n", noiselevel=-1)
1936                 return 1
1937
1938         def treewalk(self, srcroot, destroot, inforoot, myebuild, cleanup=0,
1939                 mydbapi=None, prev_mtimes=None):
1940                 """
1941                 
1942                 This function does the following:
1943                 
1944                 calls self._preserve_libs if FEATURES=preserve-libs
1945                 calls self._collision_protect if FEATURES=collision-protect
1946                 calls doebuild(mydo=pkg_preinst)
1947                 Merges the package to the livefs
1948                 unmerges old version (if required)
1949                 calls doebuild(mydo=pkg_postinst)
1950                 calls env_update
1951                 calls elog_process
1952                 
1953                 @param srcroot: Typically this is ${D}
1954                 @type srcroot: String (Path)
1955                 @param destroot: Path to merge to (usually ${ROOT})
1956                 @type destroot: String (Path)
1957                 @param inforoot: directory containing the package metadata files (SLOT, etc.) that are copied into the vardb entry
1958                 @type inforoot: String (Path)
1959                 @param myebuild: path to the ebuild that we are processing
1960                 @type myebuild: String (Path)
1961                 @param mydbapi: dbapi which is handed to doebuild.
1962                 @type mydbapi: portdbapi instance
1963                 @param prev_mtimes: { Filename:mtime } mapping for env_update
1964                 @type prev_mtimes: Dictionary
1965                 @rtype: Integer
1966                 @returns:
1967                 1. 0 on success
1968                 2. 1 on failure
1969                 
1970                 secondhand is a list of symlinks that have been skipped due to their target
1971                 not existing; we will merge these symlinks at a later time.
1972                 """
1973
1974                 srcroot = normalize_path(srcroot).rstrip(os.path.sep) + os.path.sep
1975                 destroot = normalize_path(destroot).rstrip(os.path.sep) + os.path.sep
1976
1977                 if not os.path.isdir(srcroot):
1978                         writemsg("!!! Directory Not Found: D='%s'\n" % srcroot,
1979                                 noiselevel=-1)
1980                         return 1
1981
1982                 inforoot_slot_file = os.path.join(inforoot, "SLOT")
1983                 slot = None
1984                 try:
1985                         f = open(inforoot_slot_file)
1986                         try:
1987                                 slot = f.read().strip()
1988                         finally:
1989                                 f.close()
1990                 except EnvironmentError, e:
1991                         if e.errno != errno.ENOENT:
1992                                 raise
1993                         del e
1994
1995                 if slot is None:
1996                         slot = ""
1997
1998                 from portage.elog.messages import eerror as _eerror
1999                 def eerror(lines):
2000                         for l in lines:
2001                                 _eerror(l, phase="preinst", key=self.settings.mycpv)
2002
2003                 if slot != self.settings["SLOT"]:
2004                         writemsg("!!! WARNING: Expected SLOT='%s', got '%s'\n" % \
2005                                 (self.settings["SLOT"], slot))
2006
2007                 if not os.path.exists(self.dbcatdir):
2008                         os.makedirs(self.dbcatdir)
2009
2010                 otherversions = []
2011                 for v in self.vartree.dbapi.cp_list(self.mysplit[0]):
2012                         otherversions.append(v.split("/")[1])
2013
2014                 # filter any old-style virtual matches
2015                 slot_matches = [cpv for cpv in self.vartree.dbapi.match(
2016                         "%s:%s" % (cpv_getkey(self.mycpv), slot)) \
2017                         if cpv_getkey(cpv) == cpv_getkey(self.mycpv)]
2018
2019                 if self.mycpv not in slot_matches and \
2020                         self.vartree.dbapi.cpv_exists(self.mycpv):
2021                         # handle multislot or unapplied slotmove
2022                         slot_matches.append(self.mycpv)
2023
2024                 others_in_slot = []
2025                 from portage import config
2026                 for cur_cpv in slot_matches:
2027                         # Clone the config in case one of these has to be unmerged since
2028                         # we need it to have private ${T} etc... for things like elog.
2029                         others_in_slot.append(dblink(self.cat, catsplit(cur_cpv)[1],
2030                                 self.vartree.root, config(clone=self.settings),
2031                                 vartree=self.vartree))
2032                 retval = self._security_check(others_in_slot)
2033                 if retval:
2034                         return retval
2035
2036                 if slot_matches:
2037                         # Used by self.isprotected().
2038                         max_dblnk = None
2039                         max_counter = -1
2040                         for dblnk in others_in_slot:
2041                                 cur_counter = self.vartree.dbapi.cpv_counter(dblnk.mycpv)
2042                                 if cur_counter > max_counter:
2043                                         max_counter = cur_counter
2044                                         max_dblnk = dblnk
2045                         self._installed_instance = max_dblnk
2046
2047                 # get current counter value (counter_tick also takes care of incrementing it)
2048                 # XXX Need to make this destroot, but it needs to be initialized first. XXX
2049                 # XXX bis: leads to some invalidentry() call through cp_all().
2050                 # Note: The counter is generated here but written later because preserve_libs
2051                 #       needs the counter value but has to be before dbtmpdir is made (which
2052                 #       has to be before the counter is written) - genone
2053                 counter = self.vartree.dbapi.counter_tick(self.myroot, mycpv=self.mycpv)
2054
2055                 # Save this for unregistering preserved-libs if the merge fails.
2056                 self.settings["COUNTER"] = str(counter)
2057                 self.settings.backup_changes("COUNTER")
2058
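                     # Build lists of the regular files and symlinks that this package
                     # installs, with paths relative to srcroot (${D}).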
2059                 myfilelist = []
2060                 mylinklist = []
2061                 def onerror(e):
2062                         raise
2063                 for parent, dirs, files in os.walk(srcroot, onerror=onerror):
2064                         for f in files:
2065                                 file_path = os.path.join(parent, f)
2066                                 file_mode = os.lstat(file_path).st_mode
2067                                 if stat.S_ISREG(file_mode):
2068                                         myfilelist.append(file_path[len(srcroot):])
2069                                 elif stat.S_ISLNK(file_mode):
2070                                         # Note: os.walk puts symlinks to directories in the "dirs"
2071                                         # list and it does not traverse them since that could lead
2072                                         # to an infinite recursion loop.
2073                                         mylinklist.append(file_path[len(srcroot):])
2074
2075                 # If there are no files to merge, and an installed package in the same
2076                 # slot has files, it probably means that something went wrong.
2077                 if self.settings.get("PORTAGE_PACKAGE_EMPTY_ABORT") == "1" and \
2078                         not myfilelist and not mylinklist and others_in_slot:
2079                         installed_files = None
2080                         for other_dblink in others_in_slot:
2081                                 installed_files = other_dblink.getcontents()
2082                                 if not installed_files:
2083                                         continue
2084                                 from textwrap import wrap
2085                                 wrap_width = 72
2086                                 msg = []
2087                                 d = (
2088                                         self.mycpv,
2089                                         other_dblink.mycpv
2090                                 )
2091                                 msg.extend(wrap(("The '%s' package will not install " + \
2092                                         "any files, but the currently installed '%s'" + \
2093                                         " package has the following files: ") % d, wrap_width))
2094                                 msg.append("")
2095                                 msg.extend(sorted(installed_files))
2096                                 msg.append("")
2097                                 msg.append("package %s NOT merged" % self.mycpv)
2098                                 msg.append("")
2099                                 msg.extend(wrap(
2100                                         ("Manually run `emerge --unmerge =%s` " % \
2101                                         other_dblink.mycpv) + "if you really want to " + \
2102                                         "remove the above files. Set " + \
2103                                         "PORTAGE_PACKAGE_EMPTY_ABORT=\"0\" in " + \
2104                                         "/etc/make.conf if you do not want to " + \
2105                                         "abort in cases like this.",
2106                                         wrap_width))
2107                                 eerror(msg)
2108                         if installed_files:
2109                                 return 1
2110
2111                 # Preserve old libs if they are still in use
2112                 if slot_matches and "preserve-libs" in self.settings.features:
2113                         self._preserve_libs(srcroot, destroot, myfilelist+mylinklist, counter, inforoot)
2114
2115                 # check for package collisions
2116                 blockers = None
2117                 if self._blockers is not None:
2118                         # This is only supposed to be called when
2119                         # the vdb is locked, like it is here.
2120                         blockers = self._blockers()
2121                 if blockers is None:
2122                         blockers = []
2123                 collisions = self._collision_protect(srcroot, destroot,
2124                         others_in_slot + blockers, myfilelist + mylinklist)
2125
2126                 # Make sure the ebuild environment is initialized and that ${T}/elog
2127                 # exists for logging of collision-protect eerror messages.
2128                 if myebuild is None:
2129                         myebuild = os.path.join(inforoot, self.pkg + ".ebuild")
2130                 doebuild_environment(myebuild, "preinst", destroot,
2131                         self.settings, 0, 0, mydbapi)
2132                 prepare_build_dirs(destroot, self.settings, cleanup)
2133
2134                 if collisions:
2135                         collision_protect = "collision-protect" in self.settings.features
2136                         msg = "This package will overwrite one or more files that" + \
2137                         " may belong to other packages (see list below)."
2138                         if not collision_protect:
2139                                 msg += " Add \"collision-protect\" to FEATURES in" + \
2140                                 " make.conf if you would like the merge to abort" + \
2141                                 " in cases like this."
2142                         if self.settings.get("PORTAGE_QUIET") != "1":
2143                                 msg += " You can use a command such as" + \
2144                                 " `portageq owners / <filename>` to identify the" + \
2145                                 " installed package that owns a file. If portageq" + \
2146                                 " reports that only one package owns a file then do NOT" + \
2147                                 " file a bug report. A bug report is only useful if it" + \
2148                                 " identifies at least two packages that are known" + \
2149                                 " to install the same file(s)." + \
2150                                 " If a collision occurs and you" + \
2151                                 " cannot explain where the file came from then you" + \
2152                                 " should simply ignore the collision since there is not" + \
2153                                 " enough information to determine if a real problem" + \
2154                                 " exists. Please do NOT file a bug report at" + \
2155                                 " http://bugs.gentoo.org unless you report exactly which" + \
2156                                 " two packages install the same file(s). Once again," + \
2157                                 " please do NOT file a bug report unless you have" + \
2158                                 " completely understood the above message."
2159
2160                         self.settings["EBUILD_PHASE"] = "preinst"
2161                         from textwrap import wrap
2162                         msg = wrap(msg, 70)
2163                         if collision_protect:
2164                                 msg.append("")
2165                                 msg.append("package %s NOT merged" % self.settings.mycpv)
2166                         msg.append("")
2167                         msg.append("Detected file collision(s):")
2168                         msg.append("")
2169
2170                         for f in collisions:
2171                                 msg.append("\t%s" % \
2172                                         os.path.join(destroot, f.lstrip(os.path.sep)))
2173
2174                         eerror(msg)
2175
2176                         if collision_protect:
2177                                 msg = []
2178                                 msg.append("")
2179                                 msg.append("Searching all installed" + \
2180                                         " packages for file collisions...")
2181                                 msg.append("")
2182                                 msg.append("Press Ctrl-C to Stop")
2183                                 msg.append("")
2184                                 eerror(msg)
2185
2186                                 found_owner = False
2187                                 for cpv in self.vartree.dbapi.cpv_all():
2188                                         cat, pkg = catsplit(cpv)
2189                                         mylink = dblink(cat, pkg, destroot, self.settings,
2190                                                 vartree=self.vartree)
2191                                         mycollisions = []
2192                                         for f in collisions:
2193                                                 if mylink.isowner(f, destroot):
2194                                                         mycollisions.append(f)
2195                                         if mycollisions:
2196                                                 found_owner = True
2197                                                 msg = []
2198                                                 msg.append("%s" % cpv)
2199                                                 for f in mycollisions:
2200                                                         msg.append("\t%s" % os.path.join(destroot,
2201                                                                 f.lstrip(os.path.sep)))
2202                                                 eerror(msg)
2203                                 if not found_owner:
2204                                         eerror(["None of the installed" + \
2205                                                 " packages claim the file(s)."])
2206                                 return 1
2207
2208                 writemsg_stdout(">>> Merging %s to %s\n" % (self.mycpv, destroot))
2209
2210                 # The merge process may move files out of the image directory,
2211                 # which causes invalidation of the .installed flag.
2212                 try:
2213                         os.unlink(os.path.join(
2214                                 os.path.dirname(normalize_path(srcroot)), ".installed"))
2215                 except OSError, e:
2216                         if e.errno != errno.ENOENT:
2217                                 raise
2218                         del e
2219
2220                 self.dbdir = self.dbtmpdir
2221                 self.delete()
2222                 ensure_dirs(self.dbtmpdir)
2223
2224                 # run preinst script
2225                 a = doebuild(myebuild, "preinst", destroot, self.settings,
2226                         use_cache=0, tree=self.treetype, mydbapi=mydbapi,
2227                         vartree=self.vartree)
2228
2229                 # XXX: Decide how to handle failures here.
2230                 if a != os.EX_OK:
2231                         writemsg("!!! FAILED preinst: "+str(a)+"\n", noiselevel=-1)
2232                         return a
2233
2234                 # copy "info" files (like SLOT, CFLAGS, etc.) into the database
2235                 for x in listdir(inforoot):
2236                         self.copyfile(inforoot+"/"+x)
2237
2238                 # write local package counter for recording
2239                 lcfile = open(os.path.join(self.dbtmpdir, "COUNTER"),"w")
2240                 lcfile.write(str(counter))
2241                 lcfile.close()
2242
2243                 # open CONTENTS file (possibly overwriting old one) for recording
2244                 outfile = open(os.path.join(self.dbtmpdir, "CONTENTS"),"w")
2245
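                     # prepare the config file protection data used by
                     # self.isprotected() during the merge below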
2246                 self.updateprotect()
2247
2248                 #if we have a file containing previously-merged config file md5sums, grab it.
2249                 conf_mem_file = os.path.join(destroot, CONFIG_MEMORY_FILE)
2250                 cfgfiledict = grabdict(conf_mem_file)
2251                 if self.settings.has_key("NOCONFMEM"):
2252                         cfgfiledict["IGNORE"]=1
2253                 else:
2254                         cfgfiledict["IGNORE"]=0
2255
2256                 # Always behave like --noconfmem is enabled for downgrades
2257                 # so that people who don't know about this option are less
2258                 # likely to get confused when doing upgrade/downgrade cycles.
2259                 pv_split = catpkgsplit(self.mycpv)[1:]
2260                 for other in others_in_slot:
2261                         if pkgcmp(pv_split, catpkgsplit(other.mycpv)[1:]) < 0:
2262                                 cfgfiledict["IGNORE"] = 1
2263                                 break
2264
2265                 # Don't bump mtimes on merge since some applications require
2266                 # preservation of timestamps.  This means that the unmerge phase must
2267                 # check to see if a file belongs to an installed instance in the same
2268                 # slot.
2269                 mymtime = None
2270
2271                 # set umask to 0 for merging; save the old value in prevmask since this is a process-wide change
2272                 prevmask = os.umask(0)
2273                 secondhand = []
2274
2275                 # we do a first merge; this will recurse through all files in our srcroot but also build up a
2276                 # "second hand" of symlinks to merge later
2277                 if self.mergeme(srcroot, destroot, outfile, secondhand, "", cfgfiledict, mymtime):
2278                         return 1
2279
2280                 # now, it's time for dealing our second hand; we'll loop until we can't merge anymore.  The rest are
2281                 # broken symlinks.  We'll merge them too.
2282                 lastlen = 0
2283                 while len(secondhand) and len(secondhand)!=lastlen:
2284                         # clear the thirdhand.  Anything from our second hand that
2285                         # couldn't get merged will be added to thirdhand.
2286
2287                         thirdhand = []
2288                         self.mergeme(srcroot, destroot, outfile, thirdhand, secondhand, cfgfiledict, mymtime)
2289
2290                         #swap hands
2291                         lastlen = len(secondhand)
2292
2293                         # our thirdhand now becomes our secondhand.  It's ok to throw
2294                         # away secondhand since thirdhand contains all the stuff that
2295                         # couldn't be merged.
2296                         secondhand = thirdhand
2297
2298                 if len(secondhand):
2299                         # force merge of remaining symlinks (broken or circular; oh well)
2300                         self.mergeme(srcroot, destroot, outfile, None, secondhand, cfgfiledict, mymtime)
2301
2302                 #restore umask
2303                 os.umask(prevmask)
2304
2305                 # flush and close the CONTENTS file
2306                 outfile.flush()
2307                 outfile.close()
2308
2309                 # These caches are populated during collision-protect and the data
2310                 # they contain is now invalid. It's very important to invalidate
2311                 # the contents_inodes cache so that FEATURES=unmerge-orphans
2312                 # doesn't unmerge anything that belongs to this package that has
2313                 # just been merged.
2314                 others_in_slot.append(self)  # self has just been merged
2315                 for dblnk in others_in_slot:
2316                         dblnk.contentscache = None
2317                         dblnk._contents_inodes = None
2318                         dblnk._contents_basenames = None
2319
2320                 # If portage is reinstalling itself, remove the old
2321                 # version now since we want to use the temporary
2322                 # PORTAGE_BIN_PATH that will be removed when we return.
2323                 reinstall_self = False
2324                 if self.myroot == "/" and \
2325                         "sys-apps" == self.cat and \
2326                         "portage" == pkgsplit(self.pkg)[0]:
2327                         reinstall_self = True
2328
2329                 autoclean = self.settings.get("AUTOCLEAN", "yes") == "yes"
2330                 for dblnk in list(others_in_slot):
2331                         if dblnk is self:
2332                                 continue
2333                         if not (autoclean or dblnk.mycpv == self.mycpv or reinstall_self):
2334                                 continue
2335                         writemsg_stdout(">>> Safely unmerging already-installed instance...\n")
2336                         others_in_slot.remove(dblnk) # dblnk will unmerge itself now
2337                         dblnk.unmerge(trimworld=0, ldpath_mtimes=prev_mtimes,
2338                                 others_in_slot=others_in_slot)
2339                         # TODO: Check status and abort if necessary.
2340                         dblnk.delete()
2341                         writemsg_stdout(">>> Original instance of package unmerged safely.\n")
2342
2343                 if len(others_in_slot) > 1:
2344                         from portage.output import colorize
2345                         writemsg_stdout(colorize("WARN", "WARNING:")
2346                                 + " AUTOCLEAN is disabled.  This can cause serious"
2347                                 + " problems due to overlapping packages.\n")
2348
2349                 # We hold both directory locks.
2350                 self.dbdir = self.dbpkgdir
2351                 self.delete()
2352                 _movefile(self.dbtmpdir, self.dbpkgdir, mysettings=self.settings)
2353
2354                 # Check for file collisions with blocking packages
2355                 # and remove any colliding files from their CONTENTS
2356                 # since they now belong to this package.
2357                 self._clear_contents_cache()
2358                 contents = self.getcontents()
2359                 destroot_len = len(destroot) - 1
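                     # destroot ends with a separator, so subtracting one here leaves a
                     # leading '/' on the relative CONTENTS paths sliced below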
2360                 for blocker in blockers:
2361                         blocker_contents = blocker.getcontents()
2362                         collisions = []
2363                         for filename in blocker_contents:
2364                                 relative_filename = filename[destroot_len:]
2365                                 if self.isowner(relative_filename, destroot):
2366                                         collisions.append(filename)
2367                         if not collisions:
2368                                 continue
2369                         for filename in collisions:
2370                                 del blocker_contents[filename]
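                             # Rewrite the blocker's CONTENTS atomically, preserving the
                             # on-disk format of each entry type (obj, sym, dir, dev, fif).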
2371                         f = atomic_ofstream(os.path.join(blocker.dbdir, "CONTENTS"))
2372                         for filename in sorted(blocker_contents):
2373                                 entry_data = blocker_contents[filename]
2374                                 entry_type = entry_data[0]
2375                                 relative_filename = filename[destroot_len:]
2376                                 if entry_type == "obj":
2377                                         entry_type, mtime, md5sum = entry_data
2378                                         line = "%s %s %s %s\n" % \
2379                                                 (entry_type, relative_filename, md5sum, mtime)
2380                                 elif entry_type == "sym":
2381                                         entry_type, mtime, link = entry_data
2382                                         line = "%s %s -> %s %s\n" % \
2383                                                 (entry_type, relative_filename, link, mtime)
2384                                 else: # dir, dev, fif
2385                                         line = "%s %s\n" % (entry_type, relative_filename)
2386                                 f.write(line)
2387                         f.close()
2388
2389                 # Due to mtime granularity, mtime checks do not always properly
2390                 # invalidate vardbapi caches.
2391                 self.vartree.dbapi.mtdircache.pop(self.cat, None)
2392                 self.vartree.dbapi.matchcache.pop(self.cat, None)
2393                 self.vartree.dbapi.cpcache.pop(self.mysplit[0], None)
2394                 contents = self.getcontents()
2395
2396                 #write out our collection of md5sums
2397                 if cfgfiledict.has_key("IGNORE"):
2398                         del cfgfiledict["IGNORE"]
2399
2400                 my_private_path = os.path.join(destroot, PRIVATE_PATH)
2401                 ensure_dirs(my_private_path, gid=portage_gid, mode=02750, mask=02)
2402
2403                 writedict(cfgfiledict, conf_mem_file)
2404                 del conf_mem_file
2405
2406                 # regenerate reverse NEEDED map
2407                 self.vartree.dbapi.linkmap.rebuild()
2408
2409                 #do postinst script
2410                 self.settings["PORTAGE_UPDATE_ENV"] = \
2411                         os.path.join(self.dbpkgdir, "environment.bz2")
2412                 self.settings.backup_changes("PORTAGE_UPDATE_ENV")
2413                 a = doebuild(myebuild, "postinst", destroot, self.settings, use_cache=0,
2414                         tree=self.treetype, mydbapi=mydbapi, vartree=self.vartree)
2415                 self.settings.pop("PORTAGE_UPDATE_ENV", None)
2416
2417                 # XXX: Decide how to handle failures here.
2418                 if a != os.EX_OK:
2419                         writemsg("!!! FAILED postinst: "+str(a)+"\n", noiselevel=-1)
2420                         return a
2421
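                     # Treat this merge as a downgrade if any other installed version
                     # of this package is newer; for downgrades, env_update() below is
                     # called with makelinks=False.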
2422                 downgrade = False
2423                 for v in otherversions:
2424                         if pkgcmp(catpkgsplit(self.pkg)[1:], catpkgsplit(v)[1:]) < 0:
2425                                 downgrade = True
2426
2427                 #update environment settings, library paths. DO NOT change symlinks.
2428                 env_update(makelinks=(not downgrade),
2429                         target_root=self.settings["ROOT"], prev_mtimes=prev_mtimes,
2430                         contents=contents, env=self.settings.environ())
2431
2432                 writemsg_stdout(">>> %s merged.\n" % self.mycpv)
2433                 return os.EX_OK
2434
2435         def mergeme(self, srcroot, destroot, outfile, secondhand, stufftomerge, cfgfiledict, thismtime):
2436                 """
2437                 
2438                 This function handles actual merging of the package contents to the livefs.
2439                 It also handles config protection.
2440                 
2441                 @param srcroot: The directory we are copying files from (usually ${D})
2442                 @type srcroot: String (Path)
2443                 @param destroot: Typically ${ROOT}
2444                 @type destroot: String (Path)
2445                 @param outfile: File to log operations to
2446                 @type outfile: File Object
2447                 @param secondhand: A list of items to merge in pass two (usually
2448                 symlinks that point to non-existing files that may get merged later)
2449                 @type secondhand: List
2450                 @param stufftomerge: Either a directory to merge, or a list of items.
2451                 @type stufftomerge: String or List
2452                 @param cfgfiledict: { File:mtime } mapping for config_protected files
2453                 @type cfgfiledict: Dictionary
2454                 @param thismtime: The current time (typically long(time.time()))
2455                 @type thismtime: Long
2456                 @rtype: None or Boolean
2457                 @returns:
2458                 1. True on failure
2459                 2. None otherwise
2460                 
2461                 """
2462                 from os.path import sep, join
2463                 srcroot = normalize_path(srcroot).rstrip(sep) + sep
2464                 destroot = normalize_path(destroot).rstrip(sep) + sep
2465                 
2466                 # This merges a list of files.  stufftomerge may be passed in two forms: a directory name or a list of items.
2467                 if isinstance(stufftomerge, basestring):
2468                         #A directory is specified.  Figure out protection paths, listdir() it and process it.
2469                         mergelist = os.listdir(join(srcroot, stufftomerge))
2470                         offset = stufftomerge
2471                 else:
2472                         mergelist = stufftomerge
2473                         offset = ""
2474                 for x in mergelist:
2475                         mysrc = join(srcroot, offset, x)
2476                         mydest = join(destroot, offset, x)
2477                         # myrealdest is mydest without the $ROOT prefix (makes a difference if ROOT!="/")
2478                         myrealdest = join(sep, offset, x)
2479                         # stat file once, test using S_* macros many times (faster that way)
2480                         try:
2481                                 mystat = os.lstat(mysrc)
2482                         except OSError, e:
2483                                 writemsg("\n")
2484                                 writemsg(red("!!! ERROR: There appears to be ")+bold("FILE SYSTEM CORRUPTION.")+red(" A file that is listed\n"))
2485                                 writemsg(red("!!!        as existing is not capable of being stat'd. If you are using an\n"))
2486                                 writemsg(red("!!!        experimental kernel, please boot into a stable one, force an fsck,\n"))
2487                                 writemsg(red("!!!        and ensure your filesystem is in a sane state. ")+bold("'shutdown -Fr now'\n"))
2488                                 writemsg(red("!!!        File:  ")+str(mysrc)+"\n", noiselevel=-1)
2489                                 writemsg(red("!!!        Error: ")+str(e)+"\n", noiselevel=-1)
2490                                 sys.exit(1)
2491                         except Exception, e:
2492                                 writemsg("\n")
2493                                 writemsg(red("!!! ERROR: An unknown error has occurred during the merge process.\n"))
2494                                 writemsg(red("!!!        A stat call returned the following error for the following file:\n"))
2495                                 writemsg(    "!!!        Please ensure that your filesystem is intact, otherwise report\n")
2496                                 writemsg(    "!!!        this as a portage bug at bugs.gentoo.org. Append 'emerge info'.\n")
2497                                 writemsg(    "!!!        File:  "+str(mysrc)+"\n", noiselevel=-1)
2498                                 writemsg(    "!!!        Error: "+str(e)+"\n", noiselevel=-1)
2499                                 sys.exit(1)
2500
2501
2502                         mymode = mystat[stat.ST_MODE]
2503                         # handy variables; mydest is the target object on the live filesystem;
2504                         # mysrc is the source object in the temporary install dir
2505                         try:
2506                                 mydstat = os.lstat(mydest)
2507                                 mydmode = mydstat.st_mode
2508                         except OSError, e:
2509                                 if e.errno != errno.ENOENT:
2510                                         raise
2511                                 del e
2512                                 #dest file doesn't exist
2513                                 mydstat = None
2514                                 mydmode = None
2515
2516                         if stat.S_ISLNK(mymode):
2517                                 # we are merging a symbolic link
2518                                 myabsto = abssymlink(mysrc)
2519                                 if myabsto.startswith(srcroot):
2520                                         myabsto = myabsto[len(srcroot):]
2521                                 myabsto = myabsto.lstrip(sep)
2522                                 myto = os.readlink(mysrc)
2523                                 if self.settings and self.settings["D"]:
2524                                         if myto.startswith(self.settings["D"]):
2525                                                 myto = myto[len(self.settings["D"]):]
2526                                 # myrealto contains the path of the real file to which this symlink points.
2527                                 # we can simply test for existence of this file to see if the target has been merged yet
2528                                 myrealto = normalize_path(os.path.join(destroot, myabsto))
2529                                 if mydmode!=None:
2530                                         #destination exists
2531                                         if not stat.S_ISLNK(mydmode):
2532                                                 if stat.S_ISDIR(mydmode):
2533                                                         # directory in the way: we can't merge a symlink over a directory
2534                                                         # we won't merge this, continue with next file...
2535                                                         continue
2536
2537                                                 if os.path.exists(mysrc) and stat.S_ISDIR(os.stat(mysrc)[stat.ST_MODE]):
2538                                                         # Kill file blocking installation of symlink to dir #71787
2539                                                         pass
2540                                                 elif self.isprotected(mydest):
2541                                                         # Use md5 of the target in ${D} if it exists...
2542                                                         try:
2543                                                                 newmd5 = perform_md5(join(srcroot, myabsto))
2544                                                         except FileNotFound:
2545                                                                 # Maybe the target is merged already.
2546                                                                 try:
2547                                                                         newmd5 = perform_md5(myrealto)
2548                                                                 except FileNotFound:
2549                                                                         newmd5 = None
2550                                                         mydest = new_protect_filename(mydest, newmd5=newmd5)
2551
2552                                 # if secondhand is None it means we're operating in "force" mode and should not create a second hand.
2553                                 if (secondhand != None) and (not os.path.exists(myrealto)):
2554                                         # either the target directory doesn't exist yet or the target file doesn't exist -- or
2555                                         # the target is a broken symlink.  We will add this file to our "second hand" and merge
2556                                         # it later.
2557                                         secondhand.append(mysrc[len(srcroot):])
2558                                         continue
2559                                 # unlinking no longer necessary; "movefile" will overwrite symlinks atomically and correctly
2560                                 mymtime = movefile(mysrc, mydest, newmtime=thismtime, sstat=mystat, mysettings=self.settings)
2561                                 if mymtime != None:
2562                                         writemsg_stdout(">>> %s -> %s\n" % (mydest, myto))
2563                                         outfile.write("sym "+myrealdest+" -> "+myto+" "+str(mymtime)+"\n")
2564                                 else:
2565                                         print "!!! Failed to move file."
2566                                         print "!!!", mydest, "->", myto
2567                                         sys.exit(1)
2568                         elif stat.S_ISDIR(mymode):
2569                                 # we are merging a directory
2570                                 if mydmode != None:
2571                                         # destination exists
2572
2573                                         if bsd_chflags:
2574                                                 # Save then clear flags on dest.
2575                                                 dflags = mydstat.st_flags
2576                                                 if dflags != 0:
2577                                                         bsd_chflags.lchflags(mydest, 0)
2578
2579                                         if not os.access(mydest, os.W_OK):
2580                                                 pkgstuff = pkgsplit(self.pkg)
2581                                                 writemsg("\n!!! Cannot write to '"+mydest+"'.\n", noiselevel=-1)
2582                                                 writemsg("!!! Please check permissions and directories for broken symlinks.\n")
2583                                                 writemsg("!!! You may start the merge process again by using ebuild:\n")
2584                                                 writemsg("!!! ebuild "+self.settings["PORTDIR"]+"/"+self.cat+"/"+pkgstuff[0]+"/"+self.pkg+".ebuild merge\n")
2585                                                 writemsg("!!! And finish by running this: env-update\n\n")
2586                                                 return 1
2587
2588                                         if stat.S_ISLNK(mydmode) or stat.S_ISDIR(mydmode):
2589                                                 # a symlink to an existing directory will work for us; keep it:
2590                                                 writemsg_stdout("--- %s/\n" % mydest)
2591                                                 if bsd_chflags:
2592                                                         bsd_chflags.lchflags(mydest, dflags)
2593                                         else:
2594                                                 # a non-directory and non-symlink-to-directory.  Won't work for us.  Move out of the way.
2595                                                 if movefile(mydest, mydest+".backup", mysettings=self.settings) is None:
2596                                                         sys.exit(1)
2597                                                 print "bak", mydest, mydest+".backup"
2598                                                 #now create our directory
2599                                                 if self.settings.selinux_enabled():
2600                                                         import selinux
2601                                                         sid = selinux.get_sid(mysrc)
2602                                                         selinux.secure_mkdir(mydest,sid)
2603                                                 else:
2604                                                         os.mkdir(mydest)
2605                                                 if bsd_chflags:
2606                                                         bsd_chflags.lchflags(mydest, dflags)
2607                                                 os.chmod(mydest, mystat[0])
2608                                                 os.chown(mydest, mystat[4], mystat[5])
2609                                                 writemsg_stdout(">>> %s/\n" % mydest)
2610                                 else:
2611                                         #destination doesn't exist
2612                                         if self.settings.selinux_enabled():
2613                                                 import selinux
2614                                                 sid = selinux.get_sid(mysrc)
2615                                                 selinux.secure_mkdir(mydest, sid)
2616                                         else:
2617                                                 os.mkdir(mydest)
2618                                         os.chmod(mydest, mystat[0])
2619                                         os.chown(mydest, mystat[4], mystat[5])
2620                                         writemsg_stdout(">>> %s/\n" % mydest)
2621                                 outfile.write("dir "+myrealdest+"\n")
2622                                 # recurse and merge this directory
2623                                 if self.mergeme(srcroot, destroot, outfile, secondhand,
2624                                         join(offset, x), cfgfiledict, thismtime):
2625                                         return 1
2626                         elif stat.S_ISREG(mymode):
2627                                 # we are merging a regular file
2628                                 mymd5 = perform_md5(mysrc, calc_prelink=1)
2629                                 # calculate config file protection stuff
2630                                 mydestdir = os.path.dirname(mydest)
2631                                 moveme = 1
2632                                 zing = "!!!"
2633                                 mymtime = None
2634                                 if mydmode != None:
2635                                         # destination file exists
2636                                         if stat.S_ISDIR(mydmode):
2637                                                 # install of destination is blocked by an existing directory with the same name
2638                                                 moveme = 0
2639                                                 writemsg_stdout("!!! %s\n" % mydest)
2640                                         elif stat.S_ISREG(mydmode) or (stat.S_ISLNK(mydmode) and os.path.exists(mydest) and stat.S_ISREG(os.stat(mydest)[stat.ST_MODE])):
2641                                                 cfgprot = 0
2642                                                 # install of destination is blocked by an existing regular file,
2643                                                 # or by a symlink to an existing regular file;
2644                                                 # now, config file management may come into play.
2645                                                 # we only need to tweak mydest if cfg file management is in play.
2646                                                 if self.isprotected(mydest):
2647                                                         # we have a protection path; enable config file management.
2648                                                         destmd5 = perform_md5(mydest, calc_prelink=1)
2649                                                         if mymd5 == destmd5:
2650                                                                 #file already in place; simply update mtimes of destination
2651                                                                 moveme = 1
2652                                                         else:
2653                                                                 if mymd5 == cfgfiledict.get(myrealdest, [None])[0]:
2654                                                                         """ An identical update has previously been
2655                                                                         merged.  Skip it unless the user has chosen
2656                                                                         --noconfmem."""
2657                                                                         moveme = cfgfiledict["IGNORE"]
2658                                                                         cfgprot = cfgfiledict["IGNORE"]
2659                                                                         if not moveme:
2660                                                                                 zing = "---"
2661                                                                                 mymtime = long(mystat.st_mtime)
2662                                                                 else:
2663                                                                         moveme = 1
2664                                                                         cfgprot = 1
2665                                                         if moveme:
2666                                                                 # Merging a new file, so update confmem.
2667                                                                 cfgfiledict[myrealdest] = [mymd5]
2668                                                         elif destmd5 == cfgfiledict.get(myrealdest, [None])[0]:
2669                                                                 """A previously remembered update has been
2670                                                                 accepted, so it is removed from confmem."""
2671                                                                 del cfgfiledict[myrealdest]
2672                                                 if cfgprot:
2673                                                         mydest = new_protect_filename(mydest, newmd5=mymd5)
2674
2675                                 # Whether or not config protection applies, we merge the new
2676                                 # file the same way, unless moveme=0 (blocked by a directory).
2677                                 if moveme:
2678                                         mymtime = movefile(mysrc, mydest, newmtime=thismtime, sstat=mystat, mysettings=self.settings)
2679                                         if mymtime is None:
2680                                                 sys.exit(1)
2681                                         zing = ">>>"
2682
2683                                 if mymtime != None:
2684                                         outfile.write("obj "+myrealdest+" "+mymd5+" "+str(mymtime)+"\n")
2685                                 writemsg_stdout("%s %s\n" % (zing,mydest))
2686                         else:
2687                                 # we are merging a fifo or device node
2688                                 zing = "!!!"
2689                                 if mydmode is None:
2690                                         # destination doesn't exist
2691                                         if movefile(mysrc, mydest, newmtime=thismtime, sstat=mystat, mysettings=self.settings) != None:
2692                                                 zing = ">>>"
2693                                         else:
2694                                                 sys.exit(1)
2695                                 if stat.S_ISFIFO(mymode):
2696                                         outfile.write("fif %s\n" % myrealdest)
2697                                 else:
2698                                         outfile.write("dev %s\n" % myrealdest)
2699                                 writemsg_stdout(zing + " " + mydest + "\n")
2700
2701         def merge(self, mergeroot, inforoot, myroot, myebuild=None, cleanup=0,
2702                 mydbapi=None, prev_mtimes=None):
2703                 """
2704                 If portage is reinstalling itself, create temporary
2705                 copies of PORTAGE_BIN_PATH and PORTAGE_PYM_PATH in order
2706                 to avoid relying on the new versions which may be
2707                 incompatible. Register an atexit hook to clean up the
2708                 temporary directories. Pre-load elog modules here since
2709                 we won't be able to later if they get unmerged (happens
2710                 when namespace changes).
2711                 """
2712                 if self.vartree.dbapi._categories is not None:
2713                         self.vartree.dbapi._categories = None
2714                 if self.myroot == "/" and \
2715                         "sys-apps" == self.cat and \
2716                         "portage" == pkgsplit(self.pkg)[0]:
2717                         settings = self.settings
2718                         base_path_orig = os.path.dirname(settings["PORTAGE_BIN_PATH"])
2719                         from tempfile import mkdtemp
2720                         import shutil
2721                         # Make the temp directory inside PORTAGE_TMPDIR since, unlike
2722                         # /tmp, it is not expected to be mounted with the "noexec" option.
2723                         base_path_tmp = mkdtemp("", "._portage_reinstall_.",
2724                                 settings["PORTAGE_TMPDIR"])
2725                         from portage.process import atexit_register
2726                         atexit_register(shutil.rmtree, base_path_tmp)
2727                         dir_perms = 0755
2728                         for subdir in "bin", "pym":
2729                                 var_name = "PORTAGE_%s_PATH" % subdir.upper()
2730                                 var_orig = settings[var_name]
2731                                 var_new = os.path.join(base_path_tmp, subdir)
2732                                 settings[var_name] = var_new
2733                                 settings.backup_changes(var_name)
2734                                 shutil.copytree(var_orig, var_new, symlinks=True)
2735                                 os.chmod(var_new, dir_perms)
2736                         os.chmod(base_path_tmp, dir_perms)
2737                         # This serves to pre-load the elog modules.
2738                         elog_process(self.mycpv, self.settings,
2739                                 phasefilter=filter_mergephases)
2740
2741                 return self._merge(mergeroot, inforoot,
2742                                 myroot, myebuild=myebuild, cleanup=cleanup,
2743                                 mydbapi=mydbapi, prev_mtimes=prev_mtimes)
2744
2745         def _merge(self, mergeroot, inforoot, myroot, myebuild=None, cleanup=0,
2746                 mydbapi=None, prev_mtimes=None):
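                     """
                     Lock the vdb, perform the merge via treewalk(), undo any
                     preserved-lib registrations for this package if the merge fails,
                     process elog messages, and run the ebuild "clean" phase on success
                     unless FEATURES=noclean.  The lock is always released and
                     treewalk()'s return value is passed through.
                     """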
2747                 retval = -1
2748                 self.lockdb()
2749                 try:
2750                         retval = self.treewalk(mergeroot, myroot, inforoot, myebuild,
2751                                 cleanup=cleanup, mydbapi=mydbapi, prev_mtimes=prev_mtimes)
2752                         # undo registrations of preserved libraries, bug #210501
2753                         if retval != os.EX_OK:
2754                                 self.vartree.dbapi.plib_registry.unregister(self.mycpv, self.settings["SLOT"], self.settings["COUNTER"])
2755                         # Process ebuild logfiles
2756                         elog_process(self.mycpv, self.settings, phasefilter=filter_mergephases)
2757                         if retval == os.EX_OK and "noclean" not in self.settings.features:
2758                                 if myebuild is None:
2759                                         myebuild = os.path.join(inforoot, self.pkg + ".ebuild")
2760                                 doebuild(myebuild, "clean", myroot, self.settings,
2761                                         tree=self.treetype, mydbapi=mydbapi, vartree=self.vartree)
2762                 finally:
2763                         self.unlockdb()
2764                 return retval
2765
2766         def getstring(self,name):
2767                 "returns contents of a file with whitespace converted to spaces"
2768                 if not os.path.exists(self.dbdir+"/"+name):
2769                         return ""
2770                 myfile = open(self.dbdir+"/"+name,"r")
2771                 mydata = myfile.read().split()
2772                 myfile.close()
2773                 return " ".join(mydata)
2774
2775         def copyfile(self,fname):
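                     "copies the given file into this package's vdb directory (self.dbdir)"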
2776                 import shutil
2777                 shutil.copyfile(fname,self.dbdir+"/"+os.path.basename(fname))
2778
2779         def getfile(self,fname):
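                     "returns the contents of the given file from the vdb directory, or an empty string if it is missing"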
2780                 if not os.path.exists(self.dbdir+"/"+fname):
2781                         return ""
2782                 myfile = open(self.dbdir+"/"+fname,"r")
2783                 mydata = myfile.read()
2784                 myfile.close()
2785                 return mydata
2786
2787         def setfile(self,fname,data):
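                     "atomically writes data to the given file in the vdb directory"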
2788                 write_atomic(os.path.join(self.dbdir, fname), data)
2789
2790         def getelements(self,ename):
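                     "returns a list of whitespace-separated tokens read from the given vdb file (an empty list if the file is missing)"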
2791                 if not os.path.exists(self.dbdir+"/"+ename):
2792                         return []
2793                 myelement = open(self.dbdir+"/"+ename,"r")
2794                 mylines = myelement.readlines()
2795                 myreturn = []
2796                 for x in mylines:
2797                         for y in x[:-1].split():
2798                                 myreturn.append(y)
2799                 myelement.close()
2800                 return myreturn
2801
2802         def setelements(self,mylist,ename):
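                     "writes each item of mylist to the given vdb file, one item per line"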
2803                 myelement = open(self.dbdir+"/"+ename,"w")
2804                 for x in mylist:
2805                         myelement.write(x+"\n")
2806                 myelement.close()
2807
2808         def isregular(self):
2809                 "Is this a regular package (does it have a CATEGORY file?  A dblink can be virtual *and* regular)"
2810                 return os.path.exists(os.path.join(self.dbdir, "CATEGORY"))
2811
2812 def tar_contents(contents, root, tar, protect=None, onProgress=None):
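             """
             Add every path listed in a CONTENTS mapping to the given tarfile object,
             using archive names relative to root.  Ownership is stored numerically
             (like tar's --numeric-owner), hardlinked regular files are added as
             independent regular files (bug #185305), and files matched by the
             optional protect() callback are added as empty placeholders.  If
             onProgress is given, it is called as onProgress(maxval, curval) to
             report progress.

             Illustrative sketch only ("mydblink" stands for a hypothetical,
             already constructed dblink instance):

                     import tarfile
                     tar = tarfile.open("/tmp/pkg.tar.bz2", "w:bz2")
                     tar_contents(mydblink.getcontents(), mydblink.settings["ROOT"], tar)
                     tar.close()
             """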
2813         from portage.util import normalize_path
2814         import tarfile
2815         root = normalize_path(root).rstrip(os.path.sep) + os.path.sep
2816         id_strings = {}
2817         maxval = len(contents)
2818         curval = 0
2819         if onProgress:
2820                 onProgress(maxval, 0)
2821         paths = contents.keys()
2822         paths.sort()
2823         for path in paths:
2824                 curval += 1
2825                 try:
2826                         lst = os.lstat(path)
2827                 except OSError, e:
2828                         if e.errno != errno.ENOENT:
2829                                 raise
2830                         del e
2831                         if onProgress:
2832                                 onProgress(maxval, curval)
2833                         continue
2834                 contents_type = contents[path][0]
2835                 if path.startswith(root):
2836                         arcname = path[len(root):]
2837                 else:
2838                         raise ValueError("invalid root argument: '%s'" % root)
2839                 live_path = path
2840                 if 'dir' == contents_type and \
2841                         not stat.S_ISDIR(lst.st_mode) and \
2842                         os.path.isdir(live_path):
2843                         # Even though this was a directory in the original ${D}, it exists
2844                         # as a symlink to a directory in the live filesystem.  It must be
2845                         # recorded as a real directory in the tar file to ensure that tar
2846                         # can properly extract its children.
2847                         live_path = os.path.realpath(live_path)
2848                 tarinfo = tar.gettarinfo(live_path, arcname)
2849                 # store numbers instead of real names like tar's --numeric-owner
2850                 tarinfo.uname = id_strings.setdefault(tarinfo.uid, str(tarinfo.uid))
2851                 tarinfo.gname = id_strings.setdefault(tarinfo.gid, str(tarinfo.gid))
2852
2853                 if stat.S_ISREG(lst.st_mode):
2854                         # break hardlinks due to bug #185305
2855                         tarinfo.type = tarfile.REGTYPE
2856                         if protect and protect(path):
2857                                 # Create an empty file as a placeholder in order to avoid
2858                                 # potential collision-protect issues.
2859                                 tarinfo.size = 0
2860                                 tar.addfile(tarinfo)
2861                         else:
2862                                 f = open(path)
2863                                 try:
2864                                         tar.addfile(tarinfo, f)
2865                                 finally:
2866                                         f.close()
2867                 else:
2868                         tar.addfile(tarinfo)
2869                 if onProgress:
2870                         onProgress(maxval, curval)