c8a4870e10578153a601bdc7a8bc5852cf623b2e
[portage.git] / pym / portage / dbapi / vartree.py
1 # Copyright 1998-2007 Gentoo Foundation
2 # Distributed under the terms of the GNU General Public License v2
3 # $Id$
4
5 __all__ = [
6         "vardbapi", "vartree", "dblink"] + \
7         ["write_contents", "tar_contents"]
8
9 from portage.checksum import perform_md5
10 from portage.const import CACHE_PATH, CONFIG_MEMORY_FILE, \
11         PORTAGE_PACKAGE_ATOM, PRIVATE_PATH, VDB_PATH
12 from portage.data import portage_gid, portage_uid, secpass
13 from portage.dbapi import dbapi
14 from portage.dep import use_reduce, paren_reduce, isvalidatom, \
15         isjustname, dep_getkey, match_from_list
16 from portage.exception import CommandNotFound, \
17         InvalidData, InvalidPackageName, \
18         FileNotFound, PermissionDenied, UnsupportedAPIException
19 from portage.locks import lockdir, unlockdir
20 from portage.output import bold, red, green
21 from portage.update import fixdbentries
22 from portage.util import apply_secpass_permissions, ConfigProtect, ensure_dirs, \
23         writemsg, writemsg_level, write_atomic, atomic_ofstream, writedict, \
24         grabfile, grabdict, normalize_path, new_protect_filename, getlibpaths
25 from portage.versions import pkgsplit, catpkgsplit, catsplit, best, pkgcmp
26
27 from portage import listdir, dep_expand, digraph, flatten, key_expand, \
28         doebuild_environment, doebuild, env_update, prepare_build_dirs, \
29         abssymlink, movefile, _movefile, bsd_chflags, cpv_getkey
30
31 from portage.elog import elog_process
32 from portage.elog.filtering import filter_mergephases, filter_unmergephases
33 from portage.cache.mappings import slot_dict_class
34
35 import os, re, shutil, stat, errno, copy, subprocess
36 import logging
37 import shlex
38 from itertools import izip
39
40 try:
41         import cPickle as pickle
42 except ImportError:
43         import pickle
44
45 class vardbapi(dbapi):
46
	# Directory names inside the vdb that never correspond to packages.
	_excluded_dirs = ["CVS", "lost+found"]
	_excluded_dirs = [re.escape(x) for x in _excluded_dirs]
	# Also exclude hidden directories and partially merged -MERGING-* entries.
	_excluded_dirs = re.compile(r'^(\..*|-MERGING-.*|' + \
		"|".join(_excluded_dirs) + r')$')

	# Bump these when the corresponding on-disk cache formats change.
	_aux_cache_version        = "1"
	_owners_cache_version     = "1"

	# Number of uncached packages to trigger cache update, since
	# it's wasteful to update it for every vdb change.
	_aux_cache_threshold = 5

	# Metadata keys matching this pattern are cached in addition to the
	# fixed _aux_cache_keys set built in __init__ (e.g. NEEDED.ELF.2).
	_aux_cache_keys_re = re.compile(r'^NEEDED\..*$')
	# Values for these keys may span multiple lines, so _aux_get must
	# preserve embedded newlines instead of collapsing whitespace.
	_aux_multi_line_re = re.compile(r'^(CONTENTS|NEEDED\..*)$')
61
	def __init__(self, root, categories=None, settings=None, vartree=None):
		"""
		The categories parameter is unused since the dbapi class
		now has a categories property that is generated from the
		available packages.

		@param root: filesystem root that the vdb lives under
		@param settings: portage config; defaults to the global instance
		@param vartree: owning vartree; defaults to the global db entry
		"""
		# Copy the string so later mutation of the caller's value
		# cannot affect us.
		self.root = root[:]

		#cache for category directory mtimes
		self.mtdircache = {}

		#cache for dependency checks
		self.matchcache = {}

		#cache for cp_list results
		self.cpcache = {}

		self.blockers = None
		if settings is None:
			# Fall back to the globally instantiated config.
			from portage import settings
		self.settings = settings
		if vartree is None:
			# Fall back to the globally instantiated vartree for root.
			from portage import db
			vartree = db[root]["vartree"]
		self.vartree = vartree
		# Metadata keys that aux_get() caches persistently in
		# vdb_metadata.pickle (see _aux_cache_keys_re for pattern keys).
		self._aux_cache_keys = set(
			["CHOST", "COUNTER", "DEPEND", "DESCRIPTION",
			"EAPI", "HOMEPAGE", "IUSE", "KEYWORDS",
			"LICENSE", "PDEPEND", "PROPERTIES", "PROVIDE", "RDEPEND",
			"repository", "RESTRICT" , "SLOT", "USE"])
		# Loaded lazily by the _aux_cache property.
		self._aux_cache_obj = None
		self._aux_cache_filename = os.path.join(self.root,
			CACHE_PATH.lstrip(os.path.sep), "vdb_metadata.pickle")
		# Global file recording the COUNTER of the last installed package.
		self._counter_path = os.path.join(root,
			CACHE_PATH.lstrip(os.path.sep), "counter")

		self._owners = self._owners_db(self)
99
100         def getpath(self, mykey, filename=None):
101                 rValue = os.path.join(self.root, VDB_PATH, mykey)
102                 if filename != None:
103                         rValue = os.path.join(rValue, filename)
104                 return rValue
105
106         def cpv_exists(self, mykey):
107                 "Tells us whether an actual ebuild exists on disk (no masking)"
108                 return os.path.exists(self.getpath(mykey))
109
110         def cpv_counter(self, mycpv):
111                 "This method will grab the COUNTER. Returns a counter value."
112                 try:
113                         return long(self.aux_get(mycpv, ["COUNTER"])[0])
114                 except (KeyError, ValueError):
115                         pass
116                 writemsg_level(("portage: COUNTER for %s was corrupted; " + \
117                         "resetting to value of 0\n") % (mycpv,),
118                         level=logging.ERROR, noiselevel=-1)
119                 return 0
120
121         def _counter_hash(self):
122                 try:
123                         from hashlib import md5 as new_hash
124                 except ImportError:
125                         from md5 import new as new_hash
126                 h = new_hash()
127                 aux_keys = ["COUNTER"]
128                 cpv_list = self.cpv_all()
129                 cpv_list.sort()
130                 for cpv in cpv_list:
131                         try:
132                                 counter, = self.aux_get(cpv, aux_keys)
133                         except KeyError:
134                                 continue
135                         h.update(counter)
136                 return h.hexdigest()
137
	def cpv_inject(self, mycpv):
		"injects a real package into our on-disk database; assumes mycpv is valid and doesn't already exist"
		# Create the package's vdb directory (raises if it already exists).
		os.makedirs(self.getpath(mycpv))
		counter = self.counter_tick(self.root, mycpv=mycpv)
		# write local package counter so that emerge clean does the right thing
		write_atomic(self.getpath(mycpv, filename="COUNTER"), str(counter))
144
145         def isInjected(self, mycpv):
146                 if self.cpv_exists(mycpv):
147                         if os.path.exists(self.getpath(mycpv, filename="INJECTED")):
148                                 return True
149                         if not os.path.exists(self.getpath(mycpv, filename="CONTENTS")):
150                                 return True
151                 return False
152
	def move_ent(self, mylist):
		"""
		Move all installed instances of a package from one cp to another
		in response to a "move" update command.

		@param mylist: update command of the form ["move", origcp, newcp]
		@return: number of installed package instances moved
		@raise InvalidPackageName: if either cp is not a plain valid atom
		"""
		origcp = mylist[1]
		newcp = mylist[2]

		# sanity check
		for cp in [origcp, newcp]:
			if not (isvalidatom(cp) and isjustname(cp)):
				raise InvalidPackageName(cp)
		origmatches = self.match(origcp, use_cache=0)
		moves = 0
		if not origmatches:
			return moves
		for mycpv in origmatches:
			# Build the new cpv, preserving the version and any
			# nonzero revision suffix.
			mycpsplit = catpkgsplit(mycpv)
			mynewcpv = newcp + "-" + mycpsplit[2]
			mynewcat = newcp.split("/")[0]
			if mycpsplit[3] != "r0":
				mynewcpv += "-" + mycpsplit[3]
			mycpsplit_new = catpkgsplit(mynewcpv)
			origpath = self.getpath(mycpv)
			if not os.path.exists(origpath):
				continue
			moves += 1
			if not os.path.exists(self.getpath(mynewcat)):
				#create the directory
				os.makedirs(self.getpath(mynewcat))
			newpath = self.getpath(mynewcpv)
			if os.path.exists(newpath):
				#dest already exists; keep this puppy where it is.
				continue
			_movefile(origpath, newpath, mysettings=self.settings)

			# We need to rename the ebuild now.
			old_pf = catsplit(mycpv)[1]
			new_pf = catsplit(mynewcpv)[1]
			if new_pf != old_pf:
				try:
					os.rename(os.path.join(newpath, old_pf + ".ebuild"),
						os.path.join(newpath, new_pf + ".ebuild"))
				except EnvironmentError, e:
					# A missing ebuild is tolerated; anything else
					# is a real error.
					if e.errno != errno.ENOENT:
						raise
					del e
			# Update the stored PF/CATEGORY metadata to match the new
			# name, then rewrite db entries that referenced origcp.
			write_atomic(os.path.join(newpath, "PF"), new_pf+"\n")
			write_atomic(os.path.join(newpath, "CATEGORY"), mynewcat+"\n")
			fixdbentries([mylist], newpath)
		return moves
200
	def cp_list(self, mycp, use_cache=1):
		"""
		Return a sorted list of installed cpvs matching the given cp
		("category/package"), using the category directory mtime to
		validate the in-memory cpcache.

		@param mycp: "category/package" string
		@param use_cache: when true, consult and populate self.cpcache
		@return: list of matching cpv strings
		"""
		mysplit=catsplit(mycp)
		# NOTE(review): this compares the whole category string to '*'
		# (yielding an empty category), not a leading '*' character —
		# presumably historical behavior; confirm before changing.
		if mysplit[0] == '*':
			mysplit[0] = mysplit[0][1:]
		try:
			mystat = os.stat(self.getpath(mysplit[0]))[stat.ST_MTIME]
		except OSError:
			mystat = 0
		if use_cache and mycp in self.cpcache:
			cpc = self.cpcache[mycp]
			# Cached result is valid only if the dir mtime is unchanged.
			if cpc[0] == mystat:
				return cpc[1][:]
		cat_dir = self.getpath(mysplit[0])
		try:
			dir_list = os.listdir(cat_dir)
		except EnvironmentError, e:
			if e.errno == PermissionDenied.errno:
				raise PermissionDenied(cat_dir)
			del e
			dir_list = []

		returnme = []
		for x in dir_list:
			if self._excluded_dirs.match(x) is not None:
				continue
			ps = pkgsplit(x)
			if not ps:
				self.invalidentry(os.path.join(self.getpath(mysplit[0]), x))
				continue
			if len(mysplit) > 1:
				# Keep only entries whose package name matches.
				if ps[0] == mysplit[1]:
					returnme.append(mysplit[0]+"/"+x)
		self._cpv_sort_ascending(returnme)
		if use_cache:
			# Cache a copy along with the validating mtime.
			self.cpcache[mycp] = [mystat, returnme[:]]
		elif mycp in self.cpcache:
			# Caching disabled: drop any stale entry.
			del self.cpcache[mycp]
		return returnme
239
	def cpv_all(self, use_cache=1):
		"""
		Return a list of all installed cpvs found in the vdb.

		Set use_cache=0 to bypass the portage.cachedir() cache in cases
		when the accuracy of mtime staleness checks should not be trusted
		(generally this is only necessary in critical sections that
		involve merge or unmerge of packages).
		"""
		returnme = []
		basepath = os.path.join(self.root, VDB_PATH) + os.path.sep

		if use_cache:
			from portage import listdir
		else:
			# Uncached substitute: list only subdirectories, accepting
			# (and ignoring) the keyword args portage.listdir takes.
			def listdir(p, **kwargs):
				try:
					return [x for x in os.listdir(p) \
						if os.path.isdir(os.path.join(p, x))]
				except EnvironmentError, e:
					if e.errno == PermissionDenied.errno:
						raise PermissionDenied(p)
					del e
					return []

		for x in listdir(basepath, EmptyOnError=1, ignorecvs=1, dirsonly=1):
			if self._excluded_dirs.match(x) is not None:
				continue
			if not self._category_re.match(x):
				continue
			for y in listdir(basepath + x, EmptyOnError=1, dirsonly=1):
				if self._excluded_dirs.match(y) is not None:
					continue
				subpath = x + "/" + y
				# -MERGING- should never be a cpv, nor should files.
				try:
					if catpkgsplit(subpath) is None:
						self.invalidentry(self.getpath(subpath))
						continue
				except InvalidData:
					self.invalidentry(self.getpath(subpath))
					continue
				returnme.append(subpath)

		return returnme
283
284         def cp_all(self, use_cache=1):
285                 mylist = self.cpv_all(use_cache=use_cache)
286                 d={}
287                 for y in mylist:
288                         if y[0] == '*':
289                                 y = y[1:]
290                         try:
291                                 mysplit = catpkgsplit(y)
292                         except InvalidData:
293                                 self.invalidentry(self.getpath(y))
294                                 continue
295                         if not mysplit:
296                                 self.invalidentry(self.getpath(y))
297                                 continue
298                         d[mysplit[0]+"/"+mysplit[1]] = None
299                 return d.keys()
300
	def checkblockers(self, origdep):
		# Intentionally a no-op, kept for interface compatibility.
		# NOTE(review): presumably blocker handling happens elsewhere —
		# confirm before removing.
		pass
303
304         def _clear_cache(self):
305                 self.mtdircache.clear()
306                 self.matchcache.clear()
307                 self.cpcache.clear()
308                 self._aux_cache_obj = None
309
	def _add(self, pkg_dblink):
		# Invalidate caches affected by pkg_dblink being added.
		self._clear_pkg_cache(pkg_dblink)
312
	def _remove(self, pkg_dblink):
		# Invalidate caches affected by pkg_dblink being removed.
		self._clear_pkg_cache(pkg_dblink)
315
	def _clear_pkg_cache(self, pkg_dblink):
		# Due to 1 second mtime granularity in <python-2.5, mtime checks
		# are not always sufficient to invalidate vardbapi caches. Therefore,
		# the caches need to be actively invalidated here.
		self.mtdircache.pop(pkg_dblink.cat, None)
		self.matchcache.pop(pkg_dblink.cat, None)
		self.cpcache.pop(pkg_dblink.mysplit[0], None)
		# Also drop portage's global dircache entry for the package's
		# database category directory.
		from portage import dircache
		dircache.pop(pkg_dblink.dbcatdir, None)
325
	def match(self, origdep, use_cache=1):
		"caching match function"
		mydep = dep_expand(
			origdep, mydb=self, use_cache=use_cache, settings=self.settings)
		mykey = dep_getkey(mydep)
		mycat = catsplit(mykey)[0]
		if not use_cache:
			# Purge any cached results for this category and match
			# directly against an uncached cp_list.
			if mycat in self.matchcache:
				del self.mtdircache[mycat]
				del self.matchcache[mycat]
			return list(self._iter_match(mydep,
				self.cp_list(mydep.cp, use_cache=use_cache)))
		try:
			curmtime = os.stat(self.root+VDB_PATH+"/"+mycat).st_mtime
		except (IOError, OSError):
			curmtime=0

		# The category dir mtime validates the per-category match cache;
		# mtdircache[mycat] is always set together with matchcache[mycat].
		if mycat not in self.matchcache or \
			self.mtdircache[mycat] != curmtime:
			# clear cache entry
			self.mtdircache[mycat] = curmtime
			self.matchcache[mycat] = {}
		if mydep not in self.matchcache[mycat]:
			mymatch = list(self._iter_match(mydep,
				self.cp_list(mydep.cp, use_cache=use_cache)))
			self.matchcache[mycat][mydep] = mymatch
		# Return a copy so callers cannot mutate the cached list.
		return self.matchcache[mycat][mydep][:]
353
354         def findname(self, mycpv):
355                 return self.getpath(str(mycpv), filename=catsplit(mycpv)[1]+".ebuild")
356
	def flush_cache(self):
		"""If the current user has permission and the internal aux_get cache has
		been updated, save it to disk and mark it unmodified.  This is called
		by emerge after it has loaded the full vdb for use in dependency
		calculations.  Currently, the cache is only written if the user has
		superuser privileges (since that's required to obtain a lock), but all
		users have read access and benefit from faster metadata lookups (as
		long as at least part of the cache is still valid)."""
		# Only write when enough packages changed to be worth the cost.
		if self._aux_cache is not None and \
			len(self._aux_cache["modified"]) >= self._aux_cache_threshold and \
			secpass >= 2:
			self._owners.populate() # index any unindexed contents
			# Drop cache entries for packages no longer installed.
			valid_nodes = set(self.cpv_all())
			for cpv in self._aux_cache["packages"].keys():
				if cpv not in valid_nodes:
					del self._aux_cache["packages"][cpv]
			# "modified" is runtime-only state; exclude it from the pickle.
			del self._aux_cache["modified"]
			try:
				f = atomic_ofstream(self._aux_cache_filename)
				pickle.dump(self._aux_cache, f, -1)
				f.close()
				apply_secpass_permissions(
					self._aux_cache_filename, gid=portage_gid, mode=0644)
			except (IOError, OSError), e:
				# Best-effort: failure to write the cache is non-fatal.
				pass
			self._aux_cache["modified"] = set()
383
	@property
	def _aux_cache(self):
		# Lazily load/initialize the pickled metadata cache on first use.
		if self._aux_cache_obj is None:
			self._aux_cache_init()
		return self._aux_cache_obj
389
	def _aux_cache_init(self):
		"""Load vdb_metadata.pickle from disk, validating its structure and
		version; fall back to a fresh empty cache on any problem (the cache
		is disposable)."""
		aux_cache = None
		try:
			f = open(self._aux_cache_filename)
			mypickle = pickle.Unpickler(f)
			# Disable class/global lookups while unpickling, for safety.
			mypickle.find_global = None
			aux_cache = mypickle.load()
			f.close()
			del f
		except (IOError, OSError, EOFError, pickle.UnpicklingError), e:
			if isinstance(e, pickle.UnpicklingError):
				writemsg("!!! Error loading '%s': %s\n" % \
					(self._aux_cache_filename, str(e)), noiselevel=-1)
			del e

		# Recreate from scratch if missing, malformed, empty, or from a
		# different cache version.
		if not aux_cache or \
			not isinstance(aux_cache, dict) or \
			aux_cache.get("version") != self._aux_cache_version or \
			not aux_cache.get("packages"):
			aux_cache = {"version": self._aux_cache_version}
			aux_cache["packages"] = {}

		# Validate the embedded owners cache the same way.
		owners = aux_cache.get("owners")
		if owners is not None:
			if not isinstance(owners, dict):
				owners = None
			elif "version" not in owners:
				owners = None
			elif owners["version"] != self._owners_cache_version:
				owners = None
			elif "base_names" not in owners:
				owners = None
			elif not isinstance(owners["base_names"], dict):
				owners = None

		if owners is None:
			owners = {
				"base_names" : {},
				"version"    : self._owners_cache_version
			}
			aux_cache["owners"] = owners

		# Tracks which cpvs changed since the last flush_cache().
		aux_cache["modified"] = set()
		self._aux_cache_obj = aux_cache
434
	def aux_get(self, mycpv, wants):
		"""This automatically caches selected keys that are frequently needed
		by emerge for dependency calculations.  The cached metadata is
		considered valid if the mtime of the package directory has not changed
		since the data was cached.  The cache is stored in a pickled dict
		object with the following format:

		{version:"1", "packages":{cpv1:(mtime,{k1,v1, k2,v2, ...}), cpv2...}}

		If an error occurs while loading the cache pickle or the version is
		unrecognized, the cache will simple be recreated from scratch (it is
		completely disposable).

		@param mycpv: installed package to query
		@param wants: iterable of metadata key names
		@return: list of values in the same order as wants
		@raise KeyError: if mycpv is not installed
		"""
		# Which of the requested keys are eligible for caching.
		cache_these_wants = self._aux_cache_keys.intersection(wants)
		for x in wants:
			if self._aux_cache_keys_re.match(x) is not None:
				cache_these_wants.add(x)

		# Nothing cacheable requested; read straight from disk.
		if not cache_these_wants:
			return self._aux_get(mycpv, wants)

		cache_these = set(self._aux_cache_keys)
		cache_these.update(cache_these_wants)

		mydir = self.getpath(mycpv)
		mydir_stat = None
		try:
			mydir_stat = os.stat(mydir)
		except OSError, e:
			if e.errno != errno.ENOENT:
				raise
			# A missing package directory means mycpv is not installed.
			raise KeyError(mycpv)
		mydir_mtime = long(mydir_stat.st_mtime)
		pkg_data = self._aux_cache["packages"].get(mycpv)
		pull_me = cache_these.union(wants)
		mydata = {"_mtime_" : mydir_mtime}
		cache_valid = False
		cache_incomplete = False
		cache_mtime = None
		metadata = None
		# Discard structurally invalid cache entries.
		if pkg_data is not None:
			if not isinstance(pkg_data, tuple) or len(pkg_data) != 2:
				pkg_data = None
			else:
				cache_mtime, metadata = pkg_data
				if not isinstance(cache_mtime, (long, int)) or \
					not isinstance(metadata, dict):
					pkg_data = None

		if pkg_data:
			cache_mtime, metadata = pkg_data
			# The entry is valid only if the package dir is unmodified.
			cache_valid = cache_mtime == mydir_mtime
		if cache_valid:
			mydata.update(metadata)
			# Only keys not satisfied by the cache still need a disk read.
			pull_me.difference_update(mydata)

		if pull_me:
			# pull any needed data and cache it
			aux_keys = list(pull_me)
			for k, v in izip(aux_keys,
				self._aux_get(mycpv, aux_keys, st=mydir_stat)):
				mydata[k] = v
			if not cache_valid or cache_these.difference(metadata):
				# Rebuild the cache entry with all cacheable keys and
				# mark the cpv modified for the next flush_cache().
				cache_data = {}
				if cache_valid and metadata:
					cache_data.update(metadata)
				for aux_key in cache_these:
					cache_data[aux_key] = mydata[aux_key]
				self._aux_cache["packages"][mycpv] = (mydir_mtime, cache_data)
				self._aux_cache["modified"].add(mycpv)
		return [mydata[x] for x in wants]
506
	def _aux_get(self, mycpv, wants, st=None):
		"""
		Read the requested metadata values directly from the package's
		vdb directory (one file per key), bypassing the cache.

		@param st: optional pre-fetched stat result for the package
			directory, to avoid a redundant os.stat call
		@raise KeyError: if the package directory is missing or is not
			a directory
		"""
		mydir = self.getpath(mycpv)
		if st is None:
			try:
				st = os.stat(mydir)
			except OSError, e:
				if e.errno == errno.ENOENT:
					raise KeyError(mycpv)
				elif e.errno == PermissionDenied.errno:
					raise PermissionDenied(mydir)
				else:
					raise
		if not stat.S_ISDIR(st.st_mode):
			raise KeyError(mycpv)
		results = []
		for x in wants:
			if x == "_mtime_":
				# Pseudo-key: the directory mtime itself, not a file.
				results.append(st.st_mtime)
				continue
			try:
				myf = open(os.path.join(mydir, x), "r")
				try:
					myd = myf.read()
				finally:
					myf.close()
				# Preserve \n for metadata that is known to
				# contain multiple lines.
				if self._aux_multi_line_re.match(x) is None:
					myd = " ".join(myd.split())
			except IOError:
				# A missing or unreadable file yields an empty value.
				myd = ""
			if x == "EAPI" and not myd:
				# An empty/missing EAPI file means EAPI 0.
				results.append("0")
			else:
				results.append(myd)
		return results
543
	def aux_update(self, cpv, values):
		"""
		Update metadata files for an installed package.

		@param cpv: installed package to modify
		@param values: dict of metadata key -> new value; a false value
			causes the corresponding file to be removed
		@raise KeyError: if cpv is not installed
		"""
		cat, pkg = catsplit(cpv)
		mylink = dblink(cat, pkg, self.root, self.settings,
		treetype="vartree", vartree=self.vartree)
		if not mylink.exists():
			raise KeyError(cpv)
		for k, v in values.iteritems():
			if v:
				mylink.setfile(k, v)
			else:
				# Empty value: remove the metadata file if present.
				try:
					os.unlink(os.path.join(self.getpath(cpv), k))
				except EnvironmentError:
					pass
558
	def counter_tick(self, myroot, mycpv=None):
		# Convenience wrapper: increment and persist the global counter.
		return self.counter_tick_core(myroot, incrementing=1, mycpv=mycpv)
561
	def get_counter_tick_core(self, myroot, mycpv=None):
		"""
		Use this method to retrieve the counter instead
		of having to trust the value of a global counter
		file that can lead to invalid COUNTER
		generation. When cache is valid, the package COUNTER
		files are not read and we rely on the timestamp of
		the package directory to validate cache. The stat
		calls should only take a short time, so performance
		is sufficient without having to rely on a potentially
		corrupt global counter file.

		The global counter file located at
		$CACHE_PATH/counter serves to record the
		counter of the last installed package and
		it also corresponds to the total number of
		installation actions that have occurred in
		the history of this package database.

		@return: one more than the maximum COUNTER found
		"""
		# Scan every installed package for the highest COUNTER value.
		cp_list = self.cp_list
		max_counter = 0
		for cp in self.cp_all():
			for cpv in cp_list(cp):
				try:
					counter = int(self.aux_get(cpv, ["COUNTER"])[0])
				except (KeyError, OverflowError, ValueError):
					continue
				if counter > max_counter:
					max_counter = counter

		new_vdb = False
		counter = -1
		try:
			cfile = open(self._counter_path, "r")
		except EnvironmentError, e:
			# A missing counter file is only an error when packages
			# are already installed.
			new_vdb = not bool(self.cpv_all())
			if not new_vdb:
				writemsg("!!! Unable to read COUNTER file: '%s'\n" % \
					self._counter_path, noiselevel=-1)
				writemsg("!!! %s\n" % str(e), noiselevel=-1)
			del e
		else:
			try:
				try:
					counter = long(cfile.readline().strip())
				finally:
					cfile.close()
			except (OverflowError, ValueError), e:
				writemsg("!!! COUNTER file is corrupt: '%s'\n" % \
					self._counter_path, noiselevel=-1)
				writemsg("!!! %s\n" % str(e), noiselevel=-1)
				del e

		# We must ensure that we return a counter
		# value that is at least as large as the
		# highest one from the installed packages,
		# since having a corrupt value that is too low
		# can trigger incorrect AUTOCLEAN behavior due
		# to newly installed packages having lower
		# COUNTERs than the previous version in the
		# same slot.
		if counter > max_counter:
			max_counter = counter

		if counter < 0 and not new_vdb:
			writemsg("!!! Initializing COUNTER to " + \
				"value of %d\n" % max_counter, noiselevel=-1)

		return max_counter + 1
631
632         def counter_tick_core(self, myroot, incrementing=1, mycpv=None):
633                 "This method will grab the next COUNTER value and record it back to the global file.  Returns new counter value."
634                 counter = self.get_counter_tick_core(myroot, mycpv=mycpv) - 1
635                 if incrementing:
636                         #increment counter
637                         counter += 1
638                         # update new global counter file
639                         write_atomic(self._counter_path, str(counter))
640                 return counter
641
642         def _dblink(self, cpv):
643                 category, pf = catsplit(cpv)
644                 return dblink(category, pf, self.root,
645                         self.settings, vartree=self.vartree, treetype="vartree")
646
647         def removeFromContents(self, pkg, paths, relative_paths=True):
648                 """
649                 @param pkg: cpv for an installed package
650                 @type pkg: string
651                 @param paths: paths of files to remove from contents
652                 @type paths: iterable
653                 """
654                 if not hasattr(pkg, "getcontents"):
655                         pkg = self._dblink(pkg)
656                 root = self.root
657                 root_len = len(root) - 1
658                 new_contents = pkg.getcontents().copy()
659                 removed = 0
660
661                 for filename in paths:
662                         filename = normalize_path(filename)
663                         if relative_paths:
664                                 relative_filename = filename
665                         else:
666                                 relative_filename = filename[root_len:]
667                         contents_key = pkg._match_contents(relative_filename, root)
668                         if contents_key:
669                                 del new_contents[contents_key]
670                                 removed += 1
671
672                 if removed:
673                         f = atomic_ofstream(os.path.join(pkg.dbdir, "CONTENTS"))
674                         write_contents(new_contents, root, f)
675                         f.close()
676                         pkg._clear_contents_cache()
677
	class _owners_cache(object):
		"""
		This class maintains an hash table that serves to index package
		contents by mapping the basename of file to a list of possible
		packages that own it. This is used to optimize owner lookups
		by narrowing the search down to a smaller number of packages.

		The table lives in vardb._aux_cache["owners"]["base_names"],
		keyed by a truncated md5 of the basename; each bucket maps
		package hash tuples (see _hash_pkg) to None.
		"""
		# Prefer hashlib (Python >= 2.5); fall back to the legacy md5
		# module on older interpreters.
		try:
			from hashlib import md5 as _new_hash
		except ImportError:
			from md5 import new as _new_hash

		# Basename keys are truncated to this many bits of the md5
		# digest (16 bits -> 4 hex chars; note Python 2 integer "/").
		_hash_bits = 16
		_hex_chars = _hash_bits / 4

		def __init__(self, vardb):
			# vardb: the vardbapi instance whose _aux_cache we index.
			self._vardb = vardb

		def add(self, cpv):
			"""Index every file of the given installed cpv by basename."""
			root_len = len(self._vardb.root)
			contents = self._vardb._dblink(cpv).getcontents()
			pkg_hash = self._hash_pkg(cpv)
			if not contents:
				# Empty path is a code used to represent empty contents.
				self._add_path("", pkg_hash)
			# Strip the root prefix so cached paths are root-relative.
			for x in contents:
				self._add_path(x[root_len:], pkg_hash)
			# Mark the aux cache dirty so it gets written back to disk.
			self._vardb._aux_cache["modified"].add(cpv)

		def _add_path(self, path, pkg_hash):
			"""
			Empty path is a code that represents empty contents.
			"""
			if path:
				name = os.path.basename(path.rstrip(os.path.sep))
				if not name:
					return
			else:
				name = path
			name_hash = self._hash_str(name)
			base_names = self._vardb._aux_cache["owners"]["base_names"]
			pkgs = base_names.get(name_hash)
			if pkgs is None:
				pkgs = {}
				base_names[name_hash] = pkgs
			# Bucket entries are keyed by package hash; value is unused.
			pkgs[pkg_hash] = None

		def _hash_str(self, s):
			"""Return an int from the last _hex_chars of s's md5 digest."""
			h = self._new_hash()
			h.update(s)
			h = h.hexdigest()
			h = h[-self._hex_chars:]
			h = int(h, 16)
			return h

		def _hash_pkg(self, cpv):
			"""
			Return a (cpv, COUNTER, mtime) tuple identifying a specific
			installation of cpv; used to detect stale cache entries.
			"""
			counter, mtime = self._vardb.aux_get(
				cpv, ["COUNTER", "_mtime_"])
			try:
				counter = int(counter)
			except ValueError:
				# Corrupt COUNTER; fall back to 0 rather than failing.
				counter = 0
			return (cpv, counter, mtime)
741
	class _owners_db(object):
		"""
		Query interface over _owners_cache: map files to the installed
		packages (dblink instances) that own them.
		"""

		def __init__(self, vardb):
			# vardb: the vardbapi instance to query.
			self._vardb = vardb

		def populate(self):
			"""Public wrapper that (re)builds the owners cache."""
			self._populate()

		def _populate(self):
			"""
			Validate the cached basename index against the installed
			package set: add uncached packages, drop stale entries.
			Returns the _owners_cache instance used.
			"""
			owners_cache = vardbapi._owners_cache(self._vardb)
			cached_hashes = set()
			base_names = self._vardb._aux_cache["owners"]["base_names"]

			# Take inventory of all cached package hashes.
			# NOTE: deleting keys while iterating items() relies on
			# Python 2 items() returning a list snapshot.
			for name, hash_values in base_names.items():
				if not isinstance(hash_values, dict):
					# Corrupt bucket; discard it.
					del base_names[name]
					continue
				cached_hashes.update(hash_values)

			# Create sets of valid package hashes and uncached packages.
			uncached_pkgs = set()
			hash_pkg = owners_cache._hash_pkg
			valid_pkg_hashes = set()
			for cpv in self._vardb.cpv_all():
				hash_value = hash_pkg(cpv)
				valid_pkg_hashes.add(hash_value)
				if hash_value not in cached_hashes:
					uncached_pkgs.add(cpv)

			# Cache any missing packages.
			for cpv in uncached_pkgs:
				owners_cache.add(cpv)

			# Delete any stale cache.
			stale_hashes = cached_hashes.difference(valid_pkg_hashes)
			if stale_hashes:
				for base_name_hash, bucket in base_names.items():
					for hash_value in stale_hashes.intersection(bucket):
						del bucket[hash_value]
					if not bucket:
						# Remove buckets that became empty.
						del base_names[base_name_hash]

			return owners_cache

		def get_owners(self, path_iter):
			"""
			@return the owners as a dblink -> set(files) mapping.
			"""
			owners = {}
			for owner, f in self.iter_owners(path_iter):
				owned_files = owners.get(owner)
				if owned_files is None:
					owned_files = set()
					owners[owner] = owned_files
				owned_files.add(f)
			return owners

		def getFileOwnerMap(self, path_iter):
			"""Invert get_owners(): return a file -> set(dblink) mapping."""
			owners = self.get_owners(path_iter)
			file_owners = {}
			for pkg_dblink, files in owners.iteritems():
				for f in files:
					owner_set = file_owners.get(f)
					if owner_set is None:
						owner_set = set()
						file_owners[f] = owner_set
					owner_set.add(pkg_dblink)
			return file_owners

		def iter_owners(self, path_iter):
			"""
			Iterate over tuples of (dblink, path). In order to avoid
			consuming too many resources for too much time, resources
			are only allocated for the duration of a given iter_owners()
			call. Therefore, to maximize reuse of resources when searching
			for multiple files, it's best to search for them all in a single
			call.
			"""

			owners_cache = self._populate()

			vardb = self._vardb
			root = vardb.root
			hash_pkg = owners_cache._hash_pkg
			hash_str = owners_cache._hash_str
			base_names = self._vardb._aux_cache["owners"]["base_names"]

			# Memoize dblink construction per cpv for this call only.
			dblink_cache = {}

			def dblink(cpv):
				x = dblink_cache.get(cpv)
				if x is None:
					x = self._vardb._dblink(cpv)
					dblink_cache[cpv] = x
				return x

			for path in path_iter:
				name = os.path.basename(path.rstrip(os.path.sep))
				if not name:
					continue

				name_hash = hash_str(name)
				pkgs = base_names.get(name_hash)
				if pkgs is not None:
					for hash_value in pkgs:
						# Validate the cached hash tuple shape before use,
						# since the on-disk cache may be corrupt.
						if not isinstance(hash_value, tuple) or \
							len(hash_value) != 3:
							continue
						cpv, counter, mtime = hash_value
						if not isinstance(cpv, basestring):
							continue
						try:
							current_hash = hash_pkg(cpv)
						except KeyError:
							# Package no longer installed.
							continue

						# Skip stale entries (COUNTER/mtime changed).
						if current_hash != hash_value:
							continue
						if dblink(cpv).isowner(path, root):
							yield dblink(cpv), path
863
class vartree(object):
	"""
	This tree will scan a var/db/pkg database located at root
	(passed to init). It wraps a vardbapi instance and provides
	legacy convenience/compatibility methods.
	"""
	def __init__(self, root="/", virtual=None, clone=None, categories=None,
		settings=None):
		# The clone parameter is deprecated; it deep-copies the dbapi
		# of an existing vartree instead of constructing a new one.
		if clone:
			writemsg("vartree.__init__(): deprecated " + \
				"use of clone parameter\n", noiselevel=-1)
			self.root = clone.root[:]
			self.dbapi = copy.deepcopy(clone.dbapi)
			self.populated = 1
			from portage import config
			self.settings = config(clone=clone.settings)
		else:
			self.root = root[:]
			if settings is None:
				# Fall back to the global portage settings instance
				# (deliberately shadows the settings parameter).
				from portage import settings
			self.settings = settings # for key_expand calls
			if categories is None:
				categories = settings.categories
			self.dbapi = vardbapi(self.root, categories=categories,
				settings=settings, vartree=self)
			self.populated = 1

	def getpath(self, mykey, filename=None):
		# Delegate to the dbapi's path construction.
		return self.dbapi.getpath(mykey, filename=filename)

	def zap(self, mycpv):
		# No-op kept for interface compatibility.
		return

	def inject(self, mycpv):
		# No-op kept for interface compatibility.
		return

	def get_provide(self, mycpv):
		"""
		Return the list of cat/pkg virtuals PROVIDEd by the installed
		mycpv, with USE conditionals applied. Returns [] on parse errors.
		"""
		myprovides = []
		mylines = None
		try:
			mylines, myuse = self.dbapi.aux_get(mycpv, ["PROVIDE", "USE"])
			if mylines:
				myuse = myuse.split()
				# Apply USE conditionals to the PROVIDE string.
				mylines = flatten(use_reduce(paren_reduce(mylines), uselist=myuse))
				for myprovide in mylines:
					mys = catpkgsplit(myprovide)
					if not mys:
						# Unversioned virtual; split manually.
						mys = myprovide.split("/")
					myprovides += [mys[0] + "/" + mys[1]]
			return myprovides
		except SystemExit, e:
			raise
		except Exception, e:
			# Best-effort: report the parse error and return no provides.
			mydir = os.path.join(self.root, VDB_PATH, mycpv)
			writemsg("\nParse Error reading PROVIDE and USE in '%s'\n" % mydir,
				noiselevel=-1)
			if mylines:
				writemsg("Possibly Invalid: '%s'\n" % str(mylines),
					noiselevel=-1)
			writemsg("Exception: %s\n\n" % str(e), noiselevel=-1)
			return []

	def get_all_provides(self):
		"""Return a mapping of virtual cat/pkg -> list of providing cpvs."""
		myprovides = {}
		for node in self.getallcpv():
			for mykey in self.get_provide(node):
				if mykey in myprovides:
					myprovides[mykey] += [node]
				else:
					myprovides[mykey] = [node]
		return myprovides

	def dep_bestmatch(self, mydep, use_cache=1):
		"compatibility method -- all matches, not just visible ones"
		#mymatch=best(match(dep_expand(mydep,self.dbapi),self.dbapi))
		mymatch = best(self.dbapi.match(
			dep_expand(mydep, mydb=self.dbapi, settings=self.settings),
			use_cache=use_cache))
		if mymatch is None:
			return ""
		else:
			return mymatch

	def dep_match(self, mydep, use_cache=1):
		"compatibility method -- we want to see all matches, not just visible ones"
		#mymatch = match(mydep,self.dbapi)
		mymatch = self.dbapi.match(mydep, use_cache=use_cache)
		if mymatch is None:
			return []
		else:
			return mymatch

	def exists_specific(self, cpv):
		# True if the exact cpv is installed.
		return self.dbapi.cpv_exists(cpv)

	def getallcpv(self):
		"""temporary function, probably to be renamed --- Gets a list of all
		category/package-versions installed on the system."""
		return self.dbapi.cpv_all()

	def getallnodes(self):
		"""new behavior: these are all *unmasked* nodes.  There may or may not be available
		masked package for nodes in this nodes list."""
		return self.dbapi.cp_all()

	def exists_specific_cat(self, cpv, use_cache=1):
		# Return 1 if any version of cpv's package is installed in its
		# category, 0 otherwise. Invalid db entries are reported.
		cpv = key_expand(cpv, mydb=self.dbapi, use_cache=use_cache,
			settings=self.settings)
		a = catpkgsplit(cpv)
		if not a:
			return 0
		mylist = listdir(self.getpath(a[0]), EmptyOnError=1)
		for x in mylist:
			b = pkgsplit(x)
			if not b:
				self.dbapi.invalidentry(self.getpath(a[0], filename=x))
				continue
			if a[1] == b[0]:
				return 1
		return 0

	def getebuildpath(self, fullpackage):
		# Path to the ebuild stored inside the vdb entry.
		cat, package = catsplit(fullpackage)
		return self.getpath(fullpackage, filename=package+".ebuild")

	def getnode(self, mykey, use_cache=1):
		"""
		Return [[cpv, [cat, pn, ver, rev]], ...] for every installed
		version of the cat/pkg key, or [] if the key is empty.
		"""
		mykey = key_expand(mykey, mydb=self.dbapi, use_cache=use_cache,
			settings=self.settings)
		if not mykey:
			return []
		mysplit = catsplit(mykey)
		mydirlist = listdir(self.getpath(mysplit[0]),EmptyOnError=1)
		returnme = []
		for x in mydirlist:
			mypsplit = pkgsplit(x)
			if not mypsplit:
				self.dbapi.invalidentry(self.getpath(mysplit[0], filename=x))
				continue
			if mypsplit[0] == mysplit[1]:
				appendme = [mysplit[0]+"/"+x, [mysplit[0], mypsplit[0], mypsplit[1], mypsplit[2]]]
				returnme.append(appendme)
		return returnme


	def getslot(self, mycatpkg):
		"Get a slot for a catpkg; assume it exists."
		try:
			return self.dbapi.aux_get(mycatpkg, ["SLOT"])[0]
		except KeyError:
			# Not installed; return an empty slot.
			return ""

	def hasnode(self, mykey, use_cache):
		"""Does the particular node (cat/pkg key) exist?"""
		mykey = key_expand(mykey, mydb=self.dbapi, use_cache=use_cache,
			settings=self.settings)
		mysplit = catsplit(mykey)
		mydirlist = listdir(self.getpath(mysplit[0]), EmptyOnError=1)
		for x in mydirlist:
			mypsplit = pkgsplit(x)
			if not mypsplit:
				self.dbapi.invalidentry(self.getpath(mysplit[0], filename=x))
				continue
			if mypsplit[0] == mysplit[1]:
				return 1
		return 0

	def populate(self):
		# Nothing to populate; kept for tree-interface compatibility.
		self.populated=1
1028
1029 class dblink(object):
1030         """
1031         This class provides an interface to the installed package database
1032         At present this is implemented as a text backend in /var/db/pkg.
1033         """
1034
	# NOTE(review): class-scope import; presumably kept local to the
	# class for historical reasons -- module-level re is also imported.
	import re
	# Matches paths that require normalization before CONTENTS lookups:
	# doubled slashes, relative paths, trailing slashes, or "."/".."
	# components.
	_normalize_needed = re.compile(r'.*//.*|^[^/]|.+/$|(^|.*/)\.\.?(/.*|$)')
	# Expected number of space-separated fields for each CONTENTS entry
	# type; used to reassemble paths that themselves contain spaces.
	_contents_split_counts = {
		"dev": 2,
		"dir": 2,
		"fif": 2,
		"obj": 4,
		"sym": 5
	}

	# When looping over files for merge/unmerge, temporarily yield to the
	# scheduler each time this many files are processed.
	_file_merge_yield_interval = 20
1048
1049         def __init__(self, cat, pkg, myroot, mysettings, treetype=None,
1050                 vartree=None, blockers=None, scheduler=None):
1051                 """
1052                 Creates a DBlink object for a given CPV.
1053                 The given CPV may not be present in the database already.
1054                 
1055                 @param cat: Category
1056                 @type cat: String
1057                 @param pkg: Package (PV)
1058                 @type pkg: String
1059                 @param myroot: Typically ${ROOT}
1060                 @type myroot: String (Path)
1061                 @param mysettings: Typically portage.config
1062                 @type mysettings: An instance of portage.config
1063                 @param treetype: one of ['porttree','bintree','vartree']
1064                 @type treetype: String
1065                 @param vartree: an instance of vartree corresponding to myroot.
1066                 @type vartree: vartree
1067                 """
1068                 
1069                 self.cat = cat
1070                 self.pkg = pkg
1071                 self.mycpv = self.cat + "/" + self.pkg
1072                 self.mysplit = list(catpkgsplit(self.mycpv)[1:])
1073                 self.mysplit[0] = "%s/%s" % (self.cat, self.mysplit[0])
1074                 self.treetype = treetype
1075                 if vartree is None:
1076                         from portage import db
1077                         vartree = db[myroot]["vartree"]
1078                 self.vartree = vartree
1079                 self._blockers = blockers
1080                 self._scheduler = scheduler
1081
1082                 self.dbroot = normalize_path(os.path.join(myroot, VDB_PATH))
1083                 self.dbcatdir = self.dbroot+"/"+cat
1084                 self.dbpkgdir = self.dbcatdir+"/"+pkg
1085                 self.dbtmpdir = self.dbcatdir+"/-MERGING-"+pkg
1086                 self.dbdir = self.dbpkgdir
1087
1088                 self._lock_vdb = None
1089
1090                 self.settings = mysettings
1091                 self._verbose = self.settings.get("PORTAGE_VERBOSE") == "1"
1092
1093                 self.myroot=myroot
1094                 protect_obj = ConfigProtect(myroot,
1095                         shlex.split(mysettings.get("CONFIG_PROTECT", "")),
1096                         shlex.split(mysettings.get("CONFIG_PROTECT_MASK", "")))
1097                 self.updateprotect = protect_obj.updateprotect
1098                 self.isprotected = protect_obj.isprotected
1099                 self._installed_instance = None
1100                 self.contentscache = None
1101                 self._contents_inodes = None
1102                 self._contents_basenames = None
1103                 self._md5_merge_map = {}
1104
1105         def lockdb(self):
1106                 if self._lock_vdb:
1107                         raise AssertionError("Lock already held.")
1108                 # At least the parent needs to exist for the lock file.
1109                 ensure_dirs(self.dbroot)
1110                 self._lock_vdb = lockdir(self.dbroot)
1111
1112         def unlockdb(self):
1113                 if self._lock_vdb:
1114                         unlockdir(self._lock_vdb)
1115                         self._lock_vdb = None
1116
1117         def getpath(self):
1118                 "return path to location of db information (for >>> informational display)"
1119                 return self.dbdir
1120
1121         def exists(self):
1122                 "does the db entry exist?  boolean."
1123                 return os.path.exists(self.dbdir)
1124
1125         def delete(self):
1126                 """
1127                 Remove this entry from the database
1128                 """
1129                 if not os.path.exists(self.dbdir):
1130                         return
1131
1132                 # Check validity of self.dbdir before attempting to remove it.
1133                 if not self.dbdir.startswith(self.dbroot):
1134                         writemsg("portage.dblink.delete(): invalid dbdir: %s\n" % \
1135                                 self.dbdir, noiselevel=-1)
1136                         return
1137
1138                 shutil.rmtree(self.dbdir)
1139                 self.vartree.dbapi._remove(self)
1140
1141         def clearcontents(self):
1142                 """
1143                 For a given db entry (self), erase the CONTENTS values.
1144                 """
1145                 if os.path.exists(self.dbdir+"/CONTENTS"):
1146                         os.unlink(self.dbdir+"/CONTENTS")
1147
1148         def _clear_contents_cache(self):
1149                 self.contentscache = None
1150                 self._contents_inodes = None
1151                 self._contents_basenames = None
1152
	def getcontents(self):
		"""
		Get the installed files of a given package (aka what that package installed)

		Parses this entry's CONTENTS file and returns a dict mapping
		absolute paths (translated through self.myroot) to lists:
		  obj -> [type, mtime, md5sum]
		  sym -> [type, mtime, dest]
		  dir/dev/fif -> [type]
		The result is memoized in self.contentscache. Parse errors are
		reported via writemsg but do not raise.
		"""
		contents_file = os.path.join(self.dbdir, "CONTENTS")
		if self.contentscache is not None:
			return self.contentscache
		pkgfiles = {}
		try:
			myc = open(contents_file,"r")
		except EnvironmentError, e:
			# Missing CONTENTS means empty contents; anything else
			# (e.g. EACCES) is propagated.
			if e.errno != errno.ENOENT:
				raise
			del e
			self.contentscache = pkgfiles
			return pkgfiles
		mylines = myc.readlines()
		myc.close()
		null_byte = "\0"
		normalize_needed = self._normalize_needed
		contents_split_counts = self._contents_split_counts
		myroot = self.myroot
		# For root "/" no prefix translation is needed.
		if myroot == os.path.sep:
			myroot = None
		pos = 0
		errors = []
		for pos, line in enumerate(mylines):
			if null_byte in line:
				# Null bytes are a common indication of corruption.
				errors.append((pos + 1, "Null byte found in CONTENTS entry"))
				continue
			line = line.rstrip("\n")
			# Split on " " so that even file paths that
			# end with spaces can be handled.
			mydat = line.split(" ")
			entry_type = mydat[0] # empty string if line is empty
			correct_split_count = contents_split_counts.get(entry_type)
			if correct_split_count and len(mydat) > correct_split_count:
				# Apparently file paths contain spaces, so reassemble
				# the split have the correct_split_count.
				newsplit = [entry_type]
				spaces_total = len(mydat) - correct_split_count
				if entry_type == "sym":
					# A sym line is "sym <path> -> <target> <mtime>";
					# locate the "->" separator to apportion the extra
					# spaces between path and target.
					try:
						splitter = mydat.index("->", 2, len(mydat) - 2)
					except ValueError:
						errors.append((pos + 1, "Unrecognized CONTENTS entry"))
						continue
					spaces_in_path = splitter - 2
					spaces_in_target = spaces_total - spaces_in_path
					newsplit.append(" ".join(mydat[1:splitter]))
					newsplit.append("->")
					target_end = splitter + spaces_in_target + 2
					newsplit.append(" ".join(mydat[splitter + 1:target_end]))
					newsplit.extend(mydat[target_end:])
				else:
					# All extra spaces belong to the path field.
					path_end = spaces_total + 2
					newsplit.append(" ".join(mydat[1:path_end]))
					newsplit.extend(mydat[path_end:])
				mydat = newsplit

			# we do this so we can remove from non-root filesystems
			# (use the ROOT var to allow maintenance on other partitions)
			try:
				if normalize_needed.match(mydat[1]):
					mydat[1] = normalize_path(mydat[1])
					if not mydat[1].startswith(os.path.sep):
						mydat[1] = os.path.sep + mydat[1]
				if myroot:
					mydat[1] = os.path.join(myroot, mydat[1].lstrip(os.path.sep))
				if mydat[0] == "obj":
					#format: type, mtime, md5sum
					pkgfiles[mydat[1]] = [mydat[0], mydat[3], mydat[2]]
				elif mydat[0] == "dir":
					#format: type
					pkgfiles[mydat[1]] = [mydat[0]]
				elif mydat[0] == "sym":
					#format: type, mtime, dest
					pkgfiles[mydat[1]] = [mydat[0], mydat[4], mydat[3]]
				elif mydat[0] == "dev":
					#format: type
					pkgfiles[mydat[1]] = [mydat[0]]
				elif mydat[0]=="fif":
					#format: type
					pkgfiles[mydat[1]] = [mydat[0]]
				else:
					errors.append((pos + 1, "Unrecognized CONTENTS entry"))
			except (KeyError, IndexError):
				errors.append((pos + 1, "Unrecognized CONTENTS entry"))
		if errors:
			writemsg("!!! Parse error in '%s'\n" % contents_file, noiselevel=-1)
			for pos, e in errors:
				writemsg("!!!   line %d: %s\n" % (pos, e), noiselevel=-1)
		self.contentscache = pkgfiles
		return pkgfiles
1248
	def unmerge(self, pkgfiles=None, trimworld=1, cleanup=1,
		ldpath_mtimes=None, others_in_slot=None):
		"""
		Calls prerm
		Unmerges a given package (CPV)
		calls postrm
		calls cleanrm
		calls env_update
		
		@param pkgfiles: files to unmerge (generally self.getcontents() )
		@type pkgfiles: Dictionary
		@param trimworld: Remove CPV from world file if True, not if False
		@type trimworld: Boolean
		@param cleanup: cleanup to pass to doebuild (see doebuild)
		@type cleanup: Boolean
		@param ldpath_mtimes: mtimes to pass to env_update (see env_update)
		@type ldpath_mtimes: Dictionary
		@param others_in_slot: all dblink instances in this slot, excluding self
		@type others_in_slot: list
		@rtype: Integer
		@returns:
		1. os.EX_OK if everything went well.
		2. return code of the failed phase (for prerm, postrm, cleanrm)
		
		Notes:
		The caller must ensure that lockdb() and unlockdb() are called
		before and after this method.
		"""
		# NOTE(review): showMessage is assigned but not referenced anywhere
		# in this method; messages below go through writemsg() instead.
		showMessage = self._display_merge
		# Invalidate the vardb category cache; this unmerge may remove the
		# last package in a category.
		if self.vartree.dbapi._categories is not None:
			self.vartree.dbapi._categories = None
		# When others_in_slot is supplied, the security check has already been
		# done for this slot, so it shouldn't be repeated until the next
		# replacement or unmerge operation.
		if others_in_slot is None:
			slot = self.vartree.dbapi.aux_get(self.mycpv, ["SLOT"])[0]
			slot_matches = self.vartree.dbapi.match(
				"%s:%s" % (dep_getkey(self.mycpv), slot))
			others_in_slot = []
			for cur_cpv in slot_matches:
				if cur_cpv == self.mycpv:
					continue
				others_in_slot.append(dblink(self.cat, catsplit(cur_cpv)[1],
					self.vartree.root, self.settings, vartree=self.vartree,
					treetype="vartree"))

			retval = self._security_check([self] + others_in_slot)
			if retval:
				return retval

		contents = self.getcontents()
		# Now, don't assume that the name of the ebuild is the same as the
		# name of the dir; the package may have been moved.
		myebuildpath = None
		ebuild_phase = "prerm"
		log_path = None
		mystuff = os.listdir(self.dbdir)
		for x in mystuff:
			if x.endswith(".ebuild"):
				myebuildpath = os.path.join(self.dbdir, self.pkg + ".ebuild")
				if x[:-7] != self.pkg:
					# Clean up after vardbapi.move_ent() breakage in
					# portage versions before 2.1.2
					os.rename(os.path.join(self.dbdir, x), myebuildpath)
					write_atomic(os.path.join(self.dbdir, "PF"), self.pkg+"\n")
				break

		self.settings.setcpv(self.mycpv, mydb=self.vartree.dbapi)
		if myebuildpath:
			try:
				doebuild_environment(myebuildpath, "prerm", self.myroot,
					self.settings, 0, 0, self.vartree.dbapi)
			except UnsupportedAPIException, e:
				# Sometimes this happens due to corruption of the EAPI file.
				writemsg("!!! FAILED prerm: %s\n" % \
					os.path.join(self.dbdir, "EAPI"), noiselevel=-1)
				writemsg("%s\n" % str(e), noiselevel=-1)
				# Disable all ebuild phases below by clearing myebuildpath.
				myebuildpath = None
			else:
				# catdir is the parent of PORTAGE_BUILDDIR; make sure its own
				# parent exists with portage ownership before locking below.
				catdir = os.path.dirname(self.settings["PORTAGE_BUILDDIR"])
				ensure_dirs(os.path.dirname(catdir), uid=portage_uid,
					gid=portage_gid, mode=070, mask=0)

		builddir_lock = None
		catdir_lock = None
		scheduler = self._scheduler
		retval = -1
		try:
			if myebuildpath:
				# The category dir lock is only needed while creating and
				# locking PORTAGE_BUILDDIR; it is released again right after.
				catdir_lock = lockdir(catdir)
				ensure_dirs(catdir,
					uid=portage_uid, gid=portage_gid,
					mode=070, mask=0)
				builddir_lock = lockdir(
					self.settings["PORTAGE_BUILDDIR"])
				try:
					unlockdir(catdir_lock)
				finally:
					# Clear the reference so the cleanup code at the bottom
					# knows it must re-acquire the lock before rmdir(catdir).
					catdir_lock = None

				prepare_build_dirs(self.myroot, self.settings, 1)
				log_path = self.settings.get("PORTAGE_LOG_FILE")

				# Run pkg_prerm, either directly or via the scheduler.
				if scheduler is None:
					retval = doebuild(myebuildpath, ebuild_phase, self.myroot,
						self.settings, cleanup=cleanup, use_cache=0,
						mydbapi=self.vartree.dbapi, tree=self.treetype,
						vartree=self.vartree)
				else:
					retval = scheduler.dblinkEbuildPhase(
						self, self.vartree.dbapi, myebuildpath, ebuild_phase)

				# XXX: Decide how to handle failures here.
				if retval != os.EX_OK:
					writemsg("!!! FAILED prerm: %s\n" % retval, noiselevel=-1)

			# Remove the package's files from the live filesystem and drop
			# the cached CONTENTS data, even if prerm failed above.
			self._unmerge_pkgfiles(pkgfiles, others_in_slot)
			self._clear_contents_cache()

			if myebuildpath:
				# Run pkg_postrm after the files are gone.
				ebuild_phase = "postrm"
				if scheduler is None:
					retval = doebuild(myebuildpath, ebuild_phase, self.myroot,
						self.settings, use_cache=0, tree=self.treetype,
						mydbapi=self.vartree.dbapi, vartree=self.vartree)
				else:
					retval = scheduler.dblinkEbuildPhase(
						self, self.vartree.dbapi, myebuildpath, ebuild_phase)

				# XXX: Decide how to handle failures here.
				if retval != os.EX_OK:
					writemsg("!!! FAILED postrm: %s\n" % retval, noiselevel=-1)

		finally:
			if builddir_lock:
				try:
					if myebuildpath:
						if retval != os.EX_OK:
							# Build a wrapped eerror message explaining which
							# phase failed and how to work around corrupt
							# environment.bz2/ebuild files.
							msg_lines = []
							msg = ("The '%s' " % ebuild_phase) + \
							("phase of the '%s' package " % self.mycpv) + \
							("has failed with exit value %s." % retval)
							from textwrap import wrap
							msg_lines.extend(wrap(msg, 72))
							msg_lines.append("")

							ebuild_name = os.path.basename(myebuildpath)
							ebuild_dir = os.path.dirname(myebuildpath)
							msg = "The problem occurred while executing " + \
							("the ebuild file named '%s' " % ebuild_name) + \
							("located in the '%s' directory. " \
							% ebuild_dir) + \
							"If necessary, manually remove " + \
							"the environment.bz2 file and/or the " + \
							"ebuild file located in that directory."
							msg_lines.extend(wrap(msg, 72))
							msg_lines.append("")

							msg = "Removal " + \
							"of the environment.bz2 file is " + \
							"preferred since it may allow the " + \
							"removal phases to execute successfully. " + \
							"The ebuild will be " + \
							"sourced and the eclasses " + \
							"from the current portage tree will be used " + \
							"when necessary. Removal of " + \
							"the ebuild file will cause the " + \
							"pkg_prerm() and pkg_postrm() removal " + \
							"phases to be skipped entirely."
							msg_lines.extend(wrap(msg, 72))

							self._eerror(ebuild_phase, msg_lines)

						# process logs created during pre/postrm
						elog_process(self.mycpv, self.settings, phasefilter=filter_unmergephases)
						# Only run the cleanrm phase when the removal phases
						# succeeded.
						if retval == os.EX_OK:
							if scheduler is None:
								doebuild(myebuildpath, "cleanrm", self.myroot,
									self.settings, tree="vartree",
									mydbapi=self.vartree.dbapi,
									vartree=self.vartree)
							else:
								scheduler.dblinkEbuildPhase(
									self, self.vartree.dbapi,
									myebuildpath, "cleanrm")
				finally:
					unlockdir(builddir_lock)
			try:
				if myebuildpath and not catdir_lock:
					# Lock catdir for removal if empty.
					catdir_lock = lockdir(catdir)
			finally:
				if catdir_lock:
					try:
						# Best-effort removal; a non-empty or already-removed
						# category dir is fine.
						os.rmdir(catdir)
					except OSError, e:
						if e.errno not in (errno.ENOENT,
							errno.ENOTEMPTY, errno.EEXIST):
							raise
						del e
					unlockdir(catdir_lock)

		if log_path is not None and os.path.exists(log_path):
			# Restore this since it gets lost somewhere above and it
			# needs to be set for _display_merge() to be able to log.
			# Note that the log isn't necessarily supposed to exist
			# since if PORT_LOGDIR is unset then it's a temp file
			# so it gets cleaned above.
			self.settings["PORTAGE_LOG_FILE"] = log_path
		else:
			self.settings.pop("PORTAGE_LOG_FILE", None)

		# Regenerate ld.so cache / env.d data now that files are removed.
		env_update(target_root=self.myroot, prev_mtimes=ldpath_mtimes,
			contents=contents, env=self.settings.environ(),
			writemsg_level=self._display_merge)
		return os.EX_OK
1465
1466         def _display_merge(self, msg, level=0, noiselevel=0):
1467                 if not self._verbose and noiselevel >= 0 and level < logging.WARN:
1468                         return
1469                 if self._scheduler is not None:
1470                         self._scheduler.dblinkDisplayMerge(self, msg,
1471                                 level=level, noiselevel=noiselevel)
1472                         return
1473                 writemsg_level(msg, level=level, noiselevel=noiselevel)
1474
	def _unmerge_pkgfiles(self, pkgfiles, others_in_slot):
		"""
		
		Unmerges the contents of a package from the liveFS
		Removes the VDB entry for self
		
		@param pkgfiles: typically self.getcontents()
		@type pkgfiles: Dictionary { filename: [ 'type', '?', 'md5sum' ] }
		@param others_in_slot: all dblink instances in this slot, excluding self
		@type others_in_slot: list
		@rtype: None
		"""

		showMessage = self._display_merge
		scheduler = self._scheduler

		if not pkgfiles:
			showMessage("No package files given... Grabbing a set.\n")
			pkgfiles = self.getcontents()

		# Build the list of other installed instances in this slot so their
		# files (shared with ours) are not removed below.
		if others_in_slot is None:
			others_in_slot = []
			slot = self.vartree.dbapi.aux_get(self.mycpv, ["SLOT"])[0]
			slot_matches = self.vartree.dbapi.match(
				"%s:%s" % (dep_getkey(self.mycpv), slot))
			for cur_cpv in slot_matches:
				if cur_cpv == self.mycpv:
					continue
				others_in_slot.append(dblink(self.cat, catsplit(cur_cpv)[1],
					self.vartree.root, self.settings,
					vartree=self.vartree, treetype="vartree"))

		dest_root = normalize_path(self.vartree.root).rstrip(os.path.sep) + \
			os.path.sep
		# Length minus one keeps the leading os.path.sep on relative paths
		# sliced out below.
		dest_root_len = len(dest_root) - 1

		# CONFIG_MEMORY_FILE records config-protect state; entries for files
		# removed here become stale and are purged at the end.
		conf_mem_file = os.path.join(dest_root, CONFIG_MEMORY_FILE)
		cfgfiledict = grabdict(conf_mem_file)
		stale_confmem = []

		unmerge_orphans = "unmerge-orphans" in self.settings.features

		if pkgfiles:
			self.updateprotect()
			# Reverse lexicographic order removes deeper paths before their
			# parent directories.
			mykeys = pkgfiles.keys()
			mykeys.sort()
			mykeys.reverse()

			#process symlinks second-to-last, directories last.
			mydirs = []
			ignored_unlink_errnos = (
				errno.EBUSY, errno.ENOENT,
				errno.ENOTDIR, errno.EISDIR)
			ignored_rmdir_errnos = (
				errno.EEXIST, errno.ENOTEMPTY,
				errno.EBUSY, errno.ENOENT,
				errno.ENOTDIR, errno.EISDIR)
			modprotect = os.path.join(self.vartree.root, "lib/modules/")

			def unlink(file_name, lstatobj):
				# Remove file_name, temporarily clearing BSD file flags on it
				# and on its parent directory when bsd_chflags is available.
				if bsd_chflags:
					if lstatobj.st_flags != 0:
						bsd_chflags.lchflags(file_name, 0)
					parent_name = os.path.dirname(file_name)
					# Use normal stat/chflags for the parent since we want to
					# follow any symlinks to the real parent directory.
					pflags = os.stat(parent_name).st_flags
					if pflags != 0:
						bsd_chflags.chflags(parent_name, 0)
				try:
					if not stat.S_ISLNK(lstatobj.st_mode):
						# Remove permissions to ensure that any hardlinks to
						# suid/sgid files are rendered harmless.
						os.chmod(file_name, 0)
					os.unlink(file_name)
				finally:
					if bsd_chflags and pflags != 0:
						# Restore the parent flags we saved before unlinking
						bsd_chflags.chflags(parent_name, pflags)

			def show_unmerge(zing, desc, file_type, file_name):
					# One status line per CONTENTS entry, e.g. "<<< obj /path".
					showMessage("%s %s %s %s\n" % \
						(zing, desc.ljust(8), file_type, file_name))
			for i, objkey in enumerate(mykeys):

				# Periodically yield to the scheduler so other tasks run.
				if scheduler is not None and \
					0 == i % self._file_merge_yield_interval:
					scheduler.scheduleYield()

				obj = normalize_path(objkey)
				file_data = pkgfiles[objkey]
				file_type = file_data[0]
				# statobj follows symlinks; lstatobj does not.
				statobj = None
				try:
					statobj = os.stat(obj)
				except OSError:
					pass
				lstatobj = None
				try:
					lstatobj = os.lstat(obj)
				except (OSError, AttributeError):
					pass
				islink = lstatobj is not None and stat.S_ISLNK(lstatobj.st_mode)
				if lstatobj is None:
						show_unmerge("---", "!found", file_type, obj)
						continue
				if obj.startswith(dest_root):
					relative_path = obj[dest_root_len:]
					is_owned = False
					for dblnk in others_in_slot:
						if dblnk.isowner(relative_path, dest_root):
							is_owned = True
							break
					if is_owned:
						# A new instance of this package claims the file, so
						# don't unmerge it.
						show_unmerge("---", "replaced", file_type, obj)
						continue
					elif relative_path in cfgfiledict:
						stale_confmem.append(relative_path)
				# next line includes a tweak to protect modules from being unmerged,
				# but we don't protect modules from being overwritten if they are
				# upgraded. We effectively only want one half of the config protection
				# functionality for /lib/modules. For portage-ng both capabilities
				# should be able to be independently specified.
				if obj.startswith(modprotect):
					show_unmerge("---", "cfgpro", file_type, obj)
					continue

				# Don't unlink symlinks to directories here since that can
				# remove /lib and /usr/lib symlinks.
				if unmerge_orphans and \
					lstatobj and not stat.S_ISDIR(lstatobj.st_mode) and \
					not (islink and statobj and stat.S_ISDIR(statobj.st_mode)) and \
					not self.isprotected(obj):
					try:
						unlink(obj, lstatobj)
					except EnvironmentError, e:
						if e.errno not in ignored_unlink_errnos:
							raise
						del e
					show_unmerge("<<<", "", file_type, obj)
					continue

				# For obj/sym entries, only remove the file if its mtime still
				# matches the one recorded at merge time.
				lmtime = str(lstatobj[stat.ST_MTIME])
				if (pkgfiles[objkey][0] not in ("dir", "fif", "dev")) and (lmtime != pkgfiles[objkey][1]):
					show_unmerge("---", "!mtime", file_type, obj)
					continue

				if pkgfiles[objkey][0] == "dir":
					if statobj is None or not stat.S_ISDIR(statobj.st_mode):
						show_unmerge("---", "!dir", file_type, obj)
						continue
					# Directories are collected and removed last, see below.
					mydirs.append(obj)
				elif pkgfiles[objkey][0] == "sym":
					if not islink:
						show_unmerge("---", "!sym", file_type, obj)
						continue
					# Go ahead and unlink symlinks to directories here when
					# they're actually recorded as symlinks in the contents.
					# Normally, symlinks such as /lib -> lib64 are not recorded
					# as symlinks in the contents of a package.  If a package
					# installs something into ${D}/lib/, it is recorded in the
					# contents as a directory even if it happens to correspond
					# to a symlink when it's merged to the live filesystem.
					try:
						unlink(obj, lstatobj)
						show_unmerge("<<<", "", file_type, obj)
					except (OSError, IOError),e:
						if e.errno not in ignored_unlink_errnos:
							raise
						del e
						show_unmerge("!!!", "", file_type, obj)
				elif pkgfiles[objkey][0] == "obj":
					if statobj is None or not stat.S_ISREG(statobj.st_mode):
						show_unmerge("---", "!obj", file_type, obj)
						continue
					mymd5 = None
					try:
						mymd5 = perform_md5(obj, calc_prelink=1)
					except FileNotFound, e:
						# the file has disappeared between now and our stat call
						show_unmerge("---", "!obj", file_type, obj)
						continue

					# string.lower is needed because db entries used to be in upper-case.  The
					# string.lower allows for backwards compatibility.
					if mymd5 != pkgfiles[objkey][2].lower():
						show_unmerge("---", "!md5", file_type, obj)
						continue
					try:
						unlink(obj, lstatobj)
					except (OSError, IOError), e:
						if e.errno not in ignored_unlink_errnos:
							raise
						del e
					show_unmerge("<<<", "", file_type, obj)
				elif pkgfiles[objkey][0] == "fif":
					if not stat.S_ISFIFO(lstatobj[stat.ST_MODE]):
						show_unmerge("---", "!fif", file_type, obj)
						continue
					# FIFOs and device nodes are reported but never unlinked.
					show_unmerge("---", "", file_type, obj)
				elif pkgfiles[objkey][0] == "dev":
					show_unmerge("---", "", file_type, obj)

			# Remove directories deepest-first so children go before parents.
			mydirs.sort()
			mydirs.reverse()

			for obj in mydirs:
				try:
					if bsd_chflags:
						lstatobj = os.lstat(obj)
						if lstatobj.st_flags != 0:
							bsd_chflags.lchflags(obj, 0)
						parent_name = os.path.dirname(obj)
						# Use normal stat/chflags for the parent since we want to
						# follow any symlinks to the real parent directory.
						pflags = os.stat(parent_name).st_flags
						if pflags != 0:
							bsd_chflags.chflags(parent_name, 0)
					try:
						os.rmdir(obj)
					finally:
						if bsd_chflags and pflags != 0:
							# Restore the parent flags we saved before unlinking
							bsd_chflags.chflags(parent_name, pflags)
					show_unmerge("<<<", "", "dir", obj)
				except EnvironmentError, e:
					if e.errno not in ignored_rmdir_errnos:
						raise
					if e.errno != errno.ENOENT:
						show_unmerge("---", "!empty", "dir", obj)
					del e

		# Remove stale entries from config memory.
		if stale_confmem:
			for filename in stale_confmem:
				del cfgfiledict[filename]
			writedict(cfgfiledict, conf_mem_file)

		#remove self from vartree database so that our own virtual gets zapped if we're the last node
		self.vartree.zap(self.mycpv)
1717
1718         def isowner(self, filename, destroot):
1719                 """ 
1720                 Check if a file belongs to this package. This may
1721                 result in a stat call for the parent directory of
1722                 every installed file, since the inode numbers are
1723                 used to work around the problem of ambiguous paths
1724                 caused by symlinked directories. The results of
1725                 stat calls are cached to optimize multiple calls
1726                 to this method.
1727
1728                 @param filename:
1729                 @type filename:
1730                 @param destroot:
1731                 @type destroot:
1732                 @rtype: Boolean
1733                 @returns:
1734                 1. True if this package owns the file.
1735                 2. False if this package does not own the file.
1736                 """
1737                 return bool(self._match_contents(filename, destroot))
1738
1739         def _match_contents(self, filename, destroot):
1740                 """
1741                 The matching contents entry is returned, which is useful
1742                 since the path may differ from the one given by the caller,
1743                 due to symlinks.
1744
1745                 @rtype: String
1746                 @return: the contents entry corresponding to the given path, or False
1747                         if the file is not owned by this package.
1748                 """
1749
1750                 destfile = normalize_path(
1751                         os.path.join(destroot, filename.lstrip(os.path.sep)))
1752
1753                 pkgfiles = self.getcontents()
1754                 if pkgfiles and destfile in pkgfiles:
1755                         return destfile
1756                 if pkgfiles:
1757                         basename = os.path.basename(destfile)
1758                         if self._contents_basenames is None:
1759                                 self._contents_basenames = set(
1760                                         os.path.basename(x) for x in pkgfiles)
1761                         if basename not in self._contents_basenames:
1762                                 # This is a shortcut that, in most cases, allows us to
1763                                 # eliminate this package as an owner without the need
1764                                 # to examine inode numbers of parent directories.
1765                                 return False
1766
1767                         # Use stat rather than lstat since we want to follow
1768                         # any symlinks to the real parent directory.
1769                         parent_path = os.path.dirname(destfile)
1770                         try:
1771                                 parent_stat = os.stat(parent_path)
1772                         except EnvironmentError, e:
1773                                 if e.errno != errno.ENOENT:
1774                                         raise
1775                                 del e
1776                                 return False
1777                         if self._contents_inodes is None:
1778                                 self._contents_inodes = {}
1779                                 parent_paths = set()
1780                                 for x in pkgfiles:
1781                                         p_path = os.path.dirname(x)
1782                                         if p_path in parent_paths:
1783                                                 continue
1784                                         parent_paths.add(p_path)
1785                                         try:
1786                                                 s = os.stat(p_path)
1787                                         except OSError:
1788                                                 pass
1789                                         else:
1790                                                 inode_key = (s.st_dev, s.st_ino)
1791                                                 # Use lists of paths in case multiple
1792                                                 # paths reference the same inode.
1793                                                 p_path_list = self._contents_inodes.get(inode_key)
1794                                                 if p_path_list is None:
1795                                                         p_path_list = []
1796                                                         self._contents_inodes[inode_key] = p_path_list
1797                                                 if p_path not in p_path_list:
1798                                                         p_path_list.append(p_path)
1799                         p_path_list = self._contents_inodes.get(
1800                                 (parent_stat.st_dev, parent_stat.st_ino))
1801                         if p_path_list:
1802                                 for p_path in p_path_list:
1803                                         x = os.path.join(p_path, basename)
1804                                         if x in pkgfiles:
1805                                                 return x
1806
1807                 return False
1808
1809         def _collision_protect(self, srcroot, destroot, mypkglist, mycontents):
1810                         collision_ignore = set([normalize_path(myignore) for myignore in \
1811                                 shlex.split(self.settings.get("COLLISION_IGNORE", ""))])
1812
1813                         showMessage = self._display_merge
1814                         scheduler = self._scheduler
1815                         stopmerge = False
1816                         collisions = []
1817                         destroot = normalize_path(destroot).rstrip(os.path.sep) + \
1818                                 os.path.sep
1819                         showMessage("%s checking %d files for package collisions\n" % \
1820                                 (green("*"), len(mycontents)))
1821                         for i, f in enumerate(mycontents):
1822                                 if i % 1000 == 0 and i != 0:
1823                                         showMessage("%d files checked ...\n" % i)
1824
1825                                 if scheduler is not None and \
1826                                         0 == i % self._file_merge_yield_interval:
1827                                         scheduler.scheduleYield()
1828
1829                                 dest_path = normalize_path(
1830                                         os.path.join(destroot, f.lstrip(os.path.sep)))
1831                                 try:
1832                                         dest_lstat = os.lstat(dest_path)
1833                                 except EnvironmentError, e:
1834                                         if e.errno == errno.ENOENT:
1835                                                 del e
1836                                                 continue
1837                                         elif e.errno == errno.ENOTDIR:
1838                                                 del e
1839                                                 # A non-directory is in a location where this package
1840                                                 # expects to have a directory.
1841                                                 dest_lstat = None
1842                                                 parent_path = dest_path
1843                                                 while len(parent_path) > len(destroot):
1844                                                         parent_path = os.path.dirname(parent_path)
1845                                                         try:
1846                                                                 dest_lstat = os.lstat(parent_path)
1847                                                                 break
1848                                                         except EnvironmentError, e:
1849                                                                 if e.errno != errno.ENOTDIR:
1850                                                                         raise
1851                                                                 del e
1852                                                 if not dest_lstat:
1853                                                         raise AssertionError(
1854                                                                 "unable to find non-directory " + \
1855                                                                 "parent for '%s'" % dest_path)
1856                                                 dest_path = parent_path
1857                                                 f = os.path.sep + dest_path[len(destroot):]
1858                                                 if f in collisions:
1859                                                         continue
1860                                         else:
1861                                                 raise
1862                                 if f[0] != "/":
1863                                         f="/"+f
1864
1865                                 isowned = False
1866                                 full_path = os.path.join(destroot, f.lstrip(os.path.sep))
1867                                 for ver in mypkglist:
1868                                         if ver.isowner(f, destroot):
1869                                                 isowned = True
1870                                                 break
1871                                 if not isowned and self.isprotected(full_path):
1872                                         isowned = True
1873                                 if not isowned:
1874                                         stopmerge = True
1875                                         if collision_ignore:
1876                                                 if f in collision_ignore:
1877                                                         stopmerge = False
1878                                                 else:
1879                                                         for myignore in collision_ignore:
1880                                                                 if f.startswith(myignore + os.path.sep):
1881                                                                         stopmerge = False
1882                                                                         break
1883                                         if stopmerge:
1884                                                 collisions.append(f)
1885                         return collisions
1886
1887         def _lstat_inode_map(self, path_iter):
1888                 """
1889                 Use lstat to create a map of the form:
1890                   {(st_dev, st_ino) : set([path1, path2, ...])}
1891                 Multiple paths may reference the same inode due to hardlinks.
1892                 All lstat() calls are relative to self.myroot.
1893                 """
1894                 root = self.myroot
1895                 inode_map = {}
1896                 for f in path_iter:
1897                         path = os.path.join(root, f.lstrip(os.sep))
1898                         try:
1899                                 st = os.lstat(path)
1900                         except OSError, e:
1901                                 if e.errno not in (errno.ENOENT, errno.ENOTDIR):
1902                                         raise
1903                                 del e
1904                                 continue
1905                         key = (st.st_dev, st.st_ino)
1906                         paths = inode_map.get(key)
1907                         if paths is None:
1908                                 paths = set()
1909                                 inode_map[key] = paths
1910                         paths.add(f)
1911                 return inode_map
1912
1913         def _security_check(self, installed_instances):
1914                 if not installed_instances:
1915                         return 0
1916
1917                 showMessage = self._display_merge
1918                 scheduler = self._scheduler
1919
1920                 file_paths = set()
1921                 for dblnk in installed_instances:
1922                         file_paths.update(dblnk.getcontents())
1923                 inode_map = {}
1924                 real_paths = set()
1925                 for i, path in enumerate(file_paths):
1926
1927                         if scheduler is not None and \
1928                                 0 == i % self._file_merge_yield_interval:
1929                                 scheduler.scheduleYield()
1930
1931                         try:
1932                                 s = os.lstat(path)
1933                         except OSError, e:
1934                                 if e.errno not in (errno.ENOENT, errno.ENOTDIR):
1935                                         raise
1936                                 del e
1937                                 continue
1938                         if not stat.S_ISREG(s.st_mode):
1939                                 continue
1940                         path = os.path.realpath(path)
1941                         if path in real_paths:
1942                                 continue
1943                         real_paths.add(path)
1944                         if s.st_nlink > 1 and \
1945                                 s.st_mode & (stat.S_ISUID | stat.S_ISGID):
1946                                 k = (s.st_dev, s.st_ino)
1947                                 inode_map.setdefault(k, []).append((path, s))
1948                 suspicious_hardlinks = []
1949                 for path_list in inode_map.itervalues():
1950                         path, s = path_list[0]
1951                         if len(path_list) == s.st_nlink:
1952                                 # All hardlinks seem to be owned by this package.
1953                                 continue
1954                         suspicious_hardlinks.append(path_list)
1955                 if not suspicious_hardlinks:
1956                         return 0
1957
1958                 msg = []
1959                 msg.append("suid/sgid file(s) " + \
1960                         "with suspicious hardlink(s):")
1961                 msg.append("")
1962                 for path_list in suspicious_hardlinks:
1963                         for path, s in path_list:
1964                                 msg.append("\t%s" % path)
1965                 msg.append("")
1966                 msg.append("See the Gentoo Security Handbook " + \
1967                         "guide for advice on how to proceed.")
1968
1969                 self._eerror("preinst", msg)
1970
1971                 return 1
1972
1973         def _eerror(self, phase, lines):
1974                 from portage.elog.messages import eerror as _eerror
1975                 if self._scheduler is None:
1976                         for l in lines:
1977                                 _eerror(l, phase=phase, key=self.settings.mycpv)
1978                 else:
1979                         self._scheduler.dblinkElog(self,
1980                                 phase, _eerror, lines)
1981
1982         def treewalk(self, srcroot, destroot, inforoot, myebuild, cleanup=0,
1983                 mydbapi=None, prev_mtimes=None):
1984                 """
1985                 
1986                 This function does the following:
1987                 
1988                 calls self._preserve_libs if FEATURES=preserve-libs
1989                 calls self._collision_protect if FEATURES=collision-protect
1990                 calls doebuild(mydo=pkg_preinst)
1991                 Merges the package to the livefs
1992                 unmerges old version (if required)
1993                 calls doebuild(mydo=pkg_postinst)
1994                 calls env_update
1995                 calls elog_process
1996                 
1997                 @param srcroot: Typically this is ${D}
1998                 @type srcroot: String (Path)
1999                 @param destroot: Path to merge to (usually ${ROOT})
2000                 @type destroot: String (Path)
2001                 @param inforoot: root of the vardb entry ?
2002                 @type inforoot: String (Path)
2003                 @param myebuild: path to the ebuild that we are processing
2004                 @type myebuild: String (Path)
2005                 @param mydbapi: dbapi which is handed to doebuild.
2006                 @type mydbapi: portdbapi instance
2007                 @param prev_mtimes: { Filename:mtime } mapping for env_update
2008                 @type prev_mtimes: Dictionary
2009                 @rtype: Boolean
2010                 @returns:
2011                 1. 0 on success
2012                 2. 1 on failure
2013                 
2014                 secondhand is a list of symlinks that have been skipped due to their target
2015                 not existing; we will merge these symlinks at a later time.
2016                 """
2017
2018                 showMessage = self._display_merge
2019                 scheduler = self._scheduler
2020
2021                 srcroot = normalize_path(srcroot).rstrip(os.path.sep) + os.path.sep
2022                 destroot = normalize_path(destroot).rstrip(os.path.sep) + os.path.sep
2023
2024                 if not os.path.isdir(srcroot):
2025                         showMessage("!!! Directory Not Found: D='%s'\n" % srcroot,
2026                                 level=logging.ERROR, noiselevel=-1)
2027                         return 1
2028
2029                 inforoot_slot_file = os.path.join(inforoot, "SLOT")
2030                 slot = None
2031                 try:
2032                         f = open(inforoot_slot_file)
2033                         try:
2034                                 slot = f.read().strip()
2035                         finally:
2036                                 f.close()
2037                 except EnvironmentError, e:
2038                         if e.errno != errno.ENOENT:
2039                                 raise
2040                         del e
2041
2042                 if slot is None:
2043                         slot = ""
2044
2045                 def eerror(lines):
2046                         self._eerror("preinst", lines)
2047
2048                 if slot != self.settings["SLOT"]:
2049                         showMessage("!!! WARNING: Expected SLOT='%s', got '%s'\n" % \
2050                                 (self.settings["SLOT"], slot), level=logging.WARN)
2051
2052                 if not os.path.exists(self.dbcatdir):
2053                         os.makedirs(self.dbcatdir)
2054
2055                 otherversions = []
2056                 for v in self.vartree.dbapi.cp_list(self.mysplit[0]):
2057                         otherversions.append(v.split("/")[1])
2058
2059                 # filter any old-style virtual matches
2060                 slot_matches = [cpv for cpv in self.vartree.dbapi.match(
2061                         "%s:%s" % (cpv_getkey(self.mycpv), slot)) \
2062                         if cpv_getkey(cpv) == cpv_getkey(self.mycpv)]
2063
2064                 if self.mycpv not in slot_matches and \
2065                         self.vartree.dbapi.cpv_exists(self.mycpv):
2066                         # handle multislot or unapplied slotmove
2067                         slot_matches.append(self.mycpv)
2068
2069                 others_in_slot = []
2070                 from portage import config
2071                 for cur_cpv in slot_matches:
2072                         # Clone the config in case one of these has to be unmerged since
2073                         # we need it to have private ${T} etc... for things like elog.
2074                         others_in_slot.append(dblink(self.cat, catsplit(cur_cpv)[1],
2075                                 self.vartree.root, config(clone=self.settings),
2076                                 vartree=self.vartree, treetype="vartree",
2077                                 scheduler=self._scheduler))
2078
2079                 retval = self._security_check(others_in_slot)
2080                 if retval:
2081                         return retval
2082
2083                 if slot_matches:
2084                         # Used by self.isprotected().
2085                         max_dblnk = None
2086                         max_counter = -1
2087                         for dblnk in others_in_slot:
2088                                 cur_counter = self.vartree.dbapi.cpv_counter(dblnk.mycpv)
2089                                 if cur_counter > max_counter:
2090                                         max_counter = cur_counter
2091                                         max_dblnk = dblnk
2092                         self._installed_instance = max_dblnk
2093
2094                 myfilelist = []
2095                 mylinklist = []
2096                 paths_with_newlines = []
2097                 srcroot_len = len(srcroot)
2098                 def onerror(e):
2099                         raise
2100                 for parent, dirs, files in os.walk(srcroot, onerror=onerror):
2101                         for f in files:
2102                                 file_path = os.path.join(parent, f)
2103                                 relative_path = file_path[srcroot_len:]
2104
2105                                 if "\n" in relative_path:
2106                                         paths_with_newlines.append(relative_path)
2107
2108                                 file_mode = os.lstat(file_path).st_mode
2109                                 if stat.S_ISREG(file_mode):
2110                                         myfilelist.append(relative_path)
2111                                 elif stat.S_ISLNK(file_mode):
2112                                         # Note: os.walk puts symlinks to directories in the "dirs"
2113                                         # list and it does not traverse them since that could lead
2114                                         # to an infinite recursion loop.
2115                                         mylinklist.append(relative_path)
2116
2117                 if paths_with_newlines:
2118                         msg = []
2119                         msg.append("This package installs one or more files containing")
2120                         msg.append("a newline (\\n) character:")
2121                         msg.append("")
2122                         paths_with_newlines.sort()
2123                         for f in paths_with_newlines:
2124                                 msg.append("\t/%s" % (f.replace("\n", "\\n")))
2125                         msg.append("")
2126                         msg.append("package %s NOT merged" % self.mycpv)
2127                         msg.append("")
2128                         eerror(msg)
2129                         return 1
2130
2131                 # If there are no files to merge, and an installed package in the same
2132                 # slot has files, it probably means that something went wrong.
2133                 if self.settings.get("PORTAGE_PACKAGE_EMPTY_ABORT") == "1" and \
2134                         not myfilelist and not mylinklist and others_in_slot:
2135                         installed_files = None
2136                         for other_dblink in others_in_slot:
2137                                 installed_files = other_dblink.getcontents()
2138                                 if not installed_files:
2139                                         continue
2140                                 from textwrap import wrap
2141                                 wrap_width = 72
2142                                 msg = []
2143                                 d = (
2144                                         self.mycpv,
2145                                         other_dblink.mycpv
2146                                 )
2147                                 msg.extend(wrap(("The '%s' package will not install " + \
2148                                         "any files, but the currently installed '%s'" + \
2149                                         " package has the following files: ") % d, wrap_width))
2150                                 msg.append("")
2151                                 msg.extend(sorted(installed_files))
2152                                 msg.append("")
2153                                 msg.append("package %s NOT merged" % self.mycpv)
2154                                 msg.append("")
2155                                 msg.extend(wrap(
2156                                         ("Manually run `emerge --unmerge =%s` " % \
2157                                         other_dblink.mycpv) + "if you really want to " + \
2158                                         "remove the above files. Set " + \
2159                                         "PORTAGE_PACKAGE_EMPTY_ABORT=\"0\" in " + \
2160                                         "/etc/make.conf if you do not want to " + \
2161                                         "abort in cases like this.",
2162                                         wrap_width))
2163                                 eerror(msg)
2164                         if installed_files:
2165                                 return 1
2166
2167                 # check for package collisions
2168                 blockers = None
2169                 if self._blockers is not None:
2170                         # This is only supposed to be called when
2171                         # the vdb is locked, like it is here.
2172                         blockers = self._blockers()
2173                 if blockers is None:
2174                         blockers = []
2175                 collisions = \
2176                         self._collision_protect(srcroot, destroot,
2177                         others_in_slot + blockers, myfilelist + mylinklist)
2178
2179                 # Make sure the ebuild environment is initialized and that ${T}/elog
2180                 # exists for logging of collision-protect eerror messages.
2181                 if myebuild is None:
2182                         myebuild = os.path.join(inforoot, self.pkg + ".ebuild")
2183                 doebuild_environment(myebuild, "preinst", destroot,
2184                         self.settings, 0, 0, mydbapi)
2185                 prepare_build_dirs(destroot, self.settings, cleanup)
2186
2187                 if collisions:
2188                         collision_protect = "collision-protect" in self.settings.features
2189                         protect_owned = "protect-owned" in self.settings.features
2190                         msg = "This package will overwrite one or more files that" + \
2191                         " may belong to other packages (see list below)."
2192                         if not (collision_protect or protect_owned):
2193                                 msg += " Add either \"collision-protect\" or" + \
2194                                 " \"protect-owned\" to FEATURES in" + \
2195                                 " make.conf if you would like the merge to abort" + \
2196                                 " in cases like this. See the make.conf man page for" + \
2197                                 " more information about these features."
2198                         if self.settings.get("PORTAGE_QUIET") != "1":
2199                                 msg += " You can use a command such as" + \
2200                                 " `portageq owners / <filename>` to identify the" + \
2201                                 " installed package that owns a file. If portageq" + \
2202                                 " reports that only one package owns a file then do NOT" + \
2203                                 " file a bug report. A bug report is only useful if it" + \
2204                                 " identifies at least two or more packages that are known" + \
2205                                 " to install the same file(s)." + \
2206                                 " If a collision occurs and you" + \
2207                                 " can not explain where the file came from then you" + \
2208                                 " should simply ignore the collision since there is not" + \
2209                                 " enough information to determine if a real problem" + \
2210                                 " exists. Please do NOT file a bug report at" + \
2211                                 " http://bugs.gentoo.org unless you report exactly which" + \
2212                                 " two packages install the same file(s). Once again," + \
2213                                 " please do NOT file a bug report unless you have" + \
2214                                 " completely understood the above message."
2215
2216                         self.settings["EBUILD_PHASE"] = "preinst"
2217                         from textwrap import wrap
2218                         msg = wrap(msg, 70)
2219                         if collision_protect:
2220                                 msg.append("")
2221                                 msg.append("package %s NOT merged" % self.settings.mycpv)
2222                         msg.append("")
2223                         msg.append("Detected file collision(s):")
2224                         msg.append("")
2225
2226                         for f in collisions:
2227                                 msg.append("\t%s" % \
2228                                         os.path.join(destroot, f.lstrip(os.path.sep)))
2229
2230                         eerror(msg)
2231
2232                         msg = []
2233                         msg.append("")
2234                         msg.append("Searching all installed" + \
2235                                 " packages for file collisions...")
2236                         msg.append("")
2237                         msg.append("Press Ctrl-C to Stop")
2238                         msg.append("")
2239                         eerror(msg)
2240
2241                         owners = self.vartree.dbapi._owners.get_owners(collisions)
2242                         self.vartree.dbapi.flush_cache()
2243
2244                         for pkg, owned_files in owners.iteritems():
2245                                 cpv = pkg.mycpv
2246                                 msg = []
2247                                 msg.append("%s" % cpv)
2248                                 for f in sorted(owned_files):
2249                                         msg.append("\t%s" % os.path.join(destroot,
2250                                                 f.lstrip(os.path.sep)))
2251                                 msg.append("")
2252                                 eerror(msg)
2253
2254                         if not owners:
2255                                 eerror(["None of the installed" + \
2256                                         " packages claim the file(s).", ""])
2257
2258                         # The explanation about the collision and how to solve
2259                         # it may not be visible via a scrollback buffer, especially
2260                         # if the number of file collisions is large. Therefore,
2261                         # show a summary at the end.
2262                         if collision_protect:
2263                                 msg = "Package '%s' NOT merged due to file collisions." % \
2264                                         self.settings.mycpv
2265                         elif protect_owned and owners:
2266                                 msg = "Package '%s' NOT merged due to file collisions." % \
2267                                         self.settings.mycpv
2268                         else:
2269                                 msg = "Package '%s' merged despite file collisions." % \
2270                                         self.settings.mycpv
2271                         msg += " If necessary, refer to your elog " + \
2272                                 "messages for the whole content of the above message."
2273                         eerror(wrap(msg, 70))
2274
2275                         if collision_protect or (protect_owned and owners):
2276                                 return 1
2277
2278                 # The merge process may move files out of the image directory,
2279                 # which causes invalidation of the .installed flag.
2280                 try:
2281                         os.unlink(os.path.join(
2282                                 os.path.dirname(normalize_path(srcroot)), ".installed"))
2283                 except OSError, e:
2284                         if e.errno != errno.ENOENT:
2285                                 raise
2286                         del e
2287
2288                 self.dbdir = self.dbtmpdir
2289                 self.delete()
2290                 ensure_dirs(self.dbtmpdir)
2291
2292                 # run preinst script
2293                 if scheduler is None:
2294                         showMessage(">>> Merging %s to %s\n" % (self.mycpv, destroot))
2295                         a = doebuild(myebuild, "preinst", destroot, self.settings,
2296                                 use_cache=0, tree=self.treetype, mydbapi=mydbapi,
2297                                 vartree=self.vartree)
2298                 else:
2299                         a = scheduler.dblinkEbuildPhase(
2300                                 self, mydbapi, myebuild, "preinst")
2301
2302                 # XXX: Decide how to handle failures here.
2303                 if a != os.EX_OK:
2304                         showMessage("!!! FAILED preinst: "+str(a)+"\n",
2305                                 level=logging.ERROR, noiselevel=-1)
2306                         return a
2307
2308                 # copy "info" files (like SLOT, CFLAGS, etc.) into the database
2309                 for x in os.listdir(inforoot):
2310                         self.copyfile(inforoot+"/"+x)
2311
2312                 # write local package counter for recording
2313                 counter = self.vartree.dbapi.counter_tick(self.myroot, mycpv=self.mycpv)
2314                 lcfile = open(os.path.join(self.dbtmpdir, "COUNTER"),"w")
2315                 lcfile.write(str(counter))
2316                 lcfile.close()
2317
2318                 # open CONTENTS file (possibly overwriting old one) for recording
2319                 outfile = open(os.path.join(self.dbtmpdir, "CONTENTS"),"w")
2320
2321                 self.updateprotect()
2322
2323                 #if we have a file containing previously-merged config file md5sums, grab it.
2324                 conf_mem_file = os.path.join(destroot, CONFIG_MEMORY_FILE)
2325                 cfgfiledict = grabdict(conf_mem_file)
2326                 cfgfiledict_orig = cfgfiledict.copy()
2327                 if "NOCONFMEM" in self.settings:
2328                         cfgfiledict["IGNORE"]=1
2329                 else:
2330                         cfgfiledict["IGNORE"]=0
2331
2332                 # Always behave like --noconfmem is enabled for downgrades
2333                 # so that people who don't know about this option are less
2334                 # likely to get confused when doing upgrade/downgrade cycles.
2335                 pv_split = catpkgsplit(self.mycpv)[1:]
2336                 for other in others_in_slot:
2337                         if pkgcmp(pv_split, catpkgsplit(other.mycpv)[1:]) < 0:
2338                                 cfgfiledict["IGNORE"] = 1
2339                                 break
2340
2341                 # Don't bump mtimes on merge since some application require
2342                 # preservation of timestamps.  This means that the unmerge phase must
2343                 # check to see if file belongs to an installed instance in the same
2344                 # slot.
2345                 mymtime = None
2346
2347                 # set umask to 0 for merging; back up umask, save old one in prevmask (since this is a global change)
2348                 prevmask = os.umask(0)
2349                 secondhand = []
2350
2351                 # we do a first merge; this will recurse through all files in our srcroot but also build up a
2352                 # "second hand" of symlinks to merge later
2353                 if self.mergeme(srcroot, destroot, outfile, secondhand, "", cfgfiledict, mymtime):
2354                         return 1
2355
2356                 # now, it's time for dealing our second hand; we'll loop until we can't merge anymore.  The rest are
2357                 # broken symlinks.  We'll merge them too.
2358                 lastlen = 0
2359                 while len(secondhand) and len(secondhand)!=lastlen:
2360                         # clear the thirdhand.  Anything from our second hand that
2361                         # couldn't get merged will be added to thirdhand.
2362
2363                         thirdhand = []
2364                         if self.mergeme(srcroot, destroot, outfile, thirdhand,
2365                                 secondhand, cfgfiledict, mymtime):
2366                                 return 1
2367
2368                         #swap hands
2369                         lastlen = len(secondhand)
2370
2371                         # our thirdhand now becomes our secondhand.  It's ok to throw
2372                         # away secondhand since thirdhand contains all the stuff that
2373                         # couldn't be merged.
2374                         secondhand = thirdhand
2375
2376                 if len(secondhand):
2377                         # force merge of remaining symlinks (broken or circular; oh well)
2378                         if self.mergeme(srcroot, destroot, outfile, None,
2379                                 secondhand, cfgfiledict, mymtime):
2380                                 return 1
2381                 self._md5_merge_map.clear()
2382
2383                 #restore umask
2384                 os.umask(prevmask)
2385
2386                 #if we opened it, close it
2387                 outfile.flush()
2388                 outfile.close()
2389
2390                 # write out our collection of md5sums
2391                 cfgfiledict.pop("IGNORE", None)
2392                 if cfgfiledict != cfgfiledict_orig:
2393                         ensure_dirs(os.path.dirname(conf_mem_file),
2394                                 gid=portage_gid, mode=02750, mask=02)
2395                         writedict(cfgfiledict, conf_mem_file)
2396
2397                 # These caches are populated during collision-protect and the data
2398                 # they contain is now invalid. It's very important to invalidate
2399                 # the contents_inodes cache so that FEATURES=unmerge-orphans
2400                 # doesn't unmerge anything that belongs to this package that has
2401                 # just been merged.
2402                 for dblnk in others_in_slot:
2403                         dblnk._clear_contents_cache()
2404                 self._clear_contents_cache()
2405
2406                 # If portage is reinstalling itself, remove the old
2407                 # version now since we want to use the temporary
2408                 # PORTAGE_BIN_PATH that will be removed when we return.
2409                 reinstall_self = False
2410                 if self.myroot == "/" and \
2411                         match_from_list(PORTAGE_PACKAGE_ATOM, [self.mycpv]):
2412                         reinstall_self = True
2413
2414                 autoclean = self.settings.get("AUTOCLEAN", "yes") == "yes"
2415                 others_in_slot.append(self)  # self has just been merged
2416                 for dblnk in list(others_in_slot):
2417                         if dblnk is self:
2418                                 continue
2419                         if not (autoclean or dblnk.mycpv == self.mycpv or reinstall_self):
2420                                 continue
2421                         showMessage(">>> Safely unmerging already-installed instance...\n")
2422                         others_in_slot.remove(dblnk) # dblnk will unmerge itself now
2423                         dblnk.unmerge(trimworld=0, ldpath_mtimes=prev_mtimes,
2424                                 others_in_slot=others_in_slot)
2425                         # TODO: Check status and abort if necessary.
2426                         dblnk.delete()
2427                         showMessage(">>> Original instance of package unmerged safely.\n")
2428
2429                 if len(others_in_slot) > 1:
2430                         from portage.output import colorize
2431                         showMessage(colorize("WARN", "WARNING:")
2432                                 + " AUTOCLEAN is disabled.  This can cause serious"
2433                                 + " problems due to overlapping packages.\n",
2434                                 level=logging.WARN, noiselevel=-1)
2435
2436                 # We hold both directory locks.
2437                 self.dbdir = self.dbpkgdir
2438                 self.delete()
2439                 _movefile(self.dbtmpdir, self.dbpkgdir, mysettings=self.settings)
2440
2441                 # Check for file collisions with blocking packages
2442                 # and remove any colliding files from their CONTENTS
2443                 # since they now belong to this package.
2444                 self._clear_contents_cache()
2445                 contents = self.getcontents()
2446                 destroot_len = len(destroot) - 1
2447                 for blocker in blockers:
2448                         self.vartree.dbapi.removeFromContents(blocker, iter(contents),
2449                                 relative_paths=False)
2450
2451                 self.vartree.dbapi._add(self)
2452                 contents = self.getcontents()
2453
2454                 #do postinst script
2455                 self.settings["PORTAGE_UPDATE_ENV"] = \
2456                         os.path.join(self.dbpkgdir, "environment.bz2")
2457                 self.settings.backup_changes("PORTAGE_UPDATE_ENV")
2458                 try:
2459                         if scheduler is None:
2460                                 a = doebuild(myebuild, "postinst", destroot, self.settings,
2461                                         use_cache=0, tree=self.treetype, mydbapi=mydbapi,
2462                                         vartree=self.vartree)
2463                                 if a == os.EX_OK:
2464                                         showMessage(">>> %s %s\n" % (self.mycpv, "merged."))
2465                         else:
2466                                 a = scheduler.dblinkEbuildPhase(
2467                                         self, mydbapi, myebuild, "postinst")
2468                 finally:
2469                         self.settings.pop("PORTAGE_UPDATE_ENV", None)
2470
2471                 # XXX: Decide how to handle failures here.
2472                 if a != os.EX_OK:
2473                         showMessage("!!! FAILED postinst: "+str(a)+"\n",
2474                                 level=logging.ERROR, noiselevel=-1)
2475                         return a
2476
2477                 downgrade = False
2478                 for v in otherversions:
2479                         if pkgcmp(catpkgsplit(self.pkg)[1:], catpkgsplit(v)[1:]) < 0:
2480                                 downgrade = True
2481
2482                 #update environment settings, library paths. DO NOT change symlinks.
2483                 env_update(makelinks=(not downgrade),
2484                         target_root=self.settings["ROOT"], prev_mtimes=prev_mtimes,
2485                         contents=contents, env=self.settings.environ(),
2486                         writemsg_level=self._display_merge)
2487
2488                 return os.EX_OK
2489
	def mergeme(self, srcroot, destroot, outfile, secondhand, stufftomerge, cfgfiledict, thismtime):
		"""
		Merge the package contents from the image directory into the live
		filesystem, recording every merged entry ("sym", "dir", "obj",
		"fif", "dev") in the CONTENTS file and applying CONFIG_PROTECT
		handling for protected config files.  Directories are merged by
		recursing into this same method.

		@param srcroot: Where are we copying files from (usually ${D})
		@type srcroot: String (Path)
		@param destroot: Typically ${ROOT}
		@type destroot: String (Path)
		@param outfile: File to log operations to (the CONTENTS file)
		@type outfile: File Object
		@param secondhand: A list collecting items to merge in pass two
		(symlinks that point to not-yet-existing files that may get merged
		later), or None to force immediate merging of such symlinks
		@type secondhand: List
		@param stufftomerge: Either a directory to merge, or a list of items.
		@type stufftomerge: String or List
		@param cfgfiledict: { File:md5 } mapping for config_protected files,
		plus the special "IGNORE" key reflecting --noconfmem behavior
		@type cfgfiledict: Dictionary
		@param thismtime: mtime to assign to merged entries, or None to
		preserve the source file mtimes (see the caller's comment about
		applications requiring timestamp preservation)
		@type thismtime: Long or None
		@rtype: None or Boolean
		@returns:
		1. True on failure
		2. None otherwise
		
		"""

		showMessage = self._display_merge
		writemsg = self._display_merge
		scheduler = self._scheduler

		# Normalize both roots to have exactly one trailing separator so
		# prefix tests and slicing below behave predictably.
		from os.path import sep, join
		srcroot = normalize_path(srcroot).rstrip(sep) + sep
		destroot = normalize_path(destroot).rstrip(sep) + sep

		# this is supposed to merge a list of files.  There will be 2 forms of argument passing.
		if isinstance(stufftomerge, basestring):
			#A directory is specified.  Figure out protection paths, listdir() it and process it.
			mergelist = os.listdir(join(srcroot, stufftomerge))
			offset = stufftomerge
		else:
			mergelist = stufftomerge
			offset = ""

		for i, x in enumerate(mergelist):

			# Periodically yield to the scheduler (if any) so other tasks
			# can make progress during a long merge.
			if scheduler is not None and \
				0 == i % self._file_merge_yield_interval:
				scheduler.scheduleYield()

			mysrc = join(srcroot, offset, x)
			mydest = join(destroot, offset, x)
			# myrealdest is mydest without the $ROOT prefix (makes a difference if ROOT!="/")
			myrealdest = join(sep, offset, x)
			# stat file once, test using S_* macros many times (faster that way)
			try:
				mystat = os.lstat(mysrc)
			except OSError, e:
				writemsg("\n")
				writemsg(red("!!! ERROR: There appears to be ")+bold("FILE SYSTEM CORRUPTION.")+red(" A file that is listed\n"))
				writemsg(red("!!!        as existing is not capable of being stat'd. If you are using an\n"))
				writemsg(red("!!!        experimental kernel, please boot into a stable one, force an fsck,\n"))
				writemsg(red("!!!        and ensure your filesystem is in a sane state. ")+bold("'shutdown -Fr now'\n"))
				writemsg(red("!!!        File:  ")+str(mysrc)+"\n", noiselevel=-1)
				writemsg(red("!!!        Error: ")+str(e)+"\n", noiselevel=-1)
				return 1

			mymode = mystat[stat.ST_MODE]
			# handy variables; mydest is the target object on the live filesystems;
			# mysrc is the source object in the temporary install dir
			try:
				mydstat = os.lstat(mydest)
				mydmode = mydstat.st_mode
			except OSError, e:
				if e.errno != errno.ENOENT:
					raise
				del e
				#dest file doesn't exist; mydmode of None signals that below
				mydstat = None
				mydmode = None

			if stat.S_ISLNK(mymode):
				# we are merging a symbolic link
				# myabsto: the link target as a path relative to the roots
				# (strip the ${D} prefix if the link points inside the image)
				myabsto = abssymlink(mysrc)
				if myabsto.startswith(srcroot):
					myabsto = myabsto[len(srcroot):]
				myabsto = myabsto.lstrip(sep)
				myto = os.readlink(mysrc)
				if self.settings and self.settings["D"]:
					if myto.startswith(self.settings["D"]):
						myto = myto[len(self.settings["D"]):]
				# myrealto contains the path of the real file to which this symlink points.
				# we can simply test for existence of this file to see if the target has been merged yet
				myrealto = normalize_path(os.path.join(destroot, myabsto))
				if mydmode!=None:
					#destination exists
					if not stat.S_ISLNK(mydmode):
						if stat.S_ISDIR(mydmode):
							# directory in the way: we can't merge a symlink over a directory
							# we won't merge this, continue with next file...
							continue

						if os.path.exists(mysrc) and stat.S_ISDIR(os.stat(mysrc)[stat.ST_MODE]):
							# Kill file blocking installation of symlink to dir #71787
							pass
						elif self.isprotected(mydest):
							# Use md5 of the target in ${D} if it exists...
							try:
								newmd5 = perform_md5(join(srcroot, myabsto))
							except FileNotFound:
								# Maybe the target is merged already.
								try:
									newmd5 = perform_md5(myrealto)
								except FileNotFound:
									newmd5 = None
							mydest = new_protect_filename(mydest, newmd5=newmd5)

				# if secondhand is None it means we're operating in "force" mode and should not create a second hand.
				if (secondhand != None) and (not os.path.exists(myrealto)):
					# either the target directory doesn't exist yet or the target file doesn't exist -- or
					# the target is a broken symlink.  We will add this file to our "second hand" and merge
					# it later.
					secondhand.append(mysrc[len(srcroot):])
					continue
				# unlinking no longer necessary; "movefile" will overwrite symlinks atomically and correctly
				mymtime = movefile(mysrc, mydest, newmtime=thismtime, sstat=mystat, mysettings=self.settings)
				if mymtime != None:
					showMessage(">>> %s -> %s\n" % (mydest, myto))
					outfile.write("sym "+myrealdest+" -> "+myto+" "+str(mymtime)+"\n")
				else:
					showMessage("!!! Failed to move file.\n",
						level=logging.ERROR, noiselevel=-1)
					showMessage("!!! %s -> %s\n" % (mydest, myto),
						level=logging.ERROR, noiselevel=-1)
					return 1
			elif stat.S_ISDIR(mymode):
				# we are merging a directory
				if mydmode != None:
					# destination exists

					if bsd_chflags:
						# Save then clear flags on dest.
						dflags = mydstat.st_flags
						if dflags != 0:
							bsd_chflags.lchflags(mydest, 0)

					if not os.access(mydest, os.W_OK):
						pkgstuff = pkgsplit(self.pkg)
						writemsg("\n!!! Cannot write to '"+mydest+"'.\n", noiselevel=-1)
						writemsg("!!! Please check permissions and directories for broken symlinks.\n")
						writemsg("!!! You may start the merge process again by using ebuild:\n")
						writemsg("!!! ebuild "+self.settings["PORTDIR"]+"/"+self.cat+"/"+pkgstuff[0]+"/"+self.pkg+".ebuild merge\n")
						writemsg("!!! And finish by running this: env-update\n\n")
						return 1

					if stat.S_ISDIR(mydmode) or \
						(stat.S_ISLNK(mydmode) and os.path.isdir(mydest)):
						# a symlink to an existing directory will work for us; keep it:
						showMessage("--- %s/\n" % mydest)
						if bsd_chflags:
							bsd_chflags.lchflags(mydest, dflags)
					else:
						# a non-directory and non-symlink-to-directory.  Won't work for us.  Move out of the way.
						if movefile(mydest, mydest+".backup", mysettings=self.settings) is None:
							return 1
						showMessage("bak %s %s.backup\n" % (mydest, mydest),
							level=logging.ERROR, noiselevel=-1)
						#now create our directory
						if self.settings.selinux_enabled():
							import selinux
							sid = selinux.get_sid(mysrc)
							selinux.secure_mkdir(mydest,sid)
						else:
							os.mkdir(mydest)
						if bsd_chflags:
							bsd_chflags.lchflags(mydest, dflags)
						# copy mode and ownership from the source directory
						os.chmod(mydest, mystat[0])
						os.chown(mydest, mystat[4], mystat[5])
						showMessage(">>> %s/\n" % mydest)
				else:
					#destination doesn't exist
					if self.settings.selinux_enabled():
						import selinux
						sid = selinux.get_sid(mysrc)
						selinux.secure_mkdir(mydest, sid)
					else:
						os.mkdir(mydest)
					# copy mode and ownership from the source directory
					os.chmod(mydest, mystat[0])
					os.chown(mydest, mystat[4], mystat[5])
					showMessage(">>> %s/\n" % mydest)
				outfile.write("dir "+myrealdest+"\n")
				# recurse and merge this directory
				if self.mergeme(srcroot, destroot, outfile, secondhand,
					join(offset, x), cfgfiledict, thismtime):
					return 1
			elif stat.S_ISREG(mymode):
				# we are merging a regular file
				mymd5 = perform_md5(mysrc, calc_prelink=1)
				# calculate config file protection stuff
				mydestdir = os.path.dirname(mydest)
				moveme = 1
				zing = "!!!"
				mymtime = None
				if mydmode != None:
					# destination file exists
					if stat.S_ISDIR(mydmode):
						# install of destination is blocked by an existing directory with the same name
						moveme = 0
						showMessage("!!! %s\n" % mydest,
							level=logging.ERROR, noiselevel=-1)
					elif stat.S_ISREG(mydmode) or (stat.S_ISLNK(mydmode) and os.path.exists(mydest) and stat.S_ISREG(os.stat(mydest)[stat.ST_MODE])):
						cfgprot = 0
						# install of destination is blocked by an existing regular file,
						# or by a symlink to an existing regular file;
						# now, config file management may come into play.
						# we only need to tweak mydest if cfg file management is in play.
						if self.isprotected(mydest):
							# we have a protection path; enable config file management.
							destmd5 = perform_md5(mydest, calc_prelink=1)
							if mymd5 == destmd5:
								#file already in place; simply update mtimes of destination
								moveme = 1
							else:
								if mymd5 == cfgfiledict.get(myrealdest, [None])[0]:
									""" An identical update has previously been
									merged.  Skip it unless the user has chosen
									--noconfmem."""
									moveme = cfgfiledict["IGNORE"]
									cfgprot = cfgfiledict["IGNORE"]
									if not moveme:
										zing = "---"
										# keep the source mtime for the CONTENTS record
										mymtime = long(mystat.st_mtime)
								else:
									moveme = 1
									cfgprot = 1
							if moveme:
								# Merging a new file, so update confmem.
								cfgfiledict[myrealdest] = [mymd5]
							elif destmd5 == cfgfiledict.get(myrealdest, [None])[0]:
								"""A previously remembered update has been
								accepted, so it is removed from confmem."""
								del cfgfiledict[myrealdest]
						if cfgprot:
							# redirect the merge to a ._cfg#### protect file
							mydest = new_protect_filename(mydest, newmd5=mymd5)

				# whether config protection or not, we merge the new file the
				# same way.  Unless moveme=0 (blocking directory)
				if moveme:
					# Track previously merged files with identical md5, size,
					# mode and ownership; movefile() receives them as
					# hardlink candidates.
					hardlink_key = (mymd5, mystat.st_size,
						mystat.st_mode, mystat.st_uid, mystat.st_gid)
					hardlink_candidates = self._md5_merge_map.get(hardlink_key)
					if hardlink_candidates is None:
						hardlink_candidates = []
						self._md5_merge_map[hardlink_key] = hardlink_candidates
					mymtime = movefile(mysrc, mydest, newmtime=thismtime,
						sstat=mystat, mysettings=self.settings,
						hardlink_candidates=hardlink_candidates)
					if mymtime is None:
						return 1
					hardlink_candidates.append(mydest)
					zing = ">>>"

				if mymtime != None:
					outfile.write("obj "+myrealdest+" "+mymd5+" "+str(mymtime)+"\n")
				showMessage("%s %s\n" % (zing,mydest))
			else:
				# we are merging a fifo or device node
				zing = "!!!"
				if mydmode is None:
					# destination doesn't exist
					if movefile(mysrc, mydest, newmtime=thismtime, sstat=mystat, mysettings=self.settings) != None:
						zing = ">>>"
					else:
						return 1
				if stat.S_ISFIFO(mymode):
					outfile.write("fif %s\n" % myrealdest)
				else:
					outfile.write("dev %s\n" % myrealdest)
				showMessage(zing + " " + mydest + "\n")
2770
2771         def merge(self, mergeroot, inforoot, myroot, myebuild=None, cleanup=0,
2772                 mydbapi=None, prev_mtimes=None):
2773                 """
2774                 If portage is reinstalling itself, create temporary
2775                 copies of PORTAGE_BIN_PATH and PORTAGE_PYM_PATH in order
2776                 to avoid relying on the new versions which may be
2777                 incompatible. Register an atexit hook to clean up the
2778                 temporary directories. Pre-load elog modules here since
2779                 we won't be able to later if they get unmerged (happens
2780                 when namespace changes).
2781                 """
2782                 if self.vartree.dbapi._categories is not None:
2783                         self.vartree.dbapi._categories = None
2784                 if self.myroot == "/" and \
2785                         match_from_list(PORTAGE_PACKAGE_ATOM, [self.mycpv]) and \
2786                         not self.vartree.dbapi.cpv_exists(self.mycpv):
2787                         settings = self.settings
2788                         base_path_orig = os.path.dirname(settings["PORTAGE_BIN_PATH"])
2789                         from tempfile import mkdtemp
2790
2791                         # Make the temp directory inside PORTAGE_TMPDIR since, unlike
2792                         # /tmp, it can't be mounted with the "noexec" option.
2793                         base_path_tmp = mkdtemp("", "._portage_reinstall_.",
2794                                 settings["PORTAGE_TMPDIR"])
2795                         from portage.process import atexit_register
2796                         atexit_register(shutil.rmtree, base_path_tmp)
2797                         dir_perms = 0755
2798                         for subdir in "bin", "pym":
2799                                 var_name = "PORTAGE_%s_PATH" % subdir.upper()
2800                                 var_orig = settings[var_name]
2801                                 var_new = os.path.join(base_path_tmp, subdir)
2802                                 settings[var_name] = var_new
2803                                 settings.backup_changes(var_name)
2804                                 shutil.copytree(var_orig, var_new, symlinks=True)
2805                                 os.chmod(var_new, dir_perms)
2806                         os.chmod(base_path_tmp, dir_perms)
2807                         # This serves so pre-load the modules.
2808                         elog_process(self.mycpv, self.settings,
2809                                 phasefilter=filter_mergephases)
2810
2811                 return self._merge(mergeroot, inforoot,
2812                                 myroot, myebuild=myebuild, cleanup=cleanup,
2813                                 mydbapi=mydbapi, prev_mtimes=prev_mtimes)
2814
2815         def _merge(self, mergeroot, inforoot, myroot, myebuild=None, cleanup=0,
2816                 mydbapi=None, prev_mtimes=None):
2817                 retval = -1
2818                 self.lockdb()
2819                 try:
2820                         retval = self.treewalk(mergeroot, myroot, inforoot, myebuild,
2821                                 cleanup=cleanup, mydbapi=mydbapi, prev_mtimes=prev_mtimes)
2822
2823                         # Process ebuild logfiles
2824                         elog_process(self.mycpv, self.settings, phasefilter=filter_mergephases)
2825                         if retval == os.EX_OK and "noclean" not in self.settings.features:
2826                                 if myebuild is None:
2827                                         myebuild = os.path.join(inforoot, self.pkg + ".ebuild")
2828
2829                                 if self._scheduler is None:
2830                                         doebuild(myebuild, "clean", myroot,
2831                                                 self.settings, tree=self.treetype,
2832                                                 mydbapi=mydbapi, vartree=self.vartree)
2833                                 else:
2834                                         self._scheduler.dblinkEbuildPhase(
2835                                                 self, mydbapi, myebuild, "clean")
2836                 finally:
2837                         self.unlockdb()
2838                 return retval
2839
2840         def getstring(self,name):
2841                 "returns contents of a file with whitespace converted to spaces"
2842                 if not os.path.exists(self.dbdir+"/"+name):
2843                         return ""
2844                 myfile = open(self.dbdir+"/"+name,"r")
2845                 mydata = myfile.read().split()
2846                 myfile.close()
2847                 return " ".join(mydata)
2848
2849         def copyfile(self,fname):
2850                 shutil.copyfile(fname,self.dbdir+"/"+os.path.basename(fname))
2851
2852         def getfile(self,fname):
2853                 if not os.path.exists(self.dbdir+"/"+fname):
2854                         return ""
2855                 myfile = open(self.dbdir+"/"+fname,"r")
2856                 mydata = myfile.read()
2857                 myfile.close()
2858                 return mydata
2859
2860         def setfile(self,fname,data):
2861                 write_atomic(os.path.join(self.dbdir, fname), data)
2862
2863         def getelements(self,ename):
2864                 if not os.path.exists(self.dbdir+"/"+ename):
2865                         return []
2866                 myelement = open(self.dbdir+"/"+ename,"r")
2867                 mylines = myelement.readlines()
2868                 myreturn = []
2869                 for x in mylines:
2870                         for y in x[:-1].split():
2871                                 myreturn.append(y)
2872                 myelement.close()
2873                 return myreturn
2874
2875         def setelements(self,mylist,ename):
2876                 myelement = open(self.dbdir+"/"+ename,"w")
2877                 for x in mylist:
2878                         myelement.write(x+"\n")
2879                 myelement.close()
2880
2881         def isregular(self):
2882                 "Is this a regular package (does it have a CATEGORY file?  A dblink can be virtual *and* regular)"
2883                 return os.path.exists(os.path.join(self.dbdir, "CATEGORY"))
2884
def write_contents(contents, root, f):
	"""
	Serialize a CONTENTS dict to any file-like object, sorted by path.
	The file will be left open for the caller to close.
	"""
	# Keep one leading os.sep on each path when stripping the root.
	prefix_len = len(root) - 1
	for abs_path in sorted(contents):
		entry = contents[abs_path]
		kind = entry[0]
		rel_path = abs_path[prefix_len:]
		if kind == "obj":
			_, mtime, md5sum = entry
			line = "%s %s %s %s\n" % (kind, rel_path, md5sum, mtime)
		elif kind == "sym":
			_, mtime, link = entry
			line = "%s %s -> %s %s\n" % (kind, rel_path, link, mtime)
		else:
			# dir, dev and fif entries carry no extra fields.
			line = "%s %s\n" % (kind, rel_path)
		f.write(line)
2905
2906 def tar_contents(contents, root, tar, protect=None, onProgress=None):
2907         from portage.util import normalize_path
2908         import tarfile
2909         root = normalize_path(root).rstrip(os.path.sep) + os.path.sep
2910         id_strings = {}
2911         maxval = len(contents)
2912         curval = 0
2913         if onProgress:
2914                 onProgress(maxval, 0)
2915         paths = contents.keys()
2916         paths.sort()
2917         for path in paths:
2918                 curval += 1
2919                 try:
2920                         lst = os.lstat(path)
2921                 except OSError, e:
2922                         if e.errno != errno.ENOENT:
2923                                 raise
2924                         del e
2925                         if onProgress:
2926                                 onProgress(maxval, curval)
2927                         continue
2928                 contents_type = contents[path][0]
2929                 if path.startswith(root):
2930                         arcname = path[len(root):]
2931                 else:
2932                         raise ValueError("invalid root argument: '%s'" % root)
2933                 live_path = path
2934                 if 'dir' == contents_type and \
2935                         not stat.S_ISDIR(lst.st_mode) and \
2936                         os.path.isdir(live_path):
2937                         # Even though this was a directory in the original ${D}, it exists
2938                         # as a symlink to a directory in the live filesystem.  It must be
2939                         # recorded as a real directory in the tar file to ensure that tar
2940                         # can properly extract it's children.
2941                         live_path = os.path.realpath(live_path)
2942                 tarinfo = tar.gettarinfo(live_path, arcname)
2943                 # store numbers instead of real names like tar's --numeric-owner
2944                 tarinfo.uname = id_strings.setdefault(tarinfo.uid, str(tarinfo.uid))
2945                 tarinfo.gname = id_strings.setdefault(tarinfo.gid, str(tarinfo.gid))
2946
2947                 if stat.S_ISREG(lst.st_mode):
2948                         # break hardlinks due to bug #185305
2949                         tarinfo.type = tarfile.REGTYPE
2950                         if protect and protect(path):
2951                                 # Create an empty file as a place holder in order to avoid
2952                                 # potential collision-protect issues.
2953                                 tarinfo.size = 0
2954                                 tar.addfile(tarinfo)
2955                         else:
2956                                 f = open(path)
2957                                 try:
2958                                         tar.addfile(tarinfo, f)
2959                                 finally:
2960                                         f.close()
2961                 else:
2962                         tar.addfile(tarinfo)
2963                 if onProgress:
2964                         onProgress(maxval, curval)