--- /dev/null
+from dbapi import dbapi
+from portage import settings, config, auxdbkeys, auxdbkeylen, doebuild, eapi_is_supported, flatten, listdir
+from portage_data import portage_gid
+from portage_util import ensure_dirs, apply_recursive_permissions, writemsg
+from portage_versions import pkgsplit, catpkgsplit, best
+from portage_dep import use_reduce, paren_reduce, dep_expand, dep_getkey, dep_getslot, match_from_list, match_to_list
+from portage_manifest import Manifest
+from output import red
+
+import eclass_cache, portage_exception, portage_gpg, portage_locks, portage_checksum
+import errno, os, stat, sys
+from cache.cache_errors import CacheError
+
+def close_portdbapi_caches():
+ for i in portdbapi.portdbapi_instances:
+ i.close_caches()
+
+
+class portdbapi(dbapi):
+	"""This tree scans the ebuild repository located at porttree_root (passed
+	to __init__), plus any overlays listed in PORTDIR_OVERLAY."""
+ portdbapi_instances = []
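+	# Usage sketch (illustrative, not part of this patch; assumes Portage's
+	# global settings and PORTDIR are already initialized):
+	#	portdb = portdbapi(settings["PORTDIR"])
+	#	ebuild_path, tree = portdb.findname2("sys-apps/foo-1.0")
+	#	slot, depend = portdb.aux_get("sys-apps/foo-1.0", ["SLOT", "DEPEND"])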
+
+ def __init__(self,porttree_root,mysettings=None):
+ portdbapi.portdbapi_instances.append(self)
+
+ if mysettings:
+ self.mysettings = mysettings
+ else:
+ global settings
+ self.mysettings = config(clone=settings)
+
+ # This is strictly for use in aux_get() doebuild calls when metadata
+ # is generated by the depend phase. It's safest to use a clone for
+ # this purpose because doebuild makes many changes to the config
+ # instance that is passed in.
+ self.doebuild_settings = config(clone=self.mysettings)
+
+ self.manifestVerifyLevel = None
+ self.manifestVerifier = None
+ self.manifestCache = {} # {location: [stat, md5]}
+ self.manifestMissingCache = []
+
+ if "gpg" in self.mysettings.features:
+ self.manifestVerifyLevel = portage_gpg.EXISTS
+ if "strict" in self.mysettings.features:
+ self.manifestVerifyLevel = portage_gpg.MARGINAL
+ self.manifestVerifier = portage_gpg.FileChecker(self.mysettings["PORTAGE_GPG_DIR"], "gentoo.gpg", minimumTrust=self.manifestVerifyLevel)
+ elif "severe" in self.mysettings.features:
+ self.manifestVerifyLevel = portage_gpg.TRUSTED
+ self.manifestVerifier = portage_gpg.FileChecker(self.mysettings["PORTAGE_GPG_DIR"], "gentoo.gpg", requireSignedRing=True, minimumTrust=self.manifestVerifyLevel)
+ else:
+ self.manifestVerifier = portage_gpg.FileChecker(self.mysettings["PORTAGE_GPG_DIR"], "gentoo.gpg", minimumTrust=self.manifestVerifyLevel)
+
+ #self.root=settings["PORTDIR"]
+ self.porttree_root = os.path.realpath(porttree_root)
+
+ self.depcachedir = self.mysettings.depcachedir[:]
+
+ self.tmpfs = self.mysettings["PORTAGE_TMPFS"]
+ if self.tmpfs and not os.path.exists(self.tmpfs):
+ self.tmpfs = None
+ if self.tmpfs and not os.access(self.tmpfs, os.W_OK):
+ self.tmpfs = None
+ if self.tmpfs and not os.access(self.tmpfs, os.R_OK):
+ self.tmpfs = None
+
+ self.eclassdb = eclass_cache.cache(self.porttree_root,
+ overlays=self.mysettings["PORTDIR_OVERLAY"].split())
+
+ self.metadb = {}
+ self.metadbmodule = self.mysettings.load_best_module("portdbapi.metadbmodule")
+
+ #if the portdbapi is "frozen", then we assume that we can cache everything (that no updates to it are happening)
+ self.xcache={}
+ self.frozen=0
+
+ self.porttrees = [self.porttree_root] + \
+ [os.path.realpath(t) for t in self.mysettings["PORTDIR_OVERLAY"].split()]
+ self.auxdbmodule = self.mysettings.load_best_module("portdbapi.auxdbmodule")
+ self.auxdb = {}
+ self._init_cache_dirs()
+ # XXX: REMOVE THIS ONCE UNUSED_0 IS YANKED FROM auxdbkeys
+ # ~harring
+ filtered_auxdbkeys = filter(lambda x: not x.startswith("UNUSED_0"), auxdbkeys)
+ for x in self.porttrees:
+ # location, label, auxdbkeys
+ self.auxdb[x] = self.auxdbmodule(self.depcachedir, x, filtered_auxdbkeys, gid=portage_gid)
+ self._gvisible_aux_cache = {}
+
+ def _init_cache_dirs(self):
+ """Create /var/cache/edb/dep and adjust permissions for the portage
+ group."""
+
+ dirmode = 02070
+ filemode = 060
+ modemask = 02
+
+ try:
+ for mydir in (self.depcachedir,):
+ if ensure_dirs(mydir, gid=portage_gid, mode=dirmode, mask=modemask):
+ writemsg("Adjusting permissions recursively: '%s'\n" % mydir,
+ noiselevel=-1)
+ def onerror(e):
+ raise # bail out on the first error that occurs during recursion
+ if not apply_recursive_permissions(mydir,
+ gid=portage_gid, dirmode=dirmode, dirmask=modemask,
+ filemode=filemode, filemask=modemask, onerror=onerror):
+ raise portage_exception.OperationNotPermitted(
+ "Failed to apply recursive permissions for the portage group.")
+ except portage_exception.PortageException, e:
+ pass
+
+ def close_caches(self):
+ for x in self.auxdb.keys():
+ self.auxdb[x].sync()
+ self.auxdb.clear()
+
+ def flush_cache(self):
+ self.metadb = {}
+ self.auxdb = {}
+
+ def finddigest(self,mycpv):
+ try:
+ mydig = self.findname2(mycpv)[0]
+ mydigs = mydig.split("/")[:-1]
+ mydig = "/".join(mydigs)
+
+ mysplit = mycpv.split("/")
+ except SystemExit, e:
+ raise
+ except:
+ return ""
+ return mydig+"/files/digest-"+mysplit[-1]
+
+ def findname(self,mycpv):
+ return self.findname2(mycpv)[0]
+
+ def findname2(self, mycpv, mytree=None):
+		"""
+		Returns the location of the CPV, and what overlay it was in.
+		Overlays are searched first, then PORTDIR; this lets us return the
+		first matching file immediately. If PORTDIR were searched first, we
+		would still have to search every overlay to learn whether an overlay
+		version shadows the PORTDIR one.
+		"""
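+		# Illustrative: findname2("app-editors/vim-7.0") might return
+		# ("/usr/portage/app-editors/vim/vim-7.0.ebuild", "/usr/portage"),
+		# and (None, 0) when no readable ebuild is found.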
+ if not mycpv:
+ return "",0
+ mysplit=mycpv.split("/")
+ psplit=pkgsplit(mysplit[1])
+
+ if mytree:
+ mytrees = [mytree]
+ else:
+ mytrees = self.porttrees[:]
+ mytrees.reverse()
+ if psplit:
+ for x in mytrees:
+ file=x+"/"+mysplit[0]+"/"+psplit[0]+"/"+mysplit[1]+".ebuild"
+ if os.access(file, os.R_OK):
+					return file, x
+ return None, 0
+
+	def aux_get(self, mycpv, mylist, mytree=None):
+		"""Return auxiliary db information, such as SLOT, DEPEND, etc.
+		input: "sys-apps/foo-1.0", ["SLOT","DEPEND","HOMEPAGE"]
+		return: ["0", ">=sys-libs/bar-1.0", "http://www.foo.com"],
+		or raise KeyError on error"""
+ cat,pkg = mycpv.split("/", 1)
+
+ myebuild, mylocation = self.findname2(mycpv, mytree)
+
+ if not myebuild:
+ writemsg("!!! aux_get(): ebuild path for '%(cpv)s' not specified:\n" % {"cpv":mycpv},
+ noiselevel=1)
+ writemsg("!!! %s\n" % myebuild, noiselevel=1)
+ raise KeyError(mycpv)
+
+ myManifestPath = "/".join(myebuild.split("/")[:-1])+"/Manifest"
+ if "gpg" in self.mysettings.features:
+ try:
+ mys = portage_gpg.fileStats(myManifestPath)
+ if (myManifestPath in self.manifestCache) and \
+ (self.manifestCache[myManifestPath] == mys):
+ pass
+ elif self.manifestVerifier:
+ if not self.manifestVerifier.verify(myManifestPath):
+ # Verification failed the desired level.
+ raise portage_exception.UntrustedSignature, "Untrusted Manifest: %(manifest)s" % {"manifest":myManifestPath}
+
+ if ("severe" in self.mysettings.features) and \
+ (mys != portage_gpg.fileStats(myManifestPath)):
+ raise portage_exception.SecurityViolation, "Manifest changed: %(manifest)s" % {"manifest":myManifestPath}
+
+ except portage_exception.InvalidSignature, e:
+ if ("strict" in self.mysettings.features) or \
+ ("severe" in self.mysettings.features):
+ raise
+ writemsg("!!! INVALID MANIFEST SIGNATURE DETECTED: %(manifest)s\n" % {"manifest":myManifestPath})
+ except portage_exception.MissingSignature, e:
+ if ("severe" in self.mysettings.features):
+ raise
+ if ("strict" in self.mysettings.features):
+ if myManifestPath not in self.manifestMissingCache:
+ writemsg("!!! WARNING: Missing signature in: %(manifest)s\n" % {"manifest":myManifestPath})
+ self.manifestMissingCache.insert(0,myManifestPath)
+ except (OSError,portage_exception.FileNotFound), e:
+ if ("strict" in self.mysettings.features) or \
+ ("severe" in self.mysettings.features):
+ raise portage_exception.SecurityViolation, "Error in verification of signatures: %(errormsg)s" % {"errormsg":str(e)}
+				writemsg("!!! Manifest is missing or inaccessible: %(manifest)s\n" % {"manifest":myManifestPath},
+					noiselevel=-1)
+
+
+ if os.access(myebuild, os.R_OK):
+ emtime=os.stat(myebuild)[stat.ST_MTIME]
+ else:
+ writemsg("!!! aux_get(): ebuild for '%(cpv)s' does not exist at:\n" % {"cpv":mycpv},
+ noiselevel=-1)
+ writemsg("!!! %s\n" % myebuild,
+ noiselevel=-1)
+			raise KeyError(mycpv)
+
+ try:
+ mydata = self.auxdb[mylocation][mycpv]
+ if emtime != long(mydata.get("_mtime_", 0)):
+ doregen = True
+ elif len(mydata.get("_eclasses_", [])) > 0:
+ doregen = not self.eclassdb.is_eclass_data_valid(mydata["_eclasses_"])
+ else:
+ doregen = False
+
+ except KeyError:
+ doregen = True
+ except CacheError:
+ doregen = True
+ try: del self.auxdb[mylocation][mycpv]
+ except KeyError: pass
+
+ writemsg("auxdb is valid: "+str(not doregen)+" "+str(pkg)+"\n", 2)
+
+ if doregen:
+ writemsg("doregen: %s %s\n" % (doregen,mycpv), 2)
+ writemsg("Generating cache entry(0) for: "+str(myebuild)+"\n",1)
+
+ if self.tmpfs:
+ mydbkey = self.tmpfs+"/aux_db_key_temp"
+ else:
+ mydbkey = self.depcachedir+"/aux_db_key_temp"
+
+ mylock = None
+ try:
+ mylock = portage_locks.lockfile(mydbkey, wantnewlockfile=1)
+ try:
+ os.unlink(mydbkey)
+ except (IOError, OSError), e:
+ if e.errno != errno.ENOENT:
+ raise
+ del e
+
+ self.doebuild_settings.reset()
+ myret = doebuild(myebuild, "depend", "/",
+ self.doebuild_settings, dbkey=mydbkey, tree="porttree",
+ mydbapi=self)
+ if myret != os.EX_OK:
+ #depend returned non-zero exit code...
+ writemsg((red("\naux_get():") + \
+ " (0) Error in '%s'. (%s)\n" + \
+ " Check for syntax error or " + \
+ "corruption in the ebuild. (--debug)\n\n") % \
+ (myebuild, myret), noiselevel=-1)
+ raise KeyError(mycpv)
+
+ try:
+ mycent = open(mydbkey, "r")
+ os.unlink(mydbkey)
+ mylines = mycent.readlines()
+ mycent.close()
+ except (IOError, OSError):
+ writemsg((red("\naux_get():") + \
+ " (1) Error in '%s' ebuild.\n" + \
+ " Check for syntax error or " + \
+ "corruption in the ebuild. (--debug)\n\n") % myebuild,
+ noiselevel=-1)
+ raise KeyError(mycpv)
+ finally:
+ if mylock:
+ portage_locks.unlockfile(mylock)
+
+ mydata = {}
+ for x in range(0,len(mylines)):
+ if mylines[x][-1] == '\n':
+ mylines[x] = mylines[x][:-1]
+ mydata[auxdbkeys[x]] = mylines[x]
+
+ if "EAPI" not in mydata or not mydata["EAPI"].strip():
+ mydata["EAPI"] = "0"
+
+ if not eapi_is_supported(mydata["EAPI"]):
+ # if newer version, wipe everything and negate eapi
+ eapi = mydata["EAPI"]
+ mydata = {}
+ map(lambda x:mydata.setdefault(x, ""), auxdbkeys)
+ mydata["EAPI"] = "-"+eapi
+
+ if mydata.get("INHERITED", False):
+ mydata["_eclasses_"] = self.eclassdb.get_eclass_data(mydata["INHERITED"].split())
+ else:
+ mydata["_eclasses_"] = {}
+
+ del mydata["INHERITED"]
+
+ mydata["_mtime_"] = emtime
+
+ self.auxdb[mylocation][mycpv] = mydata
+
+ #finally, we look at our internal cache entry and return the requested data.
+ returnme = []
+ for x in mylist:
+ if x == "INHERITED":
+ returnme.append(' '.join(mydata.get("_eclasses_", {}).keys()))
+ else:
+ returnme.append(mydata.get(x,""))
+
+ if "EAPI" in mylist:
+ idx = mylist.index("EAPI")
+ if not returnme[idx]:
+ returnme[idx] = "0"
+
+ return returnme
+
+ def getfetchlist(self, mypkg, useflags=None, mysettings=None, all=0, mytree=None):
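+		# Returns [uri_list, file_list]: the SRC_URI entries that survive
+		# USE-conditional reduction (all of them when all=1), and the unique
+		# distfile basenames they would fetch.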
+ if mysettings is None:
+ mysettings = self.mysettings
+ try:
+ myuris = self.aux_get(mypkg, ["SRC_URI"], mytree=mytree)[0]
+ except (IOError,KeyError):
+ print red("getfetchlist():")+" aux_get() error reading "+mypkg+"; aborting."
+ sys.exit(1)
+
+ if useflags is None:
+ useflags = mysettings["USE"].split()
+
+ myurilist = paren_reduce(myuris)
+ myurilist = use_reduce(myurilist,uselist=useflags,matchall=all)
+ newuris = flatten(myurilist)
+
+ myfiles = []
+ for x in newuris:
+ mya = os.path.basename(x)
+ if not mya in myfiles:
+ myfiles.append(mya)
+ return [newuris, myfiles]
+
+	def getfetchsizes(self,mypkg,useflags=None,debug=0):
+		# returns a filename:size dictionary of the remaining downloads
+ myebuild = self.findname(mypkg)
+ pkgdir = os.path.dirname(myebuild)
+ mf = Manifest(pkgdir, self.mysettings["DISTDIR"])
+ checksums = mf.getDigests()
+ if not checksums:
+ if debug: print "[empty/missing/bad digest]: "+mypkg
+ return None
+ filesdict={}
+ if useflags is None:
+ myuris, myfiles = self.getfetchlist(mypkg,all=1)
+ else:
+ myuris, myfiles = self.getfetchlist(mypkg,useflags=useflags)
+		#XXX: maybe this should be improved further: partial downloads are
+		# already counted below, but their checksums are not verified.
+ for myfile in myfiles:
+ if myfile not in checksums:
+ if debug:
+ writemsg("[bad digest]: missing %s for %s\n" % (myfile, mypkg))
+ continue
+ file_path = os.path.join(self.mysettings["DISTDIR"], myfile)
+ mystat = None
+ try:
+ mystat = os.stat(file_path)
+ except OSError, e:
+ pass
+ if mystat is None:
+ existing_size = 0
+ else:
+ existing_size = mystat.st_size
+ remaining_size = int(checksums[myfile]["size"]) - existing_size
+ if remaining_size > 0:
+ # Assume the download is resumable.
+ filesdict[myfile] = remaining_size
+ elif remaining_size < 0:
+ # The existing file is too large and therefore corrupt.
+ filesdict[myfile] = int(checksums[myfile]["size"])
+ return filesdict
+
+ def fetch_check(self, mypkg, useflags=None, mysettings=None, all=False):
+ if not useflags:
+ if mysettings:
+ useflags = mysettings["USE"].split()
+ myuri, myfiles = self.getfetchlist(mypkg, useflags=useflags, mysettings=mysettings, all=all)
+ myebuild = self.findname(mypkg)
+ pkgdir = os.path.dirname(myebuild)
+ mf = Manifest(pkgdir, self.mysettings["DISTDIR"])
+ mysums = mf.getDigests()
+
+ failures = {}
+ for x in myfiles:
+ if not mysums or x not in mysums:
+ ok = False
+ reason = "digest missing"
+ else:
+ try:
+ ok, reason = portage_checksum.verify_all(
+ os.path.join(self.mysettings["DISTDIR"], x), mysums[x])
+ except portage_exception.FileNotFound, e:
+ ok = False
+ reason = "File Not Found: '%s'" % str(e)
+ if not ok:
+ failures[x] = reason
+ if failures:
+ return False
+ return True
+
+ def getsize(self,mypkg,useflags=None,debug=0):
+ # returns the total size of remaining downloads
+ #
+		# we use getfetchsizes() now, so this function is obsolete
+ #
+ filesdict=self.getfetchsizes(mypkg,useflags=useflags,debug=debug)
+ if filesdict is None:
+ return "[empty/missing/bad digest]"
+ mysize=0
+ for myfile in filesdict.keys():
+			mysize+=filesdict[myfile]
+		return mysize
+
+ def cpv_exists(self,mykey):
+ "Tells us whether an actual ebuild exists on disk (no masking)"
+ cps2=mykey.split("/")
+ cps=catpkgsplit(mykey,silent=0)
+ if not cps:
+ #invalid cat/pkg-v
+ return 0
+ if self.findname(cps[0]+"/"+cps2[1]):
+ return 1
+ else:
+ return 0
+
+ def cp_all(self):
+ "returns a list of all keys in our tree"
+ d={}
+ for x in self.mysettings.categories:
+ for oroot in self.porttrees:
+ for y in listdir(oroot+"/"+x,EmptyOnError=1,ignorecvs=1,dirsonly=1):
+ d[x+"/"+y] = None
+ l = d.keys()
+ l.sort()
+ return l
+
+ def p_list(self,mycp):
+ d={}
+ for oroot in self.porttrees:
+ for x in listdir(oroot+"/"+mycp,EmptyOnError=1,ignorecvs=1):
+ if x[-7:]==".ebuild":
+ d[x[:-7]] = None
+ return d.keys()
+
+ def cp_list(self, mycp, use_cache=1, mytree=None):
+ mysplit=mycp.split("/")
+ d={}
+ if mytree:
+ mytrees = [mytree]
+ else:
+ mytrees = self.porttrees
+ for oroot in mytrees:
+ for x in listdir(oroot+"/"+mycp,EmptyOnError=1,ignorecvs=1):
+ if x[-7:]==".ebuild":
+ d[mysplit[0]+"/"+x[:-7]] = None
+ return d.keys()
+
+ def freeze(self):
+ for x in ["list-visible","bestmatch-visible","match-visible","match-all"]:
+ self.xcache[x]={}
+ self.frozen=1
+
+ def melt(self):
+ self.xcache={}
+ self.frozen=0
+
+ def xmatch(self,level,origdep,mydep=None,mykey=None,mylist=None):
+		"caching match function; very tricky stuff"
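+		# The level argument selects the query, e.g. (illustrative atoms):
+		#	xmatch("match-all", "sys-apps/foo")           -> all cpvs, masked or not
+		#	xmatch("match-visible", ">=sys-apps/foo-1.0") -> unmasked, keyword-visible cpvs
+		#	xmatch("bestmatch-visible", "sys-apps/foo")   -> highest visible cpv, or ""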
+ #if no updates are being made to the tree, we can consult our xcache...
+ if self.frozen:
+ try:
+ return self.xcache[level][origdep]
+ except KeyError:
+ pass
+
+ if not mydep:
+ #this stuff only runs on first call of xmatch()
+ #create mydep, mykey from origdep
+ mydep = dep_expand(origdep, mydb=self, settings=self.mysettings)
+ mykey=dep_getkey(mydep)
+
+ if level=="list-visible":
+ #a list of all visible packages, not called directly (just by xmatch())
+ #myval=self.visible(self.cp_list(mykey))
+ myval=self.gvisible(self.visible(self.cp_list(mykey)))
+ elif level=="bestmatch-visible":
+ #dep match -- best match of all visible packages
+ myval=best(self.xmatch("match-visible",None,mydep=mydep,mykey=mykey))
+ #get all visible matches (from xmatch()), then choose the best one
+ elif level=="bestmatch-list":
+ #dep match -- find best match but restrict search to sublist
+ myval=best(match_from_list(mydep,mylist))
+			#no point in calling xmatch again since we're not caching list deps
+ elif level=="match-list":
+ #dep match -- find all matches but restrict search to sublist (used in 2nd half of visible())
+ myval=match_from_list(mydep,mylist)
+ elif level=="match-visible":
+ #dep match -- find all visible matches
+ myval=match_from_list(mydep,self.xmatch("list-visible",None,mydep=mydep,mykey=mykey))
+ #get all visible packages, then get the matching ones
+ elif level=="match-all":
+ #match *all* visible *and* masked packages
+ myval=match_from_list(mydep,self.cp_list(mykey))
+ else:
+ print "ERROR: xmatch doesn't handle",level,"query!"
+			raise KeyError(level)
+ myslot = dep_getslot(mydep)
+ if myslot is not None:
+ myval = [cpv for cpv in myval \
+ if self.aux_get(cpv, ["SLOT"])[0] == myslot]
+ if self.frozen and (level not in ["match-list","bestmatch-list"]):
+ self.xcache[level][mydep]=myval
+ return myval
+
+ def match(self,mydep,use_cache=1):
+ return self.xmatch("match-visible",mydep)
+
+ def visible(self,mylist):
+		"""Two functions in one. Accepts a list of cpv values and uses both the
+		package.mask and the profile's packages file to remove invisible entries,
+		returning the remaining items. Assumes all entries in mylist have the
+		same category and package name."""
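+		# Illustrative: given ["sys-apps/foo-1.0", "sys-apps/foo-2.0"] with a
+		# package.mask entry ">=sys-apps/foo-2.0" and no matching package.unmask
+		# entry, only ["sys-apps/foo-1.0"] is returned.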
+ if (mylist is None) or (len(mylist)==0):
+ return []
+ newlist=mylist[:]
+ #first, we mask out packages in the package.mask file
+ mykey=newlist[0]
+ cpv=catpkgsplit(mykey)
+ if not cpv:
+ #invalid cat/pkg-v
+ print "visible(): invalid cat/pkg-v:",mykey
+ return []
+ mycp=cpv[0]+"/"+cpv[1]
+ maskdict=self.mysettings.pmaskdict
+ unmaskdict=self.mysettings.punmaskdict
+ if maskdict.has_key(mycp):
+ for x in maskdict[mycp]:
+ mymatches=self.xmatch("match-all",x)
+ if mymatches is None:
+ #error in package.mask file; print warning and continue:
+ print "visible(): package.mask entry \""+x+"\" is invalid, ignoring..."
+ continue
+ for y in mymatches:
+ unmask=0
+ if unmaskdict.has_key(mycp):
+ for z in unmaskdict[mycp]:
+ mymatches_unmask=self.xmatch("match-all",z)
+ if y in mymatches_unmask:
+ unmask=1
+ break
+ if unmask==0:
+ try:
+ newlist.remove(y)
+ except ValueError:
+ pass
+
+ revmaskdict=self.mysettings.prevmaskdict
+ if revmaskdict.has_key(mycp):
+ for x in revmaskdict[mycp]:
+ #important: only match against the still-unmasked entries...
+ #notice how we pass "newlist" to the xmatch() call below....
+ #Without this, ~ deps in the packages files are broken.
+ mymatches=self.xmatch("match-list",x,mylist=newlist)
+ if mymatches is None:
+ #error in packages file; print warning and continue:
+ print "emerge: visible(): profile packages entry \""+x+"\" is invalid, ignoring..."
+ continue
+ pos=0
+ while pos<len(newlist):
+ if newlist[pos] not in mymatches:
+ del newlist[pos]
+ else:
+ pos += 1
+ return newlist
+
+ def gvisible(self,mylist):
+ "strip out group-masked (not in current group) entries"
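+		# Keyword sketch: with ACCEPT_KEYWORDS="x86", KEYWORDS="~x86 amd64" is
+		# dropped (testing-only for x86) while KEYWORDS="x86" is kept; per-atom
+		# package.keywords entries extend pgroups below, and a "-kw" entry
+		# removes a previously accepted "kw".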
+
+ if mylist is None:
+ return []
+ newlist=[]
+
+ accept_keywords = self.mysettings["ACCEPT_KEYWORDS"].split()
+ pkgdict = self.mysettings.pkeywordsdict
+ for mycpv in mylist:
+ #we need to update this next line when we have fully integrated the new db api
+ auxerr=0
+ keys = None
+ eapi = None
+ aux_cache = self._gvisible_aux_cache.get(mycpv)
+ if aux_cache is not None:
+ keys, eapi = aux_cache
+ else:
+ try:
+ keys, eapi = self.aux_get(mycpv, ["KEYWORDS", "EAPI"])
+ except KeyError:
+ pass
+ except portage_exception.PortageException, e:
+ writemsg("!!! Error: aux_get('%s', ['KEYWORDS', 'EAPI'])\n" % mycpv,
+ noiselevel=-1)
+ writemsg("!!! %s\n" % str(e),
+ noiselevel=-1)
+ self._gvisible_aux_cache[mycpv] = (keys, eapi)
+ if not keys:
+ # KEYWORDS=""
+ #print "!!! No KEYWORDS for "+str(mycpv)+" -- Untested Status"
+ continue
+ mygroups=keys.split()
+ # Repoman may modify this attribute as necessary.
+ pgroups = accept_keywords[:]
+ match=0
+ cp = dep_getkey(mycpv)
+ if pkgdict.has_key(cp):
+ matches = match_to_list(mycpv, pkgdict[cp].keys())
+ for atom in matches:
+ pgroups.extend(pkgdict[cp][atom])
+ if matches:
+ inc_pgroups = []
+ for x in pgroups:
+ if x != "-*" and x.startswith("-"):
+ try:
+ inc_pgroups.remove(x[1:])
+ except ValueError:
+ pass
+ if x not in inc_pgroups:
+ inc_pgroups.append(x)
+ pgroups = inc_pgroups
+ del inc_pgroups
+ hasstable = False
+ hastesting = False
+ for gp in mygroups:
+ if gp=="*":
+ writemsg("--- WARNING: Package '%s' uses '*' keyword.\n" % mycpv,
+ noiselevel=-1)
+ match=1
+ break
+ elif gp in pgroups:
+ match=1
+ break
+ elif gp[0] == "~":
+ hastesting = True
+ elif gp[0] != "-":
+ hasstable = True
+ if not match and ((hastesting and "~*" in pgroups) or (hasstable and "*" in pgroups)):
+ match=1
+ if match and eapi_is_supported(eapi):
+ newlist.append(mycpv)
+ return newlist
--- /dev/null
+from dbapi import dbapi
+from portage import settings, db, listdir, dblink
+from portage_const import VDB_PATH
+from portage_versions import pkgsplit, catpkgsplit
+from portage_util import write_atomic, writemsg, writemsg_stdout, grabfile
+from portage_dep import isjustname, isvalidatom, dep_getkey, dep_getslot, \
+ match_from_list, dep_expand
+from portage_update import fixdbentries
+
+import portage_exception
+import commands, os, stat, sys
+
+cptot=0
+class vardbapi(dbapi):
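+	# Database of installed packages, backed by ${ROOT}/var/db/pkg (VDB_PATH).
+	# Usage sketch (illustrative; assumes an initialized Portage environment
+	# so that the settings/vartree defaults below resolve):
+	#	vardb = vardbapi("/")
+	#	installed = vardb.match("sys-apps/foo")
+	#	if installed:
+	#		slot = vardb.aux_get(installed[0], ["SLOT"])[0]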
+ def __init__(self, root, categories=None, settings=None, vartree=None):
+ self.root = root[:]
+ #cache for category directory mtimes
+ self.mtdircache = {}
+ #cache for dependency checks
+ self.matchcache = {}
+ #cache for cp_list results
+ self.cpcache = {}
+ self.blockers = None
+ if settings is None:
+ settings = globals()["settings"]
+ self.settings = settings
+ if categories is None:
+ categories = settings.categories
+ self.categories = categories[:]
+ if vartree is None:
+ vartree = globals()["db"][root]["vartree"]
+ self.vartree = vartree
+
+ def cpv_exists(self,mykey):
+		"Tells us whether the given cpv is actually installed (has a vdb entry)"
+ return os.path.exists(self.root+VDB_PATH+"/"+mykey)
+
+ def cpv_counter(self,mycpv):
+ "This method will grab the COUNTER. Returns a counter value."
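+		# COUNTER records global merge order: each merge gets the next value
+		# from counter_tick_core(), so higher means more recently installed.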
+ cdir=self.root+VDB_PATH+"/"+mycpv
+ cpath=self.root+VDB_PATH+"/"+mycpv+"/COUNTER"
+
+ # We write our new counter value to a new file that gets moved into
+ # place to avoid filesystem corruption on XFS (unexpected reboot.)
+ corrupted=0
+ if os.path.exists(cpath):
+ cfile=open(cpath, "r")
+ try:
+ counter=long(cfile.readline())
+ except ValueError:
+ print "portage: COUNTER for",mycpv,"was corrupted; resetting to value of 0"
+ counter=long(0)
+ corrupted=1
+ cfile.close()
+ elif os.path.exists(cdir):
+ mys = pkgsplit(mycpv)
+ myl = self.match(mys[0],use_cache=0)
+			writemsg("cpv_counter: %s matches %s\n" % (mys, myl), 1)
+ if len(myl) == 1:
+ try:
+ # Only one package... Counter doesn't matter.
+ write_atomic(cpath, "1")
+ counter = 1
+ except SystemExit, e:
+ raise
+ except Exception, e:
+ writemsg("!!! COUNTER file is missing for "+str(mycpv)+" in /var/db.\n",
+ noiselevel=-1)
+ writemsg("!!! Please run /usr/lib/portage/bin/fix-db.py or\n",
+ noiselevel=-1)
+ writemsg("!!! unmerge this exact version.\n", noiselevel=-1)
+ writemsg("!!! %s\n" % e, noiselevel=-1)
+ sys.exit(1)
+ else:
+ writemsg("!!! COUNTER file is missing for "+str(mycpv)+" in /var/db.\n",
+ noiselevel=-1)
+ writemsg("!!! Please run /usr/lib/portage/bin/fix-db.py or\n",
+ noiselevel=-1)
+ writemsg("!!! remerge the package.\n", noiselevel=-1)
+ sys.exit(1)
+ else:
+ counter=long(0)
+ if corrupted:
+ # update new global counter file
+ write_atomic(cpath, str(counter))
+ return counter
+
+ def cpv_inject(self,mycpv):
+ "injects a real package into our on-disk database; assumes mycpv is valid and doesn't already exist"
+ os.makedirs(self.root+VDB_PATH+"/"+mycpv)
+ counter = self.counter_tick(self.root, mycpv=mycpv)
+ # write local package counter so that emerge clean does the right thing
+ write_atomic(os.path.join(self.root, VDB_PATH, mycpv, "COUNTER"), str(counter))
+
+ def isInjected(self,mycpv):
+ if self.cpv_exists(mycpv):
+ if os.path.exists(self.root+VDB_PATH+"/"+mycpv+"/INJECTED"):
+ return True
+ if not os.path.exists(self.root+VDB_PATH+"/"+mycpv+"/CONTENTS"):
+ return True
+ return False
+
+ def move_ent(self,mylist):
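+		# mylist is a profile update entry of the form (illustrative):
+		#	["move", "net-mail/oldname", "net-mail/newname"]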
+ origcp=mylist[1]
+ newcp=mylist[2]
+
+ # sanity check
+ for cp in [origcp,newcp]:
+ if not (isvalidatom(cp) and isjustname(cp)):
+ raise portage_exception.InvalidPackageName(cp)
+ origmatches=self.match(origcp,use_cache=0)
+ if not origmatches:
+ return
+ for mycpv in origmatches:
+ mycpsplit=catpkgsplit(mycpv)
+ mynewcpv=newcp+"-"+mycpsplit[2]
+ mynewcat=newcp.split("/")[0]
+ if mycpsplit[3]!="r0":
+ mynewcpv += "-"+mycpsplit[3]
+ mycpsplit_new = catpkgsplit(mynewcpv)
+ origpath=self.root+VDB_PATH+"/"+mycpv
+ if not os.path.exists(origpath):
+ continue
+ writemsg_stdout("@")
+ if not os.path.exists(self.root+VDB_PATH+"/"+mynewcat):
+ #create the directory
+ os.makedirs(self.root+VDB_PATH+"/"+mynewcat)
+ newpath=self.root+VDB_PATH+"/"+mynewcpv
+ if os.path.exists(newpath):
+ #dest already exists; keep this puppy where it is.
+ continue
+ os.rename(origpath, newpath)
+
+ # We need to rename the ebuild now.
+ old_eb_path = newpath+"/"+mycpsplit[1] +"-"+mycpsplit[2]
+ new_eb_path = newpath+"/"+mycpsplit_new[1]+"-"+mycpsplit[2]
+ if mycpsplit[3] != "r0":
+ old_eb_path += "-"+mycpsplit[3]
+ new_eb_path += "-"+mycpsplit[3]
+ if os.path.exists(old_eb_path+".ebuild"):
+ os.rename(old_eb_path+".ebuild", new_eb_path+".ebuild")
+
+ write_atomic(os.path.join(newpath, "CATEGORY"), mynewcat+"\n")
+ fixdbentries([mylist], newpath)
+
+ def update_ents(self, update_iter):
+ """Run fixdbentries on all installed packages (time consuming). Like
+ fixpackages, this should be run from a helper script and display
+ a progress indicator."""
+ dbdir = os.path.join(self.root, VDB_PATH)
+ for catdir in listdir(dbdir):
+ catdir = dbdir+"/"+catdir
+ if os.path.isdir(catdir):
+ for pkgdir in listdir(catdir):
+ pkgdir = catdir+"/"+pkgdir
+ if os.path.isdir(pkgdir):
+ fixdbentries(update_iter, pkgdir)
+
+ def move_slot_ent(self,mylist):
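+		# mylist is a profile update entry of the form (illustrative):
+		#	["slotmove", "<atom>", "<origslot>", "<newslot>"]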
+ pkg=mylist[1]
+ origslot=mylist[2]
+ newslot=mylist[3]
+
+ if not isvalidatom(pkg):
+ raise portage_exception.InvalidAtom(pkg)
+
+ origmatches=self.match(pkg,use_cache=0)
+
+ if not origmatches:
+ return
+ for mycpv in origmatches:
+ origpath=self.root+VDB_PATH+"/"+mycpv
+ if not os.path.exists(origpath):
+ continue
+
+			slot=grabfile(origpath+"/SLOT")
+			if not slot:
+				continue
+
+			if slot[0]!=origslot:
+				continue
+
+ writemsg_stdout("s")
+ write_atomic(os.path.join(origpath, "SLOT"), newslot+"\n")
+
+ def cp_list(self,mycp,use_cache=1):
+ mysplit=mycp.split("/")
+ if mysplit[0] == '*':
+ mysplit[0] = mysplit[0][1:]
+ try:
+ mystat=os.stat(self.root+VDB_PATH+"/"+mysplit[0])[stat.ST_MTIME]
+ except OSError:
+ mystat=0
+ if use_cache and self.cpcache.has_key(mycp):
+ cpc=self.cpcache[mycp]
+ if cpc[0]==mystat:
+ return cpc[1]
+		mylist=listdir(self.root+VDB_PATH+"/"+mysplit[0],EmptyOnError=1)
+
+		if mylist is None:
+			return []
+		returnme=[]
+		for x in mylist:
+ if x.startswith("."):
+ continue
+ if x[0] == '-':
+ #writemsg(red("INCOMPLETE MERGE:")+str(x[len("-MERGING-"):])+"\n")
+ continue
+ ps=pkgsplit(x)
+ if not ps:
+ self.invalidentry(self.root+VDB_PATH+"/"+mysplit[0]+"/"+x)
+ continue
+ if len(mysplit) > 1:
+ if ps[0]==mysplit[1]:
+ returnme.append(mysplit[0]+"/"+x)
+ if use_cache:
+ self.cpcache[mycp]=[mystat,returnme]
+ elif self.cpcache.has_key(mycp):
+ del self.cpcache[mycp]
+ return returnme
+
+ def cpv_all(self,use_cache=1):
+ returnme=[]
+ basepath = self.root+VDB_PATH+"/"
+
+ for x in self.categories:
+ for y in listdir(basepath+x,EmptyOnError=1):
+ if y.startswith("."):
+ continue
+ subpath = x+"/"+y
+ # -MERGING- should never be a cpv, nor should files.
+ if os.path.isdir(basepath+subpath) and (pkgsplit(y) is not None):
+ returnme += [subpath]
+ return returnme
+
+ def cp_all(self,use_cache=1):
+ mylist = self.cpv_all(use_cache=use_cache)
+ d={}
+ for y in mylist:
+ if y[0] == '*':
+ y = y[1:]
+ mysplit=catpkgsplit(y)
+ if not mysplit:
+ self.invalidentry(self.root+VDB_PATH+"/"+y)
+ continue
+ d[mysplit[0]+"/"+mysplit[1]] = None
+ return d.keys()
+
+ def checkblockers(self,origdep):
+ pass
+
+ def match(self,origdep,use_cache=1):
+ "caching match function"
+ mydep = dep_expand(
+ origdep, mydb=self, use_cache=use_cache, settings=self.settings)
+ mykey=dep_getkey(mydep)
+ mycat=mykey.split("/")[0]
+ if not use_cache:
+ if self.matchcache.has_key(mycat):
+ del self.mtdircache[mycat]
+ del self.matchcache[mycat]
+ mymatch = match_from_list(mydep,
+ self.cp_list(mykey, use_cache=use_cache))
+ myslot = dep_getslot(mydep)
+ if myslot is not None:
+ mymatch = [cpv for cpv in mymatch \
+ if self.aux_get(cpv, ["SLOT"])[0] == myslot]
+ return mymatch
+ try:
+ curmtime=os.stat(self.root+VDB_PATH+"/"+mycat)[stat.ST_MTIME]
+ except SystemExit, e:
+ raise
+ except:
+ curmtime=0
+
+ if not self.matchcache.has_key(mycat) or not self.mtdircache[mycat]==curmtime:
+ # clear cache entry
+ self.mtdircache[mycat]=curmtime
+ self.matchcache[mycat]={}
+ if not self.matchcache[mycat].has_key(mydep):
+ mymatch=match_from_list(mydep,self.cp_list(mykey,use_cache=use_cache))
+ myslot = dep_getslot(mydep)
+ if myslot is not None:
+ mymatch = [cpv for cpv in mymatch \
+ if self.aux_get(cpv, ["SLOT"])[0] == myslot]
+ self.matchcache[mycat][mydep]=mymatch
+ return self.matchcache[mycat][mydep][:]
+
+ def findname(self, mycpv):
+ return self.root+VDB_PATH+"/"+str(mycpv)+"/"+mycpv.split("/")[1]+".ebuild"
+
+ def aux_get(self, mycpv, wants):
+ mydir = os.path.join(self.root, VDB_PATH, mycpv)
+ if not os.path.isdir(mydir):
+ raise KeyError(mycpv)
+ results = []
+ for x in wants:
+ try:
+ myf = open(os.path.join(mydir, x), "r")
+ try:
+ myd = myf.read()
+ finally:
+ myf.close()
+ myd = " ".join(myd.split())
+ except IOError:
+ myd = ""
+ if x == "EAPI" and not myd:
+ results.append("0")
+ else:
+ results.append(myd)
+ return results
+
+ def aux_update(self, cpv, values):
+ cat, pkg = cpv.split("/")
+ mylink = dblink(cat, pkg, self.root, self.settings,
+ treetype="vartree", vartree=self.vartree)
+ if not mylink.exists():
+ raise KeyError(cpv)
+ for k, v in values.iteritems():
+ mylink.setfile(k, v)
+
+ def counter_tick(self,myroot,mycpv=None):
+ return self.counter_tick_core(myroot,incrementing=1,mycpv=mycpv)
+
+ def get_counter_tick_core(self,myroot,mycpv=None):
+ return self.counter_tick_core(myroot,incrementing=0,mycpv=mycpv)+1
+
+ def counter_tick_core(self,myroot,incrementing=1,mycpv=None):
+ "This method will grab the next COUNTER value and record it back to the global file. Returns new counter value."
+ cpath=myroot+"var/cache/edb/counter"
+ changed=0
+ min_counter = 0
+ if mycpv:
+ mysplit = pkgsplit(mycpv)
+ for x in self.match(mysplit[0],use_cache=0):
+ if x==mycpv:
+ continue
+ try:
+ old_counter = long(self.aux_get(x,["COUNTER"])[0])
+ writemsg("COUNTER '%d' '%s'\n" % (old_counter, x),1)
+ except SystemExit, e:
+ raise
+ except:
+ old_counter = 0
+ writemsg("!!! BAD COUNTER in '%s'\n" % (x), noiselevel=-1)
+ if old_counter > min_counter:
+ min_counter = old_counter
+
+ # We write our new counter value to a new file that gets moved into
+ # place to avoid filesystem corruption.
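+		# Fallback: the shell pipeline below scans every installed package's
+		# COUNTER file and prints the largest value, so a missing or corrupt
+		# global counter can be regenerated from the vdb itself.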
+ find_counter = ("find '%s' -type f -name COUNTER | " + \
+ "while read f; do echo $(<\"${f}\"); done | " + \
+ "sort -n | tail -n1") % os.path.join(self.root, VDB_PATH)
+ if os.path.exists(cpath):
+ cfile=open(cpath, "r")
+ try:
+ counter=long(cfile.readline())
+ except (ValueError,OverflowError):
+ try:
+ counter = long(commands.getoutput(find_counter).strip())
+ writemsg("!!! COUNTER was corrupted; resetting to value of %d\n" % counter,
+ noiselevel=-1)
+ changed=1
+ except (ValueError,OverflowError):
+ writemsg("!!! COUNTER data is corrupt in pkg db. The values need to be\n",
+ noiselevel=-1)
+ writemsg("!!! corrected/normalized so that portage can operate properly.\n",
+ noiselevel=-1)
+					writemsg("!!! A simple solution is not yet available so try #gentoo on IRC.\n", noiselevel=-1)
+ sys.exit(2)
+ cfile.close()
+ else:
+ try:
+ counter = long(commands.getoutput(find_counter).strip())
+ writemsg("!!! Global counter missing. Regenerated from counter files to: %s\n" % counter,
+ noiselevel=-1)
+ except SystemExit, e:
+ raise
+ except:
+ writemsg("!!! Initializing global counter.\n", noiselevel=-1)
+ counter=long(0)
+ changed=1
+
+ if counter < min_counter:
+ counter = min_counter+1000
+ changed = 1
+
+ if incrementing or changed:
+
+ #increment counter
+ counter += 1
+ # update new global counter file
+ write_atomic(cpath, str(counter))
+ return counter